Columns (name, dtype, value range):

commit        stringlengths  40 .. 40
old_file      stringlengths  5 .. 117
new_file      stringlengths  5 .. 117
old_contents  stringlengths  0 .. 1.93k
new_contents  stringlengths  19 .. 3.3k
subject       stringlengths  17 .. 320
message       stringlengths  18 .. 3.28k
lang          stringclasses  1 value
license       stringclasses  13 values
repos         stringlengths  7 .. 42.4k
completion    stringlengths  152 .. 6.66k
prompt        stringlengths  21 .. 3.65k
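Each record below stores a commit's metadata, the file contents before and after the change, and a `completion` field that serializes the edit as marker-delimited operations (`<REPLACE_OLD> a <REPLACE_NEW> b <REPLACE_END>`, `<INSERT> b <INSERT_END>`, `<DELETE> a <DELETE_END>`), followed by an `<|endoftext|>` separator and the full post-edit file; the `prompt` field pairs the commit message with the pre-edit contents. As a rough illustration of how a record could be consumed — a minimal sketch inferred from the marker grammar visible in the rows, not an official loader, and `parse_completion` is a name chosen here for illustration — the operations can be split out like this:

import re

# Edit-operation grammar as observed in the `completion` fields below
# (assumed, not documented by the dataset itself):
#   <REPLACE_OLD> a <REPLACE_NEW> b <REPLACE_END>  -- replace a with b
#   <INSERT> b <INSERT_END>                        -- insert b
#   <DELETE> a <DELETE_END>                        -- delete a
_OP_RE = re.compile(
    r"<REPLACE_OLD>(.*?)<REPLACE_NEW>(.*?)<REPLACE_END>"
    r"|<INSERT>(.*?)<INSERT_END>"
    r"|<DELETE>(.*?)<DELETE_END>",
    re.S,
)

def parse_completion(completion):
    """Split a completion into (op, old, new) tuples and the final file text."""
    # Everything after <|endoftext|> is the already-applied new file,
    # useful as a cross-check against the `new_contents` field.
    ops_text, _, new_file = completion.partition("<|endoftext|>")
    ops = []
    for m in _OP_RE.finditer(ops_text):
        if m.group(1) is not None:
            ops.append(("replace", m.group(1), m.group(2)))
        elif m.group(3) is not None:
            ops.append(("insert", "", m.group(3)))
        else:
            ops.append(("delete", m.group(4), ""))
    return ops, new_file

For instance, running `parse_completion(row["completion"])` on the first record below would yield a single replace op whose old text is blank, matching a newly added file.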
80f35ad0d3a6a1f04eb0339bb1088ebe6eb27af5
mongomock/results.py
mongomock/results.py
try:
    from pymongo.results import InsertOneResult
    from pymongo.results import InsertManyResult
    from pymongo.results import UpdateResult
    from pymongo.results import DeleteResult
except ImportError:
    class _WriteResult(object):
        def __init__(self, acknowledged=True):
            self.__acknowledged = acknowledged

        @property
        def acknowledged(self):
            return self.__acknowledged

    class InsertOneResult(_WriteResult):
        __slots__ = ('__inserted_id', '__acknowledged')

        def __init__(self, inserted_id, acknowledged=True):
            self.__inserted_id = inserted_id
            super(InsertOneResult, self).__init__(acknowledged)

        @property
        def inserted_id(self):
            return self.__inserted_id

    class InsertManyResult(_WriteResult):
        __slots__ = ('__inserted_ids', '__acknowledged')

        def __init__(self, inserted_ids, acknowledged=True):
            self.__inserted_ids = inserted_ids
            super(InsertManyResult, self).__init__(acknowledged)

        @property
        def inserted_ids(self):
            return self.__inserted_ids

    class UpdateResult(_WriteResult):
        __slots__ = ('__raw_result', '__acknowledged')

        def __init__(self, raw_result, acknowledged=True):
            self.__raw_result = raw_result
            super(UpdateResult, self).__init__(acknowledged)

        @property
        def raw_result(self):
            return self.__raw_result

        @property
        def matched_count(self):
            if self.upserted_id is not None:
                return 0
            self.__raw_result.get('n', 0)

        @property
        def modified_count(self):
            return self.__raw_result.get('nModified')

        @property
        def upserted_id(self):
            return self.__raw_result.get('upserted')

    class DeleteResult(_WriteResult):
        __slots__ = ('__raw_result', '__acknowledged')

        def __init__(self, raw_result, acknowledged=True):
            self.__raw_result = raw_result
            super(DeleteResult, self).__init__(acknowledged)

        @property
        def raw_result(self):
            return self.__raw_result

        @property
        def deleted_count(self):
            return self.__raw_result.get('n', 0)
Add result classes for update/insert/delete ops
Add result classes for update/insert/delete ops
Python
bsd-3-clause
vmalloc/mongomock,marcinbarczynski/mongomock,mdomke/mongomock,drorasaf/mongomock,magaman384/mongomock,StarfishStorage/mongomock,julianhille/mongomock
<REPLACE_OLD>
<REPLACE_NEW>
try:
    from pymongo.results import InsertOneResult
    from pymongo.results import InsertManyResult
    from pymongo.results import UpdateResult
    from pymongo.results import DeleteResult
except ImportError:
    class _WriteResult(object):
        def __init__(self, acknowledged=True):
            self.__acknowledged = acknowledged

        @property
        def acknowledged(self):
            return self.__acknowledged

    class InsertOneResult(_WriteResult):
        __slots__ = ('__inserted_id', '__acknowledged')

        def __init__(self, inserted_id, acknowledged=True):
            self.__inserted_id = inserted_id
            super(InsertOneResult, self).__init__(acknowledged)

        @property
        def inserted_id(self):
            return self.__inserted_id

    class InsertManyResult(_WriteResult):
        __slots__ = ('__inserted_ids', '__acknowledged')

        def __init__(self, inserted_ids, acknowledged=True):
            self.__inserted_ids = inserted_ids
            super(InsertManyResult, self).__init__(acknowledged)

        @property
        def inserted_ids(self):
            return self.__inserted_ids

    class UpdateResult(_WriteResult):
        __slots__ = ('__raw_result', '__acknowledged')

        def __init__(self, raw_result, acknowledged=True):
            self.__raw_result = raw_result
            super(UpdateResult, self).__init__(acknowledged)

        @property
        def raw_result(self):
            return self.__raw_result

        @property
        def matched_count(self):
            if self.upserted_id is not None:
                return 0
            self.__raw_result.get('n', 0)

        @property
        def modified_count(self):
            return self.__raw_result.get('nModified')

        @property
        def upserted_id(self):
            return self.__raw_result.get('upserted')

    class DeleteResult(_WriteResult):
        __slots__ = ('__raw_result', '__acknowledged')

        def __init__(self, raw_result, acknowledged=True):
            self.__raw_result = raw_result
            super(DeleteResult, self).__init__(acknowledged)

        @property
        def raw_result(self):
            return self.__raw_result

        @property
        def deleted_count(self):
            return self.__raw_result.get('n', 0)
<REPLACE_END>
<|endoftext|>
try:
    from pymongo.results import InsertOneResult
    from pymongo.results import InsertManyResult
    from pymongo.results import UpdateResult
    from pymongo.results import DeleteResult
except ImportError:
    class _WriteResult(object):
        def __init__(self, acknowledged=True):
            self.__acknowledged = acknowledged

        @property
        def acknowledged(self):
            return self.__acknowledged

    class InsertOneResult(_WriteResult):
        __slots__ = ('__inserted_id', '__acknowledged')

        def __init__(self, inserted_id, acknowledged=True):
            self.__inserted_id = inserted_id
            super(InsertOneResult, self).__init__(acknowledged)

        @property
        def inserted_id(self):
            return self.__inserted_id

    class InsertManyResult(_WriteResult):
        __slots__ = ('__inserted_ids', '__acknowledged')

        def __init__(self, inserted_ids, acknowledged=True):
            self.__inserted_ids = inserted_ids
            super(InsertManyResult, self).__init__(acknowledged)

        @property
        def inserted_ids(self):
            return self.__inserted_ids

    class UpdateResult(_WriteResult):
        __slots__ = ('__raw_result', '__acknowledged')

        def __init__(self, raw_result, acknowledged=True):
            self.__raw_result = raw_result
            super(UpdateResult, self).__init__(acknowledged)

        @property
        def raw_result(self):
            return self.__raw_result

        @property
        def matched_count(self):
            if self.upserted_id is not None:
                return 0
            self.__raw_result.get('n', 0)

        @property
        def modified_count(self):
            return self.__raw_result.get('nModified')

        @property
        def upserted_id(self):
            return self.__raw_result.get('upserted')

    class DeleteResult(_WriteResult):
        __slots__ = ('__raw_result', '__acknowledged')

        def __init__(self, raw_result, acknowledged=True):
            self.__raw_result = raw_result
            super(DeleteResult, self).__init__(acknowledged)

        @property
        def raw_result(self):
            return self.__raw_result

        @property
        def deleted_count(self):
            return self.__raw_result.get('n', 0)
Add result classes for update/insert/delete ops
aab9efbcec0bbded807bf207e2324266573fa3a6
tensorflow/python/tf2.py
tensorflow/python/tf2.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to help with the TensorFlow 2.0 transition.

This module is meant for TensorFlow internal implementation, not for
users of the TensorFlow library. For that see tf.compat instead.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

_force_enable = None


def enable():
  """Enables v2 behaviors."""
  global _force_enable
  _force_enable = True


def disable():
  """Disables v2 behaviors."""
  global _force_enable
  _force_enable = False


def enabled():
  """Returns True iff TensorFlow 2.0 behavior should be enabled."""
  if _force_enable is None:
    return os.getenv("TF2_BEHAVIOR", "0") != "0"
  else:
    return _force_enable
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to help with the TensorFlow 2.0 transition.

This module is meant for TensorFlow internal implementation, not for
users of the TensorFlow library. For that see tf.compat instead.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

_force_enable = None


def enable():
  """Enables v2 behaviors."""
  global _force_enable
  _force_enable = True


def disable():
  """Disables v2 behaviors."""
  global _force_enable
  _force_enable = False


def enabled():
  """Returns True iff TensorFlow 2.0 behavior should be enabled."""
  if _force_enable is None:
    return os.getenv("TF2_BEHAVIOR", "0") != "0"
  return _force_enable
Remove the redundant `else` condition.
Remove the redundant `else` condition.

PiperOrigin-RevId: 302901741
Change-Id: I65281a07fc2789fbc13775c1365fd01789a1bb7e
Python
apache-2.0
petewarden/tensorflow,yongtang/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,cxxgtxy/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,yongtang/tensorflow,petewarden/tensorflow,paolodedios/tensorflow,aldian/tensorflow,yongtang/tensorflow,davidzchen/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,karllessard/tensorflow,gunan/tensorflow,sarvex/tensorflow,gunan/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,freedomtan/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,aldian/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,tensorflow/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow,tensorflow/tensorflow,karllessard/tensorflow,gunan/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,freedomtan/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,frreiss/tensorflow-fred,karllessard/tensorflow,aam-at/tensorflow,davidzchen/tensorflow,karllessard/tensorflow,aam-at/tensorflow,frreiss/tensorflow-fred,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,paolodedios/tensorflow,davidzchen/tensorflow,annarev/tensorflow,gautam1858/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,Intel-Corporation/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-pywrap_saved_model,karllessard/tensorflow,aam-at/tensorflow,frreiss/tensorflow-fred,paolodedios/tensorflow,annarev/tensorflow,frreiss/tensorflow-fred,cxxgtxy/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-tensorflow/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,annarev/tensorflow,davidzchen/tensorflow,aam-at/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_saved_model,sarvex/tensorflow,Intel-tensorflow/tensorflow,annarev/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,petewarden/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gunan/tensorflow,freedomtan/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,Intel-Corporation/tensorflow,aam-at/tensorflow,aldian/tensorflow,gunan/tensorflow,paolodedios/tensorflow,davidzchen/tensorflow,freedomtan/tensorflow,yongtang/tensorflow,gautam1858/tensorflow,gunan/tensorflow,annarev/tensorflow,frreiss/tensorflow-fred,yongtang/tensorflow,freedomtan/tensorflow,gunan/tensorflow,sarvex/tensorflow,paolodedios/tensorflow,yongtang/tensorflow,tensorflow/tensorflow,freedomtan/tensorflow,gunan/tensorflow,karllessard/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,gunan/tensorflow,tensorflow/tensorflow,aam-at/tensorflow,aldian/tensorflow,aam-at/tensorflow,aldian/tensorflow,gunan/tensorflow,aam-at/tensorflow,sarvex/tensorflow,karllessard/tensorflow,sarvex/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-pywrap_saved_model,davidzchen/tensorflow,Intel-Corporation/tensorflow,tensorflow/tensorflow,gautam1858/tensorflow,gautam1858/tensorflow,petewarden/tensorflow,frreiss/tensorflow-fred,aldian/tensorflow,freedomtan/tensorflow,frreiss/tensorflow-fred,yongtang/tensorflow,aldian/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_tf_optimizer,tensorflow/tensorflow-pywrap_saved_model,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,karllessard/tensorflow,freedomtan/tensorflow,davidzchen/tensorflow,karllessard/tensorflow,freedomtan/tensorflow,gautam1858/tensorflow,aam-at/tensorflow,Intel-Corporation/tensorflow,davidzchen/tensorflow,freedomtan/tensorflow,yongtang/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_saved_model,Intel-Corporation/tensorflow,karllessard/tensorflow,cxxgtxy/tensorflow,gautam1858/tensorflow,sarvex/tensorflow,frreiss/tensorflow-fred,davidzchen/tensorflow,annarev/tensorflow,tensorflow/tensorflow-pywrap_saved_model,frreiss/tensorflow-fred,petewarden/tensorflow,Intel-Corporation/tensorflow,davidzchen/tensorflow,frreiss/tensorflow-fred,tensorflow/tensorflow-pywrap_saved_model,yongtang/tensorflow,Intel-tensorflow/tensorflow,petewarden/tensorflow,annarev/tensorflow,gautam1858/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,cxxgtxy/tensorflow,petewarden/tensorflow,aldian/tensorflow,tensorflow/tensorflow,yongtang/tensorflow,gunan/tensorflow,aam-at/tensorflow,tensorflow/tensorflow-pywrap_saved_model,annarev/tensorflow,karllessard/tensorflow,frreiss/tensorflow-fred,aam-at/tensorflow,tensorflow/tensorflow-experimental_link_static_libraries_once,tensorflow/tensorflow-experimental_link_static_libraries_once,annarev/tensorflow,Intel-Corporation/tensorflow,gautam1858/tensorflow,annarev/tensorflow,Intel-tensorflow/tensorflow,cxxgtxy/tensorflow,cxxgtxy/tensorflow,tensorflow/tensorflow,gunan/tensorflow,gautam1858/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,petewarden/tensorflow,gunan/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,tensorflow/tensorflow-pywrap_tf_optimizer,yongtang/tensorflow,Intel-tensorflow/tensorflow,aam-at/tensorflow,freedomtan/tensorflow,Intel-tensorflow/tensorflow,paolodedios/tensorflow,Intel-Corporation/tensorflow,paolodedios/tensorflow,davidzchen/tensorflow,petewarden/tensorflow,davidzchen/tensorflow,petewarden/tensorflow,Intel-tensorflow/tensorflow,tensorflow/tensorflow,Intel-tensorflow/tensorflow,cxxgtxy/tensorflow
<REPLACE_OLD> os _force_enable <REPLACE_NEW> os _force_enable <REPLACE_END>
<REPLACE_OLD> "0" else: <REPLACE_NEW> "0" <REPLACE_END>
<|endoftext|>
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to help with the TensorFlow 2.0 transition.

This module is meant for TensorFlow internal implementation, not for
users of the TensorFlow library. For that see tf.compat instead.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

_force_enable = None


def enable():
  """Enables v2 behaviors."""
  global _force_enable
  _force_enable = True


def disable():
  """Disables v2 behaviors."""
  global _force_enable
  _force_enable = False


def enabled():
  """Returns True iff TensorFlow 2.0 behavior should be enabled."""
  if _force_enable is None:
    return os.getenv("TF2_BEHAVIOR", "0") != "0"
  return _force_enable
Remove the redundant `else` condition.

PiperOrigin-RevId: 302901741
Change-Id: I65281a07fc2789fbc13775c1365fd01789a1bb7e

# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tools to help with the TensorFlow 2.0 transition.

This module is meant for TensorFlow internal implementation, not for
users of the TensorFlow library. For that see tf.compat instead.
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os

_force_enable = None


def enable():
  """Enables v2 behaviors."""
  global _force_enable
  _force_enable = True


def disable():
  """Disables v2 behaviors."""
  global _force_enable
  _force_enable = False


def enabled():
  """Returns True iff TensorFlow 2.0 behavior should be enabled."""
  if _force_enable is None:
    return os.getenv("TF2_BEHAVIOR", "0") != "0"
  else:
    return _force_enable
ac111399e390a5f62b35467b9cf5b9af613317b2
setup.py
setup.py
from setuptools import setup
from setuptools import find_packages

setup(name='DSPP-Keras',
      version='0.0.3',
      description='Integration of DSPP database with Keral Machine Learning Library',
      author='Jan Domanski',
      author_email='jan@peptone.io',
      url='https://github.com/PeptoneInc/dspp-keras',
      download_url='https://github.com/PeptoneInc/dspp-keras/archive/v0.0.3.tar.gz',
      license='MIT',
      install_requires=['keras', 'numpy', 'h5py'],
      packages=find_packages())
from setuptools import setup
from setuptools import find_packages

setup(name='DSPP-Keras',
      version='0.0.3',
      description='Integration of Database of structural propensities of proteins (dSPP) with Keras Machine Learning Library',
      author='Jan Domanski',
      author_email='jan@peptone.io',
      url='https://github.com/PeptoneInc/dspp-keras',
      download_url='https://github.com/PeptoneInc/dspp-keras/archive/v0.0.3.tar.gz',
      license='MIT',
      install_requires=['keras', 'numpy', 'h5py'],
      packages=find_packages())
Change title and fix spelling for pip package
Change title and fix spelling for pip package
Python
agpl-3.0
PeptoneInc/dspp-keras
<REPLACE_OLD> DSPP database <REPLACE_NEW> Database of structural propensities of proteins (dSPP) <REPLACE_END>
<REPLACE_OLD> Keral <REPLACE_NEW> Keras <REPLACE_END>
<|endoftext|>
from setuptools import setup
from setuptools import find_packages

setup(name='DSPP-Keras',
      version='0.0.3',
      description='Integration of Database of structural propensities of proteins (dSPP) with Keras Machine Learning Library',
      author='Jan Domanski',
      author_email='jan@peptone.io',
      url='https://github.com/PeptoneInc/dspp-keras',
      download_url='https://github.com/PeptoneInc/dspp-keras/archive/v0.0.3.tar.gz',
      license='MIT',
      install_requires=['keras', 'numpy', 'h5py'],
      packages=find_packages())
Change title and fix spelling for pip package

from setuptools import setup
from setuptools import find_packages

setup(name='DSPP-Keras',
      version='0.0.3',
      description='Integration of DSPP database with Keral Machine Learning Library',
      author='Jan Domanski',
      author_email='jan@peptone.io',
      url='https://github.com/PeptoneInc/dspp-keras',
      download_url='https://github.com/PeptoneInc/dspp-keras/archive/v0.0.3.tar.gz',
      license='MIT',
      install_requires=['keras', 'numpy', 'h5py'],
      packages=find_packages())
f58a9c6aff57ccd157d8734b6d89411fc29da706
src/poliastro/tests/test_patched_conics.py
src/poliastro/tests/test_patched_conics.py
# coding: utf-8
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose

from poliastro.bodies import Sun, Mercury, Venus, Earth, Moon, Mars
from poliastro.bodies import Jupiter, Saturn, Uranus, Neptune, Pluto
from poliastro.patched_conics import compute_soi


def test_compute_soi():
    # Data from Table A.2., Curtis "Orbital Mechanics for Engineering Students"
    data = [
        # body, SOI radius (m)
        # (Sun, None),
        (Mercury, 1.12e8),
        (Venus, 6.16e8),
        (Earth, 9.25e8),
        (Moon, 6.61e7),
        (Mars, 5.77e8),
        (Jupiter, 4.82e10),
        (Saturn, 5.48e10),
        (Uranus, 5.18e10),
        (Neptune, 8.66e10),
        (Pluto, 3.08e9)
    ]

    for row in data:
        body, expected_r_SOI = row
        expected_r_SOI = expected_r_SOI * u.m
        r_SOI = compute_soi(body)
        assert_quantity_allclose(r_SOI, expected_r_SOI, rtol=1e-6)
Add test to r_SOI computation
Add test to r_SOI computation
Python
mit
Juanlu001/poliastro,newlawrence/poliastro,poliastro/poliastro,newlawrence/poliastro,anhiga/poliastro,anhiga/poliastro,Juanlu001/poliastro,anhiga/poliastro,Juanlu001/poliastro,newlawrence/poliastro
<REPLACE_OLD>
<REPLACE_NEW>
# coding: utf-8
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose

from poliastro.bodies import Sun, Mercury, Venus, Earth, Moon, Mars
from poliastro.bodies import Jupiter, Saturn, Uranus, Neptune, Pluto
from poliastro.patched_conics import compute_soi


def test_compute_soi():
    # Data from Table A.2., Curtis "Orbital Mechanics for Engineering Students"
    data = [
        # body, SOI radius (m)
        # (Sun, None),
        (Mercury, 1.12e8),
        (Venus, 6.16e8),
        (Earth, 9.25e8),
        (Moon, 6.61e7),
        (Mars, 5.77e8),
        (Jupiter, 4.82e10),
        (Saturn, 5.48e10),
        (Uranus, 5.18e10),
        (Neptune, 8.66e10),
        (Pluto, 3.08e9)
    ]

    for row in data:
        body, expected_r_SOI = row
        expected_r_SOI = expected_r_SOI * u.m
        r_SOI = compute_soi(body)
        assert_quantity_allclose(r_SOI, expected_r_SOI, rtol=1e-6)
<REPLACE_END>
<|endoftext|>
# coding: utf-8
from astropy import units as u
from astropy.tests.helper import assert_quantity_allclose

from poliastro.bodies import Sun, Mercury, Venus, Earth, Moon, Mars
from poliastro.bodies import Jupiter, Saturn, Uranus, Neptune, Pluto
from poliastro.patched_conics import compute_soi


def test_compute_soi():
    # Data from Table A.2., Curtis "Orbital Mechanics for Engineering Students"
    data = [
        # body, SOI radius (m)
        # (Sun, None),
        (Mercury, 1.12e8),
        (Venus, 6.16e8),
        (Earth, 9.25e8),
        (Moon, 6.61e7),
        (Mars, 5.77e8),
        (Jupiter, 4.82e10),
        (Saturn, 5.48e10),
        (Uranus, 5.18e10),
        (Neptune, 8.66e10),
        (Pluto, 3.08e9)
    ]

    for row in data:
        body, expected_r_SOI = row
        expected_r_SOI = expected_r_SOI * u.m
        r_SOI = compute_soi(body)
        assert_quantity_allclose(r_SOI, expected_r_SOI, rtol=1e-6)
Add test to r_SOI computation
73e3cee19d0330154f36157b762cd1a69e055b19
setup.py
setup.py
from setuptools import setup, find_packages

with open('README.rst') as f:
    readme = f.read()

with open('LICENSE') as f:
    license = f.read()

setup(
    name='pycc',
    version='0.0.1',
    url='https://github.com/kevinconway/pycc',
    license=license,
    description='Python code optimizer..',
    author='Kevin Conway',
    author_email='kevinjacobconway@gmail.com',
    long_description=readme,
    classifiers=[],
    packages=find_packages(exclude=['tests', 'build', 'dist', 'docs']),
    requires=['astkit'],
    entry_points = {
        'console_scripts': [
            'pycc-lint = pycc.cli.lint:main',
            'pycc-transform = pycc.cli.transform:main',
            'pycc-compile = pycc.cli.compile:main',
        ],
    },
)
from setuptools import setup, find_packages

with open('README.rst') as f:
    readme = f.read()

with open('LICENSE') as f:
    license = f.read()

setup(
    name='pycc',
    version='0.0.1',
    url='https://github.com/kevinconway/pycc',
    license=license,
    description='Python code optimizer..',
    author='Kevin Conway',
    author_email='kevinjacobconway@gmail.com',
    long_description=readme,
    classifiers=[],
    packages=find_packages(exclude=['tests', 'build', 'dist', 'docs']),
    requires=['astkit', 'pytest'],
    entry_points={
        'console_scripts': [
            'pycc-lint = pycc.cli.lint:main',
            'pycc-transform = pycc.cli.transform:main',
            'pycc-compile = pycc.cli.compile:main',
        ],
    },
)
Add package dependencies for printing and testing
Add package dependencies for printing and testing

Signed-off-by: Kevin Conway <3473c1f185ca03eadc40ad288d84425b54fd7d57@gmail.com>
Python
apache-2.0
kevinconway/pycc,kevinconway/pycc
<REPLACE_OLD> requires=['astkit'], <REPLACE_NEW> requires=['astkit', 'pytest'], <REPLACE_END>
<REPLACE_OLD> entry_points = { <REPLACE_NEW> entry_points={ <REPLACE_END>
<|endoftext|>
from setuptools import setup, find_packages

with open('README.rst') as f:
    readme = f.read()

with open('LICENSE') as f:
    license = f.read()

setup(
    name='pycc',
    version='0.0.1',
    url='https://github.com/kevinconway/pycc',
    license=license,
    description='Python code optimizer..',
    author='Kevin Conway',
    author_email='kevinjacobconway@gmail.com',
    long_description=readme,
    classifiers=[],
    packages=find_packages(exclude=['tests', 'build', 'dist', 'docs']),
    requires=['astkit', 'pytest'],
    entry_points={
        'console_scripts': [
            'pycc-lint = pycc.cli.lint:main',
            'pycc-transform = pycc.cli.transform:main',
            'pycc-compile = pycc.cli.compile:main',
        ],
    },
)
Add package dependencies for printing and testing

Signed-off-by: Kevin Conway <3473c1f185ca03eadc40ad288d84425b54fd7d57@gmail.com>

from setuptools import setup, find_packages

with open('README.rst') as f:
    readme = f.read()

with open('LICENSE') as f:
    license = f.read()

setup(
    name='pycc',
    version='0.0.1',
    url='https://github.com/kevinconway/pycc',
    license=license,
    description='Python code optimizer..',
    author='Kevin Conway',
    author_email='kevinjacobconway@gmail.com',
    long_description=readme,
    classifiers=[],
    packages=find_packages(exclude=['tests', 'build', 'dist', 'docs']),
    requires=['astkit'],
    entry_points = {
        'console_scripts': [
            'pycc-lint = pycc.cli.lint:main',
            'pycc-transform = pycc.cli.transform:main',
            'pycc-compile = pycc.cli.compile:main',
        ],
    },
)
e946f239695f74d83fcb1b4929ed2281846add4c
avalon/fusion/pipeline.py
avalon/fusion/pipeline.py
def imprint_container(tool, name, namespace, context, loader=None):
    """Imprint a Loader with metadata

    Containerisation enables a tracking of version, author and origin
    for loaded assets.

    Arguments:
        tool (object): The node in Fusion to imprint as container,
            usually a Loader.
        name (str): Name of resulting assembly
        namespace (str): Namespace under which to host container
        context (dict): Asset information
        loader (str, optional): Name of loader used to produce this
            container.

    Returns:
        None

    """
    data = [
        ("schema", "avalon-core:container-2.0"),
        ("id", "pyblish.avalon.container"),
        ("name", str(name)),
        ("namespace", str(namespace)),
        ("loader", str(loader)),
        ("representation", str(context["representation"]["_id"])),
    ]

    for key, value in data:
        tool.SetData("avalon.{}".format(key), value)


def parse_container(tool):
    """Returns imprinted container data of a tool

    This reads the imprinted data from `imprint_container`.

    """
    container = {}
    for key in ['schema', 'id', 'name', 'namespace', 'loader',
                'representation']:
        value = tool.GetData('avalon.{}'.format(key))
        container[key] = value

    return container
def imprint_container(tool, name, namespace, context, loader=None):
    """Imprint a Loader with metadata

    Containerisation enables a tracking of version, author and origin
    for loaded assets.

    Arguments:
        tool (object): The node in Fusion to imprint as container,
            usually a Loader.
        name (str): Name of resulting assembly
        namespace (str): Namespace under which to host container
        context (dict): Asset information
        loader (str, optional): Name of loader used to produce this
            container.

    Returns:
        None

    """
    data = [
        ("schema", "avalon-core:container-2.0"),
        ("id", "pyblish.avalon.container"),
        ("name", str(name)),
        ("namespace", str(namespace)),
        ("loader", str(loader)),
        ("representation", str(context["representation"]["_id"])),
    ]

    for key, value in data:
        tool.SetData("avalon.{}".format(key), value)


def parse_container(tool):
    """Returns imprinted container data of a tool

    This reads the imprinted data from `imprint_container`.

    """
    container = {}
    for key in ['schema', 'id', 'name', 'namespace', 'loader',
                'representation']:
        value = tool.GetData('avalon.{}'.format(key))
        container[key] = value

    # Store the tool's name
    container["objectName"] = tool.Name

    return container
Store tool's name when parsing container
Store tool's name when parsing container
Python
mit
MoonShineVFX/core,MoonShineVFX/core,getavalon/core,getavalon/core,mindbender-studio/core,mindbender-studio/core
<REPLACE_OLD> tool <REPLACE_NEW> tool <REPLACE_END>
<REPLACE_OLD> `imprint_container`. <REPLACE_NEW> `imprint_container`. <REPLACE_END>
<INSERT> # Store the tool's name container["objectName"] = tool.Name <INSERT_END>
<|endoftext|>
def imprint_container(tool, name, namespace, context, loader=None):
    """Imprint a Loader with metadata

    Containerisation enables a tracking of version, author and origin
    for loaded assets.

    Arguments:
        tool (object): The node in Fusion to imprint as container,
            usually a Loader.
        name (str): Name of resulting assembly
        namespace (str): Namespace under which to host container
        context (dict): Asset information
        loader (str, optional): Name of loader used to produce this
            container.

    Returns:
        None

    """
    data = [
        ("schema", "avalon-core:container-2.0"),
        ("id", "pyblish.avalon.container"),
        ("name", str(name)),
        ("namespace", str(namespace)),
        ("loader", str(loader)),
        ("representation", str(context["representation"]["_id"])),
    ]

    for key, value in data:
        tool.SetData("avalon.{}".format(key), value)


def parse_container(tool):
    """Returns imprinted container data of a tool

    This reads the imprinted data from `imprint_container`.

    """
    container = {}
    for key in ['schema', 'id', 'name', 'namespace', 'loader',
                'representation']:
        value = tool.GetData('avalon.{}'.format(key))
        container[key] = value

    # Store the tool's name
    container["objectName"] = tool.Name

    return container
Store tool's name when parsing container

def imprint_container(tool, name, namespace, context, loader=None):
    """Imprint a Loader with metadata

    Containerisation enables a tracking of version, author and origin
    for loaded assets.

    Arguments:
        tool (object): The node in Fusion to imprint as container,
            usually a Loader.
        name (str): Name of resulting assembly
        namespace (str): Namespace under which to host container
        context (dict): Asset information
        loader (str, optional): Name of loader used to produce this
            container.

    Returns:
        None

    """
    data = [
        ("schema", "avalon-core:container-2.0"),
        ("id", "pyblish.avalon.container"),
        ("name", str(name)),
        ("namespace", str(namespace)),
        ("loader", str(loader)),
        ("representation", str(context["representation"]["_id"])),
    ]

    for key, value in data:
        tool.SetData("avalon.{}".format(key), value)


def parse_container(tool):
    """Returns imprinted container data of a tool

    This reads the imprinted data from `imprint_container`.

    """
    container = {}
    for key in ['schema', 'id', 'name', 'namespace', 'loader',
                'representation']:
        value = tool.GetData('avalon.{}'.format(key))
        container[key] = value

    return container
ccf285c30a0110f2ff59b91ec0166f9b5306239d
dukpy/evaljs.py
dukpy/evaljs.py
import json

from . import _dukpy

try:
    from collections.abc import Iterable
except ImportError:
    from collections import Iterable

try: # pragma: no cover
    unicode
    string_types = (str, unicode)
except NameError: # pragma: no cover
    string_types = (bytes, str)


class JSInterpreter(object):
    """JavaScript Interpreter"""
    def __init__(self):
        self._ctx = _dukpy.create_context()

    def evaljs(self, code, **kwargs):
        """Runs JavaScript code in the context of the interpreter.

        All arguments will be converted to plain javascript objects
        through the JSON encoder and will be available in `dukpy`
        global object.

        Returns the last object on javascript stack.
        """
        jsvars = json.dumps(kwargs)

        jscode = code
        if not isinstance(code, string_types):
            jscode = ';\n'.join(code)

        if not isinstance(jscode, bytes):
            jscode = jscode.encode('utf-8')

        res = _dukpy.eval_string(self._ctx, jscode, jsvars)
        if res is None:
            return None

        return json.loads(res.decode('utf-8'))


def evaljs(code, **kwargs):
    """Evaluates the given ``code`` as JavaScript and returns the result"""
    return JSInterpreter().evaljs(code, **kwargs)
import json

from . import _dukpy

try:
    from collections.abc import Iterable
except ImportError:
    from collections import Iterable

try: # pragma: no cover
    unicode
    string_types = (str, unicode)
    jscode_type = str
except NameError: # pragma: no cover
    string_types = (bytes, str)
    jscode_type = str


class JSInterpreter(object):
    """JavaScript Interpreter"""
    def __init__(self):
        self._ctx = _dukpy.create_context()

    def evaljs(self, code, **kwargs):
        """Runs JavaScript code in the context of the interpreter.

        All arguments will be converted to plain javascript objects
        through the JSON encoder and will be available in `dukpy`
        global object.

        Returns the last object on javascript stack.
        """
        jsvars = json.dumps(kwargs)

        jscode = code
        if not isinstance(code, string_types):
            jscode = ';\n'.join(code)

        if not isinstance(jscode, str):
            # Source code must be str on both Py2 and Py3
            # so it must be encoded on Py2 and decoded on Py3
            if isinstance(jscode, bytes):
                jscode = jscode.decode('utf-8')
            else:
                jscode = jscode.encode('utf-8')

        res = _dukpy.eval_string(self._ctx, jscode, jsvars)
        if res is None:
            return None

        return json.loads(res.decode('utf-8'))


def evaljs(code, **kwargs):
    """Evaluates the given ``code`` as JavaScript and returns the result"""
    return JSInterpreter().evaljs(code, **kwargs)
Fix unicode source code on py3
Fix unicode source code on py3
Python
mit
amol-/dukpy,amol-/dukpy,amol-/dukpy
<REPLACE_OLD> unicode) except <REPLACE_NEW> unicode) jscode_type = str except <REPLACE_END>
<REPLACE_OLD> str) class <REPLACE_NEW> str) jscode_type = str class <REPLACE_END>
<INSERT> str): # Source code must be str on both Py2 and Py3 # so it must be encoded on Py2 and decoded on Py3 if isinstance(jscode, <INSERT_END>
<INSERT> jscode = jscode.decode('utf-8') else: <INSERT_END>
<|endoftext|>
import json

from . import _dukpy

try:
    from collections.abc import Iterable
except ImportError:
    from collections import Iterable

try: # pragma: no cover
    unicode
    string_types = (str, unicode)
    jscode_type = str
except NameError: # pragma: no cover
    string_types = (bytes, str)
    jscode_type = str


class JSInterpreter(object):
    """JavaScript Interpreter"""
    def __init__(self):
        self._ctx = _dukpy.create_context()

    def evaljs(self, code, **kwargs):
        """Runs JavaScript code in the context of the interpreter.

        All arguments will be converted to plain javascript objects
        through the JSON encoder and will be available in `dukpy`
        global object.

        Returns the last object on javascript stack.
        """
        jsvars = json.dumps(kwargs)

        jscode = code
        if not isinstance(code, string_types):
            jscode = ';\n'.join(code)

        if not isinstance(jscode, str):
            # Source code must be str on both Py2 and Py3
            # so it must be encoded on Py2 and decoded on Py3
            if isinstance(jscode, bytes):
                jscode = jscode.decode('utf-8')
            else:
                jscode = jscode.encode('utf-8')

        res = _dukpy.eval_string(self._ctx, jscode, jsvars)
        if res is None:
            return None

        return json.loads(res.decode('utf-8'))


def evaljs(code, **kwargs):
    """Evaluates the given ``code`` as JavaScript and returns the result"""
    return JSInterpreter().evaljs(code, **kwargs)
Fix unicode source code on py3

import json

from . import _dukpy

try:
    from collections.abc import Iterable
except ImportError:
    from collections import Iterable

try: # pragma: no cover
    unicode
    string_types = (str, unicode)
except NameError: # pragma: no cover
    string_types = (bytes, str)


class JSInterpreter(object):
    """JavaScript Interpreter"""
    def __init__(self):
        self._ctx = _dukpy.create_context()

    def evaljs(self, code, **kwargs):
        """Runs JavaScript code in the context of the interpreter.

        All arguments will be converted to plain javascript objects
        through the JSON encoder and will be available in `dukpy`
        global object.

        Returns the last object on javascript stack.
        """
        jsvars = json.dumps(kwargs)

        jscode = code
        if not isinstance(code, string_types):
            jscode = ';\n'.join(code)

        if not isinstance(jscode, bytes):
            jscode = jscode.encode('utf-8')

        res = _dukpy.eval_string(self._ctx, jscode, jsvars)
        if res is None:
            return None

        return json.loads(res.decode('utf-8'))


def evaljs(code, **kwargs):
    """Evaluates the given ``code`` as JavaScript and returns the result"""
    return JSInterpreter().evaljs(code, **kwargs)
101de91508d918b90c6254dc0b1cc5e0744bbd71
setup.py
setup.py
from setuptools import setup
import sys

setup(
    # Basic package information.
    name = 'zdesk',
    author = 'Brent Woodruff',
    version = '2.4.0',
    author_email = 'brent@fprimex.com',
    packages = ['zdesk'],
    include_package_data = True,
    install_requires = ['requests'],
    license='LICENSE.txt',
    url = 'https://github.com/fprimex/zdesk',
    keywords = 'zendesk api helpdesk',
    description = 'Zendesk API generated directly from developer.zendesk.com',
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Internet',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
)
from setuptools import setup
import sys

setup(
    # Basic package information.
    name = 'zdesk',
    author = 'Brent Woodruff',
    version = '2.3.0',
    author_email = 'brent@fprimex.com',
    packages = ['zdesk'],
    include_package_data = True,
    install_requires = ['requests'],
    license='LICENSE.txt',
    url = 'https://github.com/fprimex/zdesk',
    keywords = 'zendesk api helpdesk',
    description = 'Zendesk API generated directly from developer.zendesk.com',
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Internet',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
)
Revert "bump version for impending release"
Revert "bump version for impending release"

This reverts commit c9c46f1bd4593cd1b13df404b2dba89c75c4f1ec.
Python
mit
fprimex/zdesk,blade2005/zdesk,fprimex/zdgen
<REPLACE_OLD> '2.4.0', <REPLACE_NEW> '2.3.0', <REPLACE_END>
<|endoftext|>
from setuptools import setup
import sys

setup(
    # Basic package information.
    name = 'zdesk',
    author = 'Brent Woodruff',
    version = '2.3.0',
    author_email = 'brent@fprimex.com',
    packages = ['zdesk'],
    include_package_data = True,
    install_requires = ['requests'],
    license='LICENSE.txt',
    url = 'https://github.com/fprimex/zdesk',
    keywords = 'zendesk api helpdesk',
    description = 'Zendesk API generated directly from developer.zendesk.com',
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Internet',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
)
Revert "bump version for impending release"

This reverts commit c9c46f1bd4593cd1b13df404b2dba89c75c4f1ec.

from setuptools import setup
import sys

setup(
    # Basic package information.
    name = 'zdesk',
    author = 'Brent Woodruff',
    version = '2.4.0',
    author_email = 'brent@fprimex.com',
    packages = ['zdesk'],
    include_package_data = True,
    install_requires = ['requests'],
    license='LICENSE.txt',
    url = 'https://github.com/fprimex/zdesk',
    keywords = 'zendesk api helpdesk',
    description = 'Zendesk API generated directly from developer.zendesk.com',
    classifiers = [
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Topic :: Internet',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 3',
    ],
)
8552542f6e23f886bae467f96e847b00327fa164
scripts/ci/guideline_check.py
scripts/ci/guideline_check.py
#!/usr/bin/env python3

# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation

import os
import sh
import argparse
import re
from unidiff import PatchSet

if "ZEPHYR_BASE" not in os.environ:
    exit("$ZEPHYR_BASE environment variable undefined.")

repository_path = os.environ['ZEPHYR_BASE']

sh_special_args = {
    '_tty_out': False,
    '_cwd': repository_path
}

coccinelle_scripts = ["/scripts/coccinelle/reserved_names.cocci",
                      "/scripts/coccinelle/same_identifier.cocci",
                      "/scripts/coccinelle/identifier_length.cocci",
                      ]


def parse_coccinelle(contents: str, violations: dict):
    reg = re.compile("([a-zA-Z0-9/]*\\.[ch]:[0-9]*)(:[0-9\\-]*: )(.*)")
    for line in contents.split("\n"):
        r = reg.match(line)
        if r:
            f = r.group(1)
            if f in violations:
                violations[f].append(r.group(3))
            else:
                violations[r.group(1)] = [r.group(3)]


def parse_args():
    parser = argparse.ArgumentParser(
        description="Check if change requires full twister")
    parser.add_argument('-c', '--commits', default=None,
                        help="Commit range in the form: a..b")
    return parser.parse_args()


def main():
    args = parse_args()
    if not args.commits:
        exit("missing commit range")

    commit = sh.git("diff", args.commits, **sh_special_args)
    patch_set = PatchSet(commit)
    zephyr_base = os.getenv("ZEPHYR_BASE")
    violations = {}
    numViolations = 0

    for f in patch_set:
        if not f.path.endswith(".c") and not f.path.endswith(".h") or not os.path.exists(zephyr_base + "/" + f.path):
            continue

        for script in coccinelle_scripts:
            script_path = os.getenv("ZEPHYR_BASE") + "/" + script
            cocci = sh.coccicheck(
                "--mode=report",
                "--cocci=" + script_path,
                f.path,
                **sh_special_args)
            parse_coccinelle(cocci, violations)

        for hunk in f:
            for line in hunk:
                if line.is_added:
                    violation = "{}:{}".format(f.path, line.target_line_no)
                    if violation in violations:
                        numViolations += 1
                        print(
                            "{}:{}".format(
                                violation,
                                "\t\n".join(
                                    violations[violation])))

    return numViolations


if __name__ == "__main__":
    ret = main()
    exit(ret)
Apply coccinelle scripts in git diffs
ci: Apply coccinelle scripts in git diffs

This scripts receives the same parameter of what_changed.py. And run
coccinelle scripts for code guideline compliance in the given git
commits.

e.g: ./guideline_check.py --commits origin/master..HEAD

Signed-off-by: Flavio Ceolin <979b9165500b0741b9d0500e2efd74fc1547bff7@intel.com>
Python
apache-2.0
zephyrproject-rtos/zephyr,finikorg/zephyr,zephyrproject-rtos/zephyr,nashif/zephyr,nashif/zephyr,galak/zephyr,finikorg/zephyr,finikorg/zephyr,zephyrproject-rtos/zephyr,Vudentz/zephyr,zephyrproject-rtos/zephyr,galak/zephyr,nashif/zephyr,Vudentz/zephyr,zephyrproject-rtos/zephyr,galak/zephyr,nashif/zephyr,galak/zephyr,Vudentz/zephyr,galak/zephyr,Vudentz/zephyr,finikorg/zephyr,Vudentz/zephyr,nashif/zephyr,Vudentz/zephyr,finikorg/zephyr
<REPLACE_OLD>
<REPLACE_NEW>
#!/usr/bin/env python3

# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation

import os
import sh
import argparse
import re
from unidiff import PatchSet

if "ZEPHYR_BASE" not in os.environ:
    exit("$ZEPHYR_BASE environment variable undefined.")

repository_path = os.environ['ZEPHYR_BASE']

sh_special_args = {
    '_tty_out': False,
    '_cwd': repository_path
}

coccinelle_scripts = ["/scripts/coccinelle/reserved_names.cocci",
                      "/scripts/coccinelle/same_identifier.cocci",
                      "/scripts/coccinelle/identifier_length.cocci",
                      ]


def parse_coccinelle(contents: str, violations: dict):
    reg = re.compile("([a-zA-Z0-9/]*\\.[ch]:[0-9]*)(:[0-9\\-]*: )(.*)")
    for line in contents.split("\n"):
        r = reg.match(line)
        if r:
            f = r.group(1)
            if f in violations:
                violations[f].append(r.group(3))
            else:
                violations[r.group(1)] = [r.group(3)]


def parse_args():
    parser = argparse.ArgumentParser(
        description="Check if change requires full twister")
    parser.add_argument('-c', '--commits', default=None,
                        help="Commit range in the form: a..b")
    return parser.parse_args()


def main():
    args = parse_args()
    if not args.commits:
        exit("missing commit range")

    commit = sh.git("diff", args.commits, **sh_special_args)
    patch_set = PatchSet(commit)
    zephyr_base = os.getenv("ZEPHYR_BASE")
    violations = {}
    numViolations = 0

    for f in patch_set:
        if not f.path.endswith(".c") and not f.path.endswith(".h") or not os.path.exists(zephyr_base + "/" + f.path):
            continue

        for script in coccinelle_scripts:
            script_path = os.getenv("ZEPHYR_BASE") + "/" + script
            cocci = sh.coccicheck(
                "--mode=report",
                "--cocci=" + script_path,
                f.path,
                **sh_special_args)
            parse_coccinelle(cocci, violations)

        for hunk in f:
            for line in hunk:
                if line.is_added:
                    violation = "{}:{}".format(f.path, line.target_line_no)
                    if violation in violations:
                        numViolations += 1
                        print(
                            "{}:{}".format(
                                violation,
                                "\t\n".join(
                                    violations[violation])))

    return numViolations


if __name__ == "__main__":
    ret = main()
    exit(ret)
<REPLACE_END>
<|endoftext|>
#!/usr/bin/env python3

# SPDX-License-Identifier: Apache-2.0
# Copyright (c) 2021 Intel Corporation

import os
import sh
import argparse
import re
from unidiff import PatchSet

if "ZEPHYR_BASE" not in os.environ:
    exit("$ZEPHYR_BASE environment variable undefined.")

repository_path = os.environ['ZEPHYR_BASE']

sh_special_args = {
    '_tty_out': False,
    '_cwd': repository_path
}

coccinelle_scripts = ["/scripts/coccinelle/reserved_names.cocci",
                      "/scripts/coccinelle/same_identifier.cocci",
                      "/scripts/coccinelle/identifier_length.cocci",
                      ]


def parse_coccinelle(contents: str, violations: dict):
    reg = re.compile("([a-zA-Z0-9/]*\\.[ch]:[0-9]*)(:[0-9\\-]*: )(.*)")
    for line in contents.split("\n"):
        r = reg.match(line)
        if r:
            f = r.group(1)
            if f in violations:
                violations[f].append(r.group(3))
            else:
                violations[r.group(1)] = [r.group(3)]


def parse_args():
    parser = argparse.ArgumentParser(
        description="Check if change requires full twister")
    parser.add_argument('-c', '--commits', default=None,
                        help="Commit range in the form: a..b")
    return parser.parse_args()


def main():
    args = parse_args()
    if not args.commits:
        exit("missing commit range")

    commit = sh.git("diff", args.commits, **sh_special_args)
    patch_set = PatchSet(commit)
    zephyr_base = os.getenv("ZEPHYR_BASE")
    violations = {}
    numViolations = 0

    for f in patch_set:
        if not f.path.endswith(".c") and not f.path.endswith(".h") or not os.path.exists(zephyr_base + "/" + f.path):
            continue

        for script in coccinelle_scripts:
            script_path = os.getenv("ZEPHYR_BASE") + "/" + script
            cocci = sh.coccicheck(
                "--mode=report",
                "--cocci=" + script_path,
                f.path,
                **sh_special_args)
            parse_coccinelle(cocci, violations)

        for hunk in f:
            for line in hunk:
                if line.is_added:
                    violation = "{}:{}".format(f.path, line.target_line_no)
                    if violation in violations:
                        numViolations += 1
                        print(
                            "{}:{}".format(
                                violation,
                                "\t\n".join(
                                    violations[violation])))

    return numViolations


if __name__ == "__main__":
    ret = main()
    exit(ret)
ci: Apply coccinelle scripts in git diffs

This scripts receives the same parameter of what_changed.py. And run
coccinelle scripts for code guideline compliance in the given git
commits.

e.g: ./guideline_check.py --commits origin/master..HEAD

Signed-off-by: Flavio Ceolin <979b9165500b0741b9d0500e2efd74fc1547bff7@intel.com>
5bcb267761e6c2694111757ee4fcf2a050f6c556
byceps/blueprints/site/guest_server/forms.py
byceps/blueprints/site/guest_server/forms.py

"""
byceps.blueprints.site.guest_server.forms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""

from flask_babel import lazy_gettext
from wtforms import StringField, TextAreaField
from wtforms.validators import Optional

from ....util.l10n import LocalizedForm


class CreateForm(LocalizedForm):
    hostname = StringField(lazy_gettext('Hostname'), validators=[Optional()])
    notes = TextAreaField(lazy_gettext('Notes'), validators=[Optional()])

"""
byceps.blueprints.site.guest_server.forms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""

import re

from flask_babel import lazy_gettext
from wtforms import StringField, TextAreaField
from wtforms.validators import Length, Optional, Regexp

from ....util.l10n import LocalizedForm


HOSTNAME_REGEX = re.compile('^[a-z][a-z0-9-\.]+$')


class CreateForm(LocalizedForm):
    hostname = StringField(
        lazy_gettext('Hostname'),
        validators=[Optional(), Length(max=32), Regexp(HOSTNAME_REGEX)],
    )
    notes = TextAreaField(
        lazy_gettext('Notes'), validators=[Optional(), Length(max=1000)]
    )
Make guest server form validation more strict
Make guest server form validation more strict
Python
bsd-3-clause
homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps
<REPLACE_OLD> details) """ from <REPLACE_NEW> details) """ import re from <REPLACE_END>
<REPLACE_OLD> Optional from <REPLACE_NEW> Length, Optional, Regexp from <REPLACE_END>
<REPLACE_OLD> LocalizedForm class <REPLACE_NEW> LocalizedForm HOSTNAME_REGEX = re.compile('^[a-z][a-z0-9-\.]+$') class <REPLACE_END>
<REPLACE_OLD> StringField(lazy_gettext('Hostname'), validators=[Optional()]) <REPLACE_NEW> StringField( lazy_gettext('Hostname'), validators=[Optional(), Length(max=32), Regexp(HOSTNAME_REGEX)], ) <REPLACE_END>
<REPLACE_OLD> TextAreaField(lazy_gettext('Notes'), validators=[Optional()]) <REPLACE_NEW> TextAreaField( lazy_gettext('Notes'), validators=[Optional(), Length(max=1000)] ) <REPLACE_END>
<|endoftext|>
"""
byceps.blueprints.site.guest_server.forms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""

import re

from flask_babel import lazy_gettext
from wtforms import StringField, TextAreaField
from wtforms.validators import Length, Optional, Regexp

from ....util.l10n import LocalizedForm


HOSTNAME_REGEX = re.compile('^[a-z][a-z0-9-\.]+$')


class CreateForm(LocalizedForm):
    hostname = StringField(
        lazy_gettext('Hostname'),
        validators=[Optional(), Length(max=32), Regexp(HOSTNAME_REGEX)],
    )
    notes = TextAreaField(
        lazy_gettext('Notes'), validators=[Optional(), Length(max=1000)]
    )
Make guest server form validation more strict

"""
byceps.blueprints.site.guest_server.forms
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""

from flask_babel import lazy_gettext
from wtforms import StringField, TextAreaField
from wtforms.validators import Optional

from ....util.l10n import LocalizedForm


class CreateForm(LocalizedForm):
    hostname = StringField(lazy_gettext('Hostname'), validators=[Optional()])
    notes = TextAreaField(lazy_gettext('Notes'), validators=[Optional()])
8a577edcc723ad30cc1b84c00435474e980353d3
gaphor/diagram/profiles/extension.py
gaphor/diagram/profiles/extension.py

"""
ExtensionItem -- Graphical representation of an association.
"""

# TODO: for Extension.postload(): in some cases where the association ends
# are connected to the same Class, the head_end property is connected to the
# tail end and visa versa.

from gaphor import UML
from gaphor.diagram.diagramline import NamedLine


class ExtensionItem(NamedLine):
    """
    ExtensionItem represents associations.

    An ExtensionItem has two ExtensionEnd items. Each ExtensionEnd item
    represents a Property (with Property.association == my association).
    """

    __uml__ = UML.Extension

    def __init__(self, id=None, model=None):
        NamedLine.__init__(self, id, model)
        self.watch("subject<Extension>.ownedEnd")

    def draw_head(self, context):
        cr = context.cairo
        cr.move_to(0, 0)
        cr.line_to(15, -10)
        cr.line_to(15, 10)
        cr.line_to(0, 0)
        cr.set_source_rgb(0, 0, 0)
        cr.fill()
        cr.move_to(15, 0)

"""
ExtensionItem -- Graphical representation of an association.
"""

# TODO: for Extension.postload(): in some cases where the association ends
# are connected to the same Class, the head_end property is connected to the
# tail end and visa versa.

from gaphor import UML
from gaphor.UML.modelfactory import stereotypes_str
from gaphor.diagram.presentation import LinePresentation
from gaphor.diagram.shapes import Box, EditableText, Text
from gaphor.diagram.support import represents


@represents(UML.Extension)
class ExtensionItem(LinePresentation):
    """
    ExtensionItem represents associations.

    An ExtensionItem has two ExtensionEnd items. Each ExtensionEnd item
    represents a Property (with Property.association == my association).
    """

    def __init__(self, id=None, model=None):
        super().__init__(id, model)

        self.shape_middle = Box(
            Text(
                text=lambda: stereotypes_str(self.subject),
                style={"min-width": 0, "min-height": 0},
            ),
            EditableText(text=lambda: self.subject and self.subject.name or ""),
        )

        self.watch("subject<NamedElement>.name")
        self.watch("subject.appliedStereotype.classifier.name")

    def draw_head(self, context):
        cr = context.cairo
        cr.move_to(0, 0)
        cr.line_to(15, -10)
        cr.line_to(15, 10)
        cr.line_to(0, 0)
        cr.set_source_rgb(0, 0, 0)
        cr.fill()
        cr.move_to(15, 0)
Convert Extension item to new line style
Convert Extension item to new line style
Python
lgpl-2.1
amolenaar/gaphor,amolenaar/gaphor
<REPLACE_OLD> gaphor.diagram.diagramline import NamedLine class ExtensionItem(NamedLine): <REPLACE_NEW> gaphor.UML.modelfactory import stereotypes_str from gaphor.diagram.presentation import LinePresentation from gaphor.diagram.shapes import Box, EditableText, Text from gaphor.diagram.support import represents @represents(UML.Extension) class ExtensionItem(LinePresentation): <REPLACE_END>
<DELETE> __uml__ = UML.Extension <DELETE_END>
<REPLACE_OLD> NamedLine.__init__(self, id, model) self.watch("subject<Extension>.ownedEnd") <REPLACE_NEW> super().__init__(id, model) self.shape_middle = Box( Text( text=lambda: stereotypes_str(self.subject), style={"min-width": 0, "min-height": 0}, ), EditableText(text=lambda: self.subject and self.subject.name or ""), ) self.watch("subject<NamedElement>.name") self.watch("subject.appliedStereotype.classifier.name") <REPLACE_END>
<|endoftext|>
"""
ExtensionItem -- Graphical representation of an association.
"""

# TODO: for Extension.postload(): in some cases where the association ends
# are connected to the same Class, the head_end property is connected to the
# tail end and visa versa.

from gaphor import UML
from gaphor.UML.modelfactory import stereotypes_str
from gaphor.diagram.presentation import LinePresentation
from gaphor.diagram.shapes import Box, EditableText, Text
from gaphor.diagram.support import represents


@represents(UML.Extension)
class ExtensionItem(LinePresentation):
    """
    ExtensionItem represents associations.

    An ExtensionItem has two ExtensionEnd items. Each ExtensionEnd item
    represents a Property (with Property.association == my association).
    """

    def __init__(self, id=None, model=None):
        super().__init__(id, model)

        self.shape_middle = Box(
            Text(
                text=lambda: stereotypes_str(self.subject),
                style={"min-width": 0, "min-height": 0},
            ),
            EditableText(text=lambda: self.subject and self.subject.name or ""),
        )

        self.watch("subject<NamedElement>.name")
        self.watch("subject.appliedStereotype.classifier.name")

    def draw_head(self, context):
        cr = context.cairo
        cr.move_to(0, 0)
        cr.line_to(15, -10)
        cr.line_to(15, 10)
        cr.line_to(0, 0)
        cr.set_source_rgb(0, 0, 0)
        cr.fill()
        cr.move_to(15, 0)
Convert Extension item to new line style

"""
ExtensionItem -- Graphical representation of an association.
"""

# TODO: for Extension.postload(): in some cases where the association ends
# are connected to the same Class, the head_end property is connected to the
# tail end and visa versa.

from gaphor import UML
from gaphor.diagram.diagramline import NamedLine


class ExtensionItem(NamedLine):
    """
    ExtensionItem represents associations.

    An ExtensionItem has two ExtensionEnd items. Each ExtensionEnd item
    represents a Property (with Property.association == my association).
    """

    __uml__ = UML.Extension

    def __init__(self, id=None, model=None):
        NamedLine.__init__(self, id, model)
        self.watch("subject<Extension>.ownedEnd")

    def draw_head(self, context):
        cr = context.cairo
        cr.move_to(0, 0)
        cr.line_to(15, -10)
        cr.line_to(15, 10)
        cr.line_to(0, 0)
        cr.set_source_rgb(0, 0, 0)
        cr.fill()
        cr.move_to(15, 0)
86cede2c228e2e6bccb4adbdfe81d9d4bd34ac6f
teams/blog_fetch.py
teams/blog_fetch.py

"""Fetches the blogs configured on the website to local files.

The local files dumped to are 'BLOGNAME.incoming.yml' in the assets/blogs
directory.

Should be run as a regular cron-job.

Must be run from within the website virtual environment.

---

Copyright (c) 2013, University Radio York.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice, this
  list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""

import pyramid
import feedparser
import pickle

import lass.common.config


if __name__ == '__main__':
    blog_config = lass.common.config.from_yaml('sitewide/blogs')
    for name, config in blog_config.items():
        asset = 'assets:blogs/{}.incoming'.format(name)
        full_path = pyramid.path.AssetResolver().resolve(asset).abspath()
        feed = feedparser.parse(config['feed'])
        with open(full_path, 'wb+') as stream:
            pickle.dump(feed, stream)
Move the Python end of blog-fetch to LASS.
Move the Python end of blog-fetch to LASS.
Python
bsd-2-clause
UniversityRadioYork/lass-pyramid
<REPLACE_OLD> <REPLACE_NEW> """Fetches the blogs configured on the website to local files.

The local files dumped to are 'BLOGNAME.incoming.yml' in the assets/blogs
directory.

Should be run as a regular cron-job.  Must be run from within the website
virtual environment.

---

Copyright (c) 2013, University Radio York.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice,
  this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""

import pyramid
import feedparser
import pickle

import lass.common.config


if __name__ == '__main__':
    blog_config = lass.common.config.from_yaml('sitewide/blogs')
    for name, config in blog_config.items():
        asset = 'assets:blogs/{}.incoming'.format(name)
        full_path = pyramid.path.AssetResolver().resolve(asset).abspath()
        feed = feedparser.parse(config['feed'])
        with open(full_path, 'wb+') as stream:
            pickle.dump(feed, stream) <REPLACE_END> <|endoftext|> """Fetches the blogs configured on the website to local files.

The local files dumped to are 'BLOGNAME.incoming.yml' in the assets/blogs
directory.

Should be run as a regular cron-job.  Must be run from within the website
virtual environment.

---

Copyright (c) 2013, University Radio York.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

* Redistributions of source code must retain the above copyright notice,
  this list of conditions and the following disclaimer.

* Redistributions in binary form must reproduce the above copyright notice,
  this list of conditions and the following disclaimer in the documentation
  and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""

import pyramid
import feedparser
import pickle

import lass.common.config


if __name__ == '__main__':
    blog_config = lass.common.config.from_yaml('sitewide/blogs')
    for name, config in blog_config.items():
        asset = 'assets:blogs/{}.incoming'.format(name)
        full_path = pyramid.path.AssetResolver().resolve(asset).abspath()
        feed = feedparser.parse(config['feed'])
        with open(full_path, 'wb+') as stream:
            pickle.dump(feed, stream)
Move the Python end of blog-fetch to LASS.
7a8b041ce9e0f115f3c5daad159a03c13c5cd72d
python/pycandela/pycandela/__init__.py
python/pycandela/pycandela/__init__.py
import IPython.core.displaypub as displaypub
import json
import DataFrame from pandas


class DataFrameEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, DataFrame):
            return obj.to_records()
        return json.JSONEncoder.default(self, obj)


def publish_display_data(data):
    try:
        displaypub.publish_display_data('pycandela', data)
    except TypeError:
        displaypub.publish_display_data(data)


def component(name, options):
    js = ("""
require(['candela'], function (candela) {
    new candela.components['%s'](element.get(0), %s)
});
""" % (name, json.dumps(options, cls=DataFrameEncoder)))
    publish_display_data({'application/javascript': js})


def init():
    js = """
require.config({
    paths: {
        candela: 'http://kitware.github.io/candela/candela-0.2.0-81be44f6'
    }
});

var outputElement = element;
require(['candela'], function (candela) {
    if (candela) {
        outputElement.append('<div>Candela loaded successfully.</div>');
    } else {
        outputElement.append('<div>Error loading Candela.</div>');
    }
});
"""
    publish_display_data({'application/javascript': js})
import IPython.core.displaypub as displaypub
import json
from pandas import DataFrame


class DataFrameEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, DataFrame):
            return obj.to_records()
        return json.JSONEncoder.default(self, obj)


def publish_display_data(data):
    try:
        displaypub.publish_display_data('pycandela', data)
    except TypeError:
        displaypub.publish_display_data(data)


def component(name, options):
    js = ("""
require(['candela'], function (candela) {
    var vis = new candela.components['%s'](element.get(0), %s);
    vis.render();
});
""" % (name, json.dumps(options, cls=DataFrameEncoder)))
    publish_display_data({'application/javascript': js})


def init():
    js = """
require.config({
    paths: {
        candela: 'http://kitware.github.io/candela/candela-0.2.0-81be44f6'
    }
});

var outputElement = element;
require(['candela'], function (candela) {
    if (candela) {
        outputElement.append('<div>Candela loaded successfully.</div>');
    } else {
        outputElement.append('<div>Error loading Candela.</div>');
    }
});
"""
    publish_display_data({'application/javascript': js})
Fix import and call render() on vis
Fix import and call render() on vis
Python
apache-2.0
Kitware/candela,Kitware/candela,Kitware/candela,Kitware/candela,Kitware/candela
<REPLACE_OLD> json
import DataFrame from pandas


class <REPLACE_NEW> json
from pandas import DataFrame


class <REPLACE_END> <INSERT> var vis = <INSERT_END> <REPLACE_OLD> %s)
});
""" <REPLACE_NEW> %s);
    vis.render();
});
""" <REPLACE_END> <|endoftext|> import IPython.core.displaypub as displaypub
import json
from pandas import DataFrame


class DataFrameEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, DataFrame):
            return obj.to_records()
        return json.JSONEncoder.default(self, obj)


def publish_display_data(data):
    try:
        displaypub.publish_display_data('pycandela', data)
    except TypeError:
        displaypub.publish_display_data(data)


def component(name, options):
    js = ("""
require(['candela'], function (candela) {
    var vis = new candela.components['%s'](element.get(0), %s);
    vis.render();
});
""" % (name, json.dumps(options, cls=DataFrameEncoder)))
    publish_display_data({'application/javascript': js})


def init():
    js = """
require.config({
    paths: {
        candela: 'http://kitware.github.io/candela/candela-0.2.0-81be44f6'
    }
});

var outputElement = element;
require(['candela'], function (candela) {
    if (candela) {
        outputElement.append('<div>Candela loaded successfully.</div>');
    } else {
        outputElement.append('<div>Error loading Candela.</div>');
    }
});
"""
    publish_display_data({'application/javascript': js})
Fix import and call render() on vis
import IPython.core.displaypub as displaypub
import json
import DataFrame from pandas


class DataFrameEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, DataFrame):
            return obj.to_records()
        return json.JSONEncoder.default(self, obj)


def publish_display_data(data):
    try:
        displaypub.publish_display_data('pycandela', data)
    except TypeError:
        displaypub.publish_display_data(data)


def component(name, options):
    js = ("""
require(['candela'], function (candela) {
    new candela.components['%s'](element.get(0), %s)
});
""" % (name, json.dumps(options, cls=DataFrameEncoder)))
    publish_display_data({'application/javascript': js})


def init():
    js = """
require.config({
    paths: {
        candela: 'http://kitware.github.io/candela/candela-0.2.0-81be44f6'
    }
});

var outputElement = element;
require(['candela'], function (candela) {
    if (candela) {
        outputElement.append('<div>Candela loaded successfully.</div>');
    } else {
        outputElement.append('<div>Error loading Candela.</div>');
    }
});
"""
    publish_display_data({'application/javascript': js})
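The record above fixes a custom json.JSONEncoder; below is a minimal, editor-added sketch of how such an encoder is typically exercised, not part of the dataset. The frame contents are made up, and .tolist() is added here because the raw recarray returned by to_records() is not itself JSON-serializable.

import json

import pandas as pd


class DataFrameEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, pd.DataFrame):
            # .tolist() converts the numpy recarray into plain Python values
            return obj.to_records().tolist()
        return json.JSONEncoder.default(self, obj)


df = pd.DataFrame({'x': [1, 2], 'y': [3.0, 4.0]})
print(json.dumps({'data': df}, cls=DataFrameEncoder))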
f54fd0bf65d731b4f25cfc2ddffb8d6f472e0d7c
examples/eiger_use_case.py
examples/eiger_use_case.py
'''Virtual datasets: The 'Eiger' use case

https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf
'''
import h5py
import numpy as np

files = ['1.h5', '2.h5', '3.h5', '4.h5', '5.h5']
entry_key = 'data'  # where the data is inside of the source files.

sh = h5py.File(files[0], 'r')[entry_key].shape  # get the first ones shape.

layout = h5py.VirtualLayout(shape=(len(files),) + sh, dtype=np.float)
M_start = 0
for i, filename in enumerate(files):
    M_end = M_start + sh[0]
    vsource = h5py.VirtualSource(filename, entry_key, shape=sh)
    layout[M_start:M_end:1, :, :] = vsource
    M_start = M_end

with h5py.File("eiger_vds.h5", 'w', libver='latest') as f:
    f.create_virtual_dataset('data', layout, fillvalue=0)
'''Virtual datasets: The 'Eiger' use case

https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf
'''
import h5py
import numpy as np

files = ['1.h5', '2.h5', '3.h5', '4.h5', '5.h5']
entry_key = 'data'  # where the data is inside of the source files.

sh = h5py.File(files[0], 'r')[entry_key].shape  # get the first ones shape.

layout = h5py.VirtualLayout(shape=(len(files) * sh[0], ) + sh[1:],
                            dtype=np.float)
M_start = 0
for i, filename in enumerate(files):
    M_end = M_start + sh[0]
    vsource = h5py.VirtualSource(filename, entry_key, shape=sh)
    layout[M_start:M_end:1, :, :] = vsource
    M_start = M_end

with h5py.File("eiger_vds.h5", 'w', libver='latest') as f:
    f.create_virtual_dataset('data', layout, fillvalue=0)
Fix layout for Eiger example
Fix layout for Eiger example
Python
bsd-3-clause
h5py/h5py,h5py/h5py,h5py/h5py
<REPLACE_OLD> h5py.VirtualLayout(shape=(len(files),) <REPLACE_NEW> h5py.VirtualLayout(shape=(len(files) * sh[0], ) <REPLACE_END> <REPLACE_OLD> sh, <REPLACE_NEW> sh[1:], <REPLACE_END> <|endoftext|> '''Virtual datasets: The 'Eiger' use case

https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf
'''
import h5py
import numpy as np

files = ['1.h5', '2.h5', '3.h5', '4.h5', '5.h5']
entry_key = 'data'  # where the data is inside of the source files.

sh = h5py.File(files[0], 'r')[entry_key].shape  # get the first ones shape.

layout = h5py.VirtualLayout(shape=(len(files) * sh[0], ) + sh[1:],
                            dtype=np.float)
M_start = 0
for i, filename in enumerate(files):
    M_end = M_start + sh[0]
    vsource = h5py.VirtualSource(filename, entry_key, shape=sh)
    layout[M_start:M_end:1, :, :] = vsource
    M_start = M_end

with h5py.File("eiger_vds.h5", 'w', libver='latest') as f:
    f.create_virtual_dataset('data', layout, fillvalue=0)
Fix layout for Eiger example
'''Virtual datasets: The 'Eiger' use case

https://support.hdfgroup.org/HDF5/docNewFeatures/VDS/HDF5-VDS-requirements-use-cases-2014-12-10.pdf
'''
import h5py
import numpy as np

files = ['1.h5', '2.h5', '3.h5', '4.h5', '5.h5']
entry_key = 'data'  # where the data is inside of the source files.

sh = h5py.File(files[0], 'r')[entry_key].shape  # get the first ones shape.

layout = h5py.VirtualLayout(shape=(len(files),) + sh, dtype=np.float)
M_start = 0
for i, filename in enumerate(files):
    M_end = M_start + sh[0]
    vsource = h5py.VirtualSource(filename, entry_key, shape=sh)
    layout[M_start:M_end:1, :, :] = vsource
    M_start = M_end

with h5py.File("eiger_vds.h5", 'w', libver='latest') as f:
    f.create_virtual_dataset('data', layout, fillvalue=0)
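A worked example (editor-added, with made-up sizes) of the shape arithmetic this commit corrects: five per-file datasets of shape (100, 512, 512) must map to one virtual dataset of shape (500, 512, 512), not (5, 100, 512, 512).

files = ['1.h5', '2.h5', '3.h5', '4.h5', '5.h5']
sh = (100, 512, 512)  # assumed shape of each source dataset

old_shape = (len(files),) + sh               # (5, 100, 512, 512): adds an extra axis
new_shape = (len(files) * sh[0],) + sh[1:]   # (500, 512, 512): frames are concatenated

assert new_shape == (500, 512, 512)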
6c1c38a9c293527bfb4bb5689675f0ef6b385f75
setup.py
setup.py
from setuptools import setup, find_packages
import sys, os

version = '0.1'

setup(name='kotti_contactform',
      version=version,
      description="Simple contact form for Kotti sites",
      long_description="""\
This is an extension to Kotti that allows to add simple contact forms to your website.""",
      classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python",
        "Framework :: Pylons",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
        "License :: OSI Approved :: BSD License",
      ],
      keywords='kotti contact form',
      author='Christian Neumann',
      author_email='christian@datenkarussell.de',
      url='http://pypi.python.org/pypi/kotti_contactform',
      license='BSD License',
      packages=['kotti_contactform'],
      package_data={'kotti_contactform': ['templates/*.pt']},
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'Kotti',
          'pyramid_mailer',
          'Babel',
      ],
      entry_points="""
      # -*- Entry points: -*-
      """,
      message_extractors = { "kotti_contactform": [
          ("**.py",   "chameleon_python", None ),
          ("**.pt",   "chameleon_xml", None ),
      ]},
      )
from setuptools import setup, find_packages
import sys, os

version = '0.1'

setup(name='kotti_contactform',
      version=version,
      description="Simple contact form for Kotti sites",
      long_description="""\
This is an extension to Kotti that allows to add simple contact forms to your website.""",
      classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python",
        "Framework :: Pylons",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
        "License :: OSI Approved :: BSD License",
      ],
      keywords='kotti contact form',
      author='Christian Neumann',
      author_email='christian@datenkarussell.de',
      url='http://pypi.python.org/pypi/kotti_contactform',
      license='BSD License',
      packages=['kotti_contactform'],
      package_data={'kotti_contactform': ['templates/*.pt',
                                          'locale/*.*',
                                          'locale/*/LC_MESSAGES/*.*']},
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'Kotti',
          'pyramid_mailer',
          'Babel',
      ],
      entry_points="""
      # -*- Entry points: -*-
      """,
      message_extractors = { "kotti_contactform": [
          ("**.py",   "chameleon_python", None ),
          ("**.pt",   "chameleon_xml", None ),
      ]},
      )
Add translation files to package data
Add translation files to package data
Python
bsd-2-clause
Kotti/kotti_contactform
<REPLACE_OLD> ['templates/*.pt']}, <REPLACE_NEW> ['templates/*.pt',
                                          'locale/*.*',
                                          'locale/*/LC_MESSAGES/*.*']}, <REPLACE_END> <|endoftext|> from setuptools import setup, find_packages
import sys, os

version = '0.1'

setup(name='kotti_contactform',
      version=version,
      description="Simple contact form for Kotti sites",
      long_description="""\
This is an extension to Kotti that allows to add simple contact forms to your website.""",
      classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python",
        "Framework :: Pylons",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
        "License :: OSI Approved :: BSD License",
      ],
      keywords='kotti contact form',
      author='Christian Neumann',
      author_email='christian@datenkarussell.de',
      url='http://pypi.python.org/pypi/kotti_contactform',
      license='BSD License',
      packages=['kotti_contactform'],
      package_data={'kotti_contactform': ['templates/*.pt',
                                          'locale/*.*',
                                          'locale/*/LC_MESSAGES/*.*']},
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'Kotti',
          'pyramid_mailer',
          'Babel',
      ],
      entry_points="""
      # -*- Entry points: -*-
      """,
      message_extractors = { "kotti_contactform": [
          ("**.py",   "chameleon_python", None ),
          ("**.pt",   "chameleon_xml", None ),
      ]},
      )
Add translation files to package data
from setuptools import setup, find_packages
import sys, os

version = '0.1'

setup(name='kotti_contactform',
      version=version,
      description="Simple contact form for Kotti sites",
      long_description="""\
This is an extension to Kotti that allows to add simple contact forms to your website.""",
      classifiers=[
        "Development Status :: 3 - Alpha",
        "Programming Language :: Python",
        "Framework :: Pylons",
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Internet :: WWW/HTTP :: Dynamic Content",
        "Topic :: Internet :: WWW/HTTP :: WSGI :: Application",
        "License :: OSI Approved :: BSD License",
      ],
      keywords='kotti contact form',
      author='Christian Neumann',
      author_email='christian@datenkarussell.de',
      url='http://pypi.python.org/pypi/kotti_contactform',
      license='BSD License',
      packages=['kotti_contactform'],
      package_data={'kotti_contactform': ['templates/*.pt']},
      include_package_data=True,
      zip_safe=False,
      install_requires=[
          'Kotti',
          'pyramid_mailer',
          'Babel',
      ],
      entry_points="""
      # -*- Entry points: -*-
      """,
      message_extractors = { "kotti_contactform": [
          ("**.py",   "chameleon_python", None ),
          ("**.pt",   "chameleon_xml", None ),
      ]},
      )
64c9d2c53f0dc4c9ae92b5675248a8f11c2b4e9e
pyqode/python/managers/file.py
pyqode/python/managers/file.py
"""
Contains the python specific FileManager.

"""
import ast
import re
from pyqode.core.managers import FileManager


class PyFileManager(FileManager):
    """
    Extends file manager to override detect_encoding. With python, we can
    detect encoding by reading the two first lines of a file and extracting
    its encoding tag.

    """
    def detect_encoding(self, path):
        """
        For the implementation of encoding definitions in Python, look at:
        - http://www.python.org/dev/peps/pep-0263/

        .. note:: code taken and adapted from
            ```jedi.common.source_to_unicode.detect_encoding```
        """
        with open(path, 'rb') as file:
            source = file.read()
        # take care of line encodings (not in jedi)
        source = source.replace(b'\r', b'')
        source_str = str(source).replace('\\n', '\n')
        byte_mark = ast.literal_eval(r"b'\xef\xbb\xbf'")
        if source.startswith(byte_mark):
            # UTF-8 byte-order mark
            return 'utf-8'
        first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', source_str).group(0)
        possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)",
                                      first_two_lines)
        if possible_encoding:
            return possible_encoding.group(1)
        else:
            return super().detect_encoding(path)
"""
Contains the python specific FileManager.

"""
import ast
import re
from pyqode.core.managers import FileManager


class PyFileManager(FileManager):
    """
    Extends file manager to override detect_encoding. With python, we can
    detect encoding by reading the two first lines of a file and extracting
    its encoding tag.

    """
    def detect_encoding(self, path):
        """
        For the implementation of encoding definitions in Python, look at:
        - http://www.python.org/dev/peps/pep-0263/

        .. note:: code taken and adapted from
            ```jedi.common.source_to_unicode.detect_encoding```
        """
        with open(path, 'rb') as file:
            source = file.read()
        # take care of line encodings (not in jedi)
        source = source.replace(b'\r', b'')
        source_str = str(source).replace('\\n', '\n')
        byte_mark = ast.literal_eval(r"b'\xef\xbb\xbf'")
        if source.startswith(byte_mark):
            # UTF-8 byte-order mark
            return 'utf-8'
        first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', source_str).group(0)
        possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)",
                                      first_two_lines)
        if possible_encoding:
            return possible_encoding.group(1)

    def open(self, path, encoding=None, use_cached_encoding=True):
        if encoding is None:
            encoding = self.detect_encoding(path)
        super().open(path, encoding=encoding,
                     use_cached_encoding=use_cached_encoding)
Fix encoding detection in python (shebang line was not parsed anymore)
Fix encoding detection in python (shebang line was not parsed anymore)
Python
mit
pyQode/pyqode.python,mmolero/pyqode.python,pyQode/pyqode.python,zwadar/pyqode.python
<REPLACE_OLD> possible_encoding.group(1)
        else:
            return super().detect_encoding(path) <REPLACE_NEW> possible_encoding.group(1)

    def open(self, path, encoding=None, use_cached_encoding=True):
        if encoding is None:
            encoding = self.detect_encoding(path)
        super().open(path, encoding=encoding,
                     use_cached_encoding=use_cached_encoding) <REPLACE_END> <|endoftext|> """
Contains the python specific FileManager.

"""
import ast
import re
from pyqode.core.managers import FileManager


class PyFileManager(FileManager):
    """
    Extends file manager to override detect_encoding. With python, we can
    detect encoding by reading the two first lines of a file and extracting
    its encoding tag.

    """
    def detect_encoding(self, path):
        """
        For the implementation of encoding definitions in Python, look at:
        - http://www.python.org/dev/peps/pep-0263/

        .. note:: code taken and adapted from
            ```jedi.common.source_to_unicode.detect_encoding```
        """
        with open(path, 'rb') as file:
            source = file.read()
        # take care of line encodings (not in jedi)
        source = source.replace(b'\r', b'')
        source_str = str(source).replace('\\n', '\n')
        byte_mark = ast.literal_eval(r"b'\xef\xbb\xbf'")
        if source.startswith(byte_mark):
            # UTF-8 byte-order mark
            return 'utf-8'
        first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', source_str).group(0)
        possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)",
                                      first_two_lines)
        if possible_encoding:
            return possible_encoding.group(1)

    def open(self, path, encoding=None, use_cached_encoding=True):
        if encoding is None:
            encoding = self.detect_encoding(path)
        super().open(path, encoding=encoding,
                     use_cached_encoding=use_cached_encoding)
Fix encoding detection in python (shebang line was not parsed anymore)
"""
Contains the python specific FileManager.

"""
import ast
import re
from pyqode.core.managers import FileManager


class PyFileManager(FileManager):
    """
    Extends file manager to override detect_encoding. With python, we can
    detect encoding by reading the two first lines of a file and extracting
    its encoding tag.

    """
    def detect_encoding(self, path):
        """
        For the implementation of encoding definitions in Python, look at:
        - http://www.python.org/dev/peps/pep-0263/

        .. note:: code taken and adapted from
            ```jedi.common.source_to_unicode.detect_encoding```
        """
        with open(path, 'rb') as file:
            source = file.read()
        # take care of line encodings (not in jedi)
        source = source.replace(b'\r', b'')
        source_str = str(source).replace('\\n', '\n')
        byte_mark = ast.literal_eval(r"b'\xef\xbb\xbf'")
        if source.startswith(byte_mark):
            # UTF-8 byte-order mark
            return 'utf-8'
        first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', source_str).group(0)
        possible_encoding = re.search(r"coding[=:]\s*([-\w.]+)",
                                      first_two_lines)
        if possible_encoding:
            return possible_encoding.group(1)
        else:
            return super().detect_encoding(path)
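A quick, editor-added demo of the PEP 263 coding-tag regex this manager relies on; the source snippet is invented for the example.

import re

source = "#!/usr/bin/env python\n# -*- coding: latin-1 -*-\nprint('hi')\n"
first_two_lines = re.match(r'(?:[^\n]*\n){0,2}', source).group(0)
match = re.search(r"coding[=:]\s*([-\w.]+)", first_two_lines)
print(match.group(1))  # prints 'latin-1'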
34a96c9824bef5d735f521b303fe9f9755b431ee
dataportal/utils/diagnostics.py
dataportal/utils/diagnostics.py
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from collections import OrderedDict
import importlib
import six


def watermark():
    """
    Give the version of each of the dependencies -- useful for bug reports.

    Returns
    -------
    result : dict
        mapping the name of each package to its version string or, if an
        optional dependency is not installed, None
    """
    packages = ['six', 'numpy', 'scipy', 'matplotlib', 'pandas', 'pims',
                'pyyaml', 'metadatastore', 'filestore', 'channelarchiver',
                'bubblegum']
    result = OrderedDict()
    for package_name in packages:
        try:
            package = importlib.import_module(package_name)
        except ImportError:
            result[package_name] = None
        else:
            version = package.__version__

    # enaml provides its version differently
    try:
        import enaml
    except ImportError:
        result['enaml'] = None
    else:
        from enaml.version import version_info
        result['enaml'] = _make_version_string(version_info)

    # ...as does Python
    version_info = sys.version_info
    result['python'] = _make_version_string(version_info)
    return result


def _make_version_string(version_info):
    version_string = '.'.join(map(str, [version_info[0], version_info[1],
                                        version_info[2]]))
    return version_string
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from collections import OrderedDict
import importlib
import sys
import six


def watermark():
    """
    Give the version of each of the dependencies -- useful for bug reports.

    Returns
    -------
    result : dict
        mapping the name of each package to its version string or, if an
        optional dependency is not installed, None
    """
    packages = ['six', 'numpy', 'scipy', 'matplotlib', 'pandas', 'pims',
                'pyyaml', 'metadatastore', 'filestore', 'channelarchiver',
                'bubblegum']
    result = OrderedDict()
    for package_name in packages:
        try:
            package = importlib.import_module(package_name)
        except ImportError:
            result[package_name] = None
        else:
            try:
                version = package.__version__
            except AttributeError as err:
                version = "FAILED TO DETECT: {0}".format(err)
            result[package_name] = version

    # enaml provides its version differently
    try:
        import enaml
    except ImportError:
        result['enaml'] = None
    else:
        from enaml.version import version_info
        result['enaml'] = _make_version_string(version_info)

    # ...as does Python
    version_info = sys.version_info
    result['python'] = _make_version_string(version_info)
    return result


def _make_version_string(version_info):
    version_string = '.'.join(map(str, [version_info[0], version_info[1],
                                        version_info[2]]))
    return version_string
Make watermark robust if __version__ attribute is missing.
FIX: Make watermark robust if __version__ attribute is missing.
Python
bsd-3-clause
danielballan/dataportal,danielballan/dataportal,danielballan/datamuxer,tacaswell/dataportal,tacaswell/dataportal,ericdill/datamuxer,NSLS-II/dataportal,NSLS-II/datamuxer,ericdill/datamuxer,NSLS-II/dataportal,ericdill/databroker,danielballan/datamuxer,ericdill/databroker
<INSERT> sys
import <INSERT_END> <INSERT> try:
     <INSERT_END> <REPLACE_OLD> package.__version__ <REPLACE_NEW> package.__version__
            except AttributeError as err:
                version = "FAILED TO DETECT: {0}".format(err)
            result[package_name] = version <REPLACE_END> <|endoftext|> from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from collections import OrderedDict
import importlib
import sys
import six


def watermark():
    """
    Give the version of each of the dependencies -- useful for bug reports.

    Returns
    -------
    result : dict
        mapping the name of each package to its version string or, if an
        optional dependency is not installed, None
    """
    packages = ['six', 'numpy', 'scipy', 'matplotlib', 'pandas', 'pims',
                'pyyaml', 'metadatastore', 'filestore', 'channelarchiver',
                'bubblegum']
    result = OrderedDict()
    for package_name in packages:
        try:
            package = importlib.import_module(package_name)
        except ImportError:
            result[package_name] = None
        else:
            try:
                version = package.__version__
            except AttributeError as err:
                version = "FAILED TO DETECT: {0}".format(err)
            result[package_name] = version

    # enaml provides its version differently
    try:
        import enaml
    except ImportError:
        result['enaml'] = None
    else:
        from enaml.version import version_info
        result['enaml'] = _make_version_string(version_info)

    # ...as does Python
    version_info = sys.version_info
    result['python'] = _make_version_string(version_info)
    return result


def _make_version_string(version_info):
    version_string = '.'.join(map(str, [version_info[0], version_info[1],
                                        version_info[2]]))
    return version_string
FIX: Make watermark robust if __version__ attribute is missing.
from __future__ import (absolute_import, division, print_function,
                        unicode_literals)
from collections import OrderedDict
import importlib
import six


def watermark():
    """
    Give the version of each of the dependencies -- useful for bug reports.

    Returns
    -------
    result : dict
        mapping the name of each package to its version string or, if an
        optional dependency is not installed, None
    """
    packages = ['six', 'numpy', 'scipy', 'matplotlib', 'pandas', 'pims',
                'pyyaml', 'metadatastore', 'filestore', 'channelarchiver',
                'bubblegum']
    result = OrderedDict()
    for package_name in packages:
        try:
            package = importlib.import_module(package_name)
        except ImportError:
            result[package_name] = None
        else:
            version = package.__version__

    # enaml provides its version differently
    try:
        import enaml
    except ImportError:
        result['enaml'] = None
    else:
        from enaml.version import version_info
        result['enaml'] = _make_version_string(version_info)

    # ...as does Python
    version_info = sys.version_info
    result['python'] = _make_version_string(version_info)
    return result


def _make_version_string(version_info):
    version_string = '.'.join(map(str, [version_info[0], version_info[1],
                                        version_info[2]]))
    return version_string
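The guard this commit adds can also be written without try/except by probing the attribute with getattr. This sketch is editor-added and uses the stdlib json module, which happens to carry a __version__, as a stand-in package.

import importlib

package = importlib.import_module('json')  # stand-in for a real dependency
version = getattr(package, '__version__',
                  "FAILED TO DETECT: no __version__ attribute")
print(version)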
30ec6a7be967d1c4041539cf80f6ae3709460af5
wtfhack/base/models.py
wtfhack/base/models.py
"""
Basic models, such as user profile
"""

from django.db import models


class Language(models.Model):
    name = models.CharField(max_length=40, null=False)
    learn_url = models.URLField(null=True, blank=True)

    @staticmethod
    def all():
        return [l.name for l in Language.objects.all()]

    def __unicode__(self):
        return self.name


class Repo(models.Model):
    # Full name is required
    full_name = models.CharField(max_length=30, null=False)
    url = models.URLField(null=True)
    language = models.ForeignKey(Language, related_name='repos')
    description = models.CharField(max_length=300, null=True, blank=True)

    def save(self, *args, **kwargs):
        '''Override save method to set default url.'''
        BASE = 'https://github.com/'
        # Default url to base url + full_name
        if not self.url:
            self.url = BASE + self.full_name
        super(Repo, self).save(*args, **kwargs)

    def __unicode__(self):
        return "{full_name}: {language}".format(full_name=self.full_name,
                                                language=self.language.name)
"""
Basic models, such as user profile
"""

from django.db import models


class Language(models.Model):
    name = models.CharField(max_length=40, null=False)
    learn_url = models.URLField(null=True, blank=True)

    @staticmethod
    def all():
        return [l.name for l in Language.objects.all()]

    def __unicode__(self):
        return self.name


class Repo(models.Model):
    # Full name is required
    full_name = models.CharField(max_length=300, null=False)
    url = models.URLField(null=True)
    language = models.ForeignKey(Language, related_name='repos')
    description = models.CharField(max_length=500, null=True, blank=True)

    def save(self, *args, **kwargs):
        '''Override save method to set default url.'''
        BASE = 'https://github.com/'
        # Default url to base url + full_name
        if not self.url:
            self.url = BASE + self.full_name
        super(Repo, self).save(*args, **kwargs)

    def __unicode__(self):
        return "{full_name}: {language}".format(full_name=self.full_name,
                                                language=self.language.name)
Increase max length for repo names
Increase max length for repo names
Python
bsd-3-clause
sloria/wtfhack,sloria/wtfhack,sloria/wtfhack,sloria/wtfhack,sloria/wtfhack
<REPLACE_OLD> models.CharField(max_length=30, <REPLACE_NEW> models.CharField(max_length=300, <REPLACE_END> <REPLACE_OLD> models.CharField(max_length=300, <REPLACE_NEW> models.CharField(max_length=500, <REPLACE_END> <|endoftext|> """
Basic models, such as user profile
"""

from django.db import models


class Language(models.Model):
    name = models.CharField(max_length=40, null=False)
    learn_url = models.URLField(null=True, blank=True)

    @staticmethod
    def all():
        return [l.name for l in Language.objects.all()]

    def __unicode__(self):
        return self.name


class Repo(models.Model):
    # Full name is required
    full_name = models.CharField(max_length=300, null=False)
    url = models.URLField(null=True)
    language = models.ForeignKey(Language, related_name='repos')
    description = models.CharField(max_length=500, null=True, blank=True)

    def save(self, *args, **kwargs):
        '''Override save method to set default url.'''
        BASE = 'https://github.com/'
        # Default url to base url + full_name
        if not self.url:
            self.url = BASE + self.full_name
        super(Repo, self).save(*args, **kwargs)

    def __unicode__(self):
        return "{full_name}: {language}".format(full_name=self.full_name,
                                                language=self.language.name)
Increase max length for repo names
"""
Basic models, such as user profile
"""

from django.db import models


class Language(models.Model):
    name = models.CharField(max_length=40, null=False)
    learn_url = models.URLField(null=True, blank=True)

    @staticmethod
    def all():
        return [l.name for l in Language.objects.all()]

    def __unicode__(self):
        return self.name


class Repo(models.Model):
    # Full name is required
    full_name = models.CharField(max_length=30, null=False)
    url = models.URLField(null=True)
    language = models.ForeignKey(Language, related_name='repos')
    description = models.CharField(max_length=300, null=True, blank=True)

    def save(self, *args, **kwargs):
        '''Override save method to set default url.'''
        BASE = 'https://github.com/'
        # Default url to base url + full_name
        if not self.url:
            self.url = BASE + self.full_name
        super(Repo, self).save(*args, **kwargs)

    def __unicode__(self):
        return "{full_name}: {language}".format(full_name=self.full_name,
                                                language=self.language.name)
7c2548f7f4cf01d0a5cf389c290a47cdf029a7ac
apps/explorer/tests/views/test_mixins.py
apps/explorer/tests/views/test_mixins.py
import pytest

from django.test import TestCase

from apps.explorer.views.mixins import DataTableMixin, SubsetSelectionMixin


class DataTableMixinTestCase(TestCase):

    def test_get_omics_units_must_be_implemented(self):

        class DataTableWithNoGetOmicsUnits(DataTableMixin):
            pass

        with pytest.raises(NotImplementedError):
            fake_session = dict()
            obj = DataTableWithNoGetOmicsUnits()
            obj.get_omics_units(fake_session)


class SubsetSelectionMixinTestCase(TestCase):

    def test_get_omics_units_must_be_implemented(self):

        class SubsetSelectionWithNoGetOmicsUnits(SubsetSelectionMixin):
            pass

        with pytest.raises(NotImplementedError):
            fake_session = dict()
            obj = SubsetSelectionWithNoGetOmicsUnits()
            obj.get_omics_units(fake_session)
Add tests for explorer views mixins
Add tests for explorer views mixins
Python
bsd-3-clause
Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel,Candihub/pixel
<INSERT> import pytest

from django.test import TestCase

from apps.explorer.views.mixins import DataTableMixin, SubsetSelectionMixin


class DataTableMixinTestCase(TestCase):
 <INSERT_END> <INSERT>     def test_get_omics_units_must_be_implemented(self):

        class DataTableWithNoGetOmicsUnits(DataTableMixin):
            pass

        with pytest.raises(NotImplementedError):
            fake_session = dict()
            obj = DataTableWithNoGetOmicsUnits()
            obj.get_omics_units(fake_session)


class SubsetSelectionMixinTestCase(TestCase):

    def test_get_omics_units_must_be_implemented(self):

        class SubsetSelectionWithNoGetOmicsUnits(SubsetSelectionMixin):
            pass

        with pytest.raises(NotImplementedError):
            fake_session = dict()
            obj = SubsetSelectionWithNoGetOmicsUnits()
            obj.get_omics_units(fake_session) <INSERT_END> <|endoftext|> import pytest

from django.test import TestCase

from apps.explorer.views.mixins import DataTableMixin, SubsetSelectionMixin


class DataTableMixinTestCase(TestCase):

    def test_get_omics_units_must_be_implemented(self):

        class DataTableWithNoGetOmicsUnits(DataTableMixin):
            pass

        with pytest.raises(NotImplementedError):
            fake_session = dict()
            obj = DataTableWithNoGetOmicsUnits()
            obj.get_omics_units(fake_session)


class SubsetSelectionMixinTestCase(TestCase):

    def test_get_omics_units_must_be_implemented(self):

        class SubsetSelectionWithNoGetOmicsUnits(SubsetSelectionMixin):
            pass

        with pytest.raises(NotImplementedError):
            fake_session = dict()
            obj = SubsetSelectionWithNoGetOmicsUnits()
            obj.get_omics_units(fake_session)
Add tests for explorer views mixins
9e7d3c35857600445cb6df42ba18d289dc0e37a9
wsgi.py
wsgi.py
from os import getenv
from webapp import create_app
from argparse import ArgumentParser

app = create_app(getenv('FLASK_CONFIG') or 'development')


def main():
    parser = ArgumentParser()
    parser.add_argument("-p", "--port", help="port number")
    args = parser.parse_args()
    port = int(args.port or None)
    app.run(port=port)


if __name__ == "__main__":
    main()
from os import getenv
from webapp import create_app
from argparse import ArgumentParser

app = create_app(getenv('FLASK_CONFIG') or 'development')


def main():
    parser = ArgumentParser()
    parser.add_argument("-p", "--port", help="port number")
    args = parser.parse_args()
    port = int(args.port or 5000)
    app.run(port=port)


if __name__ == "__main__":
    main()
Fix in port number initialisation
Fix in port number initialisation
Python
bsd-3-clause
aleksandergurin/news,aleksandergurin/news,aleksandergurin/news
<REPLACE_OLD> None) <REPLACE_NEW> 5000) <REPLACE_END> <|endoftext|> from os import getenv
from webapp import create_app
from argparse import ArgumentParser

app = create_app(getenv('FLASK_CONFIG') or 'development')


def main():
    parser = ArgumentParser()
    parser.add_argument("-p", "--port", help="port number")
    args = parser.parse_args()
    port = int(args.port or 5000)
    app.run(port=port)


if __name__ == "__main__":
    main()
Fix in port number initialisation
from os import getenv
from webapp import create_app
from argparse import ArgumentParser

app = create_app(getenv('FLASK_CONFIG') or 'development')


def main():
    parser = ArgumentParser()
    parser.add_argument("-p", "--port", help="port number")
    args = parser.parse_args()
    port = int(args.port or None)
    app.run(port=port)


if __name__ == "__main__":
    main()
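Why the old default crashed, as an editor-added illustration: with -p omitted, args.port is None, `args.port or None` is still None, and int(None) raises TypeError, so the falsy fallback must be an actual number.

args_port = None  # what argparse yields when -p/--port is not given

try:
    int(args_port or None)    # the old expression
except TypeError as err:
    print(err)                # int() cannot accept NoneType

print(int(args_port or 5000))  # the fixed expression -> 5000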
98c07739702fbf3951ccd0359d04be80a303d9ce
run_time/src/gae_server/font_mapper.py
run_time/src/gae_server/font_mapper.py
"""
  Copyright 2014 Google Inc. All rights reserved.

  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
"""

# import logging
from os import path
# import StringIO
# from time import sleep
# from time import time
# import zipfile
# import webapp2
# from incremental_fonts_utils import prepare_bundle

tachyfont_major_version = 1
tachyfont_minor_version = 0

BASE_DIR = path.dirname(__file__)


def fontname_to_zipfile(fontname):
  family_dir = ''
  if fontname[0:10] == 'NotoSansJP':
    family_dir = 'NotoSansJP/'
  zip_path = BASE_DIR + '/fonts/' + family_dir + fontname + '.TachyFont.jar'
  return zip_path
Add a fontname to TachyFont Jar file mapper.
Add a fontname to TachyFont Jar file mapper.
Python
apache-2.0
bstell/TachyFont,bstell/TachyFont,moyogo/tachyfont,bstell/TachyFont,moyogo/tachyfont,bstell/TachyFont,googlei18n/TachyFont,googlefonts/TachyFont,moyogo/tachyfont,googlefonts/TachyFont,googlei18n/TachyFont,googlefonts/TachyFont,googlei18n/TachyFont,googlefonts/TachyFont,bstell/TachyFont,googlefonts/TachyFont,googlei18n/TachyFont,moyogo/tachyfont,moyogo/tachyfont,googlei18n/TachyFont
<INSERT> """
 <INSERT_END> <INSERT>   Copyright 2014 Google Inc. All rights reserved.

  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
"""

# import logging
from os import path
# import StringIO
# from time import sleep
# from time import time
# import zipfile
# import webapp2
# from incremental_fonts_utils import prepare_bundle

tachyfont_major_version = 1
tachyfont_minor_version = 0

BASE_DIR = path.dirname(__file__)


def fontname_to_zipfile(fontname):
  family_dir = ''
  if fontname[0:10] == 'NotoSansJP':
    family_dir = 'NotoSansJP/'
  zip_path = BASE_DIR + '/fonts/' + family_dir + fontname + '.TachyFont.jar'
  return zip_path <INSERT_END> <|endoftext|> """
  Copyright 2014 Google Inc. All rights reserved.

  Licensed under the Apache License, Version 2.0 (the "License");
  you may not use this file except in compliance with the License.
  You may obtain a copy of the License at

      http://www.apache.org/licenses/LICENSE-2.0

  Unless required by applicable law or agreed to in writing, software
  distributed under the License is distributed on an "AS IS" BASIS,
  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  See the License for the specific language governing permissions and
  limitations under the License.
"""

# import logging
from os import path
# import StringIO
# from time import sleep
# from time import time
# import zipfile
# import webapp2
# from incremental_fonts_utils import prepare_bundle

tachyfont_major_version = 1
tachyfont_minor_version = 0

BASE_DIR = path.dirname(__file__)


def fontname_to_zipfile(fontname):
  family_dir = ''
  if fontname[0:10] == 'NotoSansJP':
    family_dir = 'NotoSansJP/'
  zip_path = BASE_DIR + '/fonts/' + family_dir + fontname + '.TachyFont.jar'
  return zip_path
Add a fontname to TachyFont Jar file mapper.
ff9de1ab494ee5d48dba7aa84dfa1ce114464f09
py/tests/global_alpha_test.py
py/tests/global_alpha_test.py
#!/usr/bin/python3

import pykms
import time

card = pykms.Card()
res = pykms.ResourceManager(card)
conn = res.reserve_connector("")
crtc = res.reserve_crtc(conn)

mode = conn.get_default_mode()
modeb = mode.to_blob(card)

format = pykms.PixelFormat.ARGB8888
plane1 = res.reserve_generic_plane(crtc, format)
plane2 = res.reserve_generic_plane(crtc, format)

print("Got plane1 %d %d plane2 %d %d" %
      (plane1.idx, plane1.id, plane2.idx, plane2.id))

fb1 = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, format);
pykms.draw_test_pattern(fb1);

fb2 = pykms.DumbFramebuffer(card, mode.hdisplay >> 1, mode.vdisplay >> 1, format);
pykms.draw_test_pattern(fb2);

alpha = 0

req = pykms.AtomicReq(card)
req.add(conn, "CRTC_ID", crtc.id)
req.add(crtc, {"ACTIVE": 1,
               "MODE_ID": modeb.id})
req.add_plane(plane1, fb1, crtc)
req.add_plane(plane2, fb2, crtc)
r = req.commit_sync(allow_modeset = True)
assert r == 0, "Initial commit failed: %d" % r

while alpha <= 0xFFFF:
    print("alpha %d" % (alpha >> 8))
    req = pykms.AtomicReq(card)
    req.add(plane2, {"alpha": alpha })
    r = req.commit_sync()
    assert r == 0, "alpha change commit failed: %d" % r
    alpha = alpha + 0xFF
    time.sleep(0.1)

input("press enter exit\n")
Add global_alpha_test.py for DRM per plane "alpha" property testing
Add global_alpha_test.py for DRM per plane "alpha" property testing
Python
mpl-2.0
tomba/kmsxx,tomba/kmsxx,tomba/kmsxx,tomba/kmsxx
<INSERT> #!/usr/bin/python3

import pykms
import time

card = pykms.Card()
res = pykms.ResourceManager(card)
conn = res.reserve_connector("")
crtc = res.reserve_crtc(conn)

mode = conn.get_default_mode()
modeb = mode.to_blob(card)

format = pykms.PixelFormat.ARGB8888
plane1 = res.reserve_generic_plane(crtc, format)
plane2 = res.reserve_generic_plane(crtc, format)

print("Got plane1 %d %d plane2 %d %d" %
 <INSERT_END> <INSERT>       (plane1.idx, plane1.id, plane2.idx, plane2.id))

fb1 = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, format);
pykms.draw_test_pattern(fb1);

fb2 = pykms.DumbFramebuffer(card, mode.hdisplay >> 1, mode.vdisplay >> 1, format);
pykms.draw_test_pattern(fb2);

alpha = 0

req = pykms.AtomicReq(card)
req.add(conn, "CRTC_ID", crtc.id)
req.add(crtc, {"ACTIVE": 1,
               "MODE_ID": modeb.id})
req.add_plane(plane1, fb1, crtc)
req.add_plane(plane2, fb2, crtc)
r = req.commit_sync(allow_modeset = True)
assert r == 0, "Initial commit failed: %d" % r

while alpha <= 0xFFFF:
    print("alpha %d" % (alpha >> 8))
    req = pykms.AtomicReq(card)
    req.add(plane2, {"alpha": alpha })
    r = req.commit_sync()
    assert r == 0, "alpha change commit failed: %d" % r
    alpha = alpha + 0xFF
    time.sleep(0.1)

input("press enter exit\n") <INSERT_END> <|endoftext|> #!/usr/bin/python3

import pykms
import time

card = pykms.Card()
res = pykms.ResourceManager(card)
conn = res.reserve_connector("")
crtc = res.reserve_crtc(conn)

mode = conn.get_default_mode()
modeb = mode.to_blob(card)

format = pykms.PixelFormat.ARGB8888
plane1 = res.reserve_generic_plane(crtc, format)
plane2 = res.reserve_generic_plane(crtc, format)

print("Got plane1 %d %d plane2 %d %d" %
      (plane1.idx, plane1.id, plane2.idx, plane2.id))

fb1 = pykms.DumbFramebuffer(card, mode.hdisplay, mode.vdisplay, format);
pykms.draw_test_pattern(fb1);

fb2 = pykms.DumbFramebuffer(card, mode.hdisplay >> 1, mode.vdisplay >> 1, format);
pykms.draw_test_pattern(fb2);

alpha = 0

req = pykms.AtomicReq(card)
req.add(conn, "CRTC_ID", crtc.id)
req.add(crtc, {"ACTIVE": 1,
               "MODE_ID": modeb.id})
req.add_plane(plane1, fb1, crtc)
req.add_plane(plane2, fb2, crtc)
r = req.commit_sync(allow_modeset = True)
assert r == 0, "Initial commit failed: %d" % r

while alpha <= 0xFFFF:
    print("alpha %d" % (alpha >> 8))
    req = pykms.AtomicReq(card)
    req.add(plane2, {"alpha": alpha })
    r = req.commit_sync()
    assert r == 0, "alpha change commit failed: %d" % r
    alpha = alpha + 0xFF
    time.sleep(0.1)

input("press enter exit\n")
Add global_alpha_test.py for DRM per plane "alpha" property testing
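Editor-added background for the sweep above: the standard DRM per-plane "alpha" property is 16-bit (0 is transparent, 0xFFFF is opaque), which is why the test loops up to 0xFFFF. An 8-bit alpha is commonly widened by bit replication, sketched here with made-up values.

def alpha8_to_16(a8):
    # replicate the byte so 0xFF maps exactly to fully opaque 0xFFFF
    return (a8 << 8) | a8

for a8 in (0x00, 0x80, 0xFF):
    print(hex(alpha8_to_16(a8)))  # 0x0, 0x8080, 0xffff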
5056586586becb94bba265bdd90e46f2e2366534
factory/checkFactory.py
factory/checkFactory.py
#!/bin/env python
#
# Description:
#   Check if a glideinFactory is running
#
# Arguments:
#   $1 = glidein submit_dir (i.e. factory dir)
#
# Author:
#   Igor Sfiligoi Jul 9th 2008
#

import sys
import glideFactoryPidLib

try:
    startup_dir=sys.argv[1]
    factory_pid=glideFactoryPidLib.get_gfactory_pid(startup_dir)
except:
    print "Not running"
    sys.exit(1)

print "Running"
sys.exit(0)
Check if a glideinFactory is running
Check if a glideinFactory is running
Python
bsd-3-clause
holzman/glideinwms-old,bbockelm/glideinWMS,bbockelm/glideinWMS,bbockelm/glideinWMS,holzman/glideinwms-old,holzman/glideinwms-old,bbockelm/glideinWMS
<INSERT> #!/bin/env python
#
# Description:
# <INSERT_END> <INSERT>   Check if a glideinFactory is running
#
# Arguments:
#   $1 = glidein submit_dir (i.e. factory dir)
#
# Author:
#   Igor Sfiligoi Jul 9th 2008
#

import sys
import glideFactoryPidLib

try:
    startup_dir=sys.argv[1]
    factory_pid=glideFactoryPidLib.get_gfactory_pid(startup_dir)
except:
    print "Not running"
    sys.exit(1)

print "Running"
sys.exit(0) <INSERT_END> <|endoftext|> #!/bin/env python
#
# Description:
#   Check if a glideinFactory is running
#
# Arguments:
#   $1 = glidein submit_dir (i.e. factory dir)
#
# Author:
#   Igor Sfiligoi Jul 9th 2008
#

import sys
import glideFactoryPidLib

try:
    startup_dir=sys.argv[1]
    factory_pid=glideFactoryPidLib.get_gfactory_pid(startup_dir)
except:
    print "Not running"
    sys.exit(1)

print "Running"
sys.exit(0)
Check if a glideinFactory is running
4d7df38e056d0132af41759062cf8e380c736250
django_backend_test/noras_menu/urls.py
django_backend_test/noras_menu/urls.py
# -*- encoding: utf-8 -*-
from django.conf.urls import url, include
from django.views.decorators.csrf import csrf_exempt

from .views import CreateMenu,ListMenu,UpdateMenu,CreateSelection,ListSelection,CreateSubscriber

urlpatterns = [
    url(r'^menu/new$',CreateMenu.as_view(),name='Create Menu'),
    url(r'^menu/edit/(?P<pk>\d+)/$',UpdateMenu.as_view(),name='Update Menu'),
    url(r'^menu/list$',ListMenu.as_view(),name='List Menu'),
    url(r'^menu/selection$',ListSelection.as_view(),name='List Selection'),
    url(r'^menu/(?P<uuid>[0-9a-z-]+)$',CreateSelection.as_view(),name='Create Selection'),
    url(r'^subscriber/new$',CreateSubscriber.as_view(),name='Create Subscriber'),
]
Update Urls from nora_menu app
Update Urls from nora_menu app
Python
mit
semorale/backend-test,semorale/backend-test,semorale/backend-test
<INSERT> # -*- encoding: utf-8 -*-
from django.conf.urls import url, include
from django.views.decorators.csrf import csrf_exempt

from .views import CreateMenu,ListMenu,UpdateMenu,CreateSelection,ListSelection,CreateSubscriber

urlpatterns = [
 <INSERT_END> <INSERT>     url(r'^menu/new$',CreateMenu.as_view(),name='Create Menu'),
    url(r'^menu/edit/(?P<pk>\d+)/$',UpdateMenu.as_view(),name='Update Menu'),
    url(r'^menu/list$',ListMenu.as_view(),name='List Menu'),
    url(r'^menu/selection$',ListSelection.as_view(),name='List Selection'),
    url(r'^menu/(?P<uuid>[0-9a-z-]+)$',CreateSelection.as_view(),name='Create Selection'),
    url(r'^subscriber/new$',CreateSubscriber.as_view(),name='Create Subscriber'),
] <INSERT_END> <|endoftext|> # -*- encoding: utf-8 -*-
from django.conf.urls import url, include
from django.views.decorators.csrf import csrf_exempt

from .views import CreateMenu,ListMenu,UpdateMenu,CreateSelection,ListSelection,CreateSubscriber

urlpatterns = [
    url(r'^menu/new$',CreateMenu.as_view(),name='Create Menu'),
    url(r'^menu/edit/(?P<pk>\d+)/$',UpdateMenu.as_view(),name='Update Menu'),
    url(r'^menu/list$',ListMenu.as_view(),name='List Menu'),
    url(r'^menu/selection$',ListSelection.as_view(),name='List Selection'),
    url(r'^menu/(?P<uuid>[0-9a-z-]+)$',CreateSelection.as_view(),name='Create Selection'),
    url(r'^subscriber/new$',CreateSubscriber.as_view(),name='Create Subscriber'),
]
Update Urls from nora_menu app
015ecbbe112edaa3ada4cb1af70f62f03654dfe4
py/app.py
py/app.py
import json
import functools

from flask import Flask, Response
from foxgami.red import Story

app = Flask(__name__)


def return_as_json(inner_f):
    @functools.wraps(inner_f)
    def new_f(*args, **kwargs):
        result = inner_f(*args, **kwargs)
        return Response(json.dumps(
            result,
            indent=4,
            separators=(', ', ': ')
        ), mimetype='application/json')
    return new_f


@app.route('/api/stories')
@return_as_json
def hardcoded_aww():
    return Story.find()


@app.route('/api/stories/<string:story_id>')
def get_story(story_id):
    return Story.get(story_id)


if __name__ == '__main__':
    app.run(debug=True)
import json
import functools

from flask import Flask, Response
from foxgami.red import Story

app = Flask(__name__)


@app.after_request
def add_content_headers(response):
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response


def return_as_json(inner_f):
    @functools.wraps(inner_f)
    def new_f(*args, **kwargs):
        result = inner_f(*args, **kwargs)
        return Response(json.dumps(
            result,
            indent=4,
            separators=(', ', ': ')
        ), mimetype='application/json')
    return new_f


@app.route('/api/stories')
@return_as_json
def hardcoded_aww():
    return Story.find()


@app.route('/api/stories/<string:story_id>')
def get_story(story_id):
    return Story.get(story_id)


if __name__ == '__main__':
    app.run(debug=True)
Add Access-Control headers to python
Add Access-Control headers to python
Python
mit
flubstep/foxgami.com,flubstep/foxgami.com
<REPLACE_OLD> Flask(__name__)


def <REPLACE_NEW> Flask(__name__)


@app.after_request
def add_content_headers(response):
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response


def <REPLACE_END> <|endoftext|> import json
import functools

from flask import Flask, Response
from foxgami.red import Story

app = Flask(__name__)


@app.after_request
def add_content_headers(response):
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response


def return_as_json(inner_f):
    @functools.wraps(inner_f)
    def new_f(*args, **kwargs):
        result = inner_f(*args, **kwargs)
        return Response(json.dumps(
            result,
            indent=4,
            separators=(', ', ': ')
        ), mimetype='application/json')
    return new_f


@app.route('/api/stories')
@return_as_json
def hardcoded_aww():
    return Story.find()


@app.route('/api/stories/<string:story_id>')
def get_story(story_id):
    return Story.get(story_id)


if __name__ == '__main__':
    app.run(debug=True)
Add Access-Control headers to python
import json
import functools

from flask import Flask, Response
from foxgami.red import Story

app = Flask(__name__)


def return_as_json(inner_f):
    @functools.wraps(inner_f)
    def new_f(*args, **kwargs):
        result = inner_f(*args, **kwargs)
        return Response(json.dumps(
            result,
            indent=4,
            separators=(', ', ': ')
        ), mimetype='application/json')
    return new_f


@app.route('/api/stories')
@return_as_json
def hardcoded_aww():
    return Story.find()


@app.route('/api/stories/<string:story_id>')
def get_story(story_id):
    return Story.get(story_id)


if __name__ == '__main__':
    app.run(debug=True)
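A short editor-added check that such a response hook really stamps the CORS header. Note the record's original code used a nonexistent `after_response` decorator, corrected above to `after_request`, which is Flask's standard hook for this; the /ping route here is invented for the demo, and Flask's built-in test client means no server is needed.

from flask import Flask

app = Flask(__name__)


@app.after_request
def add_content_headers(response):
    response.headers['Access-Control-Allow-Origin'] = '*'
    return response


@app.route('/ping')
def ping():
    return 'pong'


with app.test_client() as client:
    resp = client.get('/ping')
    assert resp.headers['Access-Control-Allow-Origin'] == '*'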
6b49f7b1948ab94631c79304c91f8d5590d03e40
addons/project/models/project_config_settings.py
addons/project/models/project_config_settings.py
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo import api, fields, models


class ProjectConfiguration(models.TransientModel):
    _name = 'project.config.settings'
    _inherit = 'res.config.settings'

    company_id = fields.Many2one('res.company', string='Company', required=True,
        default=lambda self: self.env.user.company_id)
    module_pad = fields.Boolean("Collaborative Pads")
    module_hr_timesheet = fields.Boolean("Timesheets")
    module_project_timesheet_synchro = fields.Boolean("Awesome Timesheet")
    module_rating_project = fields.Boolean(string="Rating on Tasks")
    module_project_forecast = fields.Boolean(string="Forecasts")
    module_hr_holidays = fields.Boolean("Leave Management")
    module_hr_timesheet_attendance = fields.Boolean("Attendances")
    module_sale_timesheet = fields.Boolean("Time Billing")
    module_hr_expense = fields.Boolean("Expenses")
    group_subtask_project = fields.Boolean("Sub-tasks",
        implied_group="project.group_subtask_project")
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo import api, fields, models


class ProjectConfiguration(models.TransientModel):
    _name = 'project.config.settings'
    _inherit = 'res.config.settings'

    company_id = fields.Many2one('res.company', string='Company', required=True,
        default=lambda self: self.env.user.company_id)
    module_pad = fields.Boolean("Collaborative Pads")
    module_hr_timesheet = fields.Boolean("Timesheets")
    module_project_timesheet_synchro = fields.Boolean("Awesome Timesheet")
    module_rating_project = fields.Boolean(string="Rating on Tasks")
    module_project_forecast = fields.Boolean(string="Forecasts")
    module_hr_holidays = fields.Boolean("Leave Management")
    module_hr_timesheet_attendance = fields.Boolean("Attendances")
    module_sale_timesheet = fields.Boolean("Time Billing")
    module_hr_expense = fields.Boolean("Expenses")
    group_subtask_project = fields.Boolean("Sub-tasks",
        implied_group="project.group_subtask_project")

    @api.onchange('module_sale_timesheet')
    def _onchange_module_sale_timesheet(self):
        if self.module_sale_timesheet:
            self.module_hr_timesheet = True
Enable `Timesheets` option if `Time Billing` is enabled
[IMP] project: Enable `Timesheets` option if `Time Billing` is enabled
Python
agpl-3.0
ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo,ygol/odoo
<REPLACE_OLD> implied_group="project.group_subtask_project") <REPLACE_NEW> implied_group="project.group_subtask_project")

    @api.onchange('module_sale_timesheet')
    def _onchange_module_sale_timesheet(self):
        if self.module_sale_timesheet:
            self.module_hr_timesheet = True <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo import api, fields, models


class ProjectConfiguration(models.TransientModel):
    _name = 'project.config.settings'
    _inherit = 'res.config.settings'

    company_id = fields.Many2one('res.company', string='Company', required=True,
        default=lambda self: self.env.user.company_id)
    module_pad = fields.Boolean("Collaborative Pads")
    module_hr_timesheet = fields.Boolean("Timesheets")
    module_project_timesheet_synchro = fields.Boolean("Awesome Timesheet")
    module_rating_project = fields.Boolean(string="Rating on Tasks")
    module_project_forecast = fields.Boolean(string="Forecasts")
    module_hr_holidays = fields.Boolean("Leave Management")
    module_hr_timesheet_attendance = fields.Boolean("Attendances")
    module_sale_timesheet = fields.Boolean("Time Billing")
    module_hr_expense = fields.Boolean("Expenses")
    group_subtask_project = fields.Boolean("Sub-tasks",
        implied_group="project.group_subtask_project")

    @api.onchange('module_sale_timesheet')
    def _onchange_module_sale_timesheet(self):
        if self.module_sale_timesheet:
            self.module_hr_timesheet = True
[IMP] project: Enable `Timesheets` option if `Time Billing` is enabled
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.

from odoo import api, fields, models


class ProjectConfiguration(models.TransientModel):
    _name = 'project.config.settings'
    _inherit = 'res.config.settings'

    company_id = fields.Many2one('res.company', string='Company', required=True,
        default=lambda self: self.env.user.company_id)
    module_pad = fields.Boolean("Collaborative Pads")
    module_hr_timesheet = fields.Boolean("Timesheets")
    module_project_timesheet_synchro = fields.Boolean("Awesome Timesheet")
    module_rating_project = fields.Boolean(string="Rating on Tasks")
    module_project_forecast = fields.Boolean(string="Forecasts")
    module_hr_holidays = fields.Boolean("Leave Management")
    module_hr_timesheet_attendance = fields.Boolean("Attendances")
    module_sale_timesheet = fields.Boolean("Time Billing")
    module_hr_expense = fields.Boolean("Expenses")
    group_subtask_project = fields.Boolean("Sub-tasks",
        implied_group="project.group_subtask_project")
1f6cac883995cfaf4d1b19c6c13f3fc13e9ddc7a
tools/scyllatop/views/base.py
tools/scyllatop/views/base.py
import time import curses import curses.panel import logging class Base(object): def __init__(self, window): lines, columns = window.getmaxyx() self._window = curses.newwin(lines, columns) self._panel = curses.panel.new_panel(self._window) def writeStatusLine(self, measurements): line = 'time: {0}| {1} measurements, at most {2} visible'.format(time.asctime(), len(measurements), self.availableLines()) columns = self.dimensions()['columns'] self._window.addstr(0, 0, line.ljust(columns), curses.A_REVERSE) def availableLines(self): STATUS_LINE = 1 return self.dimensions()['lines'] - STATUS_LINE def refresh(self): curses.panel.update_panels() curses.doupdate() def onTop(self): logging.info('put {0} view on top'.format(self.__class__.__name__)) self._panel.top() curses.panel.update_panels() curses.doupdate() def clearScreen(self): self._window.clear() self._window.move(0, 0) def writeLine(self, thing, line): self._window.addstr(line, 0, str(thing)) def dimensions(self): lines, columns = self._window.getmaxyx() return {'lines': lines, 'columns': columns}
import time import curses import curses.panel import logging class Base(object): def __init__(self, window): lines, columns = window.getmaxyx() self._window = curses.newwin(lines, columns) self._panel = curses.panel.new_panel(self._window) def writeStatusLine(self, measurements): line = 'time: {0}| {1} measurements, at most {2} visible'.format(time.asctime(), len(measurements), self.availableLines()) columns = self.dimensions()['columns'] self._window.addstr(0, 0, line.ljust(columns), curses.A_REVERSE) def availableLines(self): STATUS_LINE = 1 return self.dimensions()['lines'] - STATUS_LINE def refresh(self): curses.panel.update_panels() curses.doupdate() def onTop(self): logging.info('put {0} view on top'.format(self.__class__.__name__)) self._panel.top() curses.panel.update_panels() curses.doupdate() def clearScreen(self): self._window.erase() self._window.move(0, 0) def writeLine(self, thing, line): self._window.addstr(line, 0, str(thing)) def dimensions(self): lines, columns = self._window.getmaxyx() return {'lines': lines, 'columns': columns}
Use 'erase' to clear the screen
tools/scyllatop: Use 'erase' to clear the screen The 'clear' function explicitly clears the screen and repaints it which causes really annoying flicker. Use 'erase' to make scyllatop more pleasant on the eyes. Message-Id: <2bf04f96d7d510dddf38de01959db6b168f25a31@scylladb.com>
Python
agpl-3.0
raphaelsc/scylla,avikivity/scylla,scylladb/scylla,duarten/scylla,avikivity/scylla,scylladb/scylla,kjniemi/scylla,kjniemi/scylla,duarten/scylla,duarten/scylla,scylladb/scylla,kjniemi/scylla,raphaelsc/scylla,scylladb/scylla,avikivity/scylla,raphaelsc/scylla
<REPLACE_OLD> self._window.clear() <REPLACE_NEW> self._window.erase() <REPLACE_END> <|endoftext|> import time import curses import curses.panel import logging class Base(object): def __init__(self, window): lines, columns = window.getmaxyx() self._window = curses.newwin(lines, columns) self._panel = curses.panel.new_panel(self._window) def writeStatusLine(self, measurements): line = 'time: {0}| {1} measurements, at most {2} visible'.format(time.asctime(), len(measurements), self.availableLines()) columns = self.dimensions()['columns'] self._window.addstr(0, 0, line.ljust(columns), curses.A_REVERSE) def availableLines(self): STATUS_LINE = 1 return self.dimensions()['lines'] - STATUS_LINE def refresh(self): curses.panel.update_panels() curses.doupdate() def onTop(self): logging.info('put {0} view on top'.format(self.__class__.__name__)) self._panel.top() curses.panel.update_panels() curses.doupdate() def clearScreen(self): self._window.erase() self._window.move(0, 0) def writeLine(self, thing, line): self._window.addstr(line, 0, str(thing)) def dimensions(self): lines, columns = self._window.getmaxyx() return {'lines': lines, 'columns': columns}
tools/scyllatop: Use 'erase' to clear the screen The 'clear' function explicitly clears the screen and repaints it which causes really annoying flicker. Use 'erase' to make scyllatop more pleasant on the eyes. Message-Id: <2bf04f96d7d510dddf38de01959db6b168f25a31@scylladb.com> import time import curses import curses.panel import logging class Base(object): def __init__(self, window): lines, columns = window.getmaxyx() self._window = curses.newwin(lines, columns) self._panel = curses.panel.new_panel(self._window) def writeStatusLine(self, measurements): line = 'time: {0}| {1} measurements, at most {2} visible'.format(time.asctime(), len(measurements), self.availableLines()) columns = self.dimensions()['columns'] self._window.addstr(0, 0, line.ljust(columns), curses.A_REVERSE) def availableLines(self): STATUS_LINE = 1 return self.dimensions()['lines'] - STATUS_LINE def refresh(self): curses.panel.update_panels() curses.doupdate() def onTop(self): logging.info('put {0} view on top'.format(self.__class__.__name__)) self._panel.top() curses.panel.update_panels() curses.doupdate() def clearScreen(self): self._window.clear() self._window.move(0, 0) def writeLine(self, thing, line): self._window.addstr(line, 0, str(thing)) def dimensions(self): lines, columns = self._window.getmaxyx() return {'lines': lines, 'columns': columns}
89a7a834638a1384bd9f1a560902b4d3aab29423
smoked/loader.py
smoked/loader.py
# coding: utf-8 from __future__ import unicode_literals from importlib import import_module from django.conf import settings from django.core.exceptions import ImproperlyConfigured def load_test_module(): """ Import test module and trigger registration of tests. Test module is defined in `SMOKE_TESTS` setting. """ test_module = getattr(settings, 'SMOKE_TESTS') if not test_module: raise ImproperlyConfigured('Missing SMOKE_TESTS in settings.') try: import_module(test_module) except ImportError as e: msg = "Can't import '{0}' module. Exception: {1}" raise ImproperlyConfigured(msg.format(test_module, e))
# coding: utf-8 from __future__ import unicode_literals from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.utils.importlib import import_module def load_test_module(): """ Import test module and trigger registration of tests. Test module is defined in `SMOKE_TESTS` setting. """ test_module = getattr(settings, 'SMOKE_TESTS') if not test_module: raise ImproperlyConfigured('Missing SMOKE_TESTS in settings.') try: import_module(test_module) except ImportError as e: msg = "Can't import '{0}' module. Exception: {1}" raise ImproperlyConfigured(msg.format(test_module, e))
Fix import of import_module for Py2.6
Fix import of import_module for Py2.6
Python
mit
djentlemen/django-smoked
<REPLACE_OLD> unicode_literals from importlib import import_module from <REPLACE_NEW> unicode_literals from <REPLACE_END> <REPLACE_OLD> ImproperlyConfigured def <REPLACE_NEW> ImproperlyConfigured from django.utils.importlib import import_module def <REPLACE_END> <|endoftext|> # coding: utf-8 from __future__ import unicode_literals from django.conf import settings from django.core.exceptions import ImproperlyConfigured from django.utils.importlib import import_module def load_test_module(): """ Import test module and trigger registration of tests. Test module is defined in `SMOKE_TESTS` setting. """ test_module = getattr(settings, 'SMOKE_TESTS') if not test_module: raise ImproperlyConfigured('Missing SMOKE_TESTS in settings.') try: import_module(test_module) except ImportError as e: msg = "Can't import '{0}' module. Exception: {1}" raise ImproperlyConfigured(msg.format(test_module, e))
Fix import of import_module for Py2.6 # coding: utf-8 from __future__ import unicode_literals from importlib import import_module from django.conf import settings from django.core.exceptions import ImproperlyConfigured def load_test_module(): """ Import test module and trigger registration of tests. Test module is defined in `SMOKE_TESTS` setting. """ test_module = getattr(settings, 'SMOKE_TESTS') if not test_module: raise ImproperlyConfigured('Missing SMOKE_TESTS in settings.') try: import_module(test_module) except ImportError as e: msg = "Can't import '{0}' module. Exception: {1}" raise ImproperlyConfigured(msg.format(test_module, e))
0e30e73ffa928b11fd6ee6c0ea12709100623e5f
pltpreview/view.py
pltpreview/view.py
"""Convenience functions for matplotlib plotting and image viewing.""" import numpy as np from matplotlib import pyplot as plt def show(image, blocking=False, title='', **kwargs): """Show *image*. If *blocking* is False the call is nonblocking. *title* is the image title. *kwargs* are passed to matplotlib's ``imshow`` function. This command always creates a new figure. Returns matplotlib's ``AxesImage``. """ plt.figure() mpl_image = plt.imshow(image, **kwargs) plt.colorbar(ticks=np.linspace(image.min(), image.max(), 8)) plt.title(title) plt.show(blocking) return mpl_image def plot(*args, **kwargs): """Plot using matplotlib's ``plot`` function. Pass it *args* and *kwargs*. *kwargs* are infected with *blocking* and if False or not specified, the call is nonblocking. *title* is also alowed to be in *kwargs* which sets the figure title. This command always creates a new figure. Returns a list of ``Line2D`` instances. """ blocking = False if 'blocking' not in kwargs else kwargs.pop('blocking') title = kwargs.pop('title', '') plt.figure() lines = plt.plot(*args, **kwargs) plt.title(title) plt.show(blocking) return lines
"""Convenience functions for matplotlib plotting and image viewing.""" import numpy as np from matplotlib import pyplot as plt def show(image, blocking=False, title='', **kwargs): """Show *image*. If *blocking* is False the call is nonblocking. *title* is the image title. *kwargs* are passed to matplotlib's ``imshow`` function. This command always creates a new figure. Returns matplotlib's ``AxesImage``. """ plt.figure() mpl_image = plt.imshow(image, **kwargs) plt.colorbar(ticks=np.linspace(image.min(), image.max(), 8)) plt.title(title) plt.show(blocking) return mpl_image def plot(*args, **kwargs): """Plot using matplotlib's ``plot`` function. Pass it *args* and *kwargs*. *kwargs* are infected with *blocking* and if False or not specified, the call is nonblocking. *title* is also alowed to be in *kwargs* which sets the figure title. This command always creates a new figure. Returns a list of ``Line2D`` instances. """ blocking = kwargs.pop('blocking', False) title = kwargs.pop('title', '') plt.figure() lines = plt.plot(*args, **kwargs) plt.title(title) plt.show(blocking) return lines
Use pop for getting blocking parameter
Use pop for getting blocking parameter
Python
mit
tfarago/pltpreview
<REPLACE_OLD> False if 'blocking' not in kwargs else kwargs.pop('blocking') <REPLACE_NEW> kwargs.pop('blocking', False) <REPLACE_END> <|endoftext|> """Convenience functions for matplotlib plotting and image viewing.""" import numpy as np from matplotlib import pyplot as plt def show(image, blocking=False, title='', **kwargs): """Show *image*. If *blocking* is False the call is nonblocking. *title* is the image title. *kwargs* are passed to matplotlib's ``imshow`` function. This command always creates a new figure. Returns matplotlib's ``AxesImage``. """ plt.figure() mpl_image = plt.imshow(image, **kwargs) plt.colorbar(ticks=np.linspace(image.min(), image.max(), 8)) plt.title(title) plt.show(blocking) return mpl_image def plot(*args, **kwargs): """Plot using matplotlib's ``plot`` function. Pass it *args* and *kwargs*. *kwargs* are infected with *blocking* and if False or not specified, the call is nonblocking. *title* is also alowed to be in *kwargs* which sets the figure title. This command always creates a new figure. Returns a list of ``Line2D`` instances. """ blocking = kwargs.pop('blocking', False) title = kwargs.pop('title', '') plt.figure() lines = plt.plot(*args, **kwargs) plt.title(title) plt.show(blocking) return lines
Use pop for getting blocking parameter """Convenience functions for matplotlib plotting and image viewing.""" import numpy as np from matplotlib import pyplot as plt def show(image, blocking=False, title='', **kwargs): """Show *image*. If *blocking* is False the call is nonblocking. *title* is the image title. *kwargs* are passed to matplotlib's ``imshow`` function. This command always creates a new figure. Returns matplotlib's ``AxesImage``. """ plt.figure() mpl_image = plt.imshow(image, **kwargs) plt.colorbar(ticks=np.linspace(image.min(), image.max(), 8)) plt.title(title) plt.show(blocking) return mpl_image def plot(*args, **kwargs): """Plot using matplotlib's ``plot`` function. Pass it *args* and *kwargs*. *kwargs* are inspected for *blocking* and if False or not specified, the call is nonblocking. *title* is also allowed to be in *kwargs* which sets the figure title. This command always creates a new figure. Returns a list of ``Line2D`` instances. """ blocking = False if 'blocking' not in kwargs else kwargs.pop('blocking') title = kwargs.pop('title', '') plt.figure() lines = plt.plot(*args, **kwargs) plt.title(title) plt.show(blocking) return lines
19ee49c57fd17f14efffd946019734f1cb4ed18e
pipes/s3/__main__.py
pipes/s3/__main__.py
"""Add application.properties to Application's S3 Bucket directory.""" import logging import argparse from .create_archaius import init_properties LOG = logging.getLogger(__name__) def main(): """Create application.properties for a given application.""" logging.basicConfig() parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument('-d', '--debug', action='store_const', const=logging.DEBUG, default=logging.INFO, help='Set DEBUG output') parser.add_argument('-e', '--env', choices=('dev', 'stage', 'prod'), default='dev', help='Deploy environment') parser.add_argument('-g', '--group', default='extra', help='Application Group name, e.g. forrest') parser.add_argument('-a', '--app', default='unnecessary', help='Application name, e.g. forrestcore') args = parser.parse_args() LOG.setLevel(args.debug) logging.getLogger(__package__).setLevel(args.debug) vars(args).pop('debug') LOG.debug('Args: %s', vars(args)) init_properties(env=args.env, group=args.group, app=args.app) if __name__ == '__main__': main()
"""Add application.properties to Application's S3 Bucket directory.""" import logging import argparse from .create_archaius import init_properties LOG = logging.getLogger(__name__) def main(): """Create application.properties for a given application.""" logging.basicConfig() parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument('-d', '--debug', action='store_const', const=logging.DEBUG, default=logging.INFO, help='Set DEBUG output') parser.add_argument('-e', '--env', choices=('build', 'dev', 'stage', 'prod'), default='dev', help='Deploy environment') parser.add_argument('-g', '--group', default='extra', help='Application Group name, e.g. forrest') parser.add_argument('-a', '--app', default='unnecessary', help='Application name, e.g. forrestcore') args = parser.parse_args() LOG.setLevel(args.debug) logging.getLogger(__package__).setLevel(args.debug) vars(args).pop('debug') LOG.debug('Args: %s', vars(args)) init_properties(env=args.env, group=args.group, app=args.app) if __name__ == '__main__': main()
Add build as a possible environment option
Add build as a possible environment option
Python
apache-2.0
gogoair/foremast,gogoair/foremast
<REPLACE_OLD> choices=('dev', <REPLACE_NEW> choices=('build', 'dev', <REPLACE_END> <|endoftext|> """Add application.properties to Application's S3 Bucket directory.""" import logging import argparse from .create_archaius import init_properties LOG = logging.getLogger(__name__) def main(): """Create application.properties for a given application.""" logging.basicConfig() parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument('-d', '--debug', action='store_const', const=logging.DEBUG, default=logging.INFO, help='Set DEBUG output') parser.add_argument('-e', '--env', choices=('build', 'dev', 'stage', 'prod'), default='dev', help='Deploy environment') parser.add_argument('-g', '--group', default='extra', help='Application Group name, e.g. forrest') parser.add_argument('-a', '--app', default='unnecessary', help='Application name, e.g. forrestcore') args = parser.parse_args() LOG.setLevel(args.debug) logging.getLogger(__package__).setLevel(args.debug) vars(args).pop('debug') LOG.debug('Args: %s', vars(args)) init_properties(env=args.env, group=args.group, app=args.app) if __name__ == '__main__': main()
Add build as a possible environment option """Add application.properties to Application's S3 Bucket directory.""" import logging import argparse from .create_archaius import init_properties LOG = logging.getLogger(__name__) def main(): """Create application.properties for a given application.""" logging.basicConfig() parser = argparse.ArgumentParser(description=main.__doc__) parser.add_argument('-d', '--debug', action='store_const', const=logging.DEBUG, default=logging.INFO, help='Set DEBUG output') parser.add_argument('-e', '--env', choices=('dev', 'stage', 'prod'), default='dev', help='Deploy environment') parser.add_argument('-g', '--group', default='extra', help='Application Group name, e.g. forrest') parser.add_argument('-a', '--app', default='unnecessary', help='Application name, e.g. forrestcore') args = parser.parse_args() LOG.setLevel(args.debug) logging.getLogger(__package__).setLevel(args.debug) vars(args).pop('debug') LOG.debug('Args: %s', vars(args)) init_properties(env=args.env, group=args.group, app=args.app) if __name__ == '__main__': main()
6ce72c5b0726fc2e3ae78c6f0a22e4f03f26a2ca
erpnext/patches/v5_4/update_purchase_cost_against_project.py
erpnext/patches/v5_4/update_purchase_cost_against_project.py
# Copyright (c) 2015, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe def execute(): for p in frappe.get_all("Project"): project = frappe.get_doc("Project", p.name) project.update_purchase_costing() project.save()
# Copyright (c) 2015, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe def execute(): for p in frappe.get_all("Project", filters={"docstatus": 0}): project = frappe.get_doc("Project", p.name) project.update_purchase_costing() project.save()
Update project cost for draft projects only
[fix] Update project cost for draft projects only
Python
agpl-3.0
mbauskar/helpdesk-erpnext,hernad/erpnext,gangadharkadam/saloon_erp_install,mbauskar/omnitech-demo-erpnext,indictranstech/trufil-erpnext,mbauskar/helpdesk-erpnext,susuchina/ERPNEXT,njmube/erpnext,aruizramon/alec_erpnext,ShashaQin/erpnext,anandpdoshi/erpnext,pombredanne/erpnext,aruizramon/alec_erpnext,mahabuber/erpnext,gangadharkadam/saloon_erp,shft117/SteckerApp,fuhongliang/erpnext,gangadhar-kadam/helpdesk-erpnext,gangadharkadam/saloon_erp,MartinEnder/erpnext-de,gangadharkadam/contributionerp,mbauskar/omnitech-demo-erpnext,gsnbng/erpnext,SPKian/Testing2,hanselke/erpnext-1,indictranstech/biggift-erpnext,mbauskar/omnitech-demo-erpnext,mbauskar/helpdesk-erpnext,mbauskar/alec_frappe5_erpnext,mahabuber/erpnext,Aptitudetech/ERPNext,aruizramon/alec_erpnext,sagar30051991/ozsmart-erp,hernad/erpnext,SPKian/Testing2,mahabuber/erpnext,indictranstech/erpnext,mbauskar/omnitech-erpnext,njmube/erpnext,susuchina/ERPNEXT,gangadharkadam/contributionerp,mbauskar/helpdesk-erpnext,gangadharkadam/v6_erp,ShashaQin/erpnext,anandpdoshi/erpnext,gangadharkadam/saloon_erp_install,fuhongliang/erpnext,Tejal011089/huntercamp_erpnext,gangadharkadam/v6_erp,Tejal011089/huntercamp_erpnext,mbauskar/sapphire-erpnext,pombredanne/erpnext,indictranstech/biggift-erpnext,indictranstech/osmosis-erpnext,MartinEnder/erpnext-de,gmarke/erpnext,shft117/SteckerApp,hernad/erpnext,indictranstech/reciphergroup-erpnext,fuhongliang/erpnext,gsnbng/erpnext,gangadhar-kadam/helpdesk-erpnext,anandpdoshi/erpnext,sheafferusa/erpnext,mbauskar/alec_frappe5_erpnext,mahabuber/erpnext,indictranstech/biggift-erpnext,indictranstech/erpnext,gsnbng/erpnext,SPKian/Testing,gmarke/erpnext,indictranstech/osmosis-erpnext,sagar30051991/ozsmart-erp,susuchina/ERPNEXT,hatwar/buyback-erpnext,njmube/erpnext,SPKian/Testing,SPKian/Testing2,geekroot/erpnext,geekroot/erpnext,indictranstech/trufil-erpnext,SPKian/Testing2,sheafferusa/erpnext,ShashaQin/erpnext,anandpdoshi/erpnext,SPKian/Testing,ShashaQin/erpnext,gangadhar-kadam/helpdesk-erpnext,meisterkleister/erpnext,indictranstech/erpnext,hanselke/erpnext-1,indictranstech/erpnext,indictranstech/biggift-erpnext,hatwar/buyback-erpnext,gangadharkadam/contributionerp,mbauskar/omnitech-erpnext,hatwar/buyback-erpnext,sagar30051991/ozsmart-erp,njmube/erpnext,indictranstech/reciphergroup-erpnext,hernad/erpnext,gangadharkadam/saloon_erp,pombredanne/erpnext,mbauskar/alec_frappe5_erpnext,gangadharkadam/v6_erp,sheafferusa/erpnext,mbauskar/omnitech-erpnext,mbauskar/sapphire-erpnext,meisterkleister/erpnext,indictranstech/reciphergroup-erpnext,indictranstech/osmosis-erpnext,meisterkleister/erpnext,Tejal011089/huntercamp_erpnext,gangadharkadam/contributionerp,mbauskar/sapphire-erpnext,MartinEnder/erpnext-de,gmarke/erpnext,gsnbng/erpnext,mbauskar/omnitech-erpnext,susuchina/ERPNEXT,indictranstech/osmosis-erpnext,hanselke/erpnext-1,mbauskar/alec_frappe5_erpnext,hatwar/buyback-erpnext,geekroot/erpnext,sheafferusa/erpnext,gmarke/erpnext,hanselke/erpnext-1,gangadhar-kadam/helpdesk-erpnext,mbauskar/omnitech-demo-erpnext,shft117/SteckerApp,mbauskar/sapphire-erpnext,gangadharkadam/saloon_erp_install,indictranstech/reciphergroup-erpnext,gangadharkadam/saloon_erp_install,pombredanne/erpnext,aruizramon/alec_erpnext,SPKian/Testing,sagar30051991/ozsmart-erp,fuhongliang/erpnext,indictranstech/trufil-erpnext,shft117/SteckerApp,MartinEnder/erpnext-de,gangadharkadam/saloon_erp,geekroot/erpnext,meisterkleister/erpnext,Tejal011089/huntercamp_erpnext,gangadharkadam/v6_erp,indictranstech/trufil-erpnext
<REPLACE_OLD> frappe.get_all("Project"): project <REPLACE_NEW> frappe.get_all("Project", filters={"docstatus": 0}): project <REPLACE_END> <|endoftext|> # Copyright (c) 2015, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe def execute(): for p in frappe.get_all("Project", filters={"docstatus": 0}): project = frappe.get_doc("Project", p.name) project.update_purchase_costing() project.save()
[fix] Update project cost for draft projects only # Copyright (c) 2015, Web Notes Technologies Pvt. Ltd. and Contributors # License: GNU General Public License v3. See license.txt from __future__ import unicode_literals import frappe def execute(): for p in frappe.get_all("Project"): project = frappe.get_doc("Project", p.name) project.update_purchase_costing() project.save()
5b0d308d1859920cc59e7241626472edb42c7856
djangosanetesting/testrunner.py
djangosanetesting/testrunner.py
from django.test.utils import setup_test_environment, teardown_test_environment from django.db.backends.creation import create_test_db, destroy_test_db import nose def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]): """ Run tests with nose instead of default test runner """ setup_test_environment() old_name = settings.DATABASE_NAME create_test_db(verbosity, autoclobber=not interactive) argv_backup = sys.argv # we have to strip script name before passing to nose sys.argv = argv_backup[0:1] config = Config(files=all_config_files(), plugins=DefaultPluginManager()) nose.run(config=config) sys.argv = argv_backup destroy_test_db(old_name, verbosity) teardown_test_environment() run_tests.__test__ = False
import sys from django.conf import settings from django.test.utils import setup_test_environment, teardown_test_environment import nose from nose.config import Config, all_config_files from nose.plugins.manager import DefaultPluginManager def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]): """ Run tests with nose instead of default test runner """ setup_test_environment() from django.db import connection old_name = settings.DATABASE_NAME connection.creation.create_test_db(verbosity, autoclobber=not interactive) argv_backup = sys.argv # we have to strip script name before passing to nose sys.argv = argv_backup[0:1] config = Config(files=all_config_files(), plugins=DefaultPluginManager()) nose.run(config=config) sys.argv = argv_backup connection.creation.destroy_test_db(old_name, verbosity) teardown_test_environment() #TODO: return len(result.failures) + len(result.errors) run_tests.__test__ = False
Use database connection instead of old-style functions
Use database connection instead of old-style functions
Python
bsd-3-clause
Almad/django-sane-testing
<REPLACE_OLD> from <REPLACE_NEW> import sys from django.conf import settings from <REPLACE_END> <REPLACE_OLD> teardown_test_environment from django.db.backends.creation <REPLACE_NEW> teardown_test_environment import nose from nose.config <REPLACE_END> <REPLACE_OLD> create_test_db, destroy_test_db import nose def <REPLACE_NEW> Config, all_config_files from nose.plugins.manager import DefaultPluginManager def <REPLACE_END> <REPLACE_OLD> setup_test_environment() <REPLACE_NEW> setup_test_environment() from django.db import connection <REPLACE_END> <REPLACE_OLD> create_test_db(verbosity, <REPLACE_NEW> connection.creation.create_test_db(verbosity, <REPLACE_END> <REPLACE_OLD> sys.argv <REPLACE_NEW> sys.argv <REPLACE_END> <REPLACE_OLD> plugins=DefaultPluginManager()) <REPLACE_NEW> plugins=DefaultPluginManager()) <REPLACE_END> <REPLACE_OLD> nose.run(config=config) <REPLACE_NEW> nose.run(config=config) <REPLACE_END> <REPLACE_OLD> destroy_test_db(old_name, <REPLACE_NEW> connection.creation.destroy_test_db(old_name, <REPLACE_END> <REPLACE_OLD> teardown_test_environment() run_tests.__test__ <REPLACE_NEW> teardown_test_environment() #TODO: return len(result.failures) + len(result.errors) run_tests.__test__ <REPLACE_END> <|endoftext|> import sys from django.conf import settings from django.test.utils import setup_test_environment, teardown_test_environment import nose from nose.config import Config, all_config_files from nose.plugins.manager import DefaultPluginManager def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]): """ Run tests with nose instead of defualt test runner """ setup_test_environment() from django.db import connection old_name = settings.DATABASE_NAME connection.creation.create_test_db(verbosity, autoclobber=not interactive) argv_backup = sys.argv # we have to strip script name before passing to nose sys.argv = argv_backup[0:1] config = Config(files=all_config_files(), plugins=DefaultPluginManager()) nose.run(config=config) sys.argv = argv_backup connection.creation.destroy_test_db(old_name, verbosity) teardown_test_environment() #TODO: return len(result.failures) + len(result.errors) run_tests.__test__ = False
Use database connection instead of old-style functions from django.test.utils import setup_test_environment, teardown_test_environment from django.db.backends.creation import create_test_db, destroy_test_db import nose def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]): """ Run tests with nose instead of default test runner """ setup_test_environment() old_name = settings.DATABASE_NAME create_test_db(verbosity, autoclobber=not interactive) argv_backup = sys.argv # we have to strip script name before passing to nose sys.argv = argv_backup[0:1] config = Config(files=all_config_files(), plugins=DefaultPluginManager()) nose.run(config=config) sys.argv = argv_backup destroy_test_db(old_name, verbosity) teardown_test_environment() run_tests.__test__ = False
b211306824db0a10a79cdab4153c457813b44bca
linter.py
linter.py
# # linter.py # Markdown Linter for SublimeLinter, a code checking framework # for Sublime Text 3 # # Written by Jon LaBelle # Copyright (c) 2018 Jon LaBelle # # License: MIT # """This module exports the Markdownlint plugin class.""" from SublimeLinter.lint import NodeLinter, util class MarkdownLint(NodeLinter): """Provides an interface to markdownlint.""" syntax = ('markdown', 'markdown gfm', 'multimarkdown', 'markdown extended') cmd = ('markdownlint', '${args}', '${file}') npm_name = 'markdownlint' config_file = ('--config', '.markdownlintrc') version_args = '--version' version_re = r'(?P<version>\d+\.\d+\.\d+)' version_requirement = '>= 0.6.0' check_version = True regex = r'.+?[:]\s(?P<line>\d+)[:]\s(?P<error>MD\d+)?[/]?(?P<message>.+)' multiline = False line_col_base = (1, 1) tempfile_suffix = '-' error_stream = util.STREAM_STDERR word_re = None comment_re = r'\s*/[/*]'
# # linter.py # Markdown Linter for SublimeLinter, a code checking framework # for Sublime Text 3 # # Written by Jon LaBelle # Copyright (c) 2018 Jon LaBelle # # License: MIT # """This module exports the Markdownlint plugin class.""" from SublimeLinter.lint import NodeLinter, util class MarkdownLint(NodeLinter): """Provides an interface to markdownlint.""" syntax = ('markdown', 'markdown gfm', 'multimarkdown', 'markdown extended') cmd = ('markdownlint', '${args}', '${file}') npm_name = 'markdownlint' config_file = ('--config', '.markdownlintrc') regex = r'.+?[:]\s(?P<line>\d+)[:]\s(?P<error>MD\d+)?[/]?(?P<message>.+)' multiline = False line_col_base = (1, 1) tempfile_suffix = '-' error_stream = util.STREAM_STDERR word_re = None comment_re = r'\s*/[/*]'
Remove deprecated version requirement settings
Remove deprecated version requirement settings Linter plugins can no longer set version requirements. https://github.com/SublimeLinter/SublimeLinter/issues/1087
Python
mit
jonlabelle/SublimeLinter-contrib-markdownlint,jonlabelle/SublimeLinter-contrib-markdownlint
<DELETE> version_args = '--version' version_re = r'(?P<version>\d+\.\d+\.\d+)' version_requirement = '>= 0.6.0' check_version = True <DELETE_END> <|endoftext|> # # linter.py # Markdown Linter for SublimeLinter, a code checking framework # for Sublime Text 3 # # Written by Jon LaBelle # Copyright (c) 2018 Jon LaBelle # # License: MIT # """This module exports the Markdownlint plugin class.""" from SublimeLinter.lint import NodeLinter, util class MarkdownLint(NodeLinter): """Provides an interface to markdownlint.""" syntax = ('markdown', 'markdown gfm', 'multimarkdown', 'markdown extended') cmd = ('markdownlint', '${args}', '${file}') npm_name = 'markdownlint' config_file = ('--config', '.markdownlintrc') regex = r'.+?[:]\s(?P<line>\d+)[:]\s(?P<error>MD\d+)?[/]?(?P<message>.+)' multiline = False line_col_base = (1, 1) tempfile_suffix = '-' error_stream = util.STREAM_STDERR word_re = None comment_re = r'\s*/[/*]'
Remove deprecated version requirement settings Linter plugins can no longer set version requirements. https://github.com/SublimeLinter/SublimeLinter/issues/1087 # # linter.py # Markdown Linter for SublimeLinter, a code checking framework # for Sublime Text 3 # # Written by Jon LaBelle # Copyright (c) 2018 Jon LaBelle # # License: MIT # """This module exports the Markdownlint plugin class.""" from SublimeLinter.lint import NodeLinter, util class MarkdownLint(NodeLinter): """Provides an interface to markdownlint.""" syntax = ('markdown', 'markdown gfm', 'multimarkdown', 'markdown extended') cmd = ('markdownlint', '${args}', '${file}') npm_name = 'markdownlint' config_file = ('--config', '.markdownlintrc') version_args = '--version' version_re = r'(?P<version>\d+\.\d+\.\d+)' version_requirement = '>= 0.6.0' check_version = True regex = r'.+?[:]\s(?P<line>\d+)[:]\s(?P<error>MD\d+)?[/]?(?P<message>.+)' multiline = False line_col_base = (1, 1) tempfile_suffix = '-' error_stream = util.STREAM_STDERR word_re = None comment_re = r'\s*/[/*]'
1fd87ad0cab5d45602192c83681340d5da27a6db
examples/custom_context.py
examples/custom_context.py
import random import discord from discord.ext import commands class MyContext(commands.Context): async def tick(self, value): # reacts to the message with an emoji # depending on whether value is True or False # if its True, it'll add a green check mark # otherwise, it'll add a red cross mark emoji = '\N{WHITE HEAVY CHECK MARK}' if value else '\N{CROSS MARK}' try: # this will react to the command author's message await self.message.add_reaction(emoji) except discord.HTTPException: # sometimes errors occur during this, for example # maybe you dont have permission to do that # we dont mind, so we can just ignore them pass class MyBot(commands.Bot): async def get_context(self, message, *, cls=MyContext): # when you override this method, you pass your new Context # subclass to the super() method, which tells the bot to # use the new MyContext class return await super().get_context(message, cls=cls) bot = MyBot(command_prefix='!') @bot.command() async def guess(ctx, number: int): """ Guess a random number from 1 to 6. """ # explained in a previous example, this gives you # a random number from 1-6 value = random.randint(1, 6) # with your new helper function, you can add a # green check mark if the guess was correct, # or a red cross mark if it wasnt await ctx.tick(number == value) # important: you shouldnt hard code your token # these are very important, and leaking them can # let people do very malicious things with your # bot. try to use a file or something to keep # them private, and dont commit it to GitHub token = "your token here" bot.run(token)
Add example on subclassing commands.Context
Add example on subclassing commands.Context
Python
mit
Rapptz/discord.py,Harmon758/discord.py,khazhyk/discord.py,rapptz/discord.py,Harmon758/discord.py
<REPLACE_OLD> <REPLACE_NEW> import random import discord from discord.ext import commands class MyContext(commands.Context): async def tick(self, value): # reacts to the message with an emoji # depending on whether value is True or False # if its True, it'll add a green check mark # otherwise, it'll add a red cross mark emoji = '\N{WHITE HEAVY CHECK MARK}' if value else '\N{CROSS MARK}' try: # this will react to the command author's message await self.message.add_reaction(emoji) except discord.HTTPException: # sometimes errors occur during this, for example # maybe you dont have permission to do that # we dont mind, so we can just ignore them pass class MyBot(commands.Bot): async def get_context(self, message, *, cls=MyContext): # when you override this method, you pass your new Context # subclass to the super() method, which tells the bot to # use the new MyContext class return await super().get_context(message, cls=cls) bot = MyBot(command_prefix='!') @bot.command() async def guess(ctx, number: int): """ Guess a random number from 1 to 6. """ # explained in a previous example, this gives you # a random number from 1-6 value = random.randint(1, 6) # with your new helper function, you can add a # green check mark if the guess was correct, # or a red cross mark if it wasnt await ctx.tick(number == value) # important: you shouldnt hard code your token # these are very important, and leaking them can # let people do very malicious things with your # bot. try to use a file or something to keep # them private, and dont commit it to GitHub token = "your token here" bot.run(token) <REPLACE_END> <|endoftext|> import random import discord from discord.ext import commands class MyContext(commands.Context): async def tick(self, value): # reacts to the message with an emoji # depending on whether value is True or False # if its True, it'll add a green check mark # otherwise, it'll add a red cross mark emoji = '\N{WHITE HEAVY CHECK MARK}' if value else '\N{CROSS MARK}' try: # this will react to the command author's message await self.message.add_reaction(emoji) except discord.HTTPException: # sometimes errors occur during this, for example # maybe you dont have permission to do that # we dont mind, so we can just ignore them pass class MyBot(commands.Bot): async def get_context(self, message, *, cls=MyContext): # when you override this method, you pass your new Context # subclass to the super() method, which tells the bot to # use the new MyContext class return await super().get_context(message, cls=cls) bot = MyBot(command_prefix='!') @bot.command() async def guess(ctx, number: int): """ Guess a random number from 1 to 6. """ # explained in a previous example, this gives you # a random number from 1-6 value = random.randint(1, 6) # with your new helper function, you can add a # green check mark if the guess was correct, # or a red cross mark if it wasnt await ctx.tick(number == value) # important: you shouldnt hard code your token # these are very important, and leaking them can # let people do very malicious things with your # bot. try to use a file or something to keep # them private, and dont commit it to GitHub token = "your token here" bot.run(token)
Add example on subclassing commands.Context
f7aa9e986abd9fb55cb72ac4661f319a867e059d
scripts/ms_jsfs.py
scripts/ms_jsfs.py
#!/usr/bin/env python """ Convert the output of ms to an N-dimensional frequency spectrum. """ import sys import dadi if __name__ == '__main__': average = ('-av' in sys.argv) input = sys.stdin output = sys.stdout sfs,header = dadi.Spectrum.from_ms_file(input, average, mask_corners=True, return_header=True) dadi.IO.sfs_to_file(sfs, output, comment_lines=header)
Rename so we can import it and thus get useful epydoc documentation.
Rename so we can import it and thus get useful epydoc documentation. git-svn-id: 4c7b13231a96299fde701bb5dec4bd2aaf383fc6@132 979d6bd5-6d4d-0410-bece-f567c23bd345
Python
bsd-3-clause
beni55/dadi,cheese1213/dadi,paulirish/dadi,yangjl/dadi,ChenHsiang/dadi,cheese1213/dadi,RyanGutenkunst/dadi,paulirish/dadi,niuhuifei/dadi,beni55/dadi,ChenHsiang/dadi,yangjl/dadi,niuhuifei/dadi,RyanGutenkunst/dadi
<INSERT> #!/usr/bin/env python """ Convert the output of ms to an N-dimensional frequency spectrum. """ import sys import dadi if __name__ == '__main__': <INSERT_END> <INSERT> average = ('-av' in sys.argv) input = sys.stdin output = sys.stdout sfs,header = dadi.Spectrum.from_ms_file(input, average, mask_corners=True, return_header=True) dadi.IO.sfs_to_file(sfs, output, comment_lines=header) <INSERT_END> <|endoftext|> #!/usr/bin/env python """ Convert the output of ms to an N-dimensional frequency spectrum. """ import sys import dadi if __name__ == '__main__': average = ('-av' in sys.argv) input = sys.stdin output = sys.stdout sfs,header = dadi.Spectrum.from_ms_file(input, average, mask_corners=True, return_header=True) dadi.IO.sfs_to_file(sfs, output, comment_lines=header)
Rename so we can import it and thus get useful epydoc documentation. git-svn-id: 4c7b13231a96299fde701bb5dec4bd2aaf383fc6@132 979d6bd5-6d4d-0410-bece-f567c23bd345
db6dafeabdade2cc8f2e14be3ed06938d3dff644
tests/test_classes.py
tests/test_classes.py
import unittest from classes import Paladin from models.spells.loader import load_paladin_spells_for_level class PaladinTests(unittest.TestCase): def setUp(self): self.name = "Netherblood" self.level = 3 self.dummy = Paladin(name=self.name, level=self.level, health=100, mana=100, strength=10) def test_init(self): """ The __init__ should load/save all the spells for the Paladin""" spells = [spell for level in range(1,self.level+1) for spell in load_paladin_spells_for_level(level)] self.assertNotEqual(len(self.dummy.learned_spells), 0) for spell in spells: self.assertIn(spell.name, self.dummy.learned_spells) char_spell = self.dummy.learned_spells[spell.name] # find the largest rank in our spells list (the char has the highest rank only) max_rank = list(sorted(filter(lambda x: x.name == spell.name, spells), key=lambda x: x.rank))[-1].rank self.assertEqual(char_spell.rank, max_rank) if __name__ == '__main__': unittest.main()
Test for the __init__ function of the Paladin class
Test for the __init__ function of the Paladin class
Python
mit
Enether/python_wow
<REPLACE_OLD> <REPLACE_NEW> import unittest from classes import Paladin from models.spells.loader import load_paladin_spells_for_level class PaladinTests(unittest.TestCase): def setUp(self): self.name = "Netherblood" self.level = 3 self.dummy = Paladin(name=self.name, level=self.level, health=100, mana=100, strength=10) def test_init(self): """ The __init__ should load/save all the spells for the Paladin""" spells = [spell for level in range(1,self.level+1) for spell in load_paladin_spells_for_level(level)] self.assertNotEqual(len(self.dummy.learned_spells), 0) for spell in spells: self.assertIn(spell.name, self.dummy.learned_spells) char_spell = self.dummy.learned_spells[spell.name] # find the largest rank in our spells list (the char has the highest rank only) max_rank = list(sorted(filter(lambda x: x.name == spell.name, spells), key=lambda x: x.rank))[-1].rank self.assertEqual(char_spell.rank, max_rank) if __name__ == '__main__': unittest.main() <REPLACE_END> <|endoftext|> import unittest from classes import Paladin from models.spells.loader import load_paladin_spells_for_level class PaladinTests(unittest.TestCase): def setUp(self): self.name = "Netherblood" self.level = 3 self.dummy = Paladin(name=self.name, level=self.level, health=100, mana=100, strength=10) def test_init(self): """ The __init__ should load/save all the spells for the Paladin""" spells = [spell for level in range(1,self.level+1) for spell in load_paladin_spells_for_level(level)] self.assertNotEqual(len(self.dummy.learned_spells), 0) for spell in spells: self.assertIn(spell.name, self.dummy.learned_spells) char_spell = self.dummy.learned_spells[spell.name] # find the largest rank in our spells list (the char has the highest rank only) max_rank = list(sorted(filter(lambda x: x.name == spell.name, spells), key=lambda x: x.rank))[-1].rank self.assertEqual(char_spell.rank, max_rank) if __name__ == '__main__': unittest.main()
Test for the __init__ function of the Paladin class
6e58e0fbf059f137bfcfef070968191f2ea42655
tests/libpeas/plugins/extension-python/extension-python.py
tests/libpeas/plugins/extension-python/extension-python.py
# -*- coding: utf-8 -*- # ex:set ts=4 et sw=4 ai: from gi.repository import GObject, Introspection, Peas class ExtensionPythonPlugin(GObject.Object, Peas.Activatable, Introspection.Base, Introspection.Callable, Introspection.PropertiesPrerequisite, Introspection.Properties, Introspection.HasPrerequisite): object = GObject.property(type=GObject.Object) construct_only = GObject.property(type=str) read_only = GObject.property(type=str, default="read-only") write_only = GObject.property(type=str) readwrite = GObject.property(type=str, default="readwrite") prerequisite = GObject.property(type=str) def do_activate(self): pass def do_deactivate(self): pass def do_update_state(self): pass def do_get_plugin_info(self): return self.plugin_info def do_get_settings(self): return self.plugin_info.get_settings(None) def do_call_with_return(self): return "Hello, World!"; def do_call_no_args(self): pass def do_call_single_arg(self): return True def do_call_multi_args(self, in_, inout): return (inout, in_)
# -*- coding: utf-8 -*- # ex:set ts=4 et sw=4 ai: from gi.repository import GObject, Introspection, Peas class ExtensionPythonPlugin(GObject.Object, Peas.Activatable, Introspection.Base, Introspection.Callable, Introspection.PropertiesPrerequisite, Introspection.Properties, Introspection.HasPrerequisite): object = GObject.property(type=GObject.Object) construct_only = GObject.property(type=str) read_only = GObject.property(type=str, default="read-only") write_only = GObject.property(type=str) readwrite = GObject.property(type=str, default="readwrite") prerequisite = GObject.property(type=str) def do_activate(self): pass def do_deactivate(self): pass def do_update_state(self): pass def do_get_plugin_info(self): return self.plugin_info def do_get_settings(self): return self.plugin_info.get_settings(None) def do_call_with_return(self): return "Hello, World!" def do_call_no_args(self): pass def do_call_single_arg(self): return True def do_call_multi_args(self, in_, inout): return (inout, in_)
Fix style issues in python test plugin
Fix style issues in python test plugin https://bugzilla.gnome.org/show_bug.cgi?id=678339
Python
lgpl-2.1
Distrotech/libpeas,chergert/libpeas,gregier/libpeas,chergert/libpeas,GNOME/libpeas,gregier/libpeas,gregier/libpeas,gregier/libpeas,Distrotech/libpeas,Distrotech/libpeas,GNOME/libpeas,chergert/libpeas
<REPLACE_OLD> pass <REPLACE_NEW> pass <REPLACE_END> <REPLACE_OLD> World!"; <REPLACE_NEW> World!" <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*- # ex:set ts=4 et sw=4 ai: from gi.repository import GObject, Introspection, Peas class ExtensionPythonPlugin(GObject.Object, Peas.Activatable, Introspection.Base, Introspection.Callable, Introspection.PropertiesPrerequisite, Introspection.Properties, Introspection.HasPrerequisite): object = GObject.property(type=GObject.Object) construct_only = GObject.property(type=str) read_only = GObject.property(type=str, default="read-only") write_only = GObject.property(type=str) readwrite = GObject.property(type=str, default="readwrite") prerequisite = GObject.property(type=str) def do_activate(self): pass def do_deactivate(self): pass def do_update_state(self): pass def do_get_plugin_info(self): return self.plugin_info def do_get_settings(self): return self.plugin_info.get_settings(None) def do_call_with_return(self): return "Hello, World!" def do_call_no_args(self): pass def do_call_single_arg(self): return True def do_call_multi_args(self, in_, inout): return (inout, in_)
Fix style issues in python test plugin https://bugzilla.gnome.org/show_bug.cgi?id=678339 # -*- coding: utf-8 -*- # ex:set ts=4 et sw=4 ai: from gi.repository import GObject, Introspection, Peas class ExtensionPythonPlugin(GObject.Object, Peas.Activatable, Introspection.Base, Introspection.Callable, Introspection.PropertiesPrerequisite, Introspection.Properties, Introspection.HasPrerequisite): object = GObject.property(type=GObject.Object) construct_only = GObject.property(type=str) read_only = GObject.property(type=str, default="read-only") write_only = GObject.property(type=str) readwrite = GObject.property(type=str, default="readwrite") prerequisite = GObject.property(type=str) def do_activate(self): pass def do_deactivate(self): pass def do_update_state(self): pass def do_get_plugin_info(self): return self.plugin_info def do_get_settings(self): return self.plugin_info.get_settings(None) def do_call_with_return(self): return "Hello, World!"; def do_call_no_args(self): pass def do_call_single_arg(self): return True def do_call_multi_args(self, in_, inout): return (inout, in_)
af566e0cd0958dadd0de55d51b50ad026a8f2b99
build_android_prepare.py
build_android_prepare.py
"""Configuration for the Caffe2 installation. """ from build import Config import sys Config.USE_SYSTEM_PROTOBUF = False Config.PROTOC_BINARY = 'gen/third_party/google/protoc' Config.USE_OPENMP = False if __name__ == '__main__': from brewtool.brewery import Brewery Brewery.Run( Config, ['build_android_prepare.py', 'build', '//third_party/google:protoc']) else: print('This script is not intended to be used as an imported module.') sys.exit(1)
Add a simple script to help build android.
Add a simple script to help build android.
Python
apache-2.0
davinwang/caffe2,sf-wind/caffe2,davinwang/caffe2,bwasti/caffe2,pietern/caffe2,xzturn/caffe2,davinwang/caffe2,sf-wind/caffe2,bwasti/caffe2,davinwang/caffe2,bwasti/caffe2,bwasti/caffe2,sf-wind/caffe2,caffe2/caffe2,pietern/caffe2,pietern/caffe2,Yangqing/caffe2,xzturn/caffe2,Yangqing/caffe2,davinwang/caffe2,Yangqing/caffe2,xzturn/caffe2,bwasti/caffe2,sf-wind/caffe2,Yangqing/caffe2,xzturn/caffe2,pietern/caffe2,xzturn/caffe2,sf-wind/caffe2,Yangqing/caffe2,pietern/caffe2
<INSERT> """Configuration for the Caffe2 installation. """ from build import Config import sys Config.USE_SYSTEM_PROTOBUF = False Config.PROTOC_BINARY = 'gen/third_party/google/protoc' Config.USE_OPENMP = False if __name__ == '__main__': <INSERT_END> <INSERT> from brewtool.brewery import Brewery Brewery.Run( Config, ['build_android_prepare.py', 'build', '//third_party/google:protoc']) else: print('This script is not intended to be used as an imported module.') sys.exit(1) <INSERT_END> <|endoftext|> """Configuration for the Caffe2 installation. """ from build import Config import sys Config.USE_SYSTEM_PROTOBUF = False Config.PROTOC_BINARY = 'gen/third_party/google/protoc' Config.USE_OPENMP = False if __name__ == '__main__': from brewtool.brewery import Brewery Brewery.Run( Config, ['build_android_prepare.py', 'build', '//third_party/google:protoc']) else: print('This script is not intended to be used as an imported module.') sys.exit(1)
Add a simple script to help build android.
2de823ae11e1337f114457bf4e49275d8d2eda99
recursive_binary_search.py
recursive_binary_search.py
def binary_search(array, low, high, item): if(low>high) : return -1 mid = (low + high)//2 if(item == array[mid]): return mid elif item < array[mid]: return binary_search(array, low, mid-1, item) elif item > array[mid]: return binary_search(array, mid+1, high, item) print ( binary_search([1,4,5,7], 0, 3, 5)) #2 print ( binary_search([1,4,5,7], 0, 3, 10)) #-1
Add recursive binary search implementation
Add recursive binary search implementation
Python
mit
arafat-al-mahmud/algorithms-python
<INSERT> def binary_search(array, low, high, item): <INSERT_END> <INSERT> if(low>high) : return -1 mid = (low + high)//2 if(item == array[mid]): return mid elif item < array[mid]: return binary_search(array, low, mid-1, item) elif item > array[mid]: return binary_search(array, mid+1, high, item) print ( binary_search([1,4,5,7], 0, 3, 5)) #2 print ( binary_search([1,4,5,7], 0, 3, 10)) #-1 <INSERT_END> <|endoftext|> def binary_search(array, low, high, item): if(low>high) : return -1 mid = (low + high)//2 if(item == array[mid]): return mid elif item < array[mid]: return binary_search(array, low, mid-1, item) elif item > array[mid]: return binary_search(array, mid+1, high, item) print ( binary_search([1,4,5,7], 0, 3, 5)) #2 print ( binary_search([1,4,5,7], 0, 3, 10)) #-1
Add recursive binary search implementation
3e54119f07b0fdcbbe556e86de3c161a3eb20ddf
mwikiircbot.py
mwikiircbot.py
import ircbotframe import sys class Handler: def __init__(self, host, port=6667, name="MediaWiki", description="MediaWiki recent changes bot", channels=[]): self.channels = channels self.bot = ircbotframe.ircBot(host, port, name, description) self.bot.bind("376", self.endMOTD) self.bot.start() def endMOTD(self, sender, headers, message): for chan in self.channels: bot.joinchan(chan) def main(cmd, args): if len(args) < 1: print("Usage: `" + cmd + " <host> <channel> [<channel> ...]` (for full arguments, see the readme)") return else: Handler(host=args[0]) if __name__ == "__main__": if __name__ == '__main__': main(sys.argv[0], sys.argv[1:] if len(sys.argv) > 1 else [])
import ircbotframe import sys class Handler: def __init__(self, host, port=6667, name="MediaWiki", description="MediaWiki recent changes bot", channels=[]): self.channels = channels self.bot = ircbotframe.ircBot(host, port, name, description) self.bot.bind("376", self.endMOTD) self.bot.start() def endMOTD(self, sender, headers, message): for chan in self.channels: self.bot.joinchan(chan) def main(cmd, args): if len(args) < 2: print("Usage: " + cmd + " <host> <channel> [<channel> ...]") return elif len(args) > 1: Handler(host=args[0], channels=args[1:]) if __name__ == "__main__": if __name__ == '__main__': main(sys.argv[0], sys.argv[1:] if len(sys.argv) > 1 else [])
Fix bot not joining any channels
Fix bot not joining any channels Also removed unnecessary usage comment.
Python
mit
fenhl/mwikiircbot
<REPLACE_OLD> bot.joinchan(chan) def <REPLACE_NEW> self.bot.joinchan(chan) def <REPLACE_END> <REPLACE_OLD> 1: <REPLACE_NEW> 2: <REPLACE_END> <REPLACE_OLD> `" <REPLACE_NEW> " <REPLACE_END> <REPLACE_OLD> ...]` (for full arguments, see the readme)") <REPLACE_NEW> ...]") <REPLACE_END> <REPLACE_OLD> else: <REPLACE_NEW> elif len(args) > 1: <REPLACE_END> <REPLACE_OLD> Handler(host=args[0]) if <REPLACE_NEW> Handler(host=args[0], channels=args[1:]) if <REPLACE_END> <|endoftext|> import ircbotframe import sys class Handler: def __init__(self, host, port=6667, name="MediaWiki", description="MediaWiki recent changes bot", channels=[]): self.channels = channels self.bot = ircbotframe.ircBot(host, port, name, description) self.bot.bind("376", self.endMOTD) self.bot.start() def endMOTD(self, sender, headers, message): for chan in self.channels: self.bot.joinchan(chan) def main(cmd, args): if len(args) < 2: print("Usage: " + cmd + " <host> <channel> [<channel> ...]") return elif len(args) > 1: Handler(host=args[0], channels=args[1:]) if __name__ == "__main__": if __name__ == '__main__': main(sys.argv[0], sys.argv[1:] if len(sys.argv) > 1 else [])
Fix bot not joining any channels Also removed unnecessary usage comment. import ircbotframe import sys class Handler: def __init__(self, host, port=6667, name="MediaWiki", description="MediaWiki recent changes bot", channels=[]): self.channels = channels self.bot = ircbotframe.ircBot(host, port, name, description) self.bot.bind("376", self.endMOTD) self.bot.start() def endMOTD(self, sender, headers, message): for chan in self.channels: bot.joinchan(chan) def main(cmd, args): if len(args) < 1: print("Usage: `" + cmd + " <host> <channel> [<channel> ...]` (for full arguments, see the readme)") return else: Handler(host=args[0]) if __name__ == "__main__": if __name__ == '__main__': main(sys.argv[0], sys.argv[1:] if len(sys.argv) > 1 else [])
fde47133da8c5157f2cae04abb77eccbace6c831
netbox/netbox/forms.py
netbox/netbox/forms.py
from __future__ import unicode_literals from django import forms from utilities.forms import BootstrapMixin OBJ_TYPE_CHOICES = ( ('', 'All Objects'), ('Circuits', ( ('provider', 'Providers'), ('circuit', 'Circuits'), )), ('DCIM', ( ('site', 'Sites'), ('rack', 'Racks'), ('devicetype', 'Device types'), ('device', 'Devices'), )), ('IPAM', ( ('vrf', 'VRFs'), ('aggregate', 'Aggregates'), ('prefix', 'Prefixes'), ('ipaddress', 'IP addresses'), ('vlan', 'VLANs'), )), ('Secrets', ( ('secret', 'Secrets'), )), ('Tenancy', ( ('tenant', 'Tenants'), )), ) class SearchForm(BootstrapMixin, forms.Form): q = forms.CharField( label='Query', widget=forms.TextInput(attrs={'style': 'width: 350px'}) ) obj_type = forms.ChoiceField( choices=OBJ_TYPE_CHOICES, required=False, label='Type' )
from __future__ import unicode_literals from django import forms from utilities.forms import BootstrapMixin OBJ_TYPE_CHOICES = ( ('', 'All Objects'), ('Circuits', ( ('provider', 'Providers'), ('circuit', 'Circuits'), )), ('DCIM', ( ('site', 'Sites'), ('rack', 'Racks'), ('devicetype', 'Device types'), ('device', 'Devices'), )), ('IPAM', ( ('vrf', 'VRFs'), ('aggregate', 'Aggregates'), ('prefix', 'Prefixes'), ('ipaddress', 'IP addresses'), ('vlan', 'VLANs'), )), ('Secrets', ( ('secret', 'Secrets'), )), ('Tenancy', ( ('tenant', 'Tenants'), )), ) class SearchForm(BootstrapMixin, forms.Form): q = forms.CharField( label='Search', widget=forms.TextInput(attrs={'style': 'width: 350px'}) ) obj_type = forms.ChoiceField( choices=OBJ_TYPE_CHOICES, required=False, label='Type' )
Fix global search placeholder text
Fix global search placeholder text
Python
apache-2.0
digitalocean/netbox,digitalocean/netbox,digitalocean/netbox,lampwins/netbox,lampwins/netbox,lampwins/netbox,digitalocean/netbox,lampwins/netbox
<REPLACE_OLD> label='Query', <REPLACE_NEW> label='Search', <REPLACE_END> <|endoftext|> from __future__ import unicode_literals from django import forms from utilities.forms import BootstrapMixin OBJ_TYPE_CHOICES = ( ('', 'All Objects'), ('Circuits', ( ('provider', 'Providers'), ('circuit', 'Circuits'), )), ('DCIM', ( ('site', 'Sites'), ('rack', 'Racks'), ('devicetype', 'Device types'), ('device', 'Devices'), )), ('IPAM', ( ('vrf', 'VRFs'), ('aggregate', 'Aggregates'), ('prefix', 'Prefixes'), ('ipaddress', 'IP addresses'), ('vlan', 'VLANs'), )), ('Secrets', ( ('secret', 'Secrets'), )), ('Tenancy', ( ('tenant', 'Tenants'), )), ) class SearchForm(BootstrapMixin, forms.Form): q = forms.CharField( label='Search', widget=forms.TextInput(attrs={'style': 'width: 350px'}) ) obj_type = forms.ChoiceField( choices=OBJ_TYPE_CHOICES, required=False, label='Type' )
Fix global search placeholder text from __future__ import unicode_literals from django import forms from utilities.forms import BootstrapMixin OBJ_TYPE_CHOICES = ( ('', 'All Objects'), ('Circuits', ( ('provider', 'Providers'), ('circuit', 'Circuits'), )), ('DCIM', ( ('site', 'Sites'), ('rack', 'Racks'), ('devicetype', 'Device types'), ('device', 'Devices'), )), ('IPAM', ( ('vrf', 'VRFs'), ('aggregate', 'Aggregates'), ('prefix', 'Prefixes'), ('ipaddress', 'IP addresses'), ('vlan', 'VLANs'), )), ('Secrets', ( ('secret', 'Secrets'), )), ('Tenancy', ( ('tenant', 'Tenants'), )), ) class SearchForm(BootstrapMixin, forms.Form): q = forms.CharField( label='Query', widget=forms.TextInput(attrs={'style': 'width: 350px'}) ) obj_type = forms.ChoiceField( choices=OBJ_TYPE_CHOICES, required=False, label='Type' )
27a0226ec444523034d739a00a999b089ce116ba
enthought/chaco/tools/api.py
enthought/chaco/tools/api.py
from better_zoom import BetterZoom from better_selecting_zoom import BetterSelectingZoom from broadcaster import BroadcasterTool from dataprinter import DataPrinter from data_label_tool import DataLabelTool from drag_zoom import DragZoom from enthought.enable.tools.drag_tool import DragTool from draw_points_tool import DrawPointsTool from highlight_tool import HighlightTool from image_inspector_tool import ImageInspectorTool, ImageInspectorOverlay from lasso_selection import LassoSelection from legend_tool import LegendTool from legend_highlighter import LegendHighlighter from line_inspector import LineInspector from line_segment_tool import LineSegmentTool from move_tool import MoveTool from pan_tool import PanTool from point_marker import PointMarker from range_selection import RangeSelection from range_selection_2d import RangeSelection2D from range_selection_overlay import RangeSelectionOverlay from regression_lasso import RegressionLasso, RegressionOverlay from save_tool import SaveTool from scatter_inspector import ScatterInspector from select_tool import SelectTool from simple_inspector import SimpleInspectorTool from tracking_pan_tool import TrackingPanTool from tracking_zoom import TrackingZoom from traits_tool import TraitsTool from zoom_tool import ZoomTool # EOF
from better_zoom import BetterZoom from better_selecting_zoom import BetterSelectingZoom from broadcaster import BroadcasterTool from dataprinter import DataPrinter from data_label_tool import DataLabelTool from enthought.enable.tools.drag_tool import DragTool from draw_points_tool import DrawPointsTool from highlight_tool import HighlightTool from image_inspector_tool import ImageInspectorTool, ImageInspectorOverlay from lasso_selection import LassoSelection from legend_tool import LegendTool from legend_highlighter import LegendHighlighter from line_inspector import LineInspector from line_segment_tool import LineSegmentTool from move_tool import MoveTool from pan_tool import PanTool from point_marker import PointMarker from range_selection import RangeSelection from range_selection_2d import RangeSelection2D from range_selection_overlay import RangeSelectionOverlay from regression_lasso import RegressionLasso, RegressionOverlay from save_tool import SaveTool from scatter_inspector import ScatterInspector from select_tool import SelectTool from simple_inspector import SimpleInspectorTool from tracking_pan_tool import TrackingPanTool from tracking_zoom import TrackingZoom from traits_tool import TraitsTool from zoom_tool import ZoomTool # EOF
Remove deprecated DragZoom from Chaco tools API to eliminate irrelevant BaseZoomTool deprecation warning. DragZoom is still used in 4 Chaco examples
[Chaco] Remove deprecated DragZoom from Chaco tools API to eliminate irrelevant BaseZoomTool deprecation warning. DragZoom is still used in 4 Chaco examples
Python
bsd-3-clause
ContinuumIO/chaco,tommy-u/chaco,tommy-u/chaco,ContinuumIO/chaco,tommy-u/chaco,ContinuumIO/chaco,burnpanck/chaco,burnpanck/chaco,ContinuumIO/chaco,burnpanck/chaco
<DELETE> drag_zoom import DragZoom from <DELETE_END> <|endoftext|> from better_zoom import BetterZoom from better_selecting_zoom import BetterSelectingZoom from broadcaster import BroadcasterTool from dataprinter import DataPrinter from data_label_tool import DataLabelTool from enthought.enable.tools.drag_tool import DragTool from draw_points_tool import DrawPointsTool from highlight_tool import HighlightTool from image_inspector_tool import ImageInspectorTool, ImageInspectorOverlay from lasso_selection import LassoSelection from legend_tool import LegendTool from legend_highlighter import LegendHighlighter from line_inspector import LineInspector from line_segment_tool import LineSegmentTool from move_tool import MoveTool from pan_tool import PanTool from point_marker import PointMarker from range_selection import RangeSelection from range_selection_2d import RangeSelection2D from range_selection_overlay import RangeSelectionOverlay from regression_lasso import RegressionLasso, RegressionOverlay from save_tool import SaveTool from scatter_inspector import ScatterInspector from select_tool import SelectTool from simple_inspector import SimpleInspectorTool from tracking_pan_tool import TrackingPanTool from tracking_zoom import TrackingZoom from traits_tool import TraitsTool from zoom_tool import ZoomTool # EOF
[Chaco] Remove deprecated DragZoom from Chaco tools API to eliminate irrelevant BaseZoomTool deprecation warning. DragZoom is still used in 4 Chaco examples from better_zoom import BetterZoom from better_selecting_zoom import BetterSelectingZoom from broadcaster import BroadcasterTool from dataprinter import DataPrinter from data_label_tool import DataLabelTool from drag_zoom import DragZoom from enthought.enable.tools.drag_tool import DragTool from draw_points_tool import DrawPointsTool from highlight_tool import HighlightTool from image_inspector_tool import ImageInspectorTool, ImageInspectorOverlay from lasso_selection import LassoSelection from legend_tool import LegendTool from legend_highlighter import LegendHighlighter from line_inspector import LineInspector from line_segment_tool import LineSegmentTool from move_tool import MoveTool from pan_tool import PanTool from point_marker import PointMarker from range_selection import RangeSelection from range_selection_2d import RangeSelection2D from range_selection_overlay import RangeSelectionOverlay from regression_lasso import RegressionLasso, RegressionOverlay from save_tool import SaveTool from scatter_inspector import ScatterInspector from select_tool import SelectTool from simple_inspector import SimpleInspectorTool from tracking_pan_tool import TrackingPanTool from tracking_zoom import TrackingZoom from traits_tool import TraitsTool from zoom_tool import ZoomTool # EOF
68ae2de4b51a2fe0f02c40bad8731d34b1092521
narcissa.py
narcissa.py
#!/usr/bin/env python3 import subprocess import atexit import sys from utils.safe_schedule import SafeScheduler from time import sleep from glob import glob META_IMPORT = '# narcissa import ' scheduler = SafeScheduler() def make_exit_graceful(): original_hook = sys.excepthook def new_hook(type, value, traceback): if type == KeyboardInterrupt: sys.exit("\nBye for now!") else: original_hook(type, value, traceback) sys.excepthook = new_hook def start_server(): cmd = 'waitress-serve --port=5000 server:app' p = subprocess.Popen(cmd.split(), cwd='server') return p def start_scrapers(): for scraper_path in glob('scrapers/*.py'): with open(scraper_path) as f: print(scraper_path) scraper_data = f.read() exec(scraper_data) def main(): make_exit_graceful() server = start_server() atexit.register(server.terminate) start_scrapers() while True: scheduler.run_pending() sleep(1) if __name__ == '__main__': main()
#!/usr/bin/env python3 import subprocess import atexit import sys from utils.safe_schedule import SafeScheduler from time import sleep from glob import glob META_IMPORT = '# narcissa import ' scheduler = SafeScheduler() def make_exit_graceful(): original_hook = sys.excepthook def new_hook(type, value, traceback): if type == KeyboardInterrupt: sys.exit("\nBye for now!") else: original_hook(type, value, traceback) sys.excepthook = new_hook def start_server(): cmd = 'waitress-serve --port=5000 server:app' p = subprocess.Popen(cmd.split(), cwd='server') return p def load_scrapers(): for scraper_path in glob('scrapers/*.py'): with open(scraper_path) as f: print(scraper_path) scraper_data = f.read() exec(scraper_data) def main(): make_exit_graceful() server = start_server() atexit.register(server.terminate) load_scrapers() while True: scheduler.run_pending() sleep(1) if __name__ == '__main__': main()
Change name of start_scrapers() to be more accurate
Change name of start_scrapers() to be more accurate
Python
mit
mplewis/narcissa
<REPLACE_OLD> start_scrapers(): <REPLACE_NEW> load_scrapers(): <REPLACE_END> <REPLACE_OLD> start_scrapers() <REPLACE_NEW> load_scrapers() <REPLACE_END> <|endoftext|> #!/usr/bin/env python3 import subprocess import atexit import sys from utils.safe_schedule import SafeScheduler from time import sleep from glob import glob META_IMPORT = '# narcissa import ' scheduler = SafeScheduler() def make_exit_graceful(): original_hook = sys.excepthook def new_hook(type, value, traceback): if type == KeyboardInterrupt: sys.exit("\nBye for now!") else: original_hook(type, value, traceback) sys.excepthook = new_hook def start_server(): cmd = 'waitress-serve --port=5000 server:app' p = subprocess.Popen(cmd.split(), cwd='server') return p def load_scrapers(): for scraper_path in glob('scrapers/*.py'): with open(scraper_path) as f: print(scraper_path) scraper_data = f.read() exec(scraper_data) def main(): make_exit_graceful() server = start_server() atexit.register(server.terminate) load_scrapers() while True: scheduler.run_pending() sleep(1) if __name__ == '__main__': main()
Change name of start_scrapers() to be more accurate #!/usr/bin/env python3 import subprocess import atexit import sys from utils.safe_schedule import SafeScheduler from time import sleep from glob import glob META_IMPORT = '# narcissa import ' scheduler = SafeScheduler() def make_exit_graceful(): original_hook = sys.excepthook def new_hook(type, value, traceback): if type == KeyboardInterrupt: sys.exit("\nBye for now!") else: original_hook(type, value, traceback) sys.excepthook = new_hook def start_server(): cmd = 'waitress-serve --port=5000 server:app' p = subprocess.Popen(cmd.split(), cwd='server') return p def start_scrapers(): for scraper_path in glob('scrapers/*.py'): with open(scraper_path) as f: print(scraper_path) scraper_data = f.read() exec(scraper_data) def main(): make_exit_graceful() server = start_server() atexit.register(server.terminate) start_scrapers() while True: scheduler.run_pending() sleep(1) if __name__ == '__main__': main()
54f70d759b2e0d384d626f4b55016166f9b26f16
camelot/roundtable/migrations/0002_add_knight_data.py
camelot/roundtable/migrations/0002_add_knight_data.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.core.management import call_command def add_knight_data(apps, schema_editor): call_command('loaddata', 'knight_data.json') def remove_knight_data(apps, schema_editor): pass class Migration(migrations.Migration): dependencies = [ ('roundtable', '0001_initial'), ] operations = [ migrations.RunPython( add_knight_data, reverse_code=remove_knight_data), ]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations def add_knight_data(apps, schema_editor): Knight = apps.get_model('roundtable', 'Knight') Knight.objects.bulk_create([ Knight(name='Arthur'), Knight(name='Bedevere'), Knight(name='Bors'), Knight(name='Ector'), Knight(name='Galahad'), Knight(name='Gawain'), Knight(name='Lancelot'), Knight(name='Robin'), ]) def remove_knight_data(apps, schema_editor): pass class Migration(migrations.Migration): dependencies = [ ('roundtable', '0001_initial'), ] operations = [ migrations.RunPython( add_knight_data, reverse_code=remove_knight_data), ]
Implement add_knight_data to generate data directly.
Implement add_knight_data to generate data directly.
Python
bsd-2-clause
jambonrose/djangocon2014-updj17
<REPLACE_OLD> migrations from django.core.management import call_command def <REPLACE_NEW> migrations def <REPLACE_END> <REPLACE_OLD> call_command('loaddata', 'knight_data.json') def <REPLACE_NEW> Knight = apps.get_model('roundtable', 'Knight') Knight.objects.bulk_create([ Knight(name='Arthur'), Knight(name='Bedevere'), Knight(name='Bors'), Knight(name='Ector'), Knight(name='Galahad'), Knight(name='Gawain'), Knight(name='Lancelot'), Knight(name='Robin'), ]) def <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations def add_knight_data(apps, schema_editor): Knight = apps.get_model('roundtable', 'Knight') Knight.objects.bulk_create([ Knight(name='Arthur'), Knight(name='Bedevere'), Knight(name='Bors'), Knight(name='Ector'), Knight(name='Galahad'), Knight(name='Gawain'), Knight(name='Lancelot'), Knight(name='Robin'), ]) def remove_knight_data(apps, schema_editor): pass class Migration(migrations.Migration): dependencies = [ ('roundtable', '0001_initial'), ] operations = [ migrations.RunPython( add_knight_data, reverse_code=remove_knight_data), ]
Implement add_knight_data to generate data directly. # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import models, migrations from django.core.management import call_command def add_knight_data(apps, schema_editor): call_command('loaddata', 'knight_data.json') def remove_knight_data(apps, schema_editor): pass class Migration(migrations.Migration): dependencies = [ ('roundtable', '0001_initial'), ] operations = [ migrations.RunPython( add_knight_data, reverse_code=remove_knight_data), ]
417b3caf2a64a68e0da56b3caa9213c2c64087c3
grammpy/Terminal.py
grammpy/Terminal.py
#!/usr/bin/env python """ :Author Patrik Valkovic :Created 23.06.2017 16:39 :Licence GNUv3 Part of grammpy """ class Terminal: def __init__(self, symbol, grammar): self.__symbol = symbol self.__grammar = grammar def __hash__(self): return hash((self.__symbol, id(self.__grammar))) def __eq__(self, other): return isinstance(other, Terminal) and hash(self) == hash(other)
Add class that represents a terminal symbol
Add class that represents a terminal symbol
Python
mit
PatrikValkovic/grammpy
<INSERT> #!/usr/bin/env python """ :Author Patrik Valkovic :Created 23.06.2017 16:39 :Licence GNUv3 Part of grammpy """ class Terminal: <INSERT_END> <INSERT> def __init__(self, symbol, grammar): self.__symbol = symbol self.__grammar = grammar def __hash__(self): return hash((self.__symbol, id(self.__grammar))) def __eq__(self, other): return isinstance(other, Terminal) and hash(self) == hash(other) <INSERT_END> <|endoftext|> #!/usr/bin/env python """ :Author Patrik Valkovic :Created 23.06.2017 16:39 :Licence GNUv3 Part of grammpy """ class Terminal: def __init__(self, symbol, grammar): self.__symbol = symbol self.__grammar = grammar def __hash__(self): return hash((self.__symbol, id(self.__grammar))) def __eq__(self, other): return isinstance(other, Terminal) and hash(self) == hash(other)
Add class that represents a terminal symbol
c4ab1ebcbc9d452972732ef5b15c0cf1b09bd8bc
changes/jobs/sync_repo.py
changes/jobs/sync_repo.py
from datetime import datetime from changes.config import db, queue from changes.models import Repository def sync_repo(repo_id): repo = Repository.query.get(repo_id) if not repo: return vcs = repo.get_vcs() if vcs is None: return repo.last_update_attempt = datetime.utcnow() db.session.add(repo) db.session.commit() try: if vcs.exists(): vcs.update() else: vcs.clone() # TODO(dcramer): this doesnt scrape everything, and really we wouldn't # want to do this all in a single job so we should split this into a # backfill task might_have_more = True parent = None while might_have_more: might_have_more = False for commit in vcs.log(parent=parent): revision, created = commit.save(repo) db.session.commit() if not created: break might_have_more = True parent = commit.id repo.last_update = datetime.utcnow() db.session.add(repo) db.session.commit() queue.delay('sync_repo', kwargs={ 'repo_id': repo_id }, countdown=15) except Exception as exc: # should we actually use retry support here? raise queue.retry('sync_repo', kwargs={ 'repo_id': repo_id, }, exc=exc, countdown=120)
from datetime import datetime from flask import current_app from changes.config import db, queue from changes.models import Repository def sync_repo(repo_id): repo = Repository.query.get(repo_id) if not repo: return vcs = repo.get_vcs() if vcs is None: return repo.last_update_attempt = datetime.utcnow() db.session.add(repo) db.session.commit() try: if vcs.exists(): vcs.update() else: vcs.clone() # TODO(dcramer): this doesnt scrape everything, and really we wouldn't # want to do this all in a single job so we should split this into a # backfill task might_have_more = True parent = None while might_have_more: might_have_more = False for commit in vcs.log(parent=parent): revision, created = commit.save(repo) db.session.commit() if not created: break might_have_more = True parent = commit.id repo.last_update = datetime.utcnow() db.session.add(repo) db.session.commit() queue.delay('sync_repo', kwargs={ 'repo_id': repo_id }, countdown=15) except Exception as exc: # should we actually use retry support here? current_app.logger.exception('Failed to sync repository %s', repo_id) raise queue.retry('sync_repo', kwargs={ 'repo_id': repo_id, }, exc=exc, countdown=120)
Use app logging instead of celery
Use app logging instead of celery
Python
apache-2.0
wfxiang08/changes,dropbox/changes,bowlofstew/changes,bowlofstew/changes,wfxiang08/changes,dropbox/changes,wfxiang08/changes,bowlofstew/changes,dropbox/changes,dropbox/changes,wfxiang08/changes,bowlofstew/changes
<REPLACE_OLD> datetime from <REPLACE_NEW> datetime from flask import current_app from <REPLACE_END> <INSERT> current_app.logger.exception('Failed to sync repository %s', repo_id) <INSERT_END> <|endoftext|> from datetime import datetime from flask import current_app from changes.config import db, queue from changes.models import Repository def sync_repo(repo_id): repo = Repository.query.get(repo_id) if not repo: return vcs = repo.get_vcs() if vcs is None: return repo.last_update_attempt = datetime.utcnow() db.session.add(repo) db.session.commit() try: if vcs.exists(): vcs.update() else: vcs.clone() # TODO(dcramer): this doesnt scrape everything, and really we wouldn't # want to do this all in a single job so we should split this into a # backfill task might_have_more = True parent = None while might_have_more: might_have_more = False for commit in vcs.log(parent=parent): revision, created = commit.save(repo) db.session.commit() if not created: break might_have_more = True parent = commit.id repo.last_update = datetime.utcnow() db.session.add(repo) db.session.commit() queue.delay('sync_repo', kwargs={ 'repo_id': repo_id }, countdown=15) except Exception as exc: # should we actually use retry support here? current_app.logger.exception('Failed to sync repository %s', repo_id) raise queue.retry('sync_repo', kwargs={ 'repo_id': repo_id, }, exc=exc, countdown=120)
Use app logging instead of celery from datetime import datetime from changes.config import db, queue from changes.models import Repository def sync_repo(repo_id): repo = Repository.query.get(repo_id) if not repo: return vcs = repo.get_vcs() if vcs is None: return repo.last_update_attempt = datetime.utcnow() db.session.add(repo) db.session.commit() try: if vcs.exists(): vcs.update() else: vcs.clone() # TODO(dcramer): this doesnt scrape everything, and really we wouldn't # want to do this all in a single job so we should split this into a # backfill task might_have_more = True parent = None while might_have_more: might_have_more = False for commit in vcs.log(parent=parent): revision, created = commit.save(repo) db.session.commit() if not created: break might_have_more = True parent = commit.id repo.last_update = datetime.utcnow() db.session.add(repo) db.session.commit() queue.delay('sync_repo', kwargs={ 'repo_id': repo_id }, countdown=15) except Exception as exc: # should we actually use retry support here? raise queue.retry('sync_repo', kwargs={ 'repo_id': repo_id, }, exc=exc, countdown=120)
5cb049385aa3d3ae57d18dc4b7d12f3d5e6f1ae4
tests/intervaltrigger_test.py
tests/intervaltrigger_test.py
import time import mock import pytest import spreadsplug.intervaltrigger as intervaltrigger @pytest.fixture def plugin(config): config['intervaltrigger']['interval'] = 0.1 return intervaltrigger.IntervalTrigger(config) def test_trigger_loop(plugin): cbmock = mock.Mock() plugin.start_trigger_loop(cbmock) time.sleep(0.55) plugin.stop_trigger_loop() assert cbmock.call_count == 5
import time import mock import pytest import spreadsplug.intervaltrigger as intervaltrigger @pytest.fixture def plugin(config): config['intervaltrigger']['interval'] = 0.1 return intervaltrigger.IntervalTrigger(config) def test_trigger_loop(plugin): cbmock = mock.Mock() plugin.start_trigger_loop(cbmock) time.sleep(0.6) plugin.stop_trigger_loop() assert cbmock.call_count == 5
Increase sleep-time in intervaltrigger tests
Increase sleep-time in intervaltrigger tests
Python
agpl-3.0
jbaiter/spreads,DIYBookScanner/spreads,nafraf/spreads,adongy/spreads,jbaiter/spreads,miloh/spreads,gareth8118/spreads,gareth8118/spreads,nafraf/spreads,gareth8118/spreads,jbaiter/spreads,DIYBookScanner/spreads,DIYBookScanner/spreads,nafraf/spreads,adongy/spreads,miloh/spreads,adongy/spreads,miloh/spreads
<REPLACE_OLD> time.sleep(0.55) <REPLACE_NEW> time.sleep(0.6) <REPLACE_END> <|endoftext|> import time import mock import pytest import spreadsplug.intervaltrigger as intervaltrigger @pytest.fixture def plugin(config): config['intervaltrigger']['interval'] = 0.1 return intervaltrigger.IntervalTrigger(config) def test_trigger_loop(plugin): cbmock = mock.Mock() plugin.start_trigger_loop(cbmock) time.sleep(0.6) plugin.stop_trigger_loop() assert cbmock.call_count == 5
Increase sleep-time in intervaltrigger tests import time import mock import pytest import spreadsplug.intervaltrigger as intervaltrigger @pytest.fixture def plugin(config): config['intervaltrigger']['interval'] = 0.1 return intervaltrigger.IntervalTrigger(config) def test_trigger_loop(plugin): cbmock = mock.Mock() plugin.start_trigger_loop(cbmock) time.sleep(0.55) plugin.stop_trigger_loop() assert cbmock.call_count == 5
e8c6be3565bd8b33dfb7a01dfb77938534ce9d09
pysswords/crypt.py
pysswords/crypt.py
import os import gnupg import logging from .utils import which def create_key_input(gpg, passphrase, testing=False): key_input = gpg.gen_key_input( name_real='Pysswords', name_email='pysswords@pysswords', name_comment='Autogenerated by Pysswords', passphrase=passphrase, testing=testing ) return key_input def create_gpg(binary, database_path, passphrase): gnupg_path = os.path.join(database_path, ".gnupg") gpg = gnupg.GPG(which(binary), homedir=gnupg_path) gpg.gen_key(create_key_input(gpg, passphrase)) return gpg
import os import gnupg import logging from .utils import which def create_key_input(gpg, passphrase, testing=False): key_input = gpg.gen_key_input( name_real='Pysswords', name_email='pysswords@pysswords', name_comment='Autogenerated by Pysswords', passphrase=passphrase, testing=testing ) return key_input def create_gpg(binary, database_path, passphrase): gnupg_path = os.path.join(database_path, ".gnupg") gpg = gnupg.GPG(which(binary), homedir=gnupg_path) gpg.gen_key(create_key_input(gpg, passphrase)) return gpg def load_gpg(binary, database_path): gnupg_path = os.path.join(database_path, ".gnupg") gpg = gnupg.GPG(which(binary), homedir=gnupg_path) return gpg
Add load_gpg to get an instance of gpg
Add load_gpg to get an instance of gpg
Python
mit
scorphus/passpie,scorphus/passpie,marcwebbie/passpie,eiginn/passpie,marcwebbie/pysswords,marcwebbie/passpie,eiginn/passpie
<INSERT> gpg def load_gpg(binary, database_path): gnupg_path = os.path.join(database_path, ".gnupg") gpg = gnupg.GPG(which(binary), homedir=gnupg_path) return <INSERT_END> <|endoftext|> import os import gnupg import logging from .utils import which def create_key_input(gpg, passphrase, testing=False): key_input = gpg.gen_key_input( name_real='Pysswords', name_email='pysswords@pysswords', name_comment='Autogenerated by Pysswords', passphrase=passphrase, testing=testing ) return key_input def create_gpg(binary, database_path, passphrase): gnupg_path = os.path.join(database_path, ".gnupg") gpg = gnupg.GPG(which(binary), homedir=gnupg_path) gpg.gen_key(create_key_input(gpg, passphrase)) return gpg def load_gpg(binary, database_path): gnupg_path = os.path.join(database_path, ".gnupg") gpg = gnupg.GPG(which(binary), homedir=gnupg_path) return gpg
Add load_gpg to get an instance of gpg import os import gnupg import logging from .utils import which def create_key_input(gpg, passphrase, testing=False): key_input = gpg.gen_key_input( name_real='Pysswords', name_email='pysswords@pysswords', name_comment='Autogenerated by Pysswords', passphrase=passphrase, testing=testing ) return key_input def create_gpg(binary, database_path, passphrase): gnupg_path = os.path.join(database_path, ".gnupg") gpg = gnupg.GPG(which(binary), homedir=gnupg_path) gpg.gen_key(create_key_input(gpg, passphrase)) return gpg
ba942aa988e049779a717c41d068547f5bce8b0b
setup.py
setup.py
#!/usr/bin/env python # coding: utf-8 import glob as _glob import setuptools as _st import tues as _tues if __name__ == '__main__': _st.setup( name='tues', version=_tues.__version__, url='https://github.com/wontfix-org/tues/', license='MIT', author='Michael van Bracht', author_email='michael@wontfix.org', description='Easy remote command execution', packages=_st.find_packages(), scripts=_glob.glob('scripts/tues*'), include_package_data=True, platforms='any', setup_requires=['setuptools-markdown'], long_description_markdown_filename='README.md', install_requires=['docopt', 'fabric3', 'requests>=2.4'], classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Natural Language :: English', ], )
#!/usr/bin/env python # coding: utf-8 import glob as _glob import setuptools as _st import tues as _tues if __name__ == '__main__': _st.setup( name='tues', version=_tues.__version__, url='https://github.com/wontfix-org/tues/', license='MIT', author='Michael van Bracht', author_email='michael@wontfix.org', description='Easy remote command execution', packages=_st.find_packages(), scripts=_glob.glob('scripts/tues*'), include_package_data=True, platforms='any', long_description=open("README.md").read(), long_description_content_type="text/markdown", install_requires=['docopt', 'fabric3', 'requests>=2.4'], classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Natural Language :: English', ], )
Move to native markdown parsing
packaging: Move to native markdown parsing. setuptools-markdown is deprecated
Python
mit
wontfix-org/tues
<REPLACE_OLD> setup_requires=['setuptools-markdown'], long_description_markdown_filename='README.md', <REPLACE_NEW> long_description=open("README.md").read(), long_description_content_type="text/markdown", <REPLACE_END> <INSERT> 'Programming Language :: Python :: 3', <INSERT_END> <|endoftext|> #!/usr/bin/env python # coding: utf-8 import glob as _glob import setuptools as _st import tues as _tues if __name__ == '__main__': _st.setup( name='tues', version=_tues.__version__, url='https://github.com/wontfix-org/tues/', license='MIT', author='Michael van Bracht', author_email='michael@wontfix.org', description='Easy remote command execution', packages=_st.find_packages(), scripts=_glob.glob('scripts/tues*'), include_package_data=True, platforms='any', long_description=open("README.md").read(), long_description_content_type="text/markdown", install_requires=['docopt', 'fabric3', 'requests>=2.4'], classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Natural Language :: English', ], )
packaging: Move to native markdown parsing. setuptools-markdown is deprecated #!/usr/bin/env python # coding: utf-8 import glob as _glob import setuptools as _st import tues as _tues if __name__ == '__main__': _st.setup( name='tues', version=_tues.__version__, url='https://github.com/wontfix-org/tues/', license='MIT', author='Michael van Bracht', author_email='michael@wontfix.org', description='Easy remote command execution', packages=_st.find_packages(), scripts=_glob.glob('scripts/tues*'), include_package_data=True, platforms='any', setup_requires=['setuptools-markdown'], long_description_markdown_filename='README.md', install_requires=['docopt', 'fabric3', 'requests>=2.4'], classifiers=[ 'Programming Language :: Python', 'Programming Language :: Python :: 2.7', 'Natural Language :: English', ], )
342d3791aa80084309ffc00a9e5e936fa8277401
AFQ/viz.py
AFQ/viz.py
import tempfile import os.path as op import numpy as np import IPython.display as display import nibabel as nib from dipy.viz import fvtk from palettable.tableau import Tableau_20 def visualize_bundles(trk, ren=None, inline=True, interact=False): """ Visualize bundles in 3D using fvtk """ if isinstance(trk, str): trk = nib.streamlines.load(trk) if ren is None: ren = fvtk.ren() for b in np.unique(trk.tractogram.data_per_streamline['bundle']): idx = np.where(trk.tractogram.data_per_streamline['bundle'] == b)[0] this_sl = list(trk.streamlines[idx]) sl_actor = fvtk.line(this_sl, Tableau_20.colors[np.mod(20, int(b))]) fvtk.add(ren, sl_actor) if inline: tdir = tempfile.gettempdir() fname = op.join(tdir, "fig.png") fvtk.record(ren, out_path=fname) display.display_png(display.Image(fname)) if interact: fvtk.show(ren) return ren
import tempfile import os.path as op import numpy as np import IPython.display as display import nibabel as nib from dipy.viz import fvtk from dipy.viz.colormap import line_colors from palettable.tableau import Tableau_20 def visualize_bundles(trk, ren=None, inline=True, interact=False): """ Visualize bundles in 3D using fvtk """ if isinstance(trk, str): trk = nib.streamlines.load(trk) if ren is None: ren = fvtk.ren() # There are no bundles in here: if list(trk.tractogram.data_per_streamline.keys()) == []: streamlines = list(trk.streamlines) sl_actor = fvtk.line(streamlines, line_colors(streamlines)) fvtk.add(ren, sl_actor) for b in np.unique(trk.tractogram.data_per_streamline['bundle']): idx = np.where(trk.tractogram.data_per_streamline['bundle'] == b)[0] this_sl = list(trk.streamlines[idx]) sl_actor = fvtk.line(this_sl, Tableau_20.colors[np.mod(20, int(b))]) fvtk.add(ren, sl_actor) if inline: tdir = tempfile.gettempdir() fname = op.join(tdir, "fig.png") fvtk.record(ren, out_path=fname) display.display_png(display.Image(fname)) if interact: fvtk.show(ren) return ren
Enable visualizing trk files without bundle designations.
Enable visualizing trk files without bundle designations.
Python
bsd-2-clause
yeatmanlab/pyAFQ,arokem/pyAFQ,yeatmanlab/pyAFQ,arokem/pyAFQ
<INSERT> dipy.viz.colormap import line_colors from <INSERT_END> <INSERT> # There are no bundles in here: if list(trk.tractogram.data_per_streamline.keys()) == []: streamlines = list(trk.streamlines) sl_actor = fvtk.line(streamlines, line_colors(streamlines)) fvtk.add(ren, sl_actor) <INSERT_END> <|endoftext|> import tempfile import os.path as op import numpy as np import IPython.display as display import nibabel as nib from dipy.viz import fvtk from dipy.viz.colormap import line_colors from palettable.tableau import Tableau_20 def visualize_bundles(trk, ren=None, inline=True, interact=False): """ Visualize bundles in 3D using fvtk """ if isinstance(trk, str): trk = nib.streamlines.load(trk) if ren is None: ren = fvtk.ren() # There are no bundles in here: if list(trk.tractogram.data_per_streamline.keys()) == []: streamlines = list(trk.streamlines) sl_actor = fvtk.line(streamlines, line_colors(streamlines)) fvtk.add(ren, sl_actor) for b in np.unique(trk.tractogram.data_per_streamline['bundle']): idx = np.where(trk.tractogram.data_per_streamline['bundle'] == b)[0] this_sl = list(trk.streamlines[idx]) sl_actor = fvtk.line(this_sl, Tableau_20.colors[np.mod(20, int(b))]) fvtk.add(ren, sl_actor) if inline: tdir = tempfile.gettempdir() fname = op.join(tdir, "fig.png") fvtk.record(ren, out_path=fname) display.display_png(display.Image(fname)) if interact: fvtk.show(ren) return ren
Enable visualizing trk files without bundle designations. import tempfile import os.path as op import numpy as np import IPython.display as display import nibabel as nib from dipy.viz import fvtk from palettable.tableau import Tableau_20 def visualize_bundles(trk, ren=None, inline=True, interact=False): """ Visualize bundles in 3D using fvtk """ if isinstance(trk, str): trk = nib.streamlines.load(trk) if ren is None: ren = fvtk.ren() for b in np.unique(trk.tractogram.data_per_streamline['bundle']): idx = np.where(trk.tractogram.data_per_streamline['bundle'] == b)[0] this_sl = list(trk.streamlines[idx]) sl_actor = fvtk.line(this_sl, Tableau_20.colors[np.mod(20, int(b))]) fvtk.add(ren, sl_actor) if inline: tdir = tempfile.gettempdir() fname = op.join(tdir, "fig.png") fvtk.record(ren, out_path=fname) display.display_png(display.Image(fname)) if interact: fvtk.show(ren) return ren
99469256b4585b5c0056d69e153e7c628f4430c1
leak_matcher.py
leak_matcher.py
#!/usr/bin/python # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import sys import re # If you add a line like this to the ctor for a class: # printf_stderr("ZZZ CREATE %p\n", this); # and a line line this to the dtor for a class: # printf_stderr("ZZZ DESTROY %p\n", this); # then this log will process the resulting mochitest log # and give you the mochitest that was running when any such # objects were allocated that had no matching dtor. cdMatch = re.compile('^.* ZZZ (CREATE|DESTROY) ([0-9A-F]+)\r?$') anyUnknown = False live = {} currTest = None for l in sys.stdin: if not 'ZZ' in l: if l.find("TEST-START") > -1: currTest = l.split('|')[1].strip() continue cdm = cdMatch.match(l) if not cdm: print 'Unknown line: ', l, anyUnknown = True continue isCreate = cdm.group(1) == 'CREATE' assert isCreate or cdm.group(1) == 'DESTROY' addr = cdm.group(2) if len(addr) != 8: print 'Not enough characters in address:', addr, l, if isCreate: assert not addr in live assert currTest live[addr] = currTest else: assert addr in live del live[addr] if anyUnknown: exit(-1) testCounts = {} for liveAddr, inTest in live.iteritems(): testCounts[inTest] = testCounts.setdefault(inTest, 0) + 1 for t, n in testCounts.iteritems(): print n, t
Add leak matcher for figuring out which test is leaking objects of a type
Add leak matcher for figuring out which test is leaking objects of a type
Python
mpl-2.0
amccreight/mochitest-logs
<REPLACE_OLD> <REPLACE_NEW> #!/usr/bin/python # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import sys import re # If you add a line like this to the ctor for a class: # printf_stderr("ZZZ CREATE %p\n", this); # and a line line this to the dtor for a class: # printf_stderr("ZZZ DESTROY %p\n", this); # then this log will process the resulting mochitest log # and give you the mochitest that was running when any such # objects were allocated that had no matching dtor. cdMatch = re.compile('^.* ZZZ (CREATE|DESTROY) ([0-9A-F]+)\r?$') anyUnknown = False live = {} currTest = None for l in sys.stdin: if not 'ZZ' in l: if l.find("TEST-START") > -1: currTest = l.split('|')[1].strip() continue cdm = cdMatch.match(l) if not cdm: print 'Unknown line: ', l, anyUnknown = True continue isCreate = cdm.group(1) == 'CREATE' assert isCreate or cdm.group(1) == 'DESTROY' addr = cdm.group(2) if len(addr) != 8: print 'Not enough characters in address:', addr, l, if isCreate: assert not addr in live assert currTest live[addr] = currTest else: assert addr in live del live[addr] if anyUnknown: exit(-1) testCounts = {} for liveAddr, inTest in live.iteritems(): testCounts[inTest] = testCounts.setdefault(inTest, 0) + 1 for t, n in testCounts.iteritems(): print n, t <REPLACE_END> <|endoftext|> #!/usr/bin/python # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. import sys import re # If you add a line like this to the ctor for a class: # printf_stderr("ZZZ CREATE %p\n", this); # and a line line this to the dtor for a class: # printf_stderr("ZZZ DESTROY %p\n", this); # then this log will process the resulting mochitest log # and give you the mochitest that was running when any such # objects were allocated that had no matching dtor. cdMatch = re.compile('^.* ZZZ (CREATE|DESTROY) ([0-9A-F]+)\r?$') anyUnknown = False live = {} currTest = None for l in sys.stdin: if not 'ZZ' in l: if l.find("TEST-START") > -1: currTest = l.split('|')[1].strip() continue cdm = cdMatch.match(l) if not cdm: print 'Unknown line: ', l, anyUnknown = True continue isCreate = cdm.group(1) == 'CREATE' assert isCreate or cdm.group(1) == 'DESTROY' addr = cdm.group(2) if len(addr) != 8: print 'Not enough characters in address:', addr, l, if isCreate: assert not addr in live assert currTest live[addr] = currTest else: assert addr in live del live[addr] if anyUnknown: exit(-1) testCounts = {} for liveAddr, inTest in live.iteritems(): testCounts[inTest] = testCounts.setdefault(inTest, 0) + 1 for t, n in testCounts.iteritems(): print n, t
Add leak matcher for figuring out which test is leaking objects of a type
fadb99ce3a93b2e4be7a654277c921fb5ed562ad
replace-jars.py
replace-jars.py
#!/usr/bin/env python import os import re import shutil import sys """ Automate updating multiple HDP jars for debugging/hotfix purposes. Agnostic to the target directory layout which can differ across HDP versions. """ if (len(sys.argv) != 4): print("Usage: replace-jars.pl <source-dir> <source-version> <dst-version>") print(" source-dir : Directory containing the new jar versions.") print(" source-version : Version string of the new jars.") print(" dst-version : Installed HDP version to be updated.") sys.exit(1) # Strip out the first three digits which are the Apache version # from dst_ver. # src, src_ver, dst_ver = sys.argv[1:] ver_pattern = re.compile('^\d+\.\d+\.\d+\.') dst = "/usr/hdp/" + re.sub(ver_pattern, "", dst_ver) # Sanity checks. # if not os.path.isdir(dst): print("Directory {} does not exist".format(dst)) sys.exit(1) if not os.path.isdir(src): print("Directory {} does not exist".format(src)) sys.exit(1) # Build a map of source jar name to its full path under # the source directory. # sources = {} for root, dirs, files in os.walk(src): for f in files: if f.endswith('.jar') and f not in sources: sources[f] = os.path.join(root, f) print("Got {} source jars.".format(len(sources))) # List destination jars, and replace each with the corresponding # source jar. # TODO: Create a backup of the jars being replaced. # jars_replaced = 0 for root, dirs, files in os.walk(dst): for f in files: if f.endswith('.jar') and f.startswith('hadoop'): dest = os.path.join(root, f) src_jar_name = f.replace(dst_ver, src_ver, 1) if src_jar_name in sources and os.path.isfile(dest): print("{} -> {}".format(dest, sources[src_jar_name])) shutil.copy2(sources[src_jar_name], dest) jars_replaced += 1 print("Replaced {} jars.".format(jars_replaced))
Add script to automate replacing HDP jars.
Add script to automate replacing HDP jars.
Python
apache-2.0
arp7/HadoopTools,arp7/HadoopTools,arp7/HadoopTools
<REPLACE_OLD> <REPLACE_NEW> #!/usr/bin/env python import os import re import shutil import sys """ Automate updating multiple HDP jars for debugging/hotfix purposes. Agnostic to the target directory layout which can differ across HDP versions. """ if (len(sys.argv) != 4): print("Usage: replace-jars.pl <source-dir> <source-version> <dst-version>") print(" source-dir : Directory containing the new jar versions.") print(" source-version : Version string of the new jars.") print(" dst-version : Installed HDP version to be updated.") sys.exit(1) # Strip out the first three digits which are the Apache version # from dst_ver. # src, src_ver, dst_ver = sys.argv[1:] ver_pattern = re.compile('^\d+\.\d+\.\d+\.') dst = "/usr/hdp/" + re.sub(ver_pattern, "", dst_ver) # Sanity checks. # if not os.path.isdir(dst): print("Directory {} does not exist".format(dst)) sys.exit(1) if not os.path.isdir(src): print("Directory {} does not exist".format(src)) sys.exit(1) # Build a map of source jar name to its full path under # the source directory. # sources = {} for root, dirs, files in os.walk(src): for f in files: if f.endswith('.jar') and f not in sources: sources[f] = os.path.join(root, f) print("Got {} source jars.".format(len(sources))) # List destination jars, and replace each with the corresponding # source jar. # TODO: Create a backup of the jars being replaced. # jars_replaced = 0 for root, dirs, files in os.walk(dst): for f in files: if f.endswith('.jar') and f.startswith('hadoop'): dest = os.path.join(root, f) src_jar_name = f.replace(dst_ver, src_ver, 1) if src_jar_name in sources and os.path.isfile(dest): print("{} -> {}".format(dest, sources[src_jar_name])) shutil.copy2(sources[src_jar_name], dest) jars_replaced += 1 print("Replaced {} jars.".format(jars_replaced))
Add script to automate replacing HDP jars.
0c3f5008dd66b0bb8dfd2a4993def7d0c7a5bf84
greyjay/articles/migrations/0091_articlepage_reading_time.py
greyjay/articles/migrations/0091_articlepage_reading_time.py
# -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-05-03 18:13 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('articles', '0090_auto_20170502_1621'), ] operations = [ migrations.AddField( model_name='articlepage', name='reading_time', field=models.PositiveIntegerField(default=0), ), ]
Add reading_time to article pages.
Add reading_time to article pages.
Python
mit
CIGIHub/greyjay,CIGIHub/greyjay,CIGIHub/greyjay
<INSERT> # -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-05-03 18:13 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): <INSERT_END> <INSERT> dependencies = [ ('articles', '0090_auto_20170502_1621'), ] operations = [ migrations.AddField( model_name='articlepage', name='reading_time', field=models.PositiveIntegerField(default=0), ), ] <INSERT_END> <|endoftext|> # -*- coding: utf-8 -*- # Generated by Django 1.10.7 on 2017-05-03 18:13 from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('articles', '0090_auto_20170502_1621'), ] operations = [ migrations.AddField( model_name='articlepage', name='reading_time', field=models.PositiveIntegerField(default=0), ), ]
Add reading_time to article pages.
ff1b1f8c61ea14c598443b54024357dc05d4dda2
shapes.py
shapes.py
#Create a function to get a mesh file 'f' def getMesh(fname): #Open the file f = open(fname, "r") #Store the lines as a list f = list(f) #Strip newlines from the list for l in range(len(f)): f[l] = f[l].replace("\n","") #Store the number of vertices, edges and sides v = int(f[0]) e = int(f[1]) s = int(f[2]) #Create empty lists to hold the data vertices = [] edges = [] sides = [] #Loop over all of the vertices and add them to the list for i in range(3, v + 3): #Split the vertex into a list of coordinates vertex = f[i].split(",") #Turn the coordinates into integers and append to the vertex vertices.append((int(vertex[0]), int(vertex[1]), int(vertex[2]))) #Loop over all of the edges and add them to the list for i in range(v + 3, e + v + 3): #Split the edge into a list of vertices edge = f[i].split(",") #Turn the vertices indexes into integers and add to the side edges.append((int(edge[0]), int(edge[1]))) #Loop over all of the sides and add them to the list for i in range(e + v + 3, s + e + v + 3): #Split the side into a list of vertices side = f[i].split(",") #Create a new side list to hold the data newside = [] #For each vertex index in the side, add it to the new side variable for p in side: newside.append(int(p)) #Add the side to the list of sides sides.append(tuple(newside)) #Return the data return (vertices, edges, sides)
Write function to read in vertices, edges and sides from file.
Write function to read in vertices, edges and sides from file.
Python
mit
thebillington/pyPhys3D
<REPLACE_OLD> <REPLACE_NEW> #Create a function to get a mesh file 'f' def getMesh(fname): #Open the file f = open(fname, "r") #Store the lines as a list f = list(f) #Strip newlines from the list for l in range(len(f)): f[l] = f[l].replace("\n","") #Store the number of vertices, edges and sides v = int(f[0]) e = int(f[1]) s = int(f[2]) #Create empty lists to hold the data vertices = [] edges = [] sides = [] #Loop over all of the vertices and add them to the list for i in range(3, v + 3): #Split the vertex into a list of coordinates vertex = f[i].split(",") #Turn the coordinates into integers and append to the vertex vertices.append((int(vertex[0]), int(vertex[1]), int(vertex[2]))) #Loop over all of the edges and add them to the list for i in range(v + 3, e + v + 3): #Split the edge into a list of vertices edge = f[i].split(",") #Turn the vertices indexes into integers and add to the side edges.append((int(edge[0]), int(edge[1]))) #Loop over all of the sides and add them to the list for i in range(e + v + 3, s + e + v + 3): #Split the side into a list of vertices side = f[i].split(",") #Create a new side list to hold the data newside = [] #For each vertex index in the side, add it to the new side variable for p in side: newside.append(int(p)) #Add the side to the list of sides sides.append(tuple(newside)) #Return the data return (vertices, edges, sides) <REPLACE_END> <|endoftext|> #Create a function to get a mesh file 'f' def getMesh(fname): #Open the file f = open(fname, "r") #Store the lines as a list f = list(f) #Strip newlines from the list for l in range(len(f)): f[l] = f[l].replace("\n","") #Store the number of vertices, edges and sides v = int(f[0]) e = int(f[1]) s = int(f[2]) #Create empty lists to hold the data vertices = [] edges = [] sides = [] #Loop over all of the vertices and add them to the list for i in range(3, v + 3): #Split the vertex into a list of coordinates vertex = f[i].split(",") #Turn the coordinates into integers and append to the vertex vertices.append((int(vertex[0]), int(vertex[1]), int(vertex[2]))) #Loop over all of the edges and add them to the list for i in range(v + 3, e + v + 3): #Split the edge into a list of vertices edge = f[i].split(",") #Turn the vertices indexes into integers and add to the side edges.append((int(edge[0]), int(edge[1]))) #Loop over all of the sides and add them to the list for i in range(e + v + 3, s + e + v + 3): #Split the side into a list of vertices side = f[i].split(",") #Create a new side list to hold the data newside = [] #For each vertex index in the side, add it to the new side variable for p in side: newside.append(int(p)) #Add the side to the list of sides sides.append(tuple(newside)) #Return the data return (vertices, edges, sides)
Write function to read in vertices, edges and sides from file.
2d688f97b9869fdfed9237b91fdce287278e3c6c
wsgi.py
wsgi.py
import os from elasticsearch_raven.transport import ElasticsearchTransport from elasticsearch_raven.utils import get_index host = os.environ.get('ELASTICSEARCH_HOST', 'localhost:9200') transport = ElasticsearchTransport(host) def application(environ, start_response): index = get_index(environ) transport.send(environ['wsgi.input'].read(), index) status = '200 OK' response_headers = [('Content-Type', 'text/plain')] start_response(status, response_headers) return [''.encode('utf-8')]
import os from queue import Queue from threading import Thread from elasticsearch_raven.transport import ElasticsearchTransport from elasticsearch_raven.utils import get_index host = os.environ.get('ELASTICSEARCH_HOST', 'localhost:9200') transport = ElasticsearchTransport(host) blocking_queue = Queue() def send(): while True: data, index = blocking_queue.get() transport.send(data, index) blocking_queue.task_done() sender = Thread(target=send) sender.daemon = True sender.start() def application(environ, start_response): index = get_index(environ) length = int(environ.get('CONTENT_LENGTH', '0')) data = environ['wsgi.input'].read(length) blocking_queue.put((data, index)) status = '200 OK' response_headers = [('Content-Type', 'text/plain')] start_response(status, response_headers) return [''.encode('utf-8')]
Send data to elasticsearch asynchronously.
Send data to elasticsearch asynchronously.
Python
mit
socialwifi/elasticsearch-raven,pozytywnie/elasticsearch-raven,serathius/elasticsearch-raven
<REPLACE_OLD> os from <REPLACE_NEW> os from queue import Queue from threading import Thread from <REPLACE_END> <REPLACE_OLD> ElasticsearchTransport(host) def <REPLACE_NEW> ElasticsearchTransport(host) blocking_queue = Queue() def send(): while True: data, index = blocking_queue.get() transport.send(data, index) blocking_queue.task_done() sender = Thread(target=send) sender.daemon = True sender.start() def <REPLACE_END> <REPLACE_OLD> transport.send(environ['wsgi.input'].read(), index) <REPLACE_NEW> length = int(environ.get('CONTENT_LENGTH', '0')) data = environ['wsgi.input'].read(length) blocking_queue.put((data, index)) <REPLACE_END> <|endoftext|> import os from queue import Queue from threading import Thread from elasticsearch_raven.transport import ElasticsearchTransport from elasticsearch_raven.utils import get_index host = os.environ.get('ELASTICSEARCH_HOST', 'localhost:9200') transport = ElasticsearchTransport(host) blocking_queue = Queue() def send(): while True: data, index = blocking_queue.get() transport.send(data, index) blocking_queue.task_done() sender = Thread(target=send) sender.daemon = True sender.start() def application(environ, start_response): index = get_index(environ) length = int(environ.get('CONTENT_LENGTH', '0')) data = environ['wsgi.input'].read(length) blocking_queue.put((data, index)) status = '200 OK' response_headers = [('Content-Type', 'text/plain')] start_response(status, response_headers) return [''.encode('utf-8')]
Send data to elasticsearch asynchronously. import os from elasticsearch_raven.transport import ElasticsearchTransport from elasticsearch_raven.utils import get_index host = os.environ.get('ELASTICSEARCH_HOST', 'localhost:9200') transport = ElasticsearchTransport(host) def application(environ, start_response): index = get_index(environ) transport.send(environ['wsgi.input'].read(), index) status = '200 OK' response_headers = [('Content-Type', 'text/plain')] start_response(status, response_headers) return [''.encode('utf-8')]
2fdf35f8a9bf7a6249bc92236952655314a47080
swapify/__init__.py
swapify/__init__.py
# -*- encoding: utf-8 -*- __version__ = VERSION = '0.0.0'
# -*- encoding: utf-8 -*- __author__ = 'Sebastian Vetter' __version__ = VERSION = '0.0.0' __license__ = 'MIT'
Add author and license variables
Add author and license variables
Python
mit
elbaschid/swapify
<REPLACE_OLD> -*- __version__ <REPLACE_NEW> -*- __author__ = 'Sebastian Vetter' __version__ <REPLACE_END> <REPLACE_OLD> '0.0.0' <REPLACE_NEW> '0.0.0' __license__ = 'MIT' <REPLACE_END> <|endoftext|> # -*- encoding: utf-8 -*- __author__ = 'Sebastian Vetter' __version__ = VERSION = '0.0.0' __license__ = 'MIT'
Add author and license variables # -*- encoding: utf-8 -*- __version__ = VERSION = '0.0.0'
2c1673930a40fc94c3d7c7d4f764ea423b638d26
mccurse/cli.py
mccurse/cli.py
"""Package command line interface.""" import click from .curse import Game, Mod # Static data MINECRAFT = {'id': 432, 'name': 'Minecraft'} @click.group() def cli(): """Minecraft Curse CLI client.""" @cli.command() @click.option( '--refresh', is_flag=True, default=False, help='Force refreshing of search data.' ) @click.argument('text', nargs=-1, type=str) def search(refresh, text): """Search for TEXT in mods on CurseForge.""" mc = Game(**MINECRAFT) text = ' '.join(text) refresh = refresh or not mc.have_fresh_data() if refresh: click.echo('Refreshing search data, please wait…', err=True) mc.refresh_data() mod_fmt = '{0.name}: {0.summary}' for mod in Mod.search(mc.database.session(), text): click.echo(mod_fmt.format(mod)) # If run as a package, run whole cli cli()
"""Package command line interface.""" import click from .curse import Game, Mod # Static data MINECRAFT = {'id': 432, 'name': 'Minecraft'} @click.group() def cli(): """Minecraft Curse CLI client.""" @cli.command() @click.option( '--refresh', is_flag=True, default=False, help='Force refreshing of search data.' ) @click.argument('text', nargs=-1, type=str) def search(refresh, text): """Search for TEXT in mods on CurseForge.""" if not text: raise SystemExit('No text to search for!') mc = Game(**MINECRAFT) text = ' '.join(text) refresh = refresh or not mc.have_fresh_data() if refresh: click.echo('Refreshing search data, please wait…', err=True) mc.refresh_data() mod_fmt = '{0.name}: {0.summary}' for mod in Mod.search(mc.database.session(), text): click.echo(mod_fmt.format(mod)) # If run as a package, run whole cli cli()
Raise error when there is no term to search for
Raise error when there is no term to search for
Python
agpl-3.0
khardix/mccurse
<INSERT> if not text: raise SystemExit('No text to search for!') <INSERT_END> <|endoftext|> """Package command line interface.""" import click from .curse import Game, Mod # Static data MINECRAFT = {'id': 432, 'name': 'Minecraft'} @click.group() def cli(): """Minecraft Curse CLI client.""" @cli.command() @click.option( '--refresh', is_flag=True, default=False, help='Force refreshing of search data.' ) @click.argument('text', nargs=-1, type=str) def search(refresh, text): """Search for TEXT in mods on CurseForge.""" if not text: raise SystemExit('No text to search for!') mc = Game(**MINECRAFT) text = ' '.join(text) refresh = refresh or not mc.have_fresh_data() if refresh: click.echo('Refreshing search data, please wait…', err=True) mc.refresh_data() mod_fmt = '{0.name}: {0.summary}' for mod in Mod.search(mc.database.session(), text): click.echo(mod_fmt.format(mod)) # If run as a package, run whole cli cli()
Raise error when there is no term to search for """Package command line interface.""" import click from .curse import Game, Mod # Static data MINECRAFT = {'id': 432, 'name': 'Minecraft'} @click.group() def cli(): """Minecraft Curse CLI client.""" @cli.command() @click.option( '--refresh', is_flag=True, default=False, help='Force refreshing of search data.' ) @click.argument('text', nargs=-1, type=str) def search(refresh, text): """Search for TEXT in mods on CurseForge.""" mc = Game(**MINECRAFT) text = ' '.join(text) refresh = refresh or not mc.have_fresh_data() if refresh: click.echo('Refreshing search data, please wait…', err=True) mc.refresh_data() mod_fmt = '{0.name}: {0.summary}' for mod in Mod.search(mc.database.session(), text): click.echo(mod_fmt.format(mod)) # If run as a package, run whole cli cli()
abac33bc2c8713f5187529e13557ea6b58472079
Problems/shapeAreaCF.py
Problems/shapeAreaCF.py
def shapeArea(n): if n < 1 or n > 10**4: raise ValueError if n == 1: return 1 else: innerArea = shapeArea(n - 1) return innerArea + (n - 1) * 4 def main(): tests = [-1, 10**5, 1, 2, 3, 4] results = [False, False, 1, 5, 13, 25] for i, t in enumerate(tests): try: r = shapeArea(t) if r == results[i]: print("PASSED: shapeArea({}) returned {}".format(t, r)) else: print("FAILED: shapeArea({}) returned\ {}, vs {}".format(t, r, results[i])) except ValueError: print("PASSED ValueError test") if __name__ == '__main__': main()
Add code fight shape area solution
Add code fight shape area solution
Python
mit
HKuz/Test_Code
<REPLACE_OLD> <REPLACE_NEW> def shapeArea(n): if n < 1 or n > 10**4: raise ValueError if n == 1: return 1 else: innerArea = shapeArea(n - 1) return innerArea + (n - 1) * 4 def main(): tests = [-1, 10**5, 1, 2, 3, 4] results = [False, False, 1, 5, 13, 25] for i, t in enumerate(tests): try: r = shapeArea(t) if r == results[i]: print("PASSED: shapeArea({}) returned {}".format(t, r)) else: print("FAILED: shapeArea({}) returned\ {}, vs {}".format(t, r, results[i])) except ValueError: print("PASSED ValueError test") if __name__ == '__main__': main() <REPLACE_END> <|endoftext|> def shapeArea(n): if n < 1 or n > 10**4: raise ValueError if n == 1: return 1 else: innerArea = shapeArea(n - 1) return innerArea + (n - 1) * 4 def main(): tests = [-1, 10**5, 1, 2, 3, 4] results = [False, False, 1, 5, 13, 25] for i, t in enumerate(tests): try: r = shapeArea(t) if r == results[i]: print("PASSED: shapeArea({}) returned {}".format(t, r)) else: print("FAILED: shapeArea({}) returned\ {}, vs {}".format(t, r, results[i])) except ValueError: print("PASSED ValueError test") if __name__ == '__main__': main()
Add code fight shape area solution
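The recursion grows the figure by a ring of 4*(n-1) cells per step, which telescopes to the closed form 2*n**2 - 2*n + 1 (equivalently n**2 + (n-1)**2). A stdlib-only cross-check, illustrative and not part of the record:

def shape_area_closed(n):
    # telescoped sum: 1 + sum(4*(k-1) for k in 2..n)
    return 2 * n * n - 2 * n + 1

def shape_area_recursive(n):
    return 1 if n == 1 else shape_area_recursive(n - 1) + (n - 1) * 4

assert all(shape_area_closed(n) == shape_area_recursive(n)
           for n in range(1, 400))
assert [shape_area_closed(n) for n in (1, 2, 3, 4)] == [1, 5, 13, 25]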
69fe4ba3cc0338b4cd962e0571b9ae1d54e139ee
website/addons/base/serializer.py
website/addons/base/serializer.py
import abc from website.util import web_url_for class AddonSerializer(object): __metaclass__ = abc.ABCMeta def __init__(self, addon_node_settings, user): self.addon_node_settings = addon_node_settings self.user = user @abc.abstractproperty def serialized_urls(self): pass @abc.abstractproperty def has_valid_credentials(self): pass @abc.abstractproperty def node_has_auth(self): pass @abc.abstractproperty def user_has_auth(self): pass @abc.abstractproperty def user_is_owner(self): pass @abc.abstractproperty def credentials_owner(self): pass @property def serialized_settings(self): node_has_auth = self.node_has_auth result = { 'nodeHasAuth': node_has_auth, 'userHasAuth': self.user_has_auth, 'userIsOwner': self.user_is_owner, 'validCredentials': self.has_valid_credentials, 'urls': self.serialized_urls, } if node_has_auth: owner = self.credentials_owner if owner: result['urls']['owner'] = web_url_for('profile_view_id', uid=owner._primary_key) result['ownerName'] = owner.fullname return result class StorageAddonSerializer(AddonSerializer): __metaclass__ = abc.ABCMeta @abc.abstractproperty def serialized_folder(self): pass @property def serialized_settings(self): result = super(StorageAddonSerializer, self).serialized_settings result['folder'] = self.serialized_folder return result class CitationsAddonSerializer(AddonSerializer): __metaclass__ = abc.ABCMeta
Add base class for serializing addons.
Add base class for serializing addons.
Python
apache-2.0
SSJohns/osf.io,chrisseto/osf.io,aaxelb/osf.io,Nesiehr/osf.io,petermalcolm/osf.io,kwierman/osf.io,MerlinZhang/osf.io,ZobairAlijan/osf.io,SSJohns/osf.io,caseyrygt/osf.io,DanielSBrown/osf.io,baylee-d/osf.io,jmcarp/osf.io,ticklemepierce/osf.io,CenterForOpenScience/osf.io,njantrania/osf.io,asanfilippo7/osf.io,HarryRybacki/osf.io,jeffreyliu3230/osf.io,HarryRybacki/osf.io,cldershem/osf.io,icereval/osf.io,abought/osf.io,lamdnhan/osf.io,KAsante95/osf.io,danielneis/osf.io,mluke93/osf.io,zkraime/osf.io,bdyetton/prettychart,Johnetordoff/osf.io,monikagrabowska/osf.io,barbour-em/osf.io,zamattiac/osf.io,brandonPurvis/osf.io,monikagrabowska/osf.io,DanielSBrown/osf.io,felliott/osf.io,samchrisinger/osf.io,rdhyee/osf.io,jnayak1/osf.io,cosenal/osf.io,CenterForOpenScience/osf.io,samchrisinger/osf.io,chrisseto/osf.io,MerlinZhang/osf.io,zamattiac/osf.io,RomanZWang/osf.io,lyndsysimon/osf.io,njantrania/osf.io,mluke93/osf.io,felliott/osf.io,mluo613/osf.io,kushG/osf.io,hmoco/osf.io,sbt9uc/osf.io,mfraezz/osf.io,kushG/osf.io,zamattiac/osf.io,reinaH/osf.io,bdyetton/prettychart,ZobairAlijan/osf.io,HalcyonChimera/osf.io,acshi/osf.io,emetsger/osf.io,saradbowman/osf.io,abought/osf.io,chennan47/osf.io,GageGaskins/osf.io,wearpants/osf.io,KAsante95/osf.io,kch8qx/osf.io,cwisecarver/osf.io,monikagrabowska/osf.io,emetsger/osf.io,kch8qx/osf.io,hmoco/osf.io,reinaH/osf.io,cldershem/osf.io,RomanZWang/osf.io,HalcyonChimera/osf.io,Johnetordoff/osf.io,ticklemepierce/osf.io,dplorimer/osf,mluo613/osf.io,GageGaskins/osf.io,cslzchen/osf.io,lamdnhan/osf.io,GageGaskins/osf.io,billyhunt/osf.io,GaryKriebel/osf.io,GaryKriebel/osf.io,samanehsan/osf.io,wearpants/osf.io,petermalcolm/osf.io,adlius/osf.io,asanfilippo7/osf.io,KAsante95/osf.io,HarryRybacki/osf.io,cosenal/osf.io,acshi/osf.io,felliott/osf.io,caseyrollins/osf.io,billyhunt/osf.io,TomBaxter/osf.io,rdhyee/osf.io,samchrisinger/osf.io,zkraime/osf.io,caneruguz/osf.io,KAsante95/osf.io,sloria/osf.io,lyndsysimon/osf.io,lyndsysimon/osf.io,petermalcolm/osf.io,samanehsan/osf.io,zachjanicki/osf.io,caseyrollins/osf.io,himanshuo/osf.io,barbour-em/osf.io,fabianvf/osf.io,chennan47/osf.io,cslzchen/osf.io,chennan47/osf.io,TomHeatwole/osf.io,brianjgeiger/osf.io,alexschiller/osf.io,acshi/osf.io,caseyrollins/osf.io,DanielSBrown/osf.io,adlius/osf.io,njantrania/osf.io,fabianvf/osf.io,mluke93/osf.io,sloria/osf.io,caneruguz/osf.io,petermalcolm/osf.io,GageGaskins/osf.io,adlius/osf.io,barbour-em/osf.io,amyshi188/osf.io,binoculars/osf.io,Nesiehr/osf.io,cldershem/osf.io,mfraezz/osf.io,cosenal/osf.io,jolene-esposito/osf.io,caseyrygt/osf.io,wearpants/osf.io,doublebits/osf.io,bdyetton/prettychart,RomanZWang/osf.io,sbt9uc/osf.io,MerlinZhang/osf.io,caseyrygt/osf.io,CenterForOpenScience/osf.io,pattisdr/osf.io,hmoco/osf.io,ticklemepierce/osf.io,haoyuchen1992/osf.io,acshi/osf.io,baylee-d/osf.io,brianjgeiger/osf.io,samanehsan/osf.io,alexschiller/osf.io,doublebits/osf.io,doublebits/osf.io,doublebits/osf.io,caseyrygt/osf.io,mfraezz/osf.io,revanthkolli/osf.io,cwisecarver/osf.io,emetsger/osf.io,jinluyuan/osf.io,jmcarp/osf.io,reinaH/osf.io,rdhyee/osf.io,jolene-esposito/osf.io,saradbowman/osf.io,barbour-em/osf.io,lamdnhan/osf.io,GaryKriebel/osf.io,GaryKriebel/osf.io,KAsante95/osf.io,amyshi188/osf.io,lyndsysimon/osf.io,binoculars/osf.io,caneruguz/osf.io,brandonPurvis/osf.io,cwisecarver/osf.io,ticklemepierce/osf.io,ckc6cz/osf.io,kushG/osf.io,jmcarp/osf.io,reinaH/osf.io,haoyuchen1992/osf.io,jnayak1/osf.io,TomBaxter/osf.io,mluo613/osf.io,emetsger/osf.io,leb2dg/osf.io,alexschiller/osf.io,kch8qx/osf.io,bdyetton/prettychart,jolene-esposito
/osf.io,RomanZWang/osf.io,kwierman/osf.io,jeffreyliu3230/osf.io,mattclark/osf.io,cosenal/osf.io,wearpants/osf.io,jeffreyliu3230/osf.io,billyhunt/osf.io,asanfilippo7/osf.io,leb2dg/osf.io,ckc6cz/osf.io,revanthkolli/osf.io,ZobairAlijan/osf.io,cwisecarver/osf.io,chrisseto/osf.io,fabianvf/osf.io,zkraime/osf.io,Johnetordoff/osf.io,erinspace/osf.io,dplorimer/osf,ckc6cz/osf.io,revanthkolli/osf.io,brandonPurvis/osf.io,arpitar/osf.io,mattclark/osf.io,pattisdr/osf.io,aaxelb/osf.io,Ghalko/osf.io,danielneis/osf.io,SSJohns/osf.io,jnayak1/osf.io,lamdnhan/osf.io,TomBaxter/osf.io,caneruguz/osf.io,billyhunt/osf.io,felliott/osf.io,Ghalko/osf.io,arpitar/osf.io,GageGaskins/osf.io,arpitar/osf.io,pattisdr/osf.io,himanshuo/osf.io,zachjanicki/osf.io,revanthkolli/osf.io,abought/osf.io,mluke93/osf.io,himanshuo/osf.io,laurenrevere/osf.io,TomHeatwole/osf.io,leb2dg/osf.io,mluo613/osf.io,CenterForOpenScience/osf.io,adlius/osf.io,billyhunt/osf.io,himanshuo/osf.io,abought/osf.io,haoyuchen1992/osf.io,TomHeatwole/osf.io,icereval/osf.io,Ghalko/osf.io,mattclark/osf.io,SSJohns/osf.io,crcresearch/osf.io,alexschiller/osf.io,kushG/osf.io,alexschiller/osf.io,zkraime/osf.io,kwierman/osf.io,jinluyuan/osf.io,laurenrevere/osf.io,MerlinZhang/osf.io,acshi/osf.io,leb2dg/osf.io,brandonPurvis/osf.io,jeffreyliu3230/osf.io,hmoco/osf.io,amyshi188/osf.io,aaxelb/osf.io,HalcyonChimera/osf.io,brianjgeiger/osf.io,erinspace/osf.io,jmcarp/osf.io,jolene-esposito/osf.io,jinluyuan/osf.io,asanfilippo7/osf.io,Johnetordoff/osf.io,brandonPurvis/osf.io,cldershem/osf.io,samchrisinger/osf.io,rdhyee/osf.io,monikagrabowska/osf.io,HarryRybacki/osf.io,zachjanicki/osf.io,TomHeatwole/osf.io,danielneis/osf.io,binoculars/osf.io,monikagrabowska/osf.io,ckc6cz/osf.io,jnayak1/osf.io,crcresearch/osf.io,jinluyuan/osf.io,crcresearch/osf.io,HalcyonChimera/osf.io,kch8qx/osf.io,njantrania/osf.io,icereval/osf.io,kwierman/osf.io,RomanZWang/osf.io,zachjanicki/osf.io,sbt9uc/osf.io,fabianvf/osf.io,Nesiehr/osf.io,samanehsan/osf.io,kch8qx/osf.io,baylee-d/osf.io,cslzchen/osf.io,cslzchen/osf.io,doublebits/osf.io,danielneis/osf.io,sloria/osf.io,mluo613/osf.io,Nesiehr/osf.io,erinspace/osf.io,dplorimer/osf,arpitar/osf.io,amyshi188/osf.io,haoyuchen1992/osf.io,sbt9uc/osf.io,brianjgeiger/osf.io,Ghalko/osf.io,zamattiac/osf.io,DanielSBrown/osf.io,mfraezz/osf.io,laurenrevere/osf.io,aaxelb/osf.io,ZobairAlijan/osf.io,chrisseto/osf.io,dplorimer/osf
<REPLACE_OLD> <REPLACE_NEW> import abc from website.util import web_url_for class AddonSerializer(object): __metaclass__ = abc.ABCMeta def __init__(self, addon_node_settings, user): self.addon_node_settings = addon_node_settings self.user = user @abc.abstractproperty def serialized_urls(self): pass @abc.abstractproperty def has_valid_credentials(self): pass @abc.abstractproperty def node_has_auth(self): pass @abc.abstractproperty def user_has_auth(self): pass @abc.abstractproperty def user_is_owner(self): pass @abc.abstractproperty def credentials_owner(self): pass @property def serialized_settings(self): node_has_auth = self.node_has_auth result = { 'nodeHasAuth': node_has_auth, 'userHasAuth': self.user_has_auth, 'userIsOwner': self.user_is_owner, 'validCredentials': self.has_valid_credentials, 'urls': self.serialized_urls, } if node_has_auth: owner = self.credentials_owner if owner: result['urls']['owner'] = web_url_for('profile_view_id', uid=owner._primary_key) result['ownerName'] = owner.fullname return result class StorageAddonSerializer(AddonSerializer): __metaclass__ = abc.ABCMeta @abc.abstractproperty def serialized_folder(self): pass @property def serialized_settings(self): result = super(StorageAddonSerializer, self).serialized_settings result['folder'] = self.serialized_folder return result class CitationsAddonSerializer(AddonSerializer): __metaclass__ = abc.ABCMeta <REPLACE_END> <|endoftext|> import abc from website.util import web_url_for class AddonSerializer(object): __metaclass__ = abc.ABCMeta def __init__(self, addon_node_settings, user): self.addon_node_settings = addon_node_settings self.user = user @abc.abstractproperty def serialized_urls(self): pass @abc.abstractproperty def has_valid_credentials(self): pass @abc.abstractproperty def node_has_auth(self): pass @abc.abstractproperty def user_has_auth(self): pass @abc.abstractproperty def user_is_owner(self): pass @abc.abstractproperty def credentials_owner(self): pass @property def serialized_settings(self): node_has_auth = self.node_has_auth result = { 'nodeHasAuth': node_has_auth, 'userHasAuth': self.user_has_auth, 'userIsOwner': self.user_is_owner, 'validCredentials': self.has_valid_credentials, 'urls': self.serialized_urls, } if node_has_auth: owner = self.credentials_owner if owner: result['urls']['owner'] = web_url_for('profile_view_id', uid=owner._primary_key) result['ownerName'] = owner.fullname return result class StorageAddonSerializer(AddonSerializer): __metaclass__ = abc.ABCMeta @abc.abstractproperty def serialized_folder(self): pass @property def serialized_settings(self): result = super(StorageAddonSerializer, self).serialized_settings result['folder'] = self.serialized_folder return result class CitationsAddonSerializer(AddonSerializer): __metaclass__ = abc.ABCMeta
Add base class for serializing addons.
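The class hangs together through abc's abstract properties: any concrete serializer must implement every piece that serialized_settings reads, or it cannot be instantiated at all. The record uses the Python 2 spelling (__metaclass__ with abc.abstractproperty); the sketch below is the Python 3 equivalent, and all class names in it are illustrative:

import abc

class SerializerSketch(abc.ABC):
    @property
    @abc.abstractmethod
    def serialized_urls(self):
        raise NotImplementedError

class Concrete(SerializerSketch):
    @property
    def serialized_urls(self):
        return {'settings': '/settings'}

try:
    SerializerSketch()                 # abstract base: raises TypeError
except TypeError:
    pass
assert Concrete().serialized_urls == {'settings': '/settings'}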
0907bef1a0f92f9f7fef628afba75e1d02db1d70
thermof/__init__.py
thermof/__init__.py
# Date: August 2017 # Author: Kutay B. Sezginel """ Thermal conductivity calculations of porous crystals using Lammps """ from .simulation import Simulation from .trajectory import Trajectory from .mof import MOF
# Date: August 2017 # Author: Kutay B. Sezginel """ Thermal conductivity calculations of porous crystals using Lammps """ from .simulation import Simulation from .trajectory import Trajectory from .parameters import Parameters from .mof import MOF
Add parameter import to main module
Add parameter import to main module
Python
mit
kbsezginel/tee_mof,kbsezginel/tee_mof
<INSERT> .parameters import Parameters from <INSERT_END> <|endoftext|> # Date: August 2017 # Author: Kutay B. Sezginel """ Thermal conductivity calculations of porous crystals using Lammps """ from .simulation import Simulation from .trajectory import Trajectory from .parameters import Parameters from .mof import MOF
Add parameter import to main module # Date: August 2017 # Author: Kutay B. Sezginel """ Thermal conductivity calculations of porous crystals using Lammps """ from .simulation import Simulation from .trajectory import Trajectory from .mof import MOF
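The one-line change is the re-export pattern: importing a name in the package __init__ lets callers write `from thermof import Parameters` instead of reaching into the submodule. A throwaway package built in a temporary directory shows the mechanism (minipkg and its contents are illustrative):

import os
import sys
import tempfile

root = tempfile.mkdtemp()
pkg = os.path.join(root, 'minipkg')
os.mkdir(pkg)
with open(os.path.join(pkg, 'parameters.py'), 'w') as f:
    f.write('class Parameters(object):\n    pass\n')
with open(os.path.join(pkg, '__init__.py'), 'w') as f:
    f.write('from .parameters import Parameters\n')   # the re-export

sys.path.insert(0, root)
from minipkg import Parameters   # works only because __init__ re-exports it
assert Parameters.__module__ == 'minipkg.parameters'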
612ae3adb2636fb3a926cd29d87b4b388ca48476
scripts/delete_old_user_login_events.py
scripts/delete_old_user_login_events.py
#!/usr/bin/env python """Delete login user events older than a given number of days. :Copyright: 2006-2020 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ from datetime import datetime, timedelta import click from byceps.database import db from byceps.services.user.models.event import UserEvent as DbUserEvent from byceps.util.system import get_config_filename_from_env_or_exit from _util import app_context @click.command() @click.option( '--dry-run', is_flag=True, help='count but do not delete affected records', ) @click.argument('minimum_age_in_days', type=int) def execute(dry_run, minimum_age_in_days): latest_occurred_at = get_latest_occurred_at(minimum_age_in_days) click.secho( f'Deleting all user login events older than {minimum_age_in_days} days ' f'(i.e. before {latest_occurred_at:%Y-%m-%d %H:%M:%S}) ...' ) num_deleted = delete_user_login_events_before(latest_occurred_at, dry_run) click.secho(f'{num_deleted} user login events deleted.') if dry_run: click.secho( f'This was a dry run; no records have been deleted.', fg='yellow' ) def get_latest_occurred_at(minimum_age_in_days: int) -> datetime: now = datetime.utcnow() return now - timedelta(days=minimum_age_in_days) def delete_user_login_events_before( latest_occurred_at: datetime, dry_run: bool ) -> int: num_deleted = DbUserEvent.query \ .filter_by(event_type='user-logged-in') \ .filter(DbUserEvent.occurred_at <= latest_occurred_at) \ .delete() if not dry_run: db.session.commit() return num_deleted if __name__ == '__main__': config_filename = get_config_filename_from_env_or_exit() with app_context(config_filename): execute()
Add script to delete user login events older than a number of days
Add script to delete user login events older than a number of days
Python
bsd-3-clause
homeworkprod/byceps,homeworkprod/byceps,homeworkprod/byceps
<REPLACE_OLD> <REPLACE_NEW> #!/usr/bin/env python """Delete login user events older than a given number of days. :Copyright: 2006-2020 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ from datetime import datetime, timedelta import click from byceps.database import db from byceps.services.user.models.event import UserEvent as DbUserEvent from byceps.util.system import get_config_filename_from_env_or_exit from _util import app_context @click.command() @click.option( '--dry-run', is_flag=True, help='count but do not delete affected records', ) @click.argument('minimum_age_in_days', type=int) def execute(dry_run, minimum_age_in_days): latest_occurred_at = get_latest_occurred_at(minimum_age_in_days) click.secho( f'Deleting all user login events older than {minimum_age_in_days} days ' f'(i.e. before {latest_occurred_at:%Y-%m-%d %H:%M:%S}) ...' ) num_deleted = delete_user_login_events_before(latest_occurred_at, dry_run) click.secho(f'{num_deleted} user login events deleted.') if dry_run: click.secho( f'This was a dry run; no records have been deleted.', fg='yellow' ) def get_latest_occurred_at(minimum_age_in_days: int) -> datetime: now = datetime.utcnow() return now - timedelta(days=minimum_age_in_days) def delete_user_login_events_before( latest_occurred_at: datetime, dry_run: bool ) -> int: num_deleted = DbUserEvent.query \ .filter_by(event_type='user-logged-in') \ .filter(DbUserEvent.occurred_at <= latest_occurred_at) \ .delete() if not dry_run: db.session.commit() return num_deleted if __name__ == '__main__': config_filename = get_config_filename_from_env_or_exit() with app_context(config_filename): execute() <REPLACE_END> <|endoftext|> #!/usr/bin/env python """Delete login user events older than a given number of days. :Copyright: 2006-2020 Jochen Kupperschmidt :License: Modified BSD, see LICENSE for details. """ from datetime import datetime, timedelta import click from byceps.database import db from byceps.services.user.models.event import UserEvent as DbUserEvent from byceps.util.system import get_config_filename_from_env_or_exit from _util import app_context @click.command() @click.option( '--dry-run', is_flag=True, help='count but do not delete affected records', ) @click.argument('minimum_age_in_days', type=int) def execute(dry_run, minimum_age_in_days): latest_occurred_at = get_latest_occurred_at(minimum_age_in_days) click.secho( f'Deleting all user login events older than {minimum_age_in_days} days ' f'(i.e. before {latest_occurred_at:%Y-%m-%d %H:%M:%S}) ...' ) num_deleted = delete_user_login_events_before(latest_occurred_at, dry_run) click.secho(f'{num_deleted} user login events deleted.') if dry_run: click.secho( f'This was a dry run; no records have been deleted.', fg='yellow' ) def get_latest_occurred_at(minimum_age_in_days: int) -> datetime: now = datetime.utcnow() return now - timedelta(days=minimum_age_in_days) def delete_user_login_events_before( latest_occurred_at: datetime, dry_run: bool ) -> int: num_deleted = DbUserEvent.query \ .filter_by(event_type='user-logged-in') \ .filter(DbUserEvent.occurred_at <= latest_occurred_at) \ .delete() if not dry_run: db.session.commit() return num_deleted if __name__ == '__main__': config_filename = get_config_filename_from_env_or_exit() with app_context(config_filename): execute()
Add script to delete user login events older than a number of days
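The cutoff arithmetic is the heart of the script; with a pinned "now" instead of datetime.utcnow() it can be checked in isolation (stdlib only):

from datetime import datetime, timedelta

now = datetime(2020, 3, 15, 12, 0, 0)
cutoff = now - timedelta(days=90)
assert cutoff == datetime(2019, 12, 16, 12, 0, 0)
# Rows with occurred_at <= cutoff are the ones the bulk delete removes;
# --dry-run computes the same count but skips the session commit.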
0ebfe4a0777850aa851c7d7bc0f642d692a1515a
2016/qualification_round/revenge_of_the_pancakes.py
2016/qualification_round/revenge_of_the_pancakes.py
#!/usr/bin/env python # Google Code Jam # Google Code Jam 2016 # Qualification Round 2016 # Problem B. Revenge of the Pancakes # Solved all test sets from __future__ import print_function def calc_min_flip_step(s): grouped_height = 1 + s.count('-+') + s.count('+-') if s.endswith('-'): return grouped_height else: return grouped_height - 1 if __name__ == '__main__': import os samples = [ '-', '-+', '+-', '+++', '--+-' ] for sample in samples: print(calc_min_flip_step(sample)) data_files = ['B-small-practice', 'B-large-practice'] for f in data_files: with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '{0}.in'.format(f)), 'r') as input_file: lines = input_file.readlines() input_count = int(lines[0].replace('\n' ,'')) inputs = [line.replace('\n', '') for line in lines[1:]] i = 1 with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '{0}.out'.format(f)), 'w') as output_file: for in_ in inputs: output_file.write('Case #{0}: {1}\n'.format(i, calc_min_flip_step(in_))) i += 1
Add revenge of the pancakes
Add revenge of the pancakes
Python
apache-2.0
laichunpongben/CodeJam
<REPLACE_OLD> <REPLACE_NEW> #!/usr/bin/env python # Google Code Jam # Google Code Jam 2016 # Qualification Round 2016 # Problem B. Revenge of the Pancakes # Solved all test sets from __future__ import print_function def calc_min_flip_step(s): grouped_height = 1 + s.count('-+') + s.count('+-') if s.endswith('-'): return grouped_height else: return grouped_height - 1 if __name__ == '__main__': import os samples = [ '-', '-+', '+-', '+++', '--+-' ] for sample in samples: print(calc_min_flip_step(sample)) data_files = ['B-small-practice', 'B-large-practice'] for f in data_files: with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '{0}.in'.format(f)), 'r') as input_file: lines = input_file.readlines() input_count = int(lines[0].replace('\n' ,'')) inputs = [line.replace('\n', '') for line in lines[1:]] i = 1 with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '{0}.out'.format(f)), 'w') as output_file: for in_ in inputs: output_file.write('Case #{0}: {1}\n'.format(i, calc_min_flip_step(in_))) i += 1 <REPLACE_END> <|endoftext|> #!/usr/bin/env python # Google Code Jam # Google Code Jam 2016 # Qualification Round 2016 # Problem B. Revenge of the Pancakes # Solved all test sets from __future__ import print_function def calc_min_flip_step(s): grouped_height = 1 + s.count('-+') + s.count('+-') if s.endswith('-'): return grouped_height else: return grouped_height - 1 if __name__ == '__main__': import os samples = [ '-', '-+', '+-', '+++', '--+-' ] for sample in samples: print(calc_min_flip_step(sample)) data_files = ['B-small-practice', 'B-large-practice'] for f in data_files: with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '{0}.in'.format(f)), 'r') as input_file: lines = input_file.readlines() input_count = int(lines[0].replace('\n' ,'')) inputs = [line.replace('\n', '') for line in lines[1:]] i = 1 with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), '{0}.out'.format(f)), 'w') as output_file: for in_ in inputs: output_file.write('Case #{0}: {1}\n'.format(i, calc_min_flip_step(in_))) i += 1
Add revenge of the pancakes
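The counting trick works because s.count('-+') + s.count('+-') is exactly the number of boundaries between maximal runs, so grouped_height is the number of runs; every run except a trailing happy-side ('+') run costs one flip. A stdlib cross-check against itertools.groupby (illustrative):

from itertools import groupby

def flips_by_groups(s):
    groups = [k for k, _ in groupby(s)]   # one entry per maximal run
    return len(groups) if groups[-1] == '-' else len(groups) - 1

def flips_by_counts(s):
    grouped_height = 1 + s.count('-+') + s.count('+-')
    return grouped_height if s.endswith('-') else grouped_height - 1

for s in ('-', '-+', '+-', '+++', '--+-'):
    assert flips_by_groups(s) == flips_by_counts(s)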
49c99399c5b0e741e356cf320e338d019e06567d
taca/utils/config.py
taca/utils/config.py
"""Load and parse configuration file.""" import yaml from io import open CONFIG = {} def load_config(config_file): """Loads a configuration file.""" config = {} if type(config_file) is file: config.update(yaml.load(config_file, Loader=yaml.FullLoader) or {}) return config else: try: with open(config_file, 'r') as f: content = yaml.load(f, Loader=yaml.FullLoader) config.update(content) return content except IOError as e: e.message = 'Could not open configuration file "{}".'.format(config_file) raise e def load_yaml_config(config_file): """Load YAML config file :param str config_file: The path to the configuration file. :returns: A dict of the parsed config file. :rtype: dict :raises IOError: If the config file cannot be opened. """ if type(config_file) is file: CONFIG.update(yaml.load(config_file, Loader=yaml.FullLoader) or {}) return CONFIG else: try: with open(config_file, 'r') as f: content = yaml.load(f, Loader=yaml.FullLoader) CONFIG.update(content) return content except IOError as e: e.message = 'Could not open configuration file "{}".'.format(config_file) raise e
"""Load and parse configuration file.""" import yaml from io import open CONFIG = {} def load_config(config_file): """Loads a configuration file.""" config = {} try: with open(config_file, 'r') as f: content = yaml.load(f, Loader=yaml.FullLoader) config.update(content) return content except IOError as e: e.message = 'Could not open configuration file "{}".'.format(config_file) raise e def load_yaml_config(config_file): """Load YAML config file :param str config_file: The path to the configuration file. :returns: A dict of the parsed config file. :rtype: dict :raises IOError: If the config file cannot be opened. """ try: with open(config_file, 'r') as f: content = yaml.load(f, Loader=yaml.FullLoader) CONFIG.update(content) return content except IOError as e: e.message = 'Could not open configuration file "{}".'.format(config_file) raise e
Remove unused file type check
Remove unused file type check
Python
mit
SciLifeLab/TACA,SciLifeLab/TACA,SciLifeLab/TACA
<DELETE> if type(config_file) is file: config.update(yaml.load(config_file, Loader=yaml.FullLoader) or {}) return config else: <DELETE_END> <DELETE> <DELETE_END> <DELETE> <DELETE_END> <DELETE> <DELETE_END> <DELETE> <DELETE_END> <DELETE> <DELETE_END> <DELETE> <DELETE_END> <DELETE> <DELETE_END> <DELETE> if type(config_file) is file: CONFIG.update(yaml.load(config_file, Loader=yaml.FullLoader) or {}) return CONFIG else: <DELETE_END> <DELETE> <DELETE_END> <DELETE> <DELETE_END> <DELETE> <DELETE_END> <DELETE> <DELETE_END> <DELETE> <DELETE_END> <DELETE> <DELETE_END> <DELETE> <DELETE_END> <|endoftext|> """Load and parse configuration file.""" import yaml from io import open CONFIG = {} def load_config(config_file): """Loads a configuration file.""" config = {} try: with open(config_file, 'r') as f: content = yaml.load(f, Loader=yaml.FullLoader) config.update(content) return content except IOError as e: e.message = 'Could not open configuration file "{}".'.format(config_file) raise e def load_yaml_config(config_file): """Load YAML config file :param str config_file: The path to the configuration file. :returns: A dict of the parsed config file. :rtype: dict :raises IOError: If the config file cannot be opened. """ try: with open(config_file, 'r') as f: content = yaml.load(f, Loader=yaml.FullLoader) CONFIG.update(content) return content except IOError as e: e.message = 'Could not open configuration file "{}".'.format(config_file) raise e
Remove unused file type check """Load and parse configuration file.""" import yaml from io import open CONFIG = {} def load_config(config_file): """Loads a configuration file.""" config = {} if type(config_file) is file: config.update(yaml.load(config_file, Loader=yaml.FullLoader) or {}) return config else: try: with open(config_file, 'r') as f: content = yaml.load(f, Loader=yaml.FullLoader) config.update(content) return content except IOError as e: e.message = 'Could not open configuration file "{}".'.format(config_file) raise e def load_yaml_config(config_file): """Load YAML config file :param str config_file: The path to the configuration file. :returns: A dict of the parsed config file. :rtype: dict :raises IOError: If the config file cannot be opened. """ if type(config_file) is file: CONFIG.update(yaml.load(config_file, Loader=yaml.FullLoader) or {}) return CONFIG else: try: with open(config_file, 'r') as f: content = yaml.load(f, Loader=yaml.FullLoader) CONFIG.update(content) return content except IOError as e: e.message = 'Could not open configuration file "{}".'.format(config_file) raise e
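The deleted branches guarded on the Python 2 `file` builtin, which Python 3 removed, so under Python 3 the check itself would raise NameError before ever matching. A one-liner confirms the builtin is gone:

try:
    file                                 # Python 2 builtin, absent in 3
except NameError:
    print('`file` is gone: the `type(config_file) is file` branch cannot run')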
5f7a694c72821110091d6aff5ee854681137bdcc
tests/testuser.py
tests/testuser.py
import unittest from steam import user class ProfileTestCase(unittest.TestCase): VALID_ID64 = 76561198014028523 INVALID_ID64 = 123 # This is weird but there should be no reason that it's invalid # So Valve, if you see this, be gewd guys and make 33 bit (condensed) # IDs work properly. Or at least put a more appropriate error. Currently # It's impossible to distinguish between this and a bad ID (all are code 8) WEIRD_ID64 = (VALID_ID64 >> 33 << 33) ^ VALID_ID64 class VanityTestCase(unittest.TestCase): VALID_VANITY = "stragglerastic" INVALID_VANITY = "*F*SDF9" def test_invalid_vanity(self): vanity = user.vanity_url(self.INVALID_VANITY) self.assertRaises(user.VanityError, lambda: vanity.id64) def test_pathed_vanity(self): vanity = user.vanity_url('/' + self.VALID_VANITY + '/') self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64) def test_valid_vanity(self): vanity = user.vanity_url(self.VALID_VANITY) self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64) class ProfileIdTestCase(ProfileTestCase): def test_invalid_id(self): profile = user.profile(self.INVALID_ID64) self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64) def test_pathed_id(self): profile = user.profile('/' + str(self.VALID_ID64) + '/') self.assertEqual(profile.id64, self.VALID_ID64) def test_valid_id(self): profile = user.profile(self.VALID_ID64) self.assertEqual(profile.id64, self.VALID_ID64) def test_weird_id(self): profile = user.profile(self.WEIRD_ID64) self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64)
Add initial steam.user test fixtures
Add initial steam.user test fixtures
Python
isc
miedzinski/steamodd,Lagg/steamodd
<REPLACE_OLD> <REPLACE_NEW> import unittest from steam import user class ProfileTestCase(unittest.TestCase): VALID_ID64 = 76561198014028523 INVALID_ID64 = 123 # This is weird but there should be no reason that it's invalid # So Valve, if you see this, be gewd guys and make 33 bit (condensed) # IDs work properly. Or at least put a more appropriate error. Currently # It's impossible to distinguish between this and a bad ID (all are code 8) WEIRD_ID64 = (VALID_ID64 >> 33 << 33) ^ VALID_ID64 class VanityTestCase(unittest.TestCase): VALID_VANITY = "stragglerastic" INVALID_VANITY = "*F*SDF9" def test_invalid_vanity(self): vanity = user.vanity_url(self.INVALID_VANITY) self.assertRaises(user.VanityError, lambda: vanity.id64) def test_pathed_vanity(self): vanity = user.vanity_url('/' + self.VALID_VANITY + '/') self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64) def test_valid_vanity(self): vanity = user.vanity_url(self.VALID_VANITY) self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64) class ProfileIdTestCase(ProfileTestCase): def test_invalid_id(self): profile = user.profile(self.INVALID_ID64) self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64) def test_pathed_id(self): profile = user.profile('/' + str(self.VALID_ID64) + '/') self.assertEqual(profile.id64, self.VALID_ID64) def test_valid_id(self): profile = user.profile(self.VALID_ID64) self.assertEqual(profile.id64, self.VALID_ID64) def test_weird_id(self): profile = user.profile(self.WEIRD_ID64) self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64) <REPLACE_END> <|endoftext|> import unittest from steam import user class ProfileTestCase(unittest.TestCase): VALID_ID64 = 76561198014028523 INVALID_ID64 = 123 # This is weird but there should be no reason that it's invalid # So Valve, if you see this, be gewd guys and make 33 bit (condensed) # IDs work properly. Or at least put a more appropriate error. Currently # It's impossible to distinguish between this and a bad ID (all are code 8) WEIRD_ID64 = (VALID_ID64 >> 33 << 33) ^ VALID_ID64 class VanityTestCase(unittest.TestCase): VALID_VANITY = "stragglerastic" INVALID_VANITY = "*F*SDF9" def test_invalid_vanity(self): vanity = user.vanity_url(self.INVALID_VANITY) self.assertRaises(user.VanityError, lambda: vanity.id64) def test_pathed_vanity(self): vanity = user.vanity_url('/' + self.VALID_VANITY + '/') self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64) def test_valid_vanity(self): vanity = user.vanity_url(self.VALID_VANITY) self.assertEqual(vanity.id64, ProfileTestCase.VALID_ID64) class ProfileIdTestCase(ProfileTestCase): def test_invalid_id(self): profile = user.profile(self.INVALID_ID64) self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64) def test_pathed_id(self): profile = user.profile('/' + str(self.VALID_ID64) + '/') self.assertEqual(profile.id64, self.VALID_ID64) def test_valid_id(self): profile = user.profile(self.VALID_ID64) self.assertEqual(profile.id64, self.VALID_ID64) def test_weird_id(self): profile = user.profile(self.WEIRD_ID64) self.assertRaises(user.ProfileNotFoundError, lambda: profile.id64)
Add initial steam.user test fixtures
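The lambda in assertRaises matters because the failure is lazy: building the profile object succeeds and the error only surfaces when .id64 is evaluated. A self-contained restatement of that testing shape (all names below are stand-ins, not steamodd's real classes):

import unittest

class ProfileNotFoundError(Exception):
    pass

class LazyProfile(object):
    def __init__(self, ok):
        self._ok = ok                  # construction never raises

    @property
    def id64(self):
        if not self._ok:
            raise ProfileNotFoundError()
        return 76561198014028523

class LazyAccessTest(unittest.TestCase):
    def test_error_surfaces_on_access(self):
        profile = LazyProfile(ok=False)          # still fine here
        self.assertRaises(ProfileNotFoundError, lambda: profile.id64)

unittest.main(argv=['lazy'], exit=False)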
00cea9f8e51f53f338e19adf0165031d2f9cad77
c2corg_ui/templates/utils/format.py
c2corg_ui/templates/utils/format.py
import bbcode import markdown import html from c2corg_ui.format.wikilinks import C2CWikiLinkExtension _markdown_parser = None _bbcode_parser = None def _get_markdown_parser(): global _markdown_parser if not _markdown_parser: extensions = [ C2CWikiLinkExtension(), ] _markdown_parser = markdown.Markdown(output_format='xhtml5', extensions=extensions) return _markdown_parser def _get_bbcode_parser(): global _bbcode_parser if not _bbcode_parser: _bbcode_parser = bbcode.Parser(escape_html=False, newline='\n') return _bbcode_parser def parse_code(text, md=True, bb=True): if md: text = _get_markdown_parser().convert(text) if bb: text = _get_bbcode_parser().format(text) return text def sanitize(text): return html.escape(text)
import bbcode import markdown import html from c2corg_ui.format.wikilinks import C2CWikiLinkExtension from markdown.extensions.nl2br import Nl2BrExtension from markdown.extensions.toc import TocExtension _markdown_parser = None _bbcode_parser = None def _get_markdown_parser(): global _markdown_parser if not _markdown_parser: extensions = [ C2CWikiLinkExtension(), Nl2BrExtension(), TocExtension(marker='[toc]', baselevel=2), ] _markdown_parser = markdown.Markdown(output_format='xhtml5', extensions=extensions) return _markdown_parser def _get_bbcode_parser(): global _bbcode_parser if not _bbcode_parser: _bbcode_parser = bbcode.Parser(escape_html=False, newline='\n') return _bbcode_parser def parse_code(text, md=True, bb=True): if md: text = _get_markdown_parser().convert(text) if bb: text = _get_bbcode_parser().format(text) return text def sanitize(text): return html.escape(text)
Enable markdown extensions for TOC and linebreaks
Enable markdown extensions for TOC and linebreaks
Python
agpl-3.0
Courgetteandratatouille/v6_ui,Courgetteandratatouille/v6_ui,olaurendeau/v6_ui,c2corg/v6_ui,c2corg/v6_ui,c2corg/v6_ui,Courgetteandratatouille/v6_ui,olaurendeau/v6_ui,olaurendeau/v6_ui,c2corg/v6_ui,Courgetteandratatouille/v6_ui,olaurendeau/v6_ui
<REPLACE_OLD> C2CWikiLinkExtension _markdown_parser <REPLACE_NEW> C2CWikiLinkExtension from markdown.extensions.nl2br import Nl2BrExtension from markdown.extensions.toc import TocExtension _markdown_parser <REPLACE_END> <INSERT> Nl2BrExtension(), TocExtension(marker='[toc]', baselevel=2), <INSERT_END> <|endoftext|> import bbcode import markdown import html from c2corg_ui.format.wikilinks import C2CWikiLinkExtension from markdown.extensions.nl2br import Nl2BrExtension from markdown.extensions.toc import TocExtension _markdown_parser = None _bbcode_parser = None def _get_markdown_parser(): global _markdown_parser if not _markdown_parser: extensions = [ C2CWikiLinkExtension(), Nl2BrExtension(), TocExtension(marker='[toc]', baselevel=2), ] _markdown_parser = markdown.Markdown(output_format='xhtml5', extensions=extensions) return _markdown_parser def _get_bbcode_parser(): global _bbcode_parser if not _bbcode_parser: _bbcode_parser = bbcode.Parser(escape_html=False, newline='\n') return _bbcode_parser def parse_code(text, md=True, bb=True): if md: text = _get_markdown_parser().convert(text) if bb: text = _get_bbcode_parser().format(text) return text def sanitize(text): return html.escape(text)
Enable markdown extensions for TOC and linebreaks import bbcode import markdown import html from c2corg_ui.format.wikilinks import C2CWikiLinkExtension _markdown_parser = None _bbcode_parser = None def _get_markdown_parser(): global _markdown_parser if not _markdown_parser: extensions = [ C2CWikiLinkExtension(), ] _markdown_parser = markdown.Markdown(output_format='xhtml5', extensions=extensions) return _markdown_parser def _get_bbcode_parser(): global _bbcode_parser if not _bbcode_parser: _bbcode_parser = bbcode.Parser(escape_html=False, newline='\n') return _bbcode_parser def parse_code(text, md=True, bb=True): if md: text = _get_markdown_parser().convert(text) if bb: text = _get_bbcode_parser().format(text) return text def sanitize(text): return html.escape(text)
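A quick look at what the two new extensions buy, assuming the `markdown` package is installed (the imports match the commit; output_format='xhtml5' is omitted because recent Markdown releases accept only 'html'/'xhtml'):

import markdown
from markdown.extensions.nl2br import Nl2BrExtension
from markdown.extensions.toc import TocExtension

md = markdown.Markdown(extensions=[
    Nl2BrExtension(),                           # single \n becomes <br />
    TocExtension(marker='[toc]', baselevel=2),  # [toc] becomes a contents div,
])                                              # and baselevel=2 demotes h1 to h2
print(md.convert('[toc]\n\n# Title\nline one\nline two'))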
131033fa3ab170ac2a66c1dd89074ea74702fb52
icekit/page_types/articles/migrations/0002_auto_20161012_2231.py
icekit/page_types/articles/migrations/0002_auto_20161012_2231.py
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('icekit_articles', '0001_initial'), ] operations = [ migrations.AddField( model_name='article', name='slug', field=models.SlugField(max_length=255, default='woo'), preserve_default=False, ), migrations.AddField( model_name='article', name='title', field=models.CharField(max_length=255, default='woo'), preserve_default=False, ), ]
# -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('icekit_articles', '0001_initial'), ] operations = [ migrations.AddField( model_name='article', name='slug', field=models.SlugField(max_length=255), preserve_default=False, ), migrations.AddField( model_name='article', name='title', field=models.CharField(max_length=255), preserve_default=False, ), ]
Remove vestigial (?) "woo" default for article slug and title fields.
Remove vestigial (?) "woo" default for article slug and title fields.
Python
mit
ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/django-icekit,ic-labs/django-icekit
<REPLACE_OLD> field=models.SlugField(max_length=255, default='woo'), <REPLACE_NEW> field=models.SlugField(max_length=255), <REPLACE_END> <REPLACE_OLD> field=models.CharField(max_length=255, default='woo'), <REPLACE_NEW> field=models.CharField(max_length=255), <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('icekit_articles', '0001_initial'), ] operations = [ migrations.AddField( model_name='article', name='slug', field=models.SlugField(max_length=255), preserve_default=False, ), migrations.AddField( model_name='article', name='title', field=models.CharField(max_length=255), preserve_default=False, ), ]
Remove vestigial (?) "woo" default for article slug and title fields. # -*- coding: utf-8 -*- from __future__ import unicode_literals from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('icekit_articles', '0001_initial'), ] operations = [ migrations.AddField( model_name='article', name='slug', field=models.SlugField(max_length=255, default='woo'), preserve_default=False, ), migrations.AddField( model_name='article', name='title', field=models.CharField(max_length=255, default='woo'), preserve_default=False, ), ]
7946b8fe8a8cddeef675cc60b5ebb64a250ea2c4
smugmug_test.py
smugmug_test.py
import smugmug import unittest class MockNode(object): def __init__(self): self._reset_times = 0 def reset_cache(self): self._reset_times += 1 class TestChildCacheGarbageCollector(unittest.TestCase): def test_clears_child_cache(self): gc = smugmug.ChildCacheGarbageCollector(3) nodes = [MockNode(), MockNode(), MockNode(), MockNode(), MockNode()] gc.visited(nodes[0]) gc.visited(nodes[1]) gc.visited(nodes[2]) gc.visited(nodes[3]) gc.visited(nodes[4]) self.assertEqual(nodes[0]._reset_times, 1) self.assertEqual(nodes[1]._reset_times, 1) self.assertEqual(nodes[2]._reset_times, 0) self.assertEqual(nodes[3]._reset_times, 0) self.assertEqual(nodes[4]._reset_times, 0) def test_repeated_visit_are_ignored(self): gc = smugmug.ChildCacheGarbageCollector(2) nodes = [MockNode(), MockNode(), MockNode()] gc.visited(nodes[0]) gc.visited(nodes[1]) gc.visited(nodes[2]) gc.visited(nodes[2]) gc.visited(nodes[2]) self.assertEqual(nodes[0]._reset_times, 1) self.assertEqual(nodes[1]._reset_times, 0) self.assertEqual(nodes[2]._reset_times, 0)
Add a unit test for the ChildCacheGarbageCollector class.
Add a unit test for the ChildCacheGarbageCollector class.
Python
mit
graveljp/smugcli
<INSERT> import smugmug import unittest class MockNode(object): <INSERT_END> <INSERT> def __init__(self): self._reset_times = 0 def reset_cache(self): self._reset_times += 1 class TestChildCacheGarbageCollector(unittest.TestCase): def test_clears_child_cache(self): gc = smugmug.ChildCacheGarbageCollector(3) nodes = [MockNode(), MockNode(), MockNode(), MockNode(), MockNode()] gc.visited(nodes[0]) gc.visited(nodes[1]) gc.visited(nodes[2]) gc.visited(nodes[3]) gc.visited(nodes[4]) self.assertEqual(nodes[0]._reset_times, 1) self.assertEqual(nodes[1]._reset_times, 1) self.assertEqual(nodes[2]._reset_times, 0) self.assertEqual(nodes[3]._reset_times, 0) self.assertEqual(nodes[4]._reset_times, 0) def test_repeated_visit_are_ignored(self): gc = smugmug.ChildCacheGarbageCollector(2) nodes = [MockNode(), MockNode(), MockNode()] gc.visited(nodes[0]) gc.visited(nodes[1]) gc.visited(nodes[2]) gc.visited(nodes[2]) gc.visited(nodes[2]) self.assertEqual(nodes[0]._reset_times, 1) self.assertEqual(nodes[1]._reset_times, 0) self.assertEqual(nodes[2]._reset_times, 0) <INSERT_END> <|endoftext|> import smugmug import unittest class MockNode(object): def __init__(self): self._reset_times = 0 def reset_cache(self): self._reset_times += 1 class TestChildCacheGarbageCollector(unittest.TestCase): def test_clears_child_cache(self): gc = smugmug.ChildCacheGarbageCollector(3) nodes = [MockNode(), MockNode(), MockNode(), MockNode(), MockNode()] gc.visited(nodes[0]) gc.visited(nodes[1]) gc.visited(nodes[2]) gc.visited(nodes[3]) gc.visited(nodes[4]) self.assertEqual(nodes[0]._reset_times, 1) self.assertEqual(nodes[1]._reset_times, 1) self.assertEqual(nodes[2]._reset_times, 0) self.assertEqual(nodes[3]._reset_times, 0) self.assertEqual(nodes[4]._reset_times, 0) def test_repeated_visit_are_ignored(self): gc = smugmug.ChildCacheGarbageCollector(2) nodes = [MockNode(), MockNode(), MockNode()] gc.visited(nodes[0]) gc.visited(nodes[1]) gc.visited(nodes[2]) gc.visited(nodes[2]) gc.visited(nodes[2]) self.assertEqual(nodes[0]._reset_times, 1) self.assertEqual(nodes[1]._reset_times, 0) self.assertEqual(nodes[2]._reset_times, 0)
Add a unit test for the ChildCacheGarbageCollector class.
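smugmug.py itself is not in this record, so the tests are the contract: keep the most recently visited nodes up to the limit, call reset_cache() on whatever is evicted, and fold repeat visits of the same node into one slot. One minimal implementation consistent with those tests (purely illustrative, not the project's actual code):

from collections import OrderedDict

class ChildCacheGarbageCollector(object):
    def __init__(self, max_nodes):
        self._max_nodes = max_nodes
        self._nodes = OrderedDict()      # insertion order tracks visit recency

    def visited(self, node):
        self._nodes.pop(id(node), None)  # a repeat visit moves node to the end
        self._nodes[id(node)] = node
        while len(self._nodes) > self._max_nodes:
            _, oldest = self._nodes.popitem(last=False)
            oldest.reset_cache()         # evict least recently visited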
7a6e8af11ac28cf10e5ce33637bc883324dde641
game/models.py
game/models.py
from django.db import models from django.utils import timezone class Task(models.Model): EQUALS_CHECK = 'EQ' REGEX_CHECK = 'RE' CHECK_CHOICES = ( (EQUALS_CHECK, 'Equals'), (REGEX_CHECK, 'Regex'), ) title_ru = models.CharField(null=False, blank=False, max_length=256) title_en = models.CharField(null=False, blank=False, max_length=256) desc_ru = models.TextField(null=False, blank=False) desc_en = models.TextField(null=False, blank=False) writeup_ru = models.TextField(null=False, blank=False) writeup_en = models.TextField(null=False, blank=False) flag = models.CharField(max_length=1024) is_case_insensitive_check = models.BooleanField(default=False) is_trimmed_check = models.BooleanField(default=False) check = models.CharField(null=False, blank=False, max_length=2, choices=CHECK_CHOICES) created_at = models.DateTimeField(null=False, blank=True) def save(self, *args, **kwargs): if self.pk is None: self.created_at = timezone.now() return super(Task, self).save(*args, **kwargs)
from django.db import models from django.utils import timezone class Task(models.Model): EQUALS_CHECK = 'EQ' REGEX_CHECK = 'RE' CHECK_CHOICES = ( (EQUALS_CHECK, 'Equals'), (REGEX_CHECK, 'Regex'), ) title_ru = models.CharField(null=False, blank=False, max_length=256) title_en = models.CharField(null=False, blank=False, max_length=256) category = models.CharField(null=False, blank=False, max_length=256) cost = models.IntegerField(null=False, blank=False) desc_ru = models.TextField(null=False, blank=False) desc_en = models.TextField(null=False, blank=False) writeup_ru = models.TextField(null=False, blank=False) writeup_en = models.TextField(null=False, blank=False) flag = models.CharField(max_length=1024) is_case_insensitive_check = models.BooleanField(default=False) is_trimmed_check = models.BooleanField(default=False) check = models.CharField(null=False, blank=False, max_length=2, choices=CHECK_CHOICES) created_at = models.DateTimeField(null=False, blank=True) def save(self, *args, **kwargs): if self.pk is None: self.created_at = timezone.now() return super(Task, self).save(*args, **kwargs)
Add new fields to the task model
Add new fields to the task model
Python
bsd-3-clause
stefantsov/blackbox3,stefantsov/blackbox3,stefantsov/blackbox3
<INSERT> category = models.CharField(null=False, blank=False, max_length=256) cost = models.IntegerField(null=False, blank=False) <INSERT_END> <|endoftext|> from django.db import models from django.utils import timezone class Task(models.Model): EQUALS_CHECK = 'EQ' REGEX_CHECK = 'RE' CHECK_CHOICES = ( (EQUALS_CHECK, 'Equals'), (REGEX_CHECK, 'Regex'), ) title_ru = models.CharField(null=False, blank=False, max_length=256) title_en = models.CharField(null=False, blank=False, max_length=256) category = models.CharField(null=False, blank=False, max_length=256) cost = models.IntegerField(null=False, blank=False) desc_ru = models.TextField(null=False, blank=False) desc_en = models.TextField(null=False, blank=False) writeup_ru = models.TextField(null=False, blank=False) writeup_en = models.TextField(null=False, blank=False) flag = models.CharField(max_length=1024) is_case_insensitive_check = models.BooleanField(default=False) is_trimmed_check = models.BooleanField(default=False) check = models.CharField(null=False, blank=False, max_length=2, choices=CHECK_CHOICES) created_at = models.DateTimeField(null=False, blank=True) def save(self, *args, **kwargs): if self.pk is None: self.created_at = timezone.now() return super(Task, self).save(*args, **kwargs)
Add new fields to the task model from django.db import models from django.utils import timezone class Task(models.Model): EQUALS_CHECK = 'EQ' REGEX_CHECK = 'RE' CHECK_CHOICES = ( (EQUALS_CHECK, 'Equals'), (REGEX_CHECK, 'Regex'), ) title_ru = models.CharField(null=False, blank=False, max_length=256) title_en = models.CharField(null=False, blank=False, max_length=256) desc_ru = models.TextField(null=False, blank=False) desc_en = models.TextField(null=False, blank=False) writeup_ru = models.TextField(null=False, blank=False) writeup_en = models.TextField(null=False, blank=False) flag = models.CharField(max_length=1024) is_case_insensitive_check = models.BooleanField(default=False) is_trimmed_check = models.BooleanField(default=False) check = models.CharField(null=False, blank=False, max_length=2, choices=CHECK_CHOICES) created_at = models.DateTimeField(null=False, blank=True) def save(self, *args, **kwargs): if self.pk is None: self.created_at = timezone.now() return super(Task, self).save(*args, **kwargs)
6c349621dd3331bf92f803d2d66c96868f8e94c6
src/geelweb/django/editos/runtests.py
src/geelweb/django/editos/runtests.py
import os import sys os.environ['DJANGO_SETTINGS_MODULE'] = 'test_settings' test_dir = os.path.dirname(__file__) sys.path.insert(0, test_dir) from django.test.utils import get_runner from django.conf import settings def runtests(): TestRunner = get_runner(settings) test_runner = TestRunner(verbosity=1, interactive=True) failures = test_runner.run_tests(['geelweb.django.editos']) sys.exit(bool(failures))
import os import sys os.environ['DJANGO_SETTINGS_MODULE'] = 'test_settings' test_dir = os.path.dirname(__file__) sys.path.insert(0, test_dir) import django from django.test.utils import get_runner from django.conf import settings def runtests(): if django.VERSION[0] == 1 and django.VERSION[1] < 7: from django.test.utils import setup_test_environment setup_test_environment() if django.VERSION[0] == 1 and django.VERSION[1] >= 7: django.setup() TestRunner = get_runner(settings) test_runner = TestRunner() failures = test_runner.run_tests(['geelweb.django.editos']) sys.exit(bool(failures))
Upgrade to test using django 1.7 and 1.8
Upgrade to test using django 1.7 and 1.8
Python
mit
geelweb/django-editos,geelweb/django-editos
<REPLACE_OLD> test_dir) from <REPLACE_NEW> test_dir) import django from <REPLACE_END> <INSERT> if django.VERSION[0] == 1 and django.VERSION[1] < 7: from django.test.utils import setup_test_environment setup_test_environment() if django.VERSION[0] == 1 and django.VERSION[1] >= 7: django.setup() <INSERT_END> <REPLACE_OLD> TestRunner(verbosity=1, interactive=True) <REPLACE_NEW> TestRunner() <REPLACE_END> <|endoftext|> import os import sys os.environ['DJANGO_SETTINGS_MODULE'] = 'test_settings' test_dir = os.path.dirname(__file__) sys.path.insert(0, test_dir) import django from django.test.utils import get_runner from django.conf import settings def runtests(): if django.VERSION[0] == 1 and django.VERSION[1] < 7: from django.test.utils import setup_test_environment setup_test_environment() if django.VERSION[0] == 1 and django.VERSION[1] >= 7: django.setup() TestRunner = get_runner(settings) test_runner = TestRunner() failures = test_runner.run_tests(['geelweb.django.editos']) sys.exit(bool(failures))
Upgrade to test using django 1.7 and 1.8 import os import sys os.environ['DJANGO_SETTINGS_MODULE'] = 'test_settings' test_dir = os.path.dirname(__file__) sys.path.insert(0, test_dir) from django.test.utils import get_runner from django.conf import settings def runtests(): TestRunner = get_runner(settings) test_runner = TestRunner(verbosity=1, interactive=True) failures = test_runner.run_tests(['geelweb.django.editos']) sys.exit(bool(failures))
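The version predicates compare django.VERSION piecewise; plain tuple comparison expresses the same gates more directly, as this stdlib-only check shows (setup_test_environment and django.setup need a configured Django and are not exercised here):

for version in [(1, 6, 0), (1, 7, 0), (1, 8, 1)]:
    legacy = version[0] == 1 and version[1] < 7       # commit's spelling
    assert legacy == (version < (1, 7)), version
    modern = version[0] == 1 and version[1] >= 7
    assert modern == ((1, 7) <= version < (2, 0)), version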
e6807ad6d71e3b115828870bb068777ad865f329
tests/test_client.py
tests/test_client.py
import pylibmc from pylibmc.test import make_test_client from tests import PylibmcTestCase from nose.tools import eq_, ok_ class ClientTests(PylibmcTestCase): def test_zerokey(self): bc = make_test_client(binary=True) k = "\x00\x01" ok_(bc.set(k, "test")) rk = bc.get_multi([k]).keys()[0] eq_(k, rk) def test_cas(self): k = "testkey" mc = make_test_client(binary=False, behaviors={"cas": True}) ok_(mc.set(k, 0)) while True: rv, cas = mc.gets(k) ok_(mc.cas(k, rv + 1, cas)) if rv == 10: break
Add tests for CAS and other things
Add tests for CAS and other things Refs #63
Python
bsd-3-clause
lericson/pylibmc,lericson/pylibmc,lericson/pylibmc
<INSERT> import pylibmc from pylibmc.test import make_test_client from tests import PylibmcTestCase from nose.tools import eq_, ok_ class ClientTests(PylibmcTestCase): <INSERT_END> <INSERT> def test_zerokey(self): bc = make_test_client(binary=True) k = "\x00\x01" ok_(bc.set(k, "test")) rk = bc.get_multi([k]).keys()[0] eq_(k, rk) def test_cas(self): k = "testkey" mc = make_test_client(binary=False, behaviors={"cas": True}) ok_(mc.set(k, 0)) while True: rv, cas = mc.gets(k) ok_(mc.cas(k, rv + 1, cas)) if rv == 10: break <INSERT_END> <|endoftext|> import pylibmc from pylibmc.test import make_test_client from tests import PylibmcTestCase from nose.tools import eq_, ok_ class ClientTests(PylibmcTestCase): def test_zerokey(self): bc = make_test_client(binary=True) k = "\x00\x01" ok_(bc.set(k, "test")) rk = bc.get_multi([k]).keys()[0] eq_(k, rk) def test_cas(self): k = "testkey" mc = make_test_client(binary=False, behaviors={"cas": True}) ok_(mc.set(k, 0)) while True: rv, cas = mc.gets(k) ok_(mc.cas(k, rv + 1, cas)) if rv == 10: break
Add tests for CAS and other things Refs #63
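One portability note the tests bake in: bc.get_multi([k]).keys()[0] only works on Python 2, where keys() returns a list; Python 3 returns a view and indexing it raises TypeError. next(iter(...)) is the version-neutral spelling:

d = {'\x00\x01': 'test'}
try:
    d.keys()[0]                      # TypeError under Python 3
except TypeError:
    pass
assert next(iter(d)) == '\x00\x01'   # works on both 2 and 3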
1bd57b89cb0deed5081540e5b29f7531215fa121
polyaxon_client/transport/socket_transport.py
polyaxon_client/transport/socket_transport.py
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import json import websocket from polyaxon_client.logger import logger class SocketTransportMixin(object): """Socket operations transport.""" def socket(self, url, message_handler, headers=None): webs = websocket.WebSocketApp( url, on_message=lambda ws, message: self._on_message(message_handler, message), on_error=self._on_error, on_close=self._on_close, header=self._get_headers(headers) ) return webs def stream(self, url, message_handler, headers=None): webs = self.socket(url=url, message_handler=message_handler, headers=headers) webs.run_forever(ping_interval=30, ping_timeout=10) def _on_message(self, message_handler, message): if message_handler and message: message_handler(json.loads(message.decode('utf-8'))) @staticmethod def _on_error(ws, error): if isinstance(error, (KeyboardInterrupt, SystemExit)): logger.info('Quitting... The session will be running in the background.') else: logger.debug('Termination cause: %s', error) logger.debug('Session disconnected.') @staticmethod def _on_close(ws): logger.info('Session ended')
# -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import json import six import websocket from polyaxon_client.logger import logger class SocketTransportMixin(object): """Socket operations transport.""" def socket(self, url, message_handler, headers=None): webs = websocket.WebSocketApp( url, on_message=lambda ws, message: self._on_message(message_handler, message), on_error=self._on_error, on_close=self._on_close, header=self._get_headers(headers) ) return webs def stream(self, url, message_handler, headers=None): webs = self.socket(url=url, message_handler=message_handler, headers=headers) webs.run_forever(ping_interval=30, ping_timeout=10) def _on_message(self, message_handler, message): if message_handler and message: if not isinstance(message, six.string_types): message = message.decode('utf-8') message_handler(json.loads(message)) @staticmethod def _on_error(ws, error): if isinstance(error, (KeyboardInterrupt, SystemExit)): logger.info('Quitting... The session will be running in the background.') else: logger.debug('Termination cause: %s', error) logger.debug('Session disconnected.') @staticmethod def _on_close(ws): logger.info('Session ended')
Check if string before decoding
Check if string before decoding
Python
apache-2.0
polyaxon/polyaxon,polyaxon/polyaxon,polyaxon/polyaxon
<INSERT> six import <INSERT_END> <REPLACE_OLD> message_handler(json.loads(message.decode('utf-8'))) <REPLACE_NEW> if not isinstance(message, six.string_types): message = message.decode('utf-8') message_handler(json.loads(message)) <REPLACE_END> <|endoftext|> # -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import json import six import websocket from polyaxon_client.logger import logger class SocketTransportMixin(object): """Socket operations transport.""" def socket(self, url, message_handler, headers=None): webs = websocket.WebSocketApp( url, on_message=lambda ws, message: self._on_message(message_handler, message), on_error=self._on_error, on_close=self._on_close, header=self._get_headers(headers) ) return webs def stream(self, url, message_handler, headers=None): webs = self.socket(url=url, message_handler=message_handler, headers=headers) webs.run_forever(ping_interval=30, ping_timeout=10) def _on_message(self, message_handler, message): if message_handler and message: if not isinstance(message, six.string_types): message = message.decode('utf-8') message_handler(json.loads(message)) @staticmethod def _on_error(ws, error): if isinstance(error, (KeyboardInterrupt, SystemExit)): logger.info('Quitting... The session will be running in the background.') else: logger.debug('Termination cause: %s', error) logger.debug('Session disconnected.') @staticmethod def _on_close(ws): logger.info('Session ended')
Check if string before decoding # -*- coding: utf-8 -*- from __future__ import absolute_import, division, print_function import json import websocket from polyaxon_client.logger import logger class SocketTransportMixin(object): """Socket operations transport.""" def socket(self, url, message_handler, headers=None): webs = websocket.WebSocketApp( url, on_message=lambda ws, message: self._on_message(message_handler, message), on_error=self._on_error, on_close=self._on_close, header=self._get_headers(headers) ) return webs def stream(self, url, message_handler, headers=None): webs = self.socket(url=url, message_handler=message_handler, headers=headers) webs.run_forever(ping_interval=30, ping_timeout=10) def _on_message(self, message_handler, message): if message_handler and message: message_handler(json.loads(message.decode('utf-8'))) @staticmethod def _on_error(ws, error): if isinstance(error, (KeyboardInterrupt, SystemExit)): logger.info('Quitting... The session will be running in the background.') else: logger.debug('Termination cause: %s', error) logger.debug('Session disconnected.') @staticmethod def _on_close(ws): logger.info('Session ended')
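The guard exists because websocket frames may arrive either as str or as bytes, and only bytes need decoding before json.loads. six.string_types is simply (str,) on Python 3, so a stdlib-only restatement of the handler's normalization is:

import json

def normalize(message):
    # decode only when the frame arrived as bytes
    if not isinstance(message, str):
        message = message.decode('utf-8')
    return json.loads(message)

assert normalize('{"a": 1}') == {'a': 1}
assert normalize(b'{"a": 1}') == {'a': 1}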
5a680d25a5e5a697440f17639d1a0617b903aa06
opps/__init__.py
opps/__init__.py
#!/usr/bin/env python # -*- coding: utf-8 -*- VERSION = (0, 0, 1) __version__ = ".".join(map(str, VERSION)) __status__ = "Development" __description__ = u"Opps CMS websites magazines and high-traffic" __author__ = u"Thiago Avelino" __credits__ = [] __email__ = u"opps-developers@googlegroups.com" __license__ = u"BSD" __copyright__ = u"Copyright 2013, YACOWS"
#!/usr/bin/env python # -*- coding: utf-8 -*- from django.conf import settings VERSION = (0, 0, 1) __version__ = ".".join(map(str, VERSION)) __status__ = "Development" __description__ = u"Opps CMS websites magazines and high-traffic" __author__ = u"Thiago Avelino" __credits__ = [] __email__ = u"opps-developers@googlegroups.com" __license__ = u"BSD" __copyright__ = u"Copyright 2013, YACOWS" settings.INSTALLED_APPS += ('opps.article', 'opps.image', 'opps.channel', 'opps.source', 'redactor', 'tagging',) settings.REDACTOR_OPTIONS = {'lang': 'en'} settings.REDACTOR_UPLOAD = 'uploads/'
Add installed app on opps init
Add installed app on opps init
Python
mit
jeanmask/opps,jeanmask/opps,opps/opps,opps/opps,jeanmask/opps,williamroot/opps,YACOWS/opps,opps/opps,YACOWS/opps,williamroot/opps,YACOWS/opps,williamroot/opps,YACOWS/opps,williamroot/opps,opps/opps,jeanmask/opps
<REPLACE_OLD> -*- VERSION <REPLACE_NEW> -*- from django.conf import settings VERSION <REPLACE_END> <REPLACE_OLD> YACOWS" <REPLACE_NEW> YACOWS" settings.INSTALLED_APPS += ('opps.article', 'opps.image', 'opps.channel', 'opps.source', 'redactor', 'tagging',) settings.REDACTOR_OPTIONS = {'lang': 'en'} settings.REDACTOR_UPLOAD = 'uploads/' <REPLACE_END> <|endoftext|> #!/usr/bin/env python # -*- coding: utf-8 -*- from django.conf import settings VERSION = (0, 0, 1) __version__ = ".".join(map(str, VERSION)) __status__ = "Development" __description__ = u"Opps CMS websites magazines and high-traffic" __author__ = u"Thiago Avelino" __credits__ = [] __email__ = u"opps-developers@googlegroups.com" __license__ = u"BSD" __copyright__ = u"Copyright 2013, YACOWS" settings.INSTALLED_APPS += ('opps.article', 'opps.image', 'opps.channel', 'opps.source', 'redactor', 'tagging',) settings.REDACTOR_OPTIONS = {'lang': 'en'} settings.REDACTOR_UPLOAD = 'uploads/'
Add installed app on opps init #!/usr/bin/env python # -*- coding: utf-8 -*- VERSION = (0, 0, 1) __version__ = ".".join(map(str, VERSION)) __status__ = "Development" __description__ = u"Opps CMS websites magazines and high-traffic" __author__ = u"Thiago Avelino" __credits__ = [] __email__ = u"opps-developers@googlegroups.com" __license__ = u"BSD" __copyright__ = u"Copyright 2013, YACOWS"
d9a8d30ba12f4fb61fdffe353d225c2ffcd074fa
fabfile.py
fabfile.py
from fabric.api import cd, run, sudo, env, execute from datetime import datetime env.hosts = ['andrewlorente.com'] apps = { 'bloge': ['bloge@andrewlorente.com'], 'andrewlorente': ['andrewlorente@andrewlorente.com'], } def deploy(app): if app not in apps.keys(): raise Exception("Unknown deploy target '{0}'".format(app)) release_id = datetime.now().strftime("%Y%m%d%H%M%S") execute(build, app, release_id, hosts=apps[app]) execute(release, app, hosts=['alorente@andrewlorente.com']) def build(app, release_id): release_dir = "/u/apps/{0}/releases/{1}".format(app, release_id) repo = "https://github.com/AndrewLorente/{0}.git".format(app) run("git clone -q {0} {1}".format(repo, release_dir)) with cd(release_dir): run("cabal update") run("cabal install --constraint 'template-haskell installed' " "--dependencies-only --force-reinstall") run("cabal configure") run("cabal build") run("ln -nfs {0} /u/apps/{1}/current".format(release_dir, app)) def release(app): sudo("initctl restart " + app)
from fabric.api import cd, run, sudo, env, execute, task from datetime import datetime env.hosts = ['andrewlorente.com'] apps = { 'bloge': ['bloge@andrewlorente.com'], 'andrewlorente': ['andrewlorente@andrewlorente.com'], } @task def deploy(app): if app not in apps.keys(): raise Exception("Unknown deploy target '{0}'".format(app)) release_id = datetime.now().strftime("%Y%m%d%H%M%S") execute(build, app, release_id, hosts=apps[app]) execute(release, app, hosts=['alorente@andrewlorente.com']) def build(app, release_id): release_dir = "/u/apps/{0}/releases/{1}".format(app, release_id) repo = "https://github.com/AndrewLorente/{0}.git".format(app) run("git clone -q {0} {1}".format(repo, release_dir)) with cd(release_dir): run("cabal update") run("cabal install --constraint 'template-haskell installed' " "--dependencies-only --force-reinstall") run("cabal configure") run("cabal build") run("ln -nfs {0} /u/apps/{1}/current".format(release_dir, app)) def release(app): sudo("initctl restart " + app)
Hide support functions from the public interface
Hide support functions from the public interface
Python
mit
ErinCall/andrewlorente
<REPLACE_OLD> execute from <REPLACE_NEW> execute, task from <REPLACE_END> <REPLACE_OLD> ['andrewlorente@andrewlorente.com'], } def <REPLACE_NEW> ['andrewlorente@andrewlorente.com'], } @task def <REPLACE_END> <|endoftext|> from fabric.api import cd, run, sudo, env, execute, task from datetime import datetime env.hosts = ['andrewlorente.com'] apps = { 'bloge': ['bloge@andrewlorente.com'], 'andrewlorente': ['andrewlorente@andrewlorente.com'], } @task def deploy(app): if app not in apps.keys(): raise Exception("Unknown deploy target '{0}'".format(app)) release_id = datetime.now().strftime("%Y%m%d%H%M%S") execute(build, app, release_id, hosts=apps[app]) execute(release, app, hosts=['alorente@andrewlorente.com']) def build(app, release_id): release_dir = "/u/apps/{0}/releases/{1}".format(app, release_id) repo = "https://github.com/AndrewLorente/{0}.git".format(app) run("git clone -q {0} {1}".format(repo, release_dir)) with cd(release_dir): run("cabal update") run("cabal install --constraint 'template-haskell installed' " "--dependencies-only --force-reinstall") run("cabal configure") run("cabal build") run("ln -nfs {0} /u/apps/{1}/current".format(release_dir, app)) def release(app): sudo("initctl restart " + app)
Hide support functions from the public interface from fabric.api import cd, run, sudo, env, execute from datetime import datetime env.hosts = ['andrewlorente.com'] apps = { 'bloge': ['bloge@andrewlorente.com'], 'andrewlorente': ['andrewlorente@andrewlorente.com'], } def deploy(app): if app not in apps.keys(): raise Exception("Unknown deploy target '{0}'".format(app)) release_id = datetime.now().strftime("%Y%m%d%H%M%S") execute(build, app, release_id, hosts=apps[app]) execute(release, app, hosts=['alorente@andrewlorente.com']) def build(app, release_id): release_dir = "/u/apps/{0}/releases/{1}".format(app, release_id) repo = "https://github.com/AndrewLorente/{0}.git".format(app) run("git clone -q {0} {1}".format(repo, release_dir)) with cd(release_dir): run("cabal update") run("cabal install --constraint 'template-haskell installed' " "--dependencies-only --force-reinstall") run("cabal configure") run("cabal build") run("ln -nfs {0} /u/apps/{1}/current".format(release_dir, app)) def release(app): sudo("initctl restart " + app)
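Editor's note: in Fabric 1.x, decorating any callable with @task switches the fabfile to opt-in mode — only decorated callables are listed and invocable from the fab CLI, which is how this commit hides build and release while keeping them importable. A toy re-implementation of that opt-in registry, to make the behaviour concrete (this is not Fabric's actual internals):

    _tasks = {}

    def task(fn):
        # Registering makes a function "public"; everything else stays hidden.
        _tasks[fn.__name__] = fn
        return fn

    @task
    def deploy():
        build()                      # helpers remain callable from Python

    def build():
        print("building")

    print(sorted(_tasks))            # ['deploy'] -- build is not exposed
    deploy()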
33535ae325e15c7341c6330cf1caa756cfa09831
tests/lib/test_coins.py
tests/lib/test_coins.py
import electrumx.lib.coins as coins def test_bitcoin_cash(): raw_header = bytes.fromhex( "00000020df975c121dcbc18bbb7ddfd0419fc368b45db86b48c87e0" "1000000000000000036ae3dd40a10a40d3050de13ca546a2f81589d" "e2d2f317925a43a115437e2381f5bf535b94da0118ac8df8c5" ) height = 540000 electrum_header = { 'block_height': 540000, 'version': 536870912, 'prev_block_hash': '0000000000000000017ec8486bb85db468c39f41d0df7dbb8bc1cb1d125c97df', 'merkle_root': '81237e4315a1435a9217f3d2e29d58812f6a54ca13de50300da4100ad43dae36', 'timestamp': 1532215285, 'bits': 402774676, 'nonce': 3321400748 } assert coins.BitcoinCash.electrum_header( raw_header, height) == electrum_header
Add test for BCH electrum header
Add test for BCH electrum header
Python
mit
thelazier/electrumx,thelazier/electrumx
<INSERT> import electrumx.lib.coins as coins def test_bitcoin_cash(): <INSERT_END> <INSERT> raw_header = bytes.fromhex( "00000020df975c121dcbc18bbb7ddfd0419fc368b45db86b48c87e0" "1000000000000000036ae3dd40a10a40d3050de13ca546a2f81589d" "e2d2f317925a43a115437e2381f5bf535b94da0118ac8df8c5" ) height = 540000 electrum_header = { 'block_height': 540000, 'version': 536870912, 'prev_block_hash': '0000000000000000017ec8486bb85db468c39f41d0df7dbb8bc1cb1d125c97df', 'merkle_root': '81237e4315a1435a9217f3d2e29d58812f6a54ca13de50300da4100ad43dae36', 'timestamp': 1532215285, 'bits': 402774676, 'nonce': 3321400748 } assert coins.BitcoinCash.electrum_header( raw_header, height) == electrum_header <INSERT_END> <|endoftext|> import electrumx.lib.coins as coins def test_bitcoin_cash(): raw_header = bytes.fromhex( "00000020df975c121dcbc18bbb7ddfd0419fc368b45db86b48c87e0" "1000000000000000036ae3dd40a10a40d3050de13ca546a2f81589d" "e2d2f317925a43a115437e2381f5bf535b94da0118ac8df8c5" ) height = 540000 electrum_header = { 'block_height': 540000, 'version': 536870912, 'prev_block_hash': '0000000000000000017ec8486bb85db468c39f41d0df7dbb8bc1cb1d125c97df', 'merkle_root': '81237e4315a1435a9217f3d2e29d58812f6a54ca13de50300da4100ad43dae36', 'timestamp': 1532215285, 'bits': 402774676, 'nonce': 3321400748 } assert coins.BitcoinCash.electrum_header( raw_header, height) == electrum_header
Add test for BCH electrum header
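Editor's note: the expected dictionary in this record follows directly from the standard 80-byte Bitcoin block-header layout (little-endian integers; hashes stored little-endian but conventionally displayed byte-reversed). The sketch below reproduces the record's values with only the standard library — the field offsets are the generic header layout, not code taken from electrumx:

    import struct

    raw = bytes.fromhex(
        "00000020df975c121dcbc18bbb7ddfd0419fc368b45db86b48c87e0"
        "1000000000000000036ae3dd40a10a40d3050de13ca546a2f81589d"
        "e2d2f317925a43a115437e2381f5bf535b94da0118ac8df8c5")

    version, = struct.unpack_from('<I', raw, 0)          # 4-byte LE version
    prev_hash = raw[4:36][::-1].hex()                    # reversed for display
    merkle_root = raw[36:68][::-1].hex()
    timestamp, bits, nonce = struct.unpack_from('<III', raw, 68)

    print(version, timestamp, bits, nonce)
    # 536870912 1532215285 402774676 3321400748  -- matches the record
    print(prev_hash)  # 0000000000000000017ec8486bb85db468c39f41d0df7dbb8bc1cb1d125c97df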
0b76510e58c4eaa71fb37c563b00fa6cc67d49fc
tests/print_view_hierarchy_test.py
tests/print_view_hierarchy_test.py
"""Tests for scripts/print_view_hierarchy.py.""" import re import unittest from test_utils import import_utils import_utils.prepare_lldb_import_or_exit() import lldb import_utils.prepare_for_scripts_imports() from scripts import print_view_hierarchy class PrintViewHierarchyTest(unittest.TestCase): def testPrintViewHierarchy(self): """Tests the expected output of the |pv| command.""" debugger = lldb.SBDebugger.Create() debugger.SetAsync(False) target = debugger.CreateTarget('') error = lldb.SBError() process = target.AttachToProcessWithName(debugger.GetListener(), 'TestApp', False, error) if not process: self.assertTrue(False, 'Could not attach to process "TestApp"') debugger.SetSelectedTarget(target) result = lldb.SBCommandReturnObject() print_view_hierarchy.print_view_hierarchy(debugger, None, result, None) self.assertTrue(result.Succeeded()) expected_output_regex = r'<UIWindow: 0x\w{12}; frame = \(0 0; 414 736\); autoresize = W\+H; gestureRecognizers = <NSArray: 0x\w{12}>; layer = <UIWindowLayer: 0x\w{12}>>\n \|' self.assertTrue(re.search(expected_output_regex, result.GetOutput(), re.M)) debugger.DeleteTarget(target)
Add a test for |pv| command.
Add a test for |pv| command.
Python
mit
mrhappyasthma/HappyDebugging,mrhappyasthma/happydebugging
<INSERT> """Tests for scripts/print_view_hierarchy.py.""" import re import unittest from test_utils import import_utils import_utils.prepare_lldb_import_or_exit() import lldb import_utils.prepare_for_scripts_imports() from scripts import print_view_hierarchy class PrintViewHierarchyTest(unittest.TestCase): <INSERT_END> <INSERT> def testPrintViewHierarchy(self): """Tests the expected output of the |pv| command.""" debugger = lldb.SBDebugger.Create() debugger.SetAsync(False) target = debugger.CreateTarget('') error = lldb.SBError() process = target.AttachToProcessWithName(debugger.GetListener(), 'TestApp', False, error) if not process: self.assertTrue(False, 'Could not attach to process "TestApp"') debugger.SetSelectedTarget(target) result = lldb.SBCommandReturnObject() print_view_hierarchy.print_view_hierarchy(debugger, None, result, None) self.assertTrue(result.Succeeded()) expected_output_regex = r'<UIWindow: 0x\w{12}; frame = \(0 0; 414 736\); autoresize = W\+H; gestureRecognizers = <NSArray: 0x\w{12}>; layer = <UIWindowLayer: 0x\w{12}>>\n \|' self.assertTrue(re.search(expected_output_regex, result.GetOutput(), re.M)) debugger.DeleteTarget(target) <INSERT_END> <|endoftext|> """Tests for scripts/print_view_hierarchy.py.""" import re import unittest from test_utils import import_utils import_utils.prepare_lldb_import_or_exit() import lldb import_utils.prepare_for_scripts_imports() from scripts import print_view_hierarchy class PrintViewHierarchyTest(unittest.TestCase): def testPrintViewHierarchy(self): """Tests the expected output of the |pv| command.""" debugger = lldb.SBDebugger.Create() debugger.SetAsync(False) target = debugger.CreateTarget('') error = lldb.SBError() process = target.AttachToProcessWithName(debugger.GetListener(), 'TestApp', False, error) if not process: self.assertTrue(False, 'Could not attach to process "TestApp"') debugger.SetSelectedTarget(target) result = lldb.SBCommandReturnObject() print_view_hierarchy.print_view_hierarchy(debugger, None, result, None) self.assertTrue(result.Succeeded()) expected_output_regex = r'<UIWindow: 0x\w{12}; frame = \(0 0; 414 736\); autoresize = W\+H; gestureRecognizers = <NSArray: 0x\w{12}>; layer = <UIWindowLayer: 0x\w{12}>>\n \|' self.assertTrue(re.search(expected_output_regex, result.GetOutput(), re.M)) debugger.DeleteTarget(target)
Add a test for |pv| command.
594923a44d80a2879eb1ed5b9b0a6be11e13c88f
tests/Epsilon_tests/ImportTest.py
tests/Epsilon_tests/ImportTest.py
#!/usr/bin/env python """ :Author Patrik Valkovic :Created 23.06.2017 16:39 :Licence GNUv3 Part of grammpy """ from unittest import TestCase, main from grammpy import EPS from grammpy import EPSILON class ImportTest(TestCase): def test_idSame(self): self.assertEqual(id(EPS),id(EPSILON)) def test_equal(self): self.assertEqual(EPS, EPSILON) if __name__ == '__main__': main()
#!/usr/bin/env python """ :Author Patrik Valkovic :Created 23.06.2017 16:39 :Licence GNUv3 Part of grammpy """ from unittest import TestCase, main from grammpy import EPS from grammpy import EPSILON class ImportTest(TestCase): def test_idSame(self): self.assertEqual(id(EPS), id(EPSILON)) def test_equal(self): self.assertEqual(EPS, EPSILON) def test_equalToSelf(self): self.assertEqual(EPS, EPS) def test_notEqualToNumber(self): self.assertNotEqual(EPS, 5) def test_notEqualToString(self): self.assertNotEqual(EPS, "asdf") def test_notEqualToObject(self): self.assertNotEqual(EPS, object()) if __name__ == '__main__': main()
Revert "Revert "Add tests to compare epsilon with another objects""
Revert "Revert "Add tests to compare epsilon with another objects"" This reverts commit d13b3d89124d03f563c2ee2143ae16eec7d0b191.
Python
mit
PatrikValkovic/grammpy
<REPLACE_OLD> ImportTest(TestCase): <REPLACE_NEW> ImportTest(TestCase): <REPLACE_END> <REPLACE_OLD> self.assertEqual(id(EPS),id(EPSILON)) <REPLACE_NEW> self.assertEqual(id(EPS), id(EPSILON)) <REPLACE_END> <REPLACE_OLD> EPSILON) if <REPLACE_NEW> EPSILON) def test_equalToSelf(self): self.assertEqual(EPS, EPS) def test_notEqualToNumber(self): self.assertNotEqual(EPS, 5) def test_notEqualToString(self): self.assertNotEqual(EPS, "asdf") def test_notEqualToObject(self): self.assertNotEqual(EPS, object()) if <REPLACE_END> <|endoftext|> #!/usr/bin/env python """ :Author Patrik Valkovic :Created 23.06.2017 16:39 :Licence GNUv3 Part of grammpy """ from unittest import TestCase, main from grammpy import EPS from grammpy import EPSILON class ImportTest(TestCase): def test_idSame(self): self.assertEqual(id(EPS), id(EPSILON)) def test_equal(self): self.assertEqual(EPS, EPSILON) def test_equalToSelf(self): self.assertEqual(EPS, EPS) def test_notEqualToNumber(self): self.assertNotEqual(EPS, 5) def test_notEqualToString(self): self.assertNotEqual(EPS, "asdf") def test_notEqualToObject(self): self.assertNotEqual(EPS, object()) if __name__ == '__main__': main()
Revert "Revert "Add tests to compare epsilon with another objects"" This reverts commit d13b3d89124d03f563c2ee2143ae16eec7d0b191. #!/usr/bin/env python """ :Author Patrik Valkovic :Created 23.06.2017 16:39 :Licence GNUv3 Part of grammpy """ from unittest import TestCase, main from grammpy import EPS from grammpy import EPSILON class ImportTest(TestCase): def test_idSame(self): self.assertEqual(id(EPS),id(EPSILON)) def test_equal(self): self.assertEqual(EPS, EPSILON) if __name__ == '__main__': main()
8905993c0daa140b10cb04dca1e7bed7b813ea7a
imagedownloader/libs/console.py
imagedownloader/libs/console.py
import sys import pyttsx import aspects from datetime import datetime engine = pyttsx.init() def show(*objs): begin = '' if '\r' in objs[0] or '\b' in objs[0] else '\n' sys.stdout.write(begin) for part in objs: sys.stdout.write(str(part)) sys.stdout.flush() def say(speech): #NOT engine.startLoop() show(speech) engine.say(speech) engine.runAndWait() progress = ['/','-','\\','|'] def show_progress(i): show('\b \b', progress[i % len(progress)]) def show_times(*args): begin = datetime.now() result = yield aspects.proceed(*args) end = datetime.now() say("\t[time consumed: %.2f seconds]\n" % (end - begin).total_seconds()) yield aspects.return_stop(result)
import sys import pyttsx import aspects from datetime import datetime engine = pyttsx.init() def show(*objs): begin = '' if '\r' in objs[0] or '\b' in objs[0] else '\n' sys.stdout.write(begin) for part in objs: sys.stdout.write(str(part)) sys.stdout.flush() def say(speech): #NOT engine.startLoop() show(speech) engine.say(speech) engine.runAndWait() progress = ['/','-','\\','|'] def show_progress(i): show('\b \b', progress[i % len(progress)]) def show_times(*args): begin = datetime.utcnow().replace(tzinfo=pytz.UTC) result = yield aspects.proceed(*args) end = datetime.utcnow().replace(tzinfo=pytz.UTC) say("\t[time consumed: %.2f seconds]\n" % (end - begin).total_seconds()) yield aspects.return_stop(result)
Add UTC timezone to datetimes in the libs folder.
Add UTC timezone to datetimes in the libs folder.
Python
mit
ahMarrone/solar_radiation_model,scottlittle/solar_radiation_model,gersolar/solar_radiation_model
<REPLACE_OLD> datetime.now() result <REPLACE_NEW> datetime.utcnow().replace(tzinfo=pytz.UTC) result <REPLACE_END> <REPLACE_OLD> datetime.now() say("\t[time <REPLACE_NEW> datetime.utcnow().replace(tzinfo=pytz.UTC) say("\t[time <REPLACE_END> <|endoftext|> import sys import pyttsx import aspects from datetime import datetime engine = pyttsx.init() def show(*objs): begin = '' if '\r' in objs[0] or '\b' in objs[0] else '\n' sys.stdout.write(begin) for part in objs: sys.stdout.write(str(part)) sys.stdout.flush() def say(speech): #NOT engine.startLoop() show(speech) engine.say(speech) engine.runAndWait() progress = ['/','-','\\','|'] def show_progress(i): show('\b \b', progress[i % len(progress)]) def show_times(*args): begin = datetime.utcnow().replace(tzinfo=pytz.UTC) result = yield aspects.proceed(*args) end = datetime.utcnow().replace(tzinfo=pytz.UTC) say("\t[time consumed: %.2f seconds]\n" % (end - begin).total_seconds()) yield aspects.return_stop(result)
Add UTC timezone to datetimes in the libs folder. import sys import pyttsx import aspects from datetime import datetime engine = pyttsx.init() def show(*objs): begin = '' if '\r' in objs[0] or '\b' in objs[0] else '\n' sys.stdout.write(begin) for part in objs: sys.stdout.write(str(part)) sys.stdout.flush() def say(speech): #NOT engine.startLoop() show(speech) engine.say(speech) engine.runAndWait() progress = ['/','-','\\','|'] def show_progress(i): show('\b \b', progress[i % len(progress)]) def show_times(*args): begin = datetime.now() result = yield aspects.proceed(*args) end = datetime.now() say("\t[time consumed: %.2f seconds]\n" % (end - begin).total_seconds()) yield aspects.return_stop(result)
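Editor's note: reproduced faithfully, but note that both the new_contents and the completion reference pytz.UTC without importing pytz — the omission is present in the upstream commit itself, so the new file raises NameError as written. Assuming the intent was simply a timezone-aware UTC "now", either line below works; the first needs no third-party package on Python 3:

    from datetime import datetime, timezone
    begin = datetime.now(timezone.utc)     # stdlib-only aware UTC timestamp

    import pytz                            # third-party; the import the commit omits
    end = datetime.utcnow().replace(tzinfo=pytz.UTC)

    print((end - begin).total_seconds())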
c4e1059b387269b6098d05d2227c085e7931b140
setup.py
setup.py
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from distutils.core import setup, Extension cpp_args = ['-std=c++11', '-stdlib=libc++', '-mmacosx-version-min=10.7'] ext_modules = [ Extension( 'make_asym', ['make_asym.cc'], include_dirs=['include'], language='c++', extra_compile_args=cpp_args, ), ] setup( name='make_asym', version='0.1', author='Nate Lust', author_email='nlust@astro.princeton.edu', description='A python extension module for calculating asymmetry values', ext_modules=ext_modules, )
# This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from distutils.core import setup, Extension cpp_args = ['-std=c++11', '-stdlib=libc++', '-mmacosx-version-min=10.7'] ext_modules = [ Extension( 'make_asym', ['make_asym.cc'], include_dirs=['include'], language='c++', extra_compile_args=cpp_args, ), ] setup( name='make_asym', version='0.1', author='Nate Lust', author_email='nlust@astro.princeton.edu', description='A module for calculating centers though least asymmetry', ext_modules=ext_modules, )
Update module description for clarity
Update module description for clarity
Python
mpl-2.0
natelust/least_asymmetry,natelust/least_asymmetry,natelust/least_asymmetry
<DELETE> python extension <DELETE_END> <REPLACE_OLD> asymmetry values', <REPLACE_NEW> centers though least asymmetry', <REPLACE_END> <|endoftext|> # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from distutils.core import setup, Extension cpp_args = ['-std=c++11', '-stdlib=libc++', '-mmacosx-version-min=10.7'] ext_modules = [ Extension( 'make_asym', ['make_asym.cc'], include_dirs=['include'], language='c++', extra_compile_args=cpp_args, ), ] setup( name='make_asym', version='0.1', author='Nate Lust', author_email='nlust@astro.princeton.edu', description='A module for calculating centers though least asymmetry', ext_modules=ext_modules, )
Update module description for clarity # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, You can obtain one at http://mozilla.org/MPL/2.0/. from distutils.core import setup, Extension cpp_args = ['-std=c++11', '-stdlib=libc++', '-mmacosx-version-min=10.7'] ext_modules = [ Extension( 'make_asym', ['make_asym.cc'], include_dirs=['include'], language='c++', extra_compile_args=cpp_args, ), ] setup( name='make_asym', version='0.1', author='Nate Lust', author_email='nlust@astro.princeton.edu', description='A python extension module for calculating asymmetry values', ext_modules=ext_modules, )
563220ef19395201aed7f6392519f84db4ec7a77
tests/test_midas.py
tests/test_midas.py
import datetime from midas import mix from midas.midas import estimate, forecast def test_estimate(gdp_data, farmpay_data): y, yl, x, yf, ylf, xf = mix.mix_freq(gdp_data.gdp, farmpay_data.farmpay, 3, 1, 1, start_date=datetime.datetime(1985, 1, 1), end_date=datetime.datetime(2009, 1, 1)) res = estimate(y, yl, x) fc = forecast(xf, ylf, res) print(fc) assert False
import datetime import numpy as np from midas import mix from midas.midas import estimate, forecast def test_estimate(gdp_data, farmpay_data): y, yl, x, yf, ylf, xf = mix.mix_freq(gdp_data.gdp, farmpay_data.farmpay, 3, 1, 1, start_date=datetime.datetime(1985, 1, 1), end_date=datetime.datetime(2009, 1, 1)) res = estimate(y, yl, x) fc = forecast(xf, ylf, res) print(fc) assert np.isclose(fc.loc['2011-04-01'][0], 1.336844, rtol=1e-6)
Add assertion for forecast test
Add assertion for forecast test
Python
mit
mikemull/midaspy
<REPLACE_OLD> datetime from <REPLACE_NEW> datetime import numpy as np from <REPLACE_END> <REPLACE_OLD> False <REPLACE_NEW> np.isclose(fc.loc['2011-04-01'][0], 1.336844, rtol=1e-6) <REPLACE_END> <|endoftext|> import datetime import numpy as np from midas import mix from midas.midas import estimate, forecast def test_estimate(gdp_data, farmpay_data): y, yl, x, yf, ylf, xf = mix.mix_freq(gdp_data.gdp, farmpay_data.farmpay, 3, 1, 1, start_date=datetime.datetime(1985, 1, 1), end_date=datetime.datetime(2009, 1, 1)) res = estimate(y, yl, x) fc = forecast(xf, ylf, res) print(fc) assert np.isclose(fc.loc['2011-04-01'][0], 1.336844, rtol=1e-6)
Add assertion for forecast test import datetime from midas import mix from midas.midas import estimate, forecast def test_estimate(gdp_data, farmpay_data): y, yl, x, yf, ylf, xf = mix.mix_freq(gdp_data.gdp, farmpay_data.farmpay, 3, 1, 1, start_date=datetime.datetime(1985, 1, 1), end_date=datetime.datetime(2009, 1, 1)) res = estimate(y, yl, x) fc = forecast(xf, ylf, res) print(fc) assert False
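Editor's note: the fix replaces a placeholder assert False with numpy.isclose and a relative tolerance, the usual way to compare floats in tests. A tiny illustration of what rtol=1e-6 accepts (the probe values are invented around the record's expected forecast):

    import numpy as np

    expected = 1.336844
    assert np.isclose(1.3368441, expected, rtol=1e-6)   # ~1e-7 off: passes
    assert not np.isclose(1.3369, expected, rtol=1e-6)  # ~6e-5 off: fails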
0d58d7c7a3eee8748efbf7405aba7a5f3e0f7eb3
bluebottle/funding_telesom/admin.py
bluebottle/funding_telesom/admin.py
from django.contrib import admin from bluebottle.funding.admin import PaymentChildAdmin, PaymentProviderChildAdmin, BankAccountChildAdmin from bluebottle.funding.models import PaymentProvider, Payment from bluebottle.funding_telesom.models import TelesomPayment, TelesomPaymentProvider, TelesomBankAccount @admin.register(TelesomPayment) class TelesomPaymentAdmin(PaymentChildAdmin): base_model = Payment fields = PaymentChildAdmin.fields + [ 'account_name', 'account_number', 'response', 'unique_id', 'reference_id', 'transaction_id', 'transaction_amount', 'issuer_transaction_id', 'amount', 'currency' ] list_display = ['created', 'account_name', 'account_number', 'amount', 'status'] @admin.register(TelesomPaymentProvider) class TelesomPaymentProviderAdmin(PaymentProviderChildAdmin): base_model = PaymentProvider @admin.register(TelesomBankAccount) class TelesomBankAccountAdmin(BankAccountChildAdmin): model = TelesomBankAccount fields = ('account_name', 'mobile_number') + BankAccountChildAdmin.fields list_filter = ['reviewed'] search_fields = ['account_name', 'mobile_number'] list_display = ['created', 'account_name', 'mobile_number', 'reviewed']
from django.contrib import admin from bluebottle.funding.admin import PaymentChildAdmin, PaymentProviderChildAdmin, BankAccountChildAdmin from bluebottle.funding.models import PaymentProvider, Payment from bluebottle.funding_telesom.models import TelesomPayment, TelesomPaymentProvider, TelesomBankAccount @admin.register(TelesomPayment) class TelesomPaymentAdmin(PaymentChildAdmin): base_model = Payment fields = PaymentChildAdmin.fields + [ 'account_name', 'account_number', 'response', 'unique_id', 'reference_id', 'transaction_id', 'transaction_amount', 'issuer_transaction_id', 'amount', 'currency' ] search_fields = ['account_name', 'account_number'] list_display = ['created', 'account_name', 'account_number', 'amount', 'status'] @admin.register(TelesomPaymentProvider) class TelesomPaymentProviderAdmin(PaymentProviderChildAdmin): base_model = PaymentProvider @admin.register(TelesomBankAccount) class TelesomBankAccountAdmin(BankAccountChildAdmin): model = TelesomBankAccount fields = ('account_name', 'mobile_number') + BankAccountChildAdmin.fields list_filter = ['reviewed'] search_fields = ['account_name', 'mobile_number'] list_display = ['created', 'account_name', 'mobile_number', 'reviewed']
Add some search fields to Zaad
Add some search fields to Zaad
Python
bsd-3-clause
onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle,onepercentclub/bluebottle
<INSERT> search_fields = ['account_name', 'account_number'] <INSERT_END> <|endoftext|> from django.contrib import admin from bluebottle.funding.admin import PaymentChildAdmin, PaymentProviderChildAdmin, BankAccountChildAdmin from bluebottle.funding.models import PaymentProvider, Payment from bluebottle.funding_telesom.models import TelesomPayment, TelesomPaymentProvider, TelesomBankAccount @admin.register(TelesomPayment) class TelesomPaymentAdmin(PaymentChildAdmin): base_model = Payment fields = PaymentChildAdmin.fields + [ 'account_name', 'account_number', 'response', 'unique_id', 'reference_id', 'transaction_id', 'transaction_amount', 'issuer_transaction_id', 'amount', 'currency' ] search_fields = ['account_name', 'account_number'] list_display = ['created', 'account_name', 'account_number', 'amount', 'status'] @admin.register(TelesomPaymentProvider) class TelesomPaymentProviderAdmin(PaymentProviderChildAdmin): base_model = PaymentProvider @admin.register(TelesomBankAccount) class TelesomBankAccountAdmin(BankAccountChildAdmin): model = TelesomBankAccount fields = ('account_name', 'mobile_number') + BankAccountChildAdmin.fields list_filter = ['reviewed'] search_fields = ['account_name', 'mobile_number'] list_display = ['created', 'account_name', 'mobile_number', 'reviewed']
Add some search fields to Zaad from django.contrib import admin from bluebottle.funding.admin import PaymentChildAdmin, PaymentProviderChildAdmin, BankAccountChildAdmin from bluebottle.funding.models import PaymentProvider, Payment from bluebottle.funding_telesom.models import TelesomPayment, TelesomPaymentProvider, TelesomBankAccount @admin.register(TelesomPayment) class TelesomPaymentAdmin(PaymentChildAdmin): base_model = Payment fields = PaymentChildAdmin.fields + [ 'account_name', 'account_number', 'response', 'unique_id', 'reference_id', 'transaction_id', 'transaction_amount', 'issuer_transaction_id', 'amount', 'currency' ] list_display = ['created', 'account_name', 'account_number', 'amount', 'status'] @admin.register(TelesomPaymentProvider) class TelesomPaymentProviderAdmin(PaymentProviderChildAdmin): base_model = PaymentProvider @admin.register(TelesomBankAccount) class TelesomBankAccountAdmin(BankAccountChildAdmin): model = TelesomBankAccount fields = ('account_name', 'mobile_number') + BankAccountChildAdmin.fields list_filter = ['reviewed'] search_fields = ['account_name', 'mobile_number'] list_display = ['created', 'account_name', 'mobile_number', 'reviewed']
8db643b23716e3678ec02bcea6ade0f10a81bf76
setup.py
setup.py
#!/usr/bin/env python """Setup script for PythonTemplateDemo.""" import setuptools from demo import __project__, __version__ import os if os.path.exists('README.rst'): README = open('README.rst').read() else: README = "" # a placeholder until README is generated on release CHANGES = open('CHANGES.md').read() setuptools.setup( name=__project__, version=__version__, description="PythonTemplateDemo is a Python package template.", url='https://github.com/jacebrowning/template-python-demo', author='Jace Browning', author_email='jacebrowning@gmail.com', packages=setuptools.find_packages(), entry_points={'console_scripts': []}, long_description=(README + '\n' + CHANGES), license='MIT', classifiers=[ # TODO: update this list to match your application: https://pypi.python.org/pypi?%3Aaction=list_classifiers 'Development Status :: 1 - Planning', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', ], install_requires=open('requirements.txt').readlines(), )
#!/usr/bin/env python """Setup script for PythonTemplateDemo.""" import setuptools from demo import __project__, __version__ import os if os.path.exists('README.rst'): README = open('README.rst').read() else: README = "" # a placeholder until README is generated on release CHANGES = open('CHANGES.md').read() setuptools.setup( name=__project__, version=__version__, description="A sample project templated from jacebrowning/template-python.", url='https://github.com/jacebrowning/template-python-demo', author='Jace Browning', author_email='jacebrowning@gmail.com', packages=setuptools.find_packages(), entry_points={'console_scripts': []}, long_description=(README + '\n' + CHANGES), license='MIT', classifiers=[ # TODO: update this list to match your application: https://pypi.python.org/pypi?%3Aaction=list_classifiers 'Development Status :: 1 - Planning', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', ], install_requires=open('requirements.txt').readlines(), )
Deploy Travis CI build 623 to GitHub
Deploy Travis CI build 623 to GitHub
Python
mit
jacebrowning/template-python-demo
<REPLACE_OLD> description="PythonTemplateDemo is a Python package template.", <REPLACE_NEW> description="A sample project templated from jacebrowning/template-python.", <REPLACE_END> <|endoftext|> #!/usr/bin/env python """Setup script for PythonTemplateDemo.""" import setuptools from demo import __project__, __version__ import os if os.path.exists('README.rst'): README = open('README.rst').read() else: README = "" # a placeholder until README is generated on release CHANGES = open('CHANGES.md').read() setuptools.setup( name=__project__, version=__version__, description="A sample project templated from jacebrowning/template-python.", url='https://github.com/jacebrowning/template-python-demo', author='Jace Browning', author_email='jacebrowning@gmail.com', packages=setuptools.find_packages(), entry_points={'console_scripts': []}, long_description=(README + '\n' + CHANGES), license='MIT', classifiers=[ # TODO: update this list to match your application: https://pypi.python.org/pypi?%3Aaction=list_classifiers 'Development Status :: 1 - Planning', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', ], install_requires=open('requirements.txt').readlines(), )
Deploy Travis CI build 623 to GitHub #!/usr/bin/env python """Setup script for PythonTemplateDemo.""" import setuptools from demo import __project__, __version__ import os if os.path.exists('README.rst'): README = open('README.rst').read() else: README = "" # a placeholder until README is generated on release CHANGES = open('CHANGES.md').read() setuptools.setup( name=__project__, version=__version__, description="PythonTemplateDemo is a Python package template.", url='https://github.com/jacebrowning/template-python-demo', author='Jace Browning', author_email='jacebrowning@gmail.com', packages=setuptools.find_packages(), entry_points={'console_scripts': []}, long_description=(README + '\n' + CHANGES), license='MIT', classifiers=[ # TODO: update this list to match your application: https://pypi.python.org/pypi?%3Aaction=list_classifiers 'Development Status :: 1 - Planning', 'Natural Language :: English', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Programming Language :: Python :: 3.3', 'Programming Language :: Python :: 3.4', 'Programming Language :: Python :: 3.5', ], install_requires=open('requirements.txt').readlines(), )
33f050ab022626846510a7cbcd4b299612f2ff85
tvmaze/tests.py
tvmaze/tests.py
import unittest from tvmazereader import main class TestMethods(unittest.TestCase): def test_readerMain(self): data = main() self.assertEqual(len(data),2) if __name__ == '__main__': unittest.main()
import unittest from tvmazereader import main class TestMethods(unittest.TestCase): def test_readerMain(self): data = main() self.assertEqual(len(data),2) if __name__ == '__main__': unittest.main() #python -m unittest discover -v
Add comment show test usage from console.
Add comment show test usage from console.
Python
mit
LairdStreak/MyPyPlayGround,LairdStreak/MyPyPlayGround,LairdStreak/MyPyPlayGround
<REPLACE_OLD> unittest.main() <REPLACE_NEW> unittest.main() #python -m unittest discover -v <REPLACE_END> <|endoftext|> import unittest from tvmazereader import main class TestMethods(unittest.TestCase): def test_readerMain(self): data = main() self.assertEqual(len(data),2) if __name__ == '__main__': unittest.main() #python -m unittest discover -v
Add comment show test usage from console. import unittest from tvmazereader import main class TestMethods(unittest.TestCase): def test_readerMain(self): data = main() self.assertEqual(len(data),2) if __name__ == '__main__': unittest.main()
12b8cd254bad5c2cb15de3f0c3e69ab78083fc48
server/app.py
server/app.py
"""This module contains basic functions to instantiate the BigchainDB API. The application is implemented in Flask and runs using Gunicorn. """ import os from flask import Flask from flask.ext.cors import CORS from server.lib.api.views import api_views def create_app(debug): """Return an instance of the Flask application. Args: debug (bool): a flag to activate the debug mode for the app (default: False). """ app = Flask(__name__) CORS(app, origins=("^(https?://)?(www\.)?(" + os.environ.get('DOCKER_MACHINE_IP', 'localhost') + "0|0.0.0.0|dimi-bat.local|localhost|127.0.0.1)(\.com)?:\d{1,5}$"), headers=( 'x-requested-with', 'content-type', 'accept', 'origin', 'authorization', 'x-csrftoken', 'withcredentials', 'cache-control', 'cookie', 'session-id', ), supports_credentials=True, ) app.debug = debug app.register_blueprint(api_views, url_prefix='/api') return app if __name__ == '__main__': app = create_app(debug=True) app.run(host=os.environ.get('FLASK_HOST', '127.0.0.1'), port=os.environ.get('FLASK_PORT', 8000)) app.run()
"""This module contains basic functions to instantiate the BigchainDB API. The application is implemented in Flask and runs using Gunicorn. """ import os from flask import Flask from flask.ext.cors import CORS from server.lib.api.views import api_views def create_app(debug): """Return an instance of the Flask application. Args: debug (bool): a flag to activate the debug mode for the app (default: False). """ app = Flask(__name__) CORS(app, origins=("^(https?://)?(www\.)?(" + os.environ.get('DOCKER_MACHINE_IP', 'localhost') + "|0|0.0.0.0|dimi-bat.local|localhost|127.0.0.1)(\.com)?:\d{1,5}$"), headers=( 'x-requested-with', 'content-type', 'accept', 'origin', 'authorization', 'x-csrftoken', 'withcredentials', 'cache-control', 'cookie', 'session-id', ), supports_credentials=True, ) app.debug = debug app.register_blueprint(api_views, url_prefix='/api') return app if __name__ == '__main__': app = create_app(debug=True) app.run(host=os.environ.get('FLASK_HOST', '127.0.0.1'), port=os.environ.get('FLASK_PORT', 8000)) app.run()
Fix CORS when running the api server with Docker
Fix CORS when running the api server with Docker
Python
apache-2.0
bigchaindb/bigchaindb-examples,bigchaindb/bigchaindb-examples,bigchaindb/bigchaindb-examples
<REPLACE_OLD> "0|0.0.0.0|dimi-bat.local|localhost|127.0.0.1)(\.com)?:\d{1,5}$"), <REPLACE_NEW> "|0|0.0.0.0|dimi-bat.local|localhost|127.0.0.1)(\.com)?:\d{1,5}$"), <REPLACE_END> <|endoftext|> """This module contains basic functions to instantiate the BigchainDB API. The application is implemented in Flask and runs using Gunicorn. """ import os from flask import Flask from flask.ext.cors import CORS from server.lib.api.views import api_views def create_app(debug): """Return an instance of the Flask application. Args: debug (bool): a flag to activate the debug mode for the app (default: False). """ app = Flask(__name__) CORS(app, origins=("^(https?://)?(www\.)?(" + os.environ.get('DOCKER_MACHINE_IP', 'localhost') + "|0|0.0.0.0|dimi-bat.local|localhost|127.0.0.1)(\.com)?:\d{1,5}$"), headers=( 'x-requested-with', 'content-type', 'accept', 'origin', 'authorization', 'x-csrftoken', 'withcredentials', 'cache-control', 'cookie', 'session-id', ), supports_credentials=True, ) app.debug = debug app.register_blueprint(api_views, url_prefix='/api') return app if __name__ == '__main__': app = create_app(debug=True) app.run(host=os.environ.get('FLASK_HOST', '127.0.0.1'), port=os.environ.get('FLASK_PORT', 8000)) app.run()
Fix CORS when running the api server with Docker """This module contains basic functions to instantiate the BigchainDB API. The application is implemented in Flask and runs using Gunicorn. """ import os from flask import Flask from flask.ext.cors import CORS from server.lib.api.views import api_views def create_app(debug): """Return an instance of the Flask application. Args: debug (bool): a flag to activate the debug mode for the app (default: False). """ app = Flask(__name__) CORS(app, origins=("^(https?://)?(www\.)?(" + os.environ.get('DOCKER_MACHINE_IP', 'localhost') + "0|0.0.0.0|dimi-bat.local|localhost|127.0.0.1)(\.com)?:\d{1,5}$"), headers=( 'x-requested-with', 'content-type', 'accept', 'origin', 'authorization', 'x-csrftoken', 'withcredentials', 'cache-control', 'cookie', 'session-id', ), supports_credentials=True, ) app.debug = debug app.register_blueprint(api_views, url_prefix='/api') return app if __name__ == '__main__': app = create_app(debug=True) app.run(host=os.environ.get('FLASK_HOST', '127.0.0.1'), port=os.environ.get('FLASK_PORT', 8000)) app.run()
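Editor's note: the one-character fix inserts a "|" so the interpolated Docker host becomes its own regex alternative instead of being glued onto the literal "0". Matching a trimmed version of both patterns shows the difference (the host value is invented):

    import re

    host = "192.168.99.100"              # stand-in for DOCKER_MACHINE_IP
    broken = r"^(https?://)?(www\.)?(" + host + r"0|0.0.0.0|localhost)(\.com)?:\d{1,5}$"
    fixed  = r"^(https?://)?(www\.)?(" + host + r"|0|0.0.0.0|localhost)(\.com)?:\d{1,5}$"

    url = "http://192.168.99.100:8000"
    print(bool(re.match(broken, url)))   # False: the alternative reads ...1000
    print(bool(re.match(fixed, url)))    # True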
d6f4afa82118d8c1ced7ddc8152f7b31b4cb898a
setup.py
setup.py
# coding: utf-8 from distutils.core import setup # python setup.py sdist --formats=bztar # python setup.py sdist --formats=bztar upload description = 'National characters transcription module.' import trans long_description = open('documentation.rst', 'rb').read() version = trans.__version__ setup( name = 'trans', version = version, description = description, long_description = long_description, author = 'Zelenyak Aleksandr aka ZZZ', author_email = 'ZZZ.Sochi@GMail.com', url = 'http://www.python.org/pypi/trans/', license = 'GPL', platforms = 'any', classifiers = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.4', 'Programming Language :: Python :: 2.5', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', ], py_modules = ['trans'], )
# coding: utf-8 from distutils.core import setup # python setup.py sdist --formats=bztar # python setup.py sdist --formats=bztar upload description = 'National characters transcription module.' import trans long_description = open('documentation.rst', 'rb').read() version = trans.__version__ setup( name = 'trans', version = version, description = description, long_description = long_description, author = 'Zelenyak Aleksandr aka ZZZ', author_email = 'ZZZ.Sochi@GMail.com', url = 'http://www.python.org/pypi/trans/', license = 'BSD', platforms = 'any', classifiers = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.4', 'Programming Language :: Python :: 2.5', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', ], py_modules = ['trans'], )
Change license to BSD. 1.4
Change license to BSD. 1.4
Python
bsd-2-clause
zzzsochi/trans
<REPLACE_OLD> 'GPL', <REPLACE_NEW> 'BSD', <REPLACE_END> <|endoftext|> # coding: utf-8 from distutils.core import setup # python setup.py sdist --formats=bztar # python setup.py sdist --formats=bztar upload description = 'National characters transcription module.' import trans long_description = open('documentation.rst', 'rb').read() version = trans.__version__ setup( name = 'trans', version = version, description = description, long_description = long_description, author = 'Zelenyak Aleksandr aka ZZZ', author_email = 'ZZZ.Sochi@GMail.com', url = 'http://www.python.org/pypi/trans/', license = 'BSD', platforms = 'any', classifiers = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.4', 'Programming Language :: Python :: 2.5', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', ], py_modules = ['trans'], )
Change license to BSD. 1.4 # coding: utf-8 from distutils.core import setup # python setup.py sdist --formats=bztar # python setup.py sdist --formats=bztar upload description = 'National characters transcription module.' import trans long_description = open('documentation.rst', 'rb').read() version = trans.__version__ setup( name = 'trans', version = version, description = description, long_description = long_description, author = 'Zelenyak Aleksandr aka ZZZ', author_email = 'ZZZ.Sochi@GMail.com', url = 'http://www.python.org/pypi/trans/', license = 'GPL', platforms = 'any', classifiers = [ 'Development Status :: 5 - Production/Stable', 'Intended Audience :: Developers', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 2.4', 'Programming Language :: Python :: 2.5', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', ], py_modules = ['trans'], )
216a9176ecf395a7461c6f8ec926d48fa1634bad
manager/__init__.py
manager/__init__.py
import os from flask import Flask from flask.ext.assets import Bundle, Environment app = Flask(__name__) # Load the app config app.config.from_object("config.Config") assets = Environment(app) assets.load_path = [ os.path.join(os.path.dirname(__file__), 'static'), os.path.join(os.path.dirname(__file__), 'static', 'bower_components') ] assets.register( 'js_all', Bundle( 'jquery/dist/jquery.min.js', 'bootstrap/dist/js/bootstrap.min.js', output='js_all.js' ) ) assets.register( 'css_all', Bundle( 'bootstrap/dist/css/bootstrap.css', 'bootstrap/dist/css/bootstrap-theme.css', 'css/ignition.css', output='css_all.css' ) ) from manager.views import core
import os from flask import Flask from flask.ext.assets import Bundle, Environment app = Flask(__name__) # Load the app config app.config.from_object("config.Config") assets = Environment(app) assets.load_path = [ os.path.join(os.path.dirname(__file__), 'static'), os.path.join(os.path.dirname(__file__), 'static', 'bower_components') ] assets.register( 'js_all', Bundle( 'jquery/dist/jquery.min.js', 'bootstrap/dist/js/bootstrap.min.js', output='js_all.js' ) ) assets.register( 'css_all', Bundle( 'bootswatch/sandstone/bootstrap.css', 'css/ignition.css', output='css_all.css' ) ) from manager.views import core
Change theme to sandstone (bootswatch)
Change theme to sandstone (bootswatch)
Python
mit
hreeder/ignition,hreeder/ignition,hreeder/ignition
<REPLACE_OLD> 'bootstrap/dist/css/bootstrap.css', 'bootstrap/dist/css/bootstrap-theme.css', <REPLACE_NEW> 'bootswatch/sandstone/bootstrap.css', <REPLACE_END> <|endoftext|> import os from flask import Flask from flask.ext.assets import Bundle, Environment app = Flask(__name__) # Load the app config app.config.from_object("config.Config") assets = Environment(app) assets.load_path = [ os.path.join(os.path.dirname(__file__), 'static'), os.path.join(os.path.dirname(__file__), 'static', 'bower_components') ] assets.register( 'js_all', Bundle( 'jquery/dist/jquery.min.js', 'bootstrap/dist/js/bootstrap.min.js', output='js_all.js' ) ) assets.register( 'css_all', Bundle( 'bootswatch/sandstone/bootstrap.css', 'css/ignition.css', output='css_all.css' ) ) from manager.views import core
Change theme to sandstone (bootswatch) import os from flask import Flask from flask.ext.assets import Bundle, Environment app = Flask(__name__) # Load the app config app.config.from_object("config.Config") assets = Environment(app) assets.load_path = [ os.path.join(os.path.dirname(__file__), 'static'), os.path.join(os.path.dirname(__file__), 'static', 'bower_components') ] assets.register( 'js_all', Bundle( 'jquery/dist/jquery.min.js', 'bootstrap/dist/js/bootstrap.min.js', output='js_all.js' ) ) assets.register( 'css_all', Bundle( 'bootstrap/dist/css/bootstrap.css', 'bootstrap/dist/css/bootstrap-theme.css', 'css/ignition.css', output='css_all.css' ) ) from manager.views import core
bcc6d199186953b5ae05f7e93bf61c169ac89c77
opps/archives/admin.py
opps/archives/admin.py
from django.contrib import admin from django.contrib.auth import get_user_model from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from opps.core.admin import apply_opps_rules from opps.contrib.multisite.admin import AdminViewPermission from .models import File @apply_opps_rules('archives') class FileAdmin(AdminViewPermission): search_fields = ['title', 'slug'] raw_id_fields = ['user'] ordering = ('-date_available',) list_filter = ['date_available', 'published'] prepopulated_fields = {"slug": ["title"]} fieldsets = ( (_(u'Identification'), { 'fields': ('site', 'title', 'slug',)}), (_(u'Content'), { 'fields': ('description', 'archive', 'archive_link', 'tags')}), (_(u'Publication'), { 'classes': ('extrapretty'), 'fields': ('published', 'date_available',)}), ) def save_model(self, request, obj, form, change): if not change: obj.user = get_user_model().objects.get(pk=request.user.pk) obj.date_insert = timezone.now() obj.date_update = timezone.now() obj.save() admin.site.register(File, FileAdmin)
# coding: utf-8 from django.contrib import admin from django.contrib.auth import get_user_model from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from opps.core.admin import apply_opps_rules from opps.contrib.multisite.admin import AdminViewPermission from .models import File @apply_opps_rules('archives') class FileAdmin(AdminViewPermission): search_fields = ['title', 'slug'] raw_id_fields = ['user'] list_display = ['title', 'slug', 'download_link', 'published'] ordering = ('-date_available',) list_filter = ['date_available', 'published'] prepopulated_fields = {"slug": ["title"]} fieldsets = ( (_(u'Identification'), { 'fields': ('site', 'title', 'slug',)}), (_(u'Content'), { 'fields': ('description', 'archive', 'archive_link', 'tags')}), (_(u'Publication'), { 'classes': ('extrapretty'), 'fields': ('published', 'date_available',)}), ) def download_link(self, obj): html = '<a href="{}">{}</a>'.format(obj.archive.url, unicode(_(u'Download'))) return html download_link.short_description = _(u'download') download_link.allow_tags = True def save_model(self, request, obj, form, change): if not change: obj.user = get_user_model().objects.get(pk=request.user.pk) obj.date_insert = timezone.now() obj.date_update = timezone.now() obj.save() admin.site.register(File, FileAdmin)
Add list_display on FileAdmin and download_link def
Add list_display on FileAdmin and download_link def
Python
mit
YACOWS/opps,opps/opps,opps/opps,jeanmask/opps,williamroot/opps,jeanmask/opps,opps/opps,YACOWS/opps,YACOWS/opps,williamroot/opps,williamroot/opps,YACOWS/opps,jeanmask/opps,jeanmask/opps,williamroot/opps,opps/opps
<REPLACE_OLD> from <REPLACE_NEW> # coding: utf-8 from <REPLACE_END> <INSERT> list_display = ['title', 'slug', 'download_link', 'published'] <INSERT_END> <INSERT> def download_link(self, obj): html = '<a href="{}">{}</a>'.format(obj.archive.url, unicode(_(u'Download'))) return html download_link.short_description = _(u'download') download_link.allow_tags = True <INSERT_END> <|endoftext|> # coding: utf-8 from django.contrib import admin from django.contrib.auth import get_user_model from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from opps.core.admin import apply_opps_rules from opps.contrib.multisite.admin import AdminViewPermission from .models import File @apply_opps_rules('archives') class FileAdmin(AdminViewPermission): search_fields = ['title', 'slug'] raw_id_fields = ['user'] list_display = ['title', 'slug', 'download_link', 'published'] ordering = ('-date_available',) list_filter = ['date_available', 'published'] prepopulated_fields = {"slug": ["title"]} fieldsets = ( (_(u'Identification'), { 'fields': ('site', 'title', 'slug',)}), (_(u'Content'), { 'fields': ('description', 'archive', 'archive_link', 'tags')}), (_(u'Publication'), { 'classes': ('extrapretty'), 'fields': ('published', 'date_available',)}), ) def download_link(self, obj): html = '<a href="{}">{}</a>'.format(obj.archive.url, unicode(_(u'Download'))) return html download_link.short_description = _(u'download') download_link.allow_tags = True def save_model(self, request, obj, form, change): if not change: obj.user = get_user_model().objects.get(pk=request.user.pk) obj.date_insert = timezone.now() obj.date_update = timezone.now() obj.save() admin.site.register(File, FileAdmin)
Add list_display on FileAdmin and download_link def from django.contrib import admin from django.contrib.auth import get_user_model from django.utils import timezone from django.utils.translation import ugettext_lazy as _ from opps.core.admin import apply_opps_rules from opps.contrib.multisite.admin import AdminViewPermission from .models import File @apply_opps_rules('archives') class FileAdmin(AdminViewPermission): search_fields = ['title', 'slug'] raw_id_fields = ['user'] ordering = ('-date_available',) list_filter = ['date_available', 'published'] prepopulated_fields = {"slug": ["title"]} fieldsets = ( (_(u'Identification'), { 'fields': ('site', 'title', 'slug',)}), (_(u'Content'), { 'fields': ('description', 'archive', 'archive_link', 'tags')}), (_(u'Publication'), { 'classes': ('extrapretty'), 'fields': ('published', 'date_available',)}), ) def save_model(self, request, obj, form, change): if not change: obj.user = get_user_model().objects.get(pk=request.user.pk) obj.date_insert = timezone.now() obj.date_update = timezone.now() obj.save() admin.site.register(File, FileAdmin)
100e0a406551707e92826c2374f9c135613f6858
bin/index_to_contig.py
bin/index_to_contig.py
""" Given a tuple of index1, index2, correlation and a tuple of index, contig rewrite the correlation to be contig1, contig2, correlation """ import os import sys import argparse __author__ = 'Rob Edwards' if __name__ == "__main__": parser = argparse.ArgumentParser(description=' ') parser.add_argument('-i', help='index file', required=True) parser.add_argument('-c', help='correlation file', required=True) parser.add_argument('-v', help='verbose output', action='store_true') args = parser.parse_args() i2c = {} with open(args.i, 'r') as fin: for l in fin: if ',' in l: p = l.strip().split(',') elif "\t" in l: p = l.strip().split("\t") else: sys.stderr.write(f"Neither a comma or tab in {args.i}. What is separator?\n") sys.exit(1) with open(args.c, 'r') as fin: for l in fin: if ',' in l: p = l.strip().split(',') elif "\t" in l: p = l.strip().split("\t") else: sys.stderr.write(f"Neither a comma or tab in {args.c}. What is separator?\n") sys.exit(1) if p[0] not in i2c: sys.stderr.write(f"{p[0]} not found in the index file\n") sys.exit(1) if p[1] not in i2c: sys.stderr.write(f"{p[1]} not found in the index file\n") sys.exit(1) print(f"{i2c[p[0]]}\t{i2c[p[1]]}\t{p[2]}")
Convert an index to a contig
Convert an index to a contig
Python
mit
linsalrob/EdwardsLab,linsalrob/EdwardsLab,linsalrob/EdwardsLab,linsalrob/EdwardsLab,linsalrob/EdwardsLab
<REPLACE_OLD> <REPLACE_NEW> """ Given a tuple of index1, index2, correlation and a tuple of index, contig rewrite the correlation to be contig1, contig2, correlation """ import os import sys import argparse __author__ = 'Rob Edwards' if __name__ == "__main__": parser = argparse.ArgumentParser(description=' ') parser.add_argument('-i', help='index file', required=True) parser.add_argument('-c', help='correlation file', required=True) parser.add_argument('-v', help='verbose output', action='store_true') args = parser.parse_args() i2c = {} with open(args.i, 'r') as fin: for l in fin: if ',' in l: p = l.strip().split(',') elif "\t" in l: p = l.strip().split("\t") else: sys.stderr.write(f"Neither a comma or tab in {args.i}. What is separator?\n") sys.exit(1) with open(args.c, 'r') as fin: for l in fin: if ',' in l: p = l.strip().split(',') elif "\t" in l: p = l.strip().split("\t") else: sys.stderr.write(f"Neither a comma or tab in {args.c}. What is separator?\n") sys.exit(1) if p[0] not in i2c: sys.stderr.write(f"{p[0]} not found in the index file\n") sys.exit(1) if p[1] not in i2c: sys.stderr.write(f"{p[1]} not found in the index file\n") sys.exit(1) print(f"{i2c[p[0]]}\t{i2c[p[1]]}\t{p[2]}") <REPLACE_END> <|endoftext|> """ Given a tuple of index1, index2, correlation and a tuple of index, contig rewrite the correlation to be contig1, contig2, correlation """ import os import sys import argparse __author__ = 'Rob Edwards' if __name__ == "__main__": parser = argparse.ArgumentParser(description=' ') parser.add_argument('-i', help='index file', required=True) parser.add_argument('-c', help='correlation file', required=True) parser.add_argument('-v', help='verbose output', action='store_true') args = parser.parse_args() i2c = {} with open(args.i, 'r') as fin: for l in fin: if ',' in l: p = l.strip().split(',') elif "\t" in l: p = l.strip().split("\t") else: sys.stderr.write(f"Neither a comma or tab in {args.i}. What is separator?\n") sys.exit(1) with open(args.c, 'r') as fin: for l in fin: if ',' in l: p = l.strip().split(',') elif "\t" in l: p = l.strip().split("\t") else: sys.stderr.write(f"Neither a comma or tab in {args.c}. What is separator?\n") sys.exit(1) if p[0] not in i2c: sys.stderr.write(f"{p[0]} not found in the index file\n") sys.exit(1) if p[1] not in i2c: sys.stderr.write(f"{p[1]} not found in the index file\n") sys.exit(1) print(f"{i2c[p[0]]}\t{i2c[p[1]]}\t{p[2]}")
Convert an index to a contig
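Editor's note: reproduced as committed, but the first loop only splits each index line into p and never stores anything, so i2c stays empty and the script exits at the first correlation row. A minimal sketch of the presumably intended mapping step — the assignment line is an assumption about intent, and the in-memory lists stand in for the two input files:

    i2c = {}
    for l in ["0,contig_a", "1,contig_b"]:            # stand-in for the index file
        p = l.strip().split(',' if ',' in l else '\t')
        i2c[p[0]] = p[1]                              # the line the loop is missing

    for l in ["0,1,0.93"]:                            # stand-in for the correlations
        p = l.strip().split(',' if ',' in l else '\t')
        print("{}\t{}\t{}".format(i2c[p[0]], i2c[p[1]], p[2]))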
dfeb82974768e96efc4cba1388ac4bf098d3fbf4
UM/Qt/Bindings/MeshFileHandlerProxy.py
UM/Qt/Bindings/MeshFileHandlerProxy.py
from PyQt5.QtCore import QObject, pyqtSlot, pyqtProperty, pyqtSignal from UM.Application import Application from UM.Logger import Logger class MeshFileHandlerProxy(QObject): def __init__(self, parent = None): super().__init__(parent) self._mesh_handler = Application.getInstance().getMeshFileHandler() @pyqtProperty("QStringList", constant=True) def supportedReadFileTypes(self): fileTypes = [] fileTypes.append("All Supported Files (*{0})(*{0})".format(' *'.join(self._mesh_handler.getSupportedFileTypesRead()))) for ext in self._mesh_handler.getSupportedFileTypesRead(): fileTypes.append("{0} file (*.{0})(*.{0})".format(ext[1:])) fileTypes.append("All Files (*.*)(*)") return fileTypes @pyqtProperty("QStringList", constant=True) def supportedWriteFileTypes(self): return self._mesh_handler.getSupportedFileTypesWrite() def createMeshFileHandlerProxy(engine, scriptEngine): return MeshFileHandlerProxy()
from PyQt5.QtCore import QObject, pyqtSlot, pyqtProperty, pyqtSignal

from UM.Application import Application
from UM.Logger import Logger

class MeshFileHandlerProxy(QObject):
    def __init__(self, parent = None):
        super().__init__(parent)
        self._mesh_handler = Application.getInstance().getMeshFileHandler()

    @pyqtProperty("QStringList", constant=True)
    def supportedReadFileTypes(self):
        file_types = []
        all_types = []

        for ext, desc in self._mesh_handler.getSupportedFileTypesRead().items():
            file_types.append("{0} (*.{1})(*.{1})".format(desc, ext))
            all_types.append("*.{0}".format(ext))

        file_types.sort()
        file_types.insert(0, "All Supported Types ({0})({0})".format(" ".join(all_types)))
        file_types.append("All Files (*.*)(*)")

        return file_types

    @pyqtProperty("QStringList", constant=True)
    def supportedWriteFileTypes(self):
        #TODO: Implement
        return []

def createMeshFileHandlerProxy(engine, script_engine):
    return MeshFileHandlerProxy()
Update the supported file types list exposed to QML to use the new dict correctly
Update the supported file types list exposed to QML to use the new dict correctly
Python
agpl-3.0
onitake/Uranium,onitake/Uranium
<REPLACE_OLD> fileTypes <REPLACE_NEW> file_types <REPLACE_END>
<REPLACE_OLD> fileTypes.append("All Supported Files (*{0})(*{0})".format(' *'.join(self._mesh_handler.getSupportedFileTypesRead()))) <REPLACE_NEW> all_types = [] <REPLACE_END>
<REPLACE_OLD> ext <REPLACE_NEW> ext, desc <REPLACE_END>
<REPLACE_OLD> self._mesh_handler.getSupportedFileTypesRead(): <REPLACE_NEW> self._mesh_handler.getSupportedFileTypesRead().items(): <REPLACE_END>
<REPLACE_OLD> fileTypes.append("{0} file (*.{0})(*.{0})".format(ext[1:])) <REPLACE_NEW> file_types.append("{0} (*.{1})(*.{1})".format(desc, ext)) <REPLACE_END>
<REPLACE_OLD> fileTypes.append("All <REPLACE_NEW> all_types.append("*.{0}".format(ext))

        file_types.sort()
        file_types.insert(0, "All Supported Types ({0})({0})".format(" ".join(all_types)))
        file_types.append("All <REPLACE_END>
<REPLACE_OLD> fileTypes <REPLACE_NEW> file_types <REPLACE_END>
<INSERT> #TODO: Implement
 <INSERT_END>
<REPLACE_OLD> self._mesh_handler.getSupportedFileTypesWrite()

def <REPLACE_NEW> []

def <REPLACE_END>
<REPLACE_OLD> scriptEngine): <REPLACE_NEW> script_engine): <REPLACE_END> <|endoftext|> from PyQt5.QtCore import QObject, pyqtSlot, pyqtProperty, pyqtSignal

from UM.Application import Application
from UM.Logger import Logger

class MeshFileHandlerProxy(QObject):
    def __init__(self, parent = None):
        super().__init__(parent)
        self._mesh_handler = Application.getInstance().getMeshFileHandler()

    @pyqtProperty("QStringList", constant=True)
    def supportedReadFileTypes(self):
        file_types = []
        all_types = []

        for ext, desc in self._mesh_handler.getSupportedFileTypesRead().items():
            file_types.append("{0} (*.{1})(*.{1})".format(desc, ext))
            all_types.append("*.{0}".format(ext))

        file_types.sort()
        file_types.insert(0, "All Supported Types ({0})({0})".format(" ".join(all_types)))
        file_types.append("All Files (*.*)(*)")

        return file_types

    @pyqtProperty("QStringList", constant=True)
    def supportedWriteFileTypes(self):
        #TODO: Implement
        return []

def createMeshFileHandlerProxy(engine, script_engine):
    return MeshFileHandlerProxy()
Update the supported file types list exposed to QML to use the new dict correctly

from PyQt5.QtCore import QObject, pyqtSlot, pyqtProperty, pyqtSignal

from UM.Application import Application
from UM.Logger import Logger

class MeshFileHandlerProxy(QObject):
    def __init__(self, parent = None):
        super().__init__(parent)
        self._mesh_handler = Application.getInstance().getMeshFileHandler()

    @pyqtProperty("QStringList", constant=True)
    def supportedReadFileTypes(self):
        fileTypes = []
        fileTypes.append("All Supported Files (*{0})(*{0})".format(' *'.join(self._mesh_handler.getSupportedFileTypesRead())))

        for ext in self._mesh_handler.getSupportedFileTypesRead():
            fileTypes.append("{0} file (*.{0})(*.{0})".format(ext[1:]))

        fileTypes.append("All Files (*.*)(*)")

        return fileTypes

    @pyqtProperty("QStringList", constant=True)
    def supportedWriteFileTypes(self):
        return self._mesh_handler.getSupportedFileTypesWrite()

def createMeshFileHandlerProxy(engine, scriptEngine):
    return MeshFileHandlerProxy()
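The rewritten `supportedReadFileTypes` above builds Qt file-dialog name filters from an `{extension: description}` dict; the doubled `(...)(...)` suffix keeps the display text and the actual glob together. A rough standalone sketch of what it produces, with a hypothetical two-entry dict (not taken from the Uranium source):

    types = {"stl": "STL File", "obj": "Wavefront OBJ File"}  # illustrative only

    file_types = sorted("{0} (*.{1})(*.{1})".format(desc, ext)
                        for ext, desc in types.items())
    all_types = " ".join("*.{0}".format(ext) for ext in types)
    file_types.insert(0, "All Supported Types ({0})({0})".format(all_types))
    file_types.append("All Files (*.*)(*)")
    # e.g. ['All Supported Types (*.stl *.obj)(*.stl *.obj)',
    #       'STL File (*.stl)(*.stl)', 'Wavefront OBJ File (*.obj)(*.obj)',
    #       'All Files (*.*)(*)']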
cd97e8d8f8578abef246f3780b4c0ec10eebc8fa
tests/test_WListBox.py
tests/test_WListBox.py
import unittest

from picotui.widgets import WListBox
from picotui.defs import KEY_DOWN
from picotui.context import Context


class User:
    def __init__(self, name, age):
        self.name = name
        self.age = age


class UserListBox(WListBox):
    def __init__(self, width, height, items):
        super().__init__(w=width, h=height, items=items)

    def render_line(self, user):
        return user.name


class WListBoxTest(unittest.TestCase):
    def test_handle_key_with_custom_type_of_items(self):
        with Context():
            users = [User('admin', 30), User('root', 27)]
            widget = UserListBox(width=5, height=5, items=users)
            self.assertIsNone(widget.handle_key(KEY_DOWN))
Add test for rendering WListBox in case of non-str content.
tests: Add test for rendering WListBox in case of non-str content.
Python
mit
pfalcon/picotui
<INSERT> import unittest

from picotui.widgets import WListBox
from picotui.defs import KEY_DOWN
from picotui.context import Context


class User:
 <INSERT_END> <INSERT>     def __init__(self, name, age):
        self.name = name
        self.age = age


class UserListBox(WListBox):
    def __init__(self, width, height, items):
        super().__init__(w=width, h=height, items=items)

    def render_line(self, user):
        return user.name


class WListBoxTest(unittest.TestCase):
    def test_handle_key_with_custom_type_of_items(self):
        with Context():
            users = [User('admin', 30), User('root', 27)]
            widget = UserListBox(width=5, height=5, items=users)
            self.assertIsNone(widget.handle_key(KEY_DOWN))
 <INSERT_END> <|endoftext|> import unittest

from picotui.widgets import WListBox
from picotui.defs import KEY_DOWN
from picotui.context import Context


class User:
    def __init__(self, name, age):
        self.name = name
        self.age = age


class UserListBox(WListBox):
    def __init__(self, width, height, items):
        super().__init__(w=width, h=height, items=items)

    def render_line(self, user):
        return user.name


class WListBoxTest(unittest.TestCase):
    def test_handle_key_with_custom_type_of_items(self):
        with Context():
            users = [User('admin', 30), User('root', 27)]
            widget = UserListBox(width=5, height=5, items=users)
            self.assertIsNone(widget.handle_key(KEY_DOWN))
tests: Add test for rendering WListBox in case of non-str content.
35f267249955b4c09cbf4432e452dc987488454e
tests/test_session.py
tests/test_session.py
#!/usr/bin/env python
# coding=utf-8
try:
    import unittest.mock as mock
except ImportError:
    import mock
import unittest
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO

from nessusapi.session import Session
from nessusapi.session import Request


class SessionTestCase(unittest.TestCase):
    @mock.patch('nessusapi.session.random')
    @mock.patch('nessusapi.session.urlopen')
    @mock.patch('nessusapi.session.Request')
    def test_init(self, mock_request, mock_urlopen, mock_random):
        mock_random.randrange.return_value = 2811
        mock_urlopen.return_value = StringIO('<?xml version="1.0"?> <reply>'
                                             "<seq>2811</seq>"
                                             "<status>OK</status>"
                                             "<contents><token>ce65ea7</token>"
                                             "<user>"
                                             "<name>admin</name>"
                                             "<admin>TRUE</admin>"
                                             "</user></contents>"
                                             "</reply>")
        session = Session('user', 'pass', '192.0.2.3', '8980')
        mock_request.assert_called_once_with('https://192.0.2.3:8980/login',
                                             'login=user&password=pass&seq=2811')
        self.assertEqual(session.token, "ce65ea7")


if __name__ == '__main__':
    unittest.main()
Add basic tests for session
Add basic tests for session
Python
mit
sait-berkeley-infosec/pynessus-api
<REPLACE_OLD> <REPLACE_NEW> #!/usr/bin/env python
# coding=utf-8
try:
    import unittest.mock as mock
except ImportError:
    import mock
import unittest
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO

from nessusapi.session import Session
from nessusapi.session import Request


class SessionTestCase(unittest.TestCase):
    @mock.patch('nessusapi.session.random')
    @mock.patch('nessusapi.session.urlopen')
    @mock.patch('nessusapi.session.Request')
    def test_init(self, mock_request, mock_urlopen, mock_random):
        mock_random.randrange.return_value = 2811
        mock_urlopen.return_value = StringIO('<?xml version="1.0"?> <reply>'
                                             "<seq>2811</seq>"
                                             "<status>OK</status>"
                                             "<contents><token>ce65ea7</token>"
                                             "<user>"
                                             "<name>admin</name>"
                                             "<admin>TRUE</admin>"
                                             "</user></contents>"
                                             "</reply>")
        session = Session('user', 'pass', '192.0.2.3', '8980')
        mock_request.assert_called_once_with('https://192.0.2.3:8980/login',
                                             'login=user&password=pass&seq=2811')
        self.assertEqual(session.token, "ce65ea7")


if __name__ == '__main__':
    unittest.main()
 <REPLACE_END> <|endoftext|> #!/usr/bin/env python
# coding=utf-8
try:
    import unittest.mock as mock
except ImportError:
    import mock
import unittest
try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO

from nessusapi.session import Session
from nessusapi.session import Request


class SessionTestCase(unittest.TestCase):
    @mock.patch('nessusapi.session.random')
    @mock.patch('nessusapi.session.urlopen')
    @mock.patch('nessusapi.session.Request')
    def test_init(self, mock_request, mock_urlopen, mock_random):
        mock_random.randrange.return_value = 2811
        mock_urlopen.return_value = StringIO('<?xml version="1.0"?> <reply>'
                                             "<seq>2811</seq>"
                                             "<status>OK</status>"
                                             "<contents><token>ce65ea7</token>"
                                             "<user>"
                                             "<name>admin</name>"
                                             "<admin>TRUE</admin>"
                                             "</user></contents>"
                                             "</reply>")
        session = Session('user', 'pass', '192.0.2.3', '8980')
        mock_request.assert_called_once_with('https://192.0.2.3:8980/login',
                                             'login=user&password=pass&seq=2811')
        self.assertEqual(session.token, "ce65ea7")


if __name__ == '__main__':
    unittest.main()
Add basic tests for session
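One detail worth noting in the test above: stacked `@mock.patch` decorators are applied bottom-up, so the innermost patch (`Request`) arrives as the first mock argument. A minimal illustration of that ordering, using standard-library targets:

    import unittest.mock as mock

    @mock.patch('os.getcwd')   # outermost decorator -> last argument
    @mock.patch('os.getpid')   # innermost decorator -> first argument
    def demo(mock_getpid, mock_getcwd):
        return mock_getpid, mock_getcwd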
d8c1c7da47e2568cecc1fd6dff0fec7661b39125
turbosms/routers.py
turbosms/routers.py
class SMSRouter(object):

    app_label = 'sms'
    db_name = 'sms'

    def db_for_read(self, model, **hints):
        if model._meta.app_label == self.app_label:
            return self.db_name
        return None

    def db_for_write(self, model, **hints):
        if model._meta.app_label == self.app_label:
            return self.db_name
        return None

    def allow_relation(self, obj1, obj2, **hints):
        if obj1._meta.app_label == self.app_label or \
           obj2._meta.app_label == self.app_label:
            return False
        return None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        if app_label == self.app_label:
            return False
        return None
class TurboSMSRouter(object):

    app_label = 'turbosms'
    db_name = 'turbosms'

    def db_for_read(self, model, **hints):
        if model._meta.app_label == self.app_label:
            return self.db_name
        return None

    def db_for_write(self, model, **hints):
        if model._meta.app_label == self.app_label:
            return self.db_name
        return None

    def allow_relation(self, obj1, obj2, **hints):
        if obj1._meta.app_label == self.app_label or \
           obj2._meta.app_label == self.app_label:
            return False
        return None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        if app_label == self.app_label:
            return False
        return None
Fix bug in sms router.
Fix bug in sms router.
Python
isc
pmaigutyak/mp-turbosms
<REPLACE_OLD> SMSRouter(object): <REPLACE_NEW> TurboSMSRouter(object): <REPLACE_END>
<REPLACE_OLD> 'sms' <REPLACE_NEW> 'turbosms' <REPLACE_END>
<REPLACE_OLD> 'sms' <REPLACE_NEW> 'turbosms' <REPLACE_END> <|endoftext|> class TurboSMSRouter(object):

    app_label = 'turbosms'
    db_name = 'turbosms'

    def db_for_read(self, model, **hints):
        if model._meta.app_label == self.app_label:
            return self.db_name
        return None

    def db_for_write(self, model, **hints):
        if model._meta.app_label == self.app_label:
            return self.db_name
        return None

    def allow_relation(self, obj1, obj2, **hints):
        if obj1._meta.app_label == self.app_label or \
           obj2._meta.app_label == self.app_label:
            return False
        return None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        if app_label == self.app_label:
            return False
        return None
Fix bug in sms router.

class SMSRouter(object):

    app_label = 'sms'
    db_name = 'sms'

    def db_for_read(self, model, **hints):
        if model._meta.app_label == self.app_label:
            return self.db_name
        return None

    def db_for_write(self, model, **hints):
        if model._meta.app_label == self.app_label:
            return self.db_name
        return None

    def allow_relation(self, obj1, obj2, **hints):
        if obj1._meta.app_label == self.app_label or \
           obj2._meta.app_label == self.app_label:
            return False
        return None

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        if app_label == self.app_label:
            return False
        return None
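A router class like `TurboSMSRouter` only takes effect once it is listed in the project's `DATABASE_ROUTERS` setting and a matching database alias exists. A minimal sketch; the dotted path assumes the app layout above, and the engine/name values are placeholders:

    # settings.py (sketch)
    DATABASE_ROUTERS = ['turbosms.routers.TurboSMSRouter']

    DATABASES = {
        'default': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'default.sqlite3'},
        'turbosms': {'ENGINE': 'django.db.backends.sqlite3', 'NAME': 'turbosms.sqlite3'},
        # the 'turbosms' alias must match TurboSMSRouter.db_name
    }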
27112881583e53d790e66d31a2bb4d2a996ee405
python/sparknlp/functions.py
python/sparknlp/functions.py
from pyspark.sql.functions import udf
from pyspark.sql.types import *
from pyspark.sql import DataFrame

import sys
import sparknlp


def map_annotations(f, output_type: DataType):
    sys.modules['sparknlp.annotation'] = sparknlp  # Makes Annotation() pickle serializable in top-level
    return udf(
        lambda content: f(content),
        output_type
    )


def map_annotations_strict(f):
    from sparknlp.annotation import Annotation
    sys.modules['sparknlp.annotation'] = sparknlp  # Makes Annotation() pickle serializable in top-level
    return udf(
        lambda content: f(content),
        ArrayType(Annotation.dataType())
    )


def map_annotations_col(dataframe: DataFrame, f, column, output_column, output_type):
    dataframe.withColumn(output_column, map_annotations(f, output_type)(column))


def filter_by_annotations_col(dataframe, f, column):
    this_udf = udf(
        lambda content: f(content),
        BooleanType()
    )
    return dataframe.filter(this_udf(column))


def explode_annotations_col(dataframe: DataFrame, column, output_column):
    from pyspark.sql.functions import explode
    return dataframe.withColumn(output_column, explode(column))
from pyspark.sql.functions import udf
from pyspark.sql.types import *
from pyspark.sql import DataFrame
from sparknlp.annotation import Annotation

import sys
import sparknlp


def map_annotations(f, output_type: DataType):
    sys.modules['sparknlp.annotation'] = sparknlp  # Makes Annotation() pickle serializable in top-level
    return udf(
        lambda content: f(content),
        output_type
    )


def map_annotations_strict(f):
    sys.modules['sparknlp.annotation'] = sparknlp  # Makes Annotation() pickle serializable in top-level
    return udf(
        lambda content: f(content),
        ArrayType(Annotation.dataType())
    )


def map_annotations_col(dataframe: DataFrame, f, column, output_column, output_type):
    dataframe.withColumn(output_column, map_annotations(f, output_type)(column))


def filter_by_annotations_col(dataframe, f, column):
    this_udf = udf(
        lambda content: f(content),
        BooleanType()
    )
    return dataframe.filter(this_udf(column))


def explode_annotations_col(dataframe: DataFrame, column, output_column):
    from pyspark.sql.functions import explode
    return dataframe.withColumn(output_column, explode(column))
Move import to top level to avoid import fail after fist time on sys.modules hack
Move import to top level to avoid import fail after fist time on sys.modules hack
Python
apache-2.0
JohnSnowLabs/spark-nlp,JohnSnowLabs/spark-nlp,JohnSnowLabs/spark-nlp,JohnSnowLabs/spark-nlp
<REPLACE_OLD> DataFrame

import <REPLACE_NEW> DataFrame
from sparknlp.annotation import Annotation

import <REPLACE_END>
<DELETE>     from sparknlp.annotation import Annotation
 <DELETE_END> <|endoftext|> from pyspark.sql.functions import udf
from pyspark.sql.types import *
from pyspark.sql import DataFrame
from sparknlp.annotation import Annotation

import sys
import sparknlp


def map_annotations(f, output_type: DataType):
    sys.modules['sparknlp.annotation'] = sparknlp  # Makes Annotation() pickle serializable in top-level
    return udf(
        lambda content: f(content),
        output_type
    )


def map_annotations_strict(f):
    sys.modules['sparknlp.annotation'] = sparknlp  # Makes Annotation() pickle serializable in top-level
    return udf(
        lambda content: f(content),
        ArrayType(Annotation.dataType())
    )


def map_annotations_col(dataframe: DataFrame, f, column, output_column, output_type):
    dataframe.withColumn(output_column, map_annotations(f, output_type)(column))


def filter_by_annotations_col(dataframe, f, column):
    this_udf = udf(
        lambda content: f(content),
        BooleanType()
    )
    return dataframe.filter(this_udf(column))


def explode_annotations_col(dataframe: DataFrame, column, output_column):
    from pyspark.sql.functions import explode
    return dataframe.withColumn(output_column, explode(column))
Move import to top level to avoid import fail after fist time on sys.modules hack

from pyspark.sql.functions import udf
from pyspark.sql.types import *
from pyspark.sql import DataFrame

import sys
import sparknlp


def map_annotations(f, output_type: DataType):
    sys.modules['sparknlp.annotation'] = sparknlp  # Makes Annotation() pickle serializable in top-level
    return udf(
        lambda content: f(content),
        output_type
    )


def map_annotations_strict(f):
    from sparknlp.annotation import Annotation
    sys.modules['sparknlp.annotation'] = sparknlp  # Makes Annotation() pickle serializable in top-level
    return udf(
        lambda content: f(content),
        ArrayType(Annotation.dataType())
    )


def map_annotations_col(dataframe: DataFrame, f, column, output_column, output_type):
    dataframe.withColumn(output_column, map_annotations(f, output_type)(column))


def filter_by_annotations_col(dataframe, f, column):
    this_udf = udf(
        lambda content: f(content),
        BooleanType()
    )
    return dataframe.filter(this_udf(column))


def explode_annotations_col(dataframe: DataFrame, column, output_column):
    from pyspark.sql.functions import explode
    return dataframe.withColumn(output_column, explode(column))
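Independent of the import move, `map_annotations_col` in both versions discards its result: `DataFrame.withColumn` does not mutate the receiver, it returns a new DataFrame. A minimal corrected sketch:

    def map_annotations_col(dataframe, f, column, output_column, output_type):
        # withColumn is immutable-style, so the new DataFrame must be returned
        return dataframe.withColumn(output_column,
                                    map_annotations(f, output_type)(column))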
9f0b9b68a3c9dfaa64942e55fc97e435b8eb6f50
bayespy/nodes/__init__.py
bayespy/nodes/__init__.py
################################################################################
# Copyright (C) 2013 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################

"""
Package for nodes used to construct the model.

Stochastic nodes
================

.. currentmodule:: bayespy.nodes

Nodes for Gaussian variables:

.. autosummary::
   :toctree: generated/

   Gaussian
   GaussianARD

Nodes for precision and scale variables:

.. autosummary::
   :toctree: generated/

   Gamma
   Wishart
   Exponential

Nodes for modelling Gaussian and precision variables jointly (useful as prior
for Gaussian nodes):

.. autosummary::
   :toctree: generated/

   GaussianGammaISO
   GaussianGammaARD
   GaussianWishart

Nodes for discrete count variables:

.. autosummary::
   :toctree: generated/

   Bernoulli
   Binomial
   Categorical
   Multinomial
   Poisson

Nodes for probabilities:

.. autosummary::
   :toctree: generated/

   Beta
   Dirichlet

Nodes for dynamic variables:

.. autosummary::
   :toctree: generated/

   CategoricalMarkovChain
   GaussianMarkovChain
   SwitchingGaussianMarkovChain
   VaryingGaussianMarkovChain

Other stochastic nodes:

.. autosummary::
   :toctree: generated/

   Mixture

Deterministic nodes
===================

.. autosummary::
   :toctree: generated/

   Dot
   SumMultiply
   Gate
"""

# Currently, model construction and the inference network are not separated so
# the model is constructed using variational message passing nodes.
from bayespy.inference.vmp.nodes import *
################################################################################
# Copyright (C) 2013 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################

"""
Package for nodes used to construct the model.

Stochastic nodes
================

.. currentmodule:: bayespy.nodes

Nodes for Gaussian variables:

.. autosummary::
   :toctree: generated/

   Gaussian
   GaussianARD

Nodes for precision and scale variables:

.. autosummary::
   :toctree: generated/

   Gamma
   Wishart
   Exponential

Nodes for modelling Gaussian and precision variables jointly (useful as prior
for Gaussian nodes):

.. autosummary::
   :toctree: generated/

   GaussianGammaISO
   GaussianGammaARD
   GaussianWishart

Nodes for discrete count variables:

.. autosummary::
   :toctree: generated/

   Bernoulli
   Binomial
   Categorical
   Multinomial
   Poisson

Nodes for probabilities:

.. autosummary::
   :toctree: generated/

   Beta
   Dirichlet

Nodes for dynamic variables:

.. autosummary::
   :toctree: generated/

   CategoricalMarkovChain
   GaussianMarkovChain
   SwitchingGaussianMarkovChain
   VaryingGaussianMarkovChain

Other stochastic nodes:

.. autosummary::
   :toctree: generated/

   Mixture

Deterministic nodes
===================

.. autosummary::
   :toctree: generated/

   Dot
   SumMultiply
   Add
   Gate
"""

# Currently, model construction and the inference network are not separated so
# the model is constructed using variational message passing nodes.
from bayespy.inference.vmp.nodes import *
Include Add node in user API documentation
DOC: Include Add node in user API documentation
Python
mit
bayespy/bayespy,jluttine/bayespy
<INSERT>    Add
 <INSERT_END> <|endoftext|> ################################################################################
# Copyright (C) 2013 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################

"""
Package for nodes used to construct the model.

Stochastic nodes
================

.. currentmodule:: bayespy.nodes

Nodes for Gaussian variables:

.. autosummary::
   :toctree: generated/

   Gaussian
   GaussianARD

Nodes for precision and scale variables:

.. autosummary::
   :toctree: generated/

   Gamma
   Wishart
   Exponential

Nodes for modelling Gaussian and precision variables jointly (useful as prior
for Gaussian nodes):

.. autosummary::
   :toctree: generated/

   GaussianGammaISO
   GaussianGammaARD
   GaussianWishart

Nodes for discrete count variables:

.. autosummary::
   :toctree: generated/

   Bernoulli
   Binomial
   Categorical
   Multinomial
   Poisson

Nodes for probabilities:

.. autosummary::
   :toctree: generated/

   Beta
   Dirichlet

Nodes for dynamic variables:

.. autosummary::
   :toctree: generated/

   CategoricalMarkovChain
   GaussianMarkovChain
   SwitchingGaussianMarkovChain
   VaryingGaussianMarkovChain

Other stochastic nodes:

.. autosummary::
   :toctree: generated/

   Mixture

Deterministic nodes
===================

.. autosummary::
   :toctree: generated/

   Dot
   SumMultiply
   Add
   Gate
"""

# Currently, model construction and the inference network are not separated so
# the model is constructed using variational message passing nodes.
from bayespy.inference.vmp.nodes import *
DOC: Include Add node in user API documentation

################################################################################
# Copyright (C) 2013 Jaakko Luttinen
#
# This file is licensed under the MIT License.
################################################################################

"""
Package for nodes used to construct the model.

Stochastic nodes
================

.. currentmodule:: bayespy.nodes

Nodes for Gaussian variables:

.. autosummary::
   :toctree: generated/

   Gaussian
   GaussianARD

Nodes for precision and scale variables:

.. autosummary::
   :toctree: generated/

   Gamma
   Wishart
   Exponential

Nodes for modelling Gaussian and precision variables jointly (useful as prior
for Gaussian nodes):

.. autosummary::
   :toctree: generated/

   GaussianGammaISO
   GaussianGammaARD
   GaussianWishart

Nodes for discrete count variables:

.. autosummary::
   :toctree: generated/

   Bernoulli
   Binomial
   Categorical
   Multinomial
   Poisson

Nodes for probabilities:

.. autosummary::
   :toctree: generated/

   Beta
   Dirichlet

Nodes for dynamic variables:

.. autosummary::
   :toctree: generated/

   CategoricalMarkovChain
   GaussianMarkovChain
   SwitchingGaussianMarkovChain
   VaryingGaussianMarkovChain

Other stochastic nodes:

.. autosummary::
   :toctree: generated/

   Mixture

Deterministic nodes
===================

.. autosummary::
   :toctree: generated/

   Dot
   SumMultiply
   Gate
"""

# Currently, model construction and the inference network are not separated so
# the model is constructed using variational message passing nodes.
from bayespy.inference.vmp.nodes import *
67a0f6c0aa8015f5dea7dcc8c7bc6cae809016f5
setup.py
setup.py
import os
from setuptools import setup


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


setup(name='windpowerlib',
      version='0.1.2dev',
      description='Creating time series of wind power plants.',
      url='http://github.com/wind-python/windpowerlib',
      author='oemof developer group',
      author_email='windpowerlib@rl-institut.de',
      license=None,
      packages=['windpowerlib'],
      package_data={
          'windpowerlib': [os.path.join('data', '*.csv')]},
      long_description=read('README.rst'),
      zip_safe=False,
      install_requires=['pandas >= 0.19.1',
                        'requests'],
      extras_require={
          'dev': ['pytest', 'jupyter', 'sphinx_rtd_theme', 'nbformat']})
import os
from setuptools import setup


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


setup(name='windpowerlib',
      version='0.1.2dev',
      description='Creating time series of wind power plants.',
      url='http://github.com/wind-python/windpowerlib',
      author='oemof developer group',
      author_email='windpowerlib@rl-institut.de',
      license=None,
      packages=['windpowerlib'],
      package_data={
          'windpowerlib': [os.path.join('data', '*.csv')]},
      long_description=read('README.rst'),
      zip_safe=False,
      install_requires=['pandas >= 0.19.1, < 0.25.0',
                        'requests'],
      extras_require={
          'dev': ['pytest', 'jupyter', 'sphinx_rtd_theme', 'nbformat']})
Add upper limit of pandas
Add upper limit of pandas
Python
mit
wind-python/windpowerlib
<REPLACE_OLD> 0.19.1', <REPLACE_NEW> 0.19.1, < 0.25.0', <REPLACE_END> <|endoftext|> import os
from setuptools import setup


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


setup(name='windpowerlib',
      version='0.1.2dev',
      description='Creating time series of wind power plants.',
      url='http://github.com/wind-python/windpowerlib',
      author='oemof developer group',
      author_email='windpowerlib@rl-institut.de',
      license=None,
      packages=['windpowerlib'],
      package_data={
          'windpowerlib': [os.path.join('data', '*.csv')]},
      long_description=read('README.rst'),
      zip_safe=False,
      install_requires=['pandas >= 0.19.1, < 0.25.0',
                        'requests'],
      extras_require={
          'dev': ['pytest', 'jupyter', 'sphinx_rtd_theme', 'nbformat']})
Add upper limit of pandas

import os
from setuptools import setup


def read(fname):
    return open(os.path.join(os.path.dirname(__file__), fname)).read()


setup(name='windpowerlib',
      version='0.1.2dev',
      description='Creating time series of wind power plants.',
      url='http://github.com/wind-python/windpowerlib',
      author='oemof developer group',
      author_email='windpowerlib@rl-institut.de',
      license=None,
      packages=['windpowerlib'],
      package_data={
          'windpowerlib': [os.path.join('data', '*.csv')]},
      long_description=read('README.rst'),
      zip_safe=False,
      install_requires=['pandas >= 0.19.1',
                        'requests'],
      extras_require={
          'dev': ['pytest', 'jupyter', 'sphinx_rtd_theme', 'nbformat']})
21af3dbed471c9f6c860db4d2ae84d1e0fed4077
demo/option_example.py
demo/option_example.py
from sparts.tasks.periodic import PeriodicTask
from sparts.vservice import VService
from sparts.sparts import option
import socket


class HostCheckTask(PeriodicTask):
    INTERVAL=5
    check_name = option(default=socket.getfqdn(), type=str,
                        help='Name to check [%(default)s]')

    def execute(self, *args, **kwargs):
        self.logger.info("LOOKUP %s => %s", self.check_name,
                         socket.gethostbyname(self.check_name))


class DNSChecker(VService):
    TASKS=[HostCheckTask]


if __name__ == '__main__':
    DNSChecker.initFromCLI()
from sparts.tasks.periodic import PeriodicTask
from sparts.vservice import VService
from sparts.sparts import option, samples, SampleType
import socket


class HostCheckTask(PeriodicTask):
    INTERVAL=5
    check_name = option(default=socket.getfqdn(), type=str,
                        help='Name to check [%(default)s]')

    def execute(self, *args, **kwargs):
        self.logger.info("LOOKUP %s => %s", self.check_name,
                         socket.gethostbyname(self.check_name))


class PrintCountersTask(PeriodicTask):
    INTERVAL=6
    execute_duration = samples(windows=[60],
                               types=[SampleType.MAX, SampleType.MIN])

    def execute(self, *args, **kwargs):
        hostcheck = self.service.requireTask(HostCheckTask)
        self.logger.info("hostcheck.duration :: %s",
                         hostcheck.execute_duration.getCounters())
        self.logger.info("this.duration :: %s",
                         self.execute_duration.getCounters())


class DNSChecker(VService):
    TASKS=[HostCheckTask, PrintCountersTask]


if __name__ == '__main__':
    DNSChecker.initFromCLI()
Update option example to highlight samples as well
Update option example to highlight samples as well

And overriding samples
Python
bsd-3-clause
facebook/sparts,fmoo/sparts,bboozzoo/sparts,djipko/sparts,pshuff/sparts,pshuff/sparts,fmoo/sparts,facebook/sparts,djipko/sparts,bboozzoo/sparts
<REPLACE_OLD> option
import <REPLACE_NEW> option, samples, SampleType
import <REPLACE_END>
<REPLACE_OLD> socket.gethostbyname(self.check_name))


class <REPLACE_NEW> socket.gethostbyname(self.check_name))


class PrintCountersTask(PeriodicTask):
    INTERVAL=6
    execute_duration = samples(windows=[60],
                               types=[SampleType.MAX, SampleType.MIN])

    def execute(self, *args, **kwargs):
        hostcheck = self.service.requireTask(HostCheckTask)
        self.logger.info("hostcheck.duration :: %s",
                         hostcheck.execute_duration.getCounters())
        self.logger.info("this.duration :: %s",
                         self.execute_duration.getCounters())


class <REPLACE_END>
<REPLACE_OLD> TASKS=[HostCheckTask]


if <REPLACE_NEW> TASKS=[HostCheckTask, PrintCountersTask]


if <REPLACE_END> <|endoftext|> from sparts.tasks.periodic import PeriodicTask
from sparts.vservice import VService
from sparts.sparts import option, samples, SampleType
import socket


class HostCheckTask(PeriodicTask):
    INTERVAL=5
    check_name = option(default=socket.getfqdn(), type=str,
                        help='Name to check [%(default)s]')

    def execute(self, *args, **kwargs):
        self.logger.info("LOOKUP %s => %s", self.check_name,
                         socket.gethostbyname(self.check_name))


class PrintCountersTask(PeriodicTask):
    INTERVAL=6
    execute_duration = samples(windows=[60],
                               types=[SampleType.MAX, SampleType.MIN])

    def execute(self, *args, **kwargs):
        hostcheck = self.service.requireTask(HostCheckTask)
        self.logger.info("hostcheck.duration :: %s",
                         hostcheck.execute_duration.getCounters())
        self.logger.info("this.duration :: %s",
                         self.execute_duration.getCounters())


class DNSChecker(VService):
    TASKS=[HostCheckTask, PrintCountersTask]


if __name__ == '__main__':
    DNSChecker.initFromCLI()
Update option example to highlight samples as well

from sparts.tasks.periodic import PeriodicTask
from sparts.vservice import VService
from sparts.sparts import option
import socket


class HostCheckTask(PeriodicTask):
    INTERVAL=5
    check_name = option(default=socket.getfqdn(), type=str,
                        help='Name to check [%(default)s]')

    def execute(self, *args, **kwargs):
        self.logger.info("LOOKUP %s => %s", self.check_name,
                         socket.gethostbyname(self.check_name))


class DNSChecker(VService):
    TASKS=[HostCheckTask]


if __name__ == '__main__':
    DNSChecker.initFromCLI()
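The `samples(...)` declaration above tracks the duration of each `execute` call over the configured windows, and `getCounters()` exposes one entry per window/type pair. A rough sketch of the logged shape -- the key naming is an assumption for illustration, not taken from the sparts source:

    # hypothetical shape of hostcheck.execute_duration.getCounters()
    counters = {'execute_duration.max.60': 0.0123,
                'execute_duration.min.60': 0.0098}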
b67b677d4092e5bec445649321b142d31cfc0fb6
linkatos/activities.py
linkatos/activities.py
from . import parser
from . import printer
from . import firebase as fb
from . import reaction as react


def is_empty(events):
    return ((events is None) or (len(events) == 0))


def is_url(url_cache):
    return url_cache is not None


def is_reaction(index):
    return index is not None


def event_consumer(expecting_url, url_cache_list, slack_client,
                   fb_credentials, firebase):
    # Read slack events
    events = slack_client.rtm_read()

    if is_empty(events):
        return (expecting_url, url_cache)

    for event in events:
        print(event)

        if expecting_url and event['type'] == 'message':
            new_url_cache = parser.parse_url_message(event)
            url_cache_list.append(new_url_cache)

            if is_url(new_url_cache):
                printer.ask_confirmation(new_url_cache, slack_client)

        if event['type'] == 'reaction_added':
            reaction = parser.parse_reaction_added(event)

            index = react.is_confirmation(reaction['reaction'],
                                          url_cache_list,
                                          reaction['to_id']):

            if is_reaction(index):
                react.handle(reaction['reaction'],
                             url_cache_list[index]['url'],
                             fb_credentials, firebase)
                remove_url_from(url_cache_list)

    return (expecting_url, url_cache)
from . import parser
from . import printer
from . import firebase as fb
from . import reaction as react


def is_empty(events):
    return ((events is None) or (len(events) == 0))


def is_url(url_cache):
    return url_cache is not None


def is_reaction(index):
    return index is not None


def remove_url_from(url_cache_list, index):
    url_cache_list.pop(index)


def event_consumer(expecting_url, url_cache_list, slack_client,
                   fb_credentials, firebase):
    # Read slack events
    events = slack_client.rtm_read()

    if is_empty(events):
        return (expecting_url, url_cache)

    for event in events:
        print(event)

        if expecting_url and event['type'] == 'message':
            new_url_cache = parser.parse_url_message(event)
            url_cache_list.append(new_url_cache)

            if is_url(new_url_cache):
                printer.ask_confirmation(new_url_cache, slack_client)

        if event['type'] == 'reaction_added':
            reaction = parser.parse_reaction_added(event)

            index = react.is_confirmation(reaction['reaction'],
                                          url_cache_list,
                                          reaction['to_id']):

            if is_reaction(index):
                react.handle(reaction['reaction'],
                             url_cache_list[index]['url'],
                             fb_credentials, firebase)
                remove_url_from(url_cache_list, index)

    return (expecting_url, url_cache_list)
Add function to remove reacted to urls
feat: Add function to remove reacted to urls
Python
mit
iwi/linkatos,iwi/linkatos
<INSERT> remove_url_from(url_cache_list, index):
    url_cache_list.pop(index)


def <INSERT_END>
<REPLACE_OLD> remove_url_from(url_cache_list) <REPLACE_NEW> remove_url_from(url_cache_list, index) <REPLACE_END>
<REPLACE_OLD> url_cache) <REPLACE_NEW> url_cache_list) <REPLACE_END> <|endoftext|> from . import parser
from . import printer
from . import firebase as fb
from . import reaction as react


def is_empty(events):
    return ((events is None) or (len(events) == 0))


def is_url(url_cache):
    return url_cache is not None


def is_reaction(index):
    return index is not None


def remove_url_from(url_cache_list, index):
    url_cache_list.pop(index)


def event_consumer(expecting_url, url_cache_list, slack_client,
                   fb_credentials, firebase):
    # Read slack events
    events = slack_client.rtm_read()

    if is_empty(events):
        return (expecting_url, url_cache)

    for event in events:
        print(event)

        if expecting_url and event['type'] == 'message':
            new_url_cache = parser.parse_url_message(event)
            url_cache_list.append(new_url_cache)

            if is_url(new_url_cache):
                printer.ask_confirmation(new_url_cache, slack_client)

        if event['type'] == 'reaction_added':
            reaction = parser.parse_reaction_added(event)

            index = react.is_confirmation(reaction['reaction'],
                                          url_cache_list,
                                          reaction['to_id']):

            if is_reaction(index):
                react.handle(reaction['reaction'],
                             url_cache_list[index]['url'],
                             fb_credentials, firebase)
                remove_url_from(url_cache_list, index)

    return (expecting_url, url_cache_list)
feat: Add function to remove reacted to urls

from . import parser
from . import printer
from . import firebase as fb
from . import reaction as react


def is_empty(events):
    return ((events is None) or (len(events) == 0))


def is_url(url_cache):
    return url_cache is not None


def is_reaction(index):
    return index is not None


def event_consumer(expecting_url, url_cache_list, slack_client,
                   fb_credentials, firebase):
    # Read slack events
    events = slack_client.rtm_read()

    if is_empty(events):
        return (expecting_url, url_cache)

    for event in events:
        print(event)

        if expecting_url and event['type'] == 'message':
            new_url_cache = parser.parse_url_message(event)
            url_cache_list.append(new_url_cache)

            if is_url(new_url_cache):
                printer.ask_confirmation(new_url_cache, slack_client)

        if event['type'] == 'reaction_added':
            reaction = parser.parse_reaction_added(event)

            index = react.is_confirmation(reaction['reaction'],
                                          url_cache_list,
                                          reaction['to_id']):

            if is_reaction(index):
                react.handle(reaction['reaction'],
                             url_cache_list[index]['url'],
                             fb_credentials, firebase)
                remove_url_from(url_cache_list)

    return (expecting_url, url_cache)
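Two problems survive this commit: the trailing colon on the `index = react.is_confirmation(...)` assignment is a syntax error in both versions, and the early return still references the old `url_cache` name, which is undefined in `event_consumer`. A trimmed, corrected sketch of the relevant fragment (assuming the surrounding module as above):

    def event_consumer(expecting_url, url_cache_list, slack_client,
                       fb_credentials, firebase):
        events = slack_client.rtm_read()
        if is_empty(events):
            return (expecting_url, url_cache_list)  # was: url_cache (undefined)
        for event in events:
            if event['type'] == 'reaction_added':
                reaction = parser.parse_reaction_added(event)
                index = react.is_confirmation(reaction['reaction'],
                                              url_cache_list,
                                              reaction['to_id'])  # no trailing colon
                if is_reaction(index):
                    react.handle(reaction['reaction'],
                                 url_cache_list[index]['url'],
                                 fb_credentials, firebase)
                    remove_url_from(url_cache_list, index)
        return (expecting_url, url_cache_list)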
d8bcdced24e9787711cbf5787011d88a086d4956
seleniumbase/console_scripts/logo_helper.py
seleniumbase/console_scripts/logo_helper.py
""" SeleniumBase Logo Processing (for the console scripts interface) Logo generated from: http://www.patorjk.com/software/taag/#p=display&f=Slant&t=SeleniumBase """ import colorama r''' ______ __ _ ____ / ____/__ / /__ ____ (_)_ ______ ___ / __ `____ ________ \__ \/ _ \/ / _ \/ __ \/ / / / / __ `__ \/ /_/ / __ `/ ___/ _ \ ___/ / __/ / __/ / / / / /_/ / / / / / / /_) / /_/ (__ ) __/ /____/\___/_/\___/_/ /_/_/\__,_/_/ /_/ /_/_____/\__,_/____/\___/ ''' def get_seleniumbase_logo(): colorama.init(autoreset=True) c1 = colorama.Fore.BLUE + colorama.Back.CYAN c2 = colorama.Fore.CYAN + colorama.Back.BLUE cr = colorama.Style.RESET_ALL sb = c1 sb += "\n" sb += " ______ __ _ " sb += c2 sb += "____ " sb += c1 sb += "\n" sb += c1 sb += " / ____/__ / /__ ____ (_)_ ______ ___ " sb += c2 sb += "/ __ `____ ________ " sb += c1 sb += "\n" sb += c1 sb += " \\__ \\/ _ \\/ / _ \\/ __ \\/ / / / / __ `__ \\" sb += c2 sb += "/ /_/ / __ `/ ___/ _ \\" sb += c1 sb += "\n" sb += c1 sb += " ___/ / __/ / __/ / / / / /_/ / / / / / " sb += c2 sb += "/ /_) / /_/ (__ ) __/" sb += c1 sb += "\n" sb += c1 sb += "/____/\\___/_/\\___/_/ /_/_/\\__,_/_/ /_/ /_" sb += c2 sb += "/_____/\\__,_/____/\\___/ " sb += c1 sb += "\n" sb += c1 sb += " " sb += c2 sb += " " sb += c1 sb += cr return sb
Create a SeleniumBase logo for console interfaces
Create a SeleniumBase logo for console interfaces
Python
mit
mdmintz/seleniumspot,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,seleniumbase/SeleniumBase,mdmintz/seleniumspot,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase,mdmintz/SeleniumBase
<REPLACE_OLD> <REPLACE_NEW> """
SeleniumBase Logo Processing  (for the console scripts interface)
Logo generated from:
http://www.patorjk.com/software/taag/#p=display&f=Slant&t=SeleniumBase
"""

import colorama

r'''
   ______     __           _                 ____
  / ____/__  / /__  ____  (_)_  ______ ___  / __ `____  ________
  \__ \/ _ \/ / _ \/ __ \/ / / / / __ `__ \/ /_/ / __ `/ ___/ _ \
 ___/ /  __/ /  __/ / / / / /_/ / / / / / / /_) / /_/ (__  )  __/
/____/\___/_/\___/_/ /_/_/\__,_/_/ /_/ /_/_____/\__,_/____/\___/
'''


def get_seleniumbase_logo():
    colorama.init(autoreset=True)
    c1 = colorama.Fore.BLUE + colorama.Back.CYAN
    c2 = colorama.Fore.CYAN + colorama.Back.BLUE
    cr = colorama.Style.RESET_ALL
    sb = c1
    sb += "\n"
    sb += "   ______     __           _              "
    sb += c2
    sb += "    ____                 "
    sb += c1
    sb += "\n"
    sb += c1
    sb += "  / ____/__  / /__  ____  (_)_  ______ ___  "
    sb += c2
    sb += "/ __ `____  ________ "
    sb += c1
    sb += "\n"
    sb += c1
    sb += "  \\__ \\/ _ \\/ / _ \\/ __ \\/ / / / / __ `__ \\"
    sb += c2
    sb += "/ /_/ / __ `/ ___/ _ \\"
    sb += c1
    sb += "\n"
    sb += c1
    sb += " ___/ /  __/ /  __/ / / / / /_/ / / / / / "
    sb += c2
    sb += "/ /_) / /_/ (__  )  __/"
    sb += c1
    sb += "\n"
    sb += c1
    sb += "/____/\\___/_/\\___/_/ /_/_/\\__,_/_/ /_/ /_"
    sb += c2
    sb += "/_____/\\__,_/____/\\___/ "
    sb += c1
    sb += "\n"
    sb += c1
    sb += "                                          "
    sb += c2
    sb += "                        "
    sb += c1
    sb += cr
    return sb
 <REPLACE_END> <|endoftext|> """
SeleniumBase Logo Processing  (for the console scripts interface)
Logo generated from:
http://www.patorjk.com/software/taag/#p=display&f=Slant&t=SeleniumBase
"""

import colorama

r'''
   ______     __           _                 ____
  / ____/__  / /__  ____  (_)_  ______ ___  / __ `____  ________
  \__ \/ _ \/ / _ \/ __ \/ / / / / __ `__ \/ /_/ / __ `/ ___/ _ \
 ___/ /  __/ /  __/ / / / / /_/ / / / / / / /_) / /_/ (__  )  __/
/____/\___/_/\___/_/ /_/_/\__,_/_/ /_/ /_/_____/\__,_/____/\___/
'''


def get_seleniumbase_logo():
    colorama.init(autoreset=True)
    c1 = colorama.Fore.BLUE + colorama.Back.CYAN
    c2 = colorama.Fore.CYAN + colorama.Back.BLUE
    cr = colorama.Style.RESET_ALL
    sb = c1
    sb += "\n"
    sb += "   ______     __           _              "
    sb += c2
    sb += "    ____                 "
    sb += c1
    sb += "\n"
    sb += c1
    sb += "  / ____/__  / /__  ____  (_)_  ______ ___  "
    sb += c2
    sb += "/ __ `____  ________ "
    sb += c1
    sb += "\n"
    sb += c1
    sb += "  \\__ \\/ _ \\/ / _ \\/ __ \\/ / / / / __ `__ \\"
    sb += c2
    sb += "/ /_/ / __ `/ ___/ _ \\"
    sb += c1
    sb += "\n"
    sb += c1
    sb += " ___/ /  __/ /  __/ / / / / /_/ / / / / / "
    sb += c2
    sb += "/ /_) / /_/ (__  )  __/"
    sb += c1
    sb += "\n"
    sb += c1
    sb += "/____/\\___/_/\\___/_/ /_/_/\\__,_/_/ /_/ /_"
    sb += c2
    sb += "/_____/\\__,_/____/\\___/ "
    sb += c1
    sb += "\n"
    sb += c1
    sb += "                                          "
    sb += c2
    sb += "                        "
    sb += c1
    sb += cr
    return sb
Create a SeleniumBase logo for console interfaces
91165642fb40165987ab0ff734959f88712e514c
humblemedia/resources/migrations/0001_initial.py
humblemedia/resources/migrations/0001_initial.py
# encoding: utf8
from __future__ import unicode_literals

from django.db import models, migrations
from django.conf import settings


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Resource',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('title', models.CharField(max_length=64)),
                ('description', models.TextField()),
                ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, to_field='id')),
                ('file', models.FileField(upload_to='resources/')),
                ('min_price', models.PositiveIntegerField(default=1, blank=True)),
                ('is_published', models.BooleanField(default=False)),
                ('is_verified', models.BooleanField(default=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
# encoding: utf8
from __future__ import unicode_literals

from django.db import models, migrations
from django.conf import settings


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('auth', '__first__'),
        ('contenttypes', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='Resource',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('title', models.CharField(max_length=64)),
                ('description', models.TextField()),
                ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, to_field='id')),
                ('file', models.FileField(upload_to='resources/')),
                ('min_price', models.PositiveIntegerField(default=1, blank=True)),
                ('is_published', models.BooleanField(default=False)),
                ('is_verified', models.BooleanField(default=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
Add dependencies to contenttypes to the migration
Add dependencies to contenttypes to the migration
Python
mit
vladimiroff/humble-media,vladimiroff/humble-media
<INSERT>         ('auth', '__first__'),
        ('contenttypes', '__first__'),
 <INSERT_END> <|endoftext|> # encoding: utf8
from __future__ import unicode_literals

from django.db import models, migrations
from django.conf import settings


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('auth', '__first__'),
        ('contenttypes', '__first__'),
    ]

    operations = [
        migrations.CreateModel(
            name='Resource',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('title', models.CharField(max_length=64)),
                ('description', models.TextField()),
                ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, to_field='id')),
                ('file', models.FileField(upload_to='resources/')),
                ('min_price', models.PositiveIntegerField(default=1, blank=True)),
                ('is_published', models.BooleanField(default=False)),
                ('is_verified', models.BooleanField(default=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
Add dependencies to contenttypes to the migration

# encoding: utf8
from __future__ import unicode_literals

from django.db import models, migrations
from django.conf import settings


class Migration(migrations.Migration):

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        migrations.CreateModel(
            name='Resource',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True, auto_created=True, verbose_name='ID')),
                ('title', models.CharField(max_length=64)),
                ('description', models.TextField()),
                ('author', models.ForeignKey(to=settings.AUTH_USER_MODEL, to_field='id')),
                ('file', models.FileField(upload_to='resources/')),
                ('min_price', models.PositiveIntegerField(default=1, blank=True)),
                ('is_published', models.BooleanField(default=False)),
                ('is_verified', models.BooleanField(default=True)),
            ],
            options={
            },
            bases=(models.Model,),
        ),
    ]
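The `dependencies` list above encodes ordering constraints for Django's migration graph: `swappable_dependency` points at whatever app provides `AUTH_USER_MODEL`, and `'__first__'` forces an app's initial migration to run before this one. A minimal sketch of the same pattern in an arbitrary migration (the app choice is illustrative):

    from django.conf import settings
    from django.db import migrations

    class Migration(migrations.Migration):
        dependencies = [
            # whichever app AUTH_USER_MODEL points at must migrate first
            migrations.swappable_dependency(settings.AUTH_USER_MODEL),
            # contenttypes' initial migration must also run before this one
            ('contenttypes', '__first__'),
        ]
        operations = []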
9145be89c1a5ba1a2c47bfeef571d40b9eb060bc
test/integration/test_user_args.py
test/integration/test_user_args.py
from . import *


class TestUserArgs(IntegrationTest):
    def __init__(self, *args, **kwargs):
        IntegrationTest.__init__(
            self, os.path.join(examples_dir, '10_custom_args'),
            configure=False, *args, **kwargs
        )

    def test_build_default(self):
        self.configure()
        self.build(executable('simple'))
        self.assertOutput([executable('simple')], 'hello from unnamed!\n')

    def test_build_with_args(self):
        self.configure(extra_args=['--name=foo'])
        self.build(executable('simple'))
        self.assertOutput([executable('simple')], 'hello from foo!\n')
from six import assertRegex

from . import *


class TestUserArgs(IntegrationTest):
    def __init__(self, *args, **kwargs):
        IntegrationTest.__init__(
            self, os.path.join(examples_dir, '10_custom_args'),
            configure=False, *args, **kwargs
        )

    def test_build_default(self):
        self.configure()
        self.build(executable('simple'))
        self.assertOutput([executable('simple')], 'hello from unnamed!\n')

    def test_build_with_args(self):
        self.configure(extra_args=['--name=foo'])
        self.build(executable('simple'))
        self.assertOutput([executable('simple')], 'hello from foo!\n')

    def test_help(self):
        os.chdir(self.srcdir)
        output = self.assertPopen(
            ['bfg9000', 'help', 'configure']
        )
        assertRegex(self, output, r'(?m)^project-defined arguments:$')
        assertRegex(self, output,
                    r'(?m)^\s+--name NAME\s+set the name to greet$')

    def test_help_explicit_srcdir(self):
        output = self.assertPopen(
            ['bfg9000', 'help', 'configure', self.srcdir]
        )
        assertRegex(self, output, r'(?m)^project-defined arguments:$')
        assertRegex(self, output,
                    r'(?m)^\s+--name NAME\s+set the name to greet$')
Add integration test for user-args help
Add integration test for user-args help
Python
bsd-3-clause
jimporter/bfg9000,jimporter/bfg9000,jimporter/bfg9000,jimporter/bfg9000
<INSERT> six import assertRegex

from <INSERT_END>
<REPLACE_OLD> foo!\n') <REPLACE_NEW> foo!\n')

    def test_help(self):
        os.chdir(self.srcdir)
        output = self.assertPopen(
            ['bfg9000', 'help', 'configure']
        )
        assertRegex(self, output, r'(?m)^project-defined arguments:$')
        assertRegex(self, output,
                    r'(?m)^\s+--name NAME\s+set the name to greet$')

    def test_help_explicit_srcdir(self):
        output = self.assertPopen(
            ['bfg9000', 'help', 'configure', self.srcdir]
        )
        assertRegex(self, output, r'(?m)^project-defined arguments:$')
        assertRegex(self, output,
                    r'(?m)^\s+--name NAME\s+set the name to greet$') <REPLACE_END> <|endoftext|> from six import assertRegex

from . import *


class TestUserArgs(IntegrationTest):
    def __init__(self, *args, **kwargs):
        IntegrationTest.__init__(
            self, os.path.join(examples_dir, '10_custom_args'),
            configure=False, *args, **kwargs
        )

    def test_build_default(self):
        self.configure()
        self.build(executable('simple'))
        self.assertOutput([executable('simple')], 'hello from unnamed!\n')

    def test_build_with_args(self):
        self.configure(extra_args=['--name=foo'])
        self.build(executable('simple'))
        self.assertOutput([executable('simple')], 'hello from foo!\n')

    def test_help(self):
        os.chdir(self.srcdir)
        output = self.assertPopen(
            ['bfg9000', 'help', 'configure']
        )
        assertRegex(self, output, r'(?m)^project-defined arguments:$')
        assertRegex(self, output,
                    r'(?m)^\s+--name NAME\s+set the name to greet$')

    def test_help_explicit_srcdir(self):
        output = self.assertPopen(
            ['bfg9000', 'help', 'configure', self.srcdir]
        )
        assertRegex(self, output, r'(?m)^project-defined arguments:$')
        assertRegex(self, output,
                    r'(?m)^\s+--name NAME\s+set the name to greet$')
Add integration test for user-args help

from . import *


class TestUserArgs(IntegrationTest):
    def __init__(self, *args, **kwargs):
        IntegrationTest.__init__(
            self, os.path.join(examples_dir, '10_custom_args'),
            configure=False, *args, **kwargs
        )

    def test_build_default(self):
        self.configure()
        self.build(executable('simple'))
        self.assertOutput([executable('simple')], 'hello from unnamed!\n')

    def test_build_with_args(self):
        self.configure(extra_args=['--name=foo'])
        self.build(executable('simple'))
        self.assertOutput([executable('simple')], 'hello from foo!\n')
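`six.assertRegex` in the commit above papers over the Python 2/3 rename (`assertRegexpMatches` became `assertRegex`); it is called as a free function with the test case as its first argument rather than as a method. A tiny sketch:

    import unittest
    from six import assertRegex

    class Demo(unittest.TestCase):
        def test_regex(self):
            # equivalent to self.assertRegex(...) on Python 3
            assertRegex(self, 'bfg9000 help configure', r'^bfg\d+')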
b0f47f2d9b75ac777c3cf4a45c1930de9a42c6bc
heutagogy/heutagogy.py
heutagogy/heutagogy.py
from heutagogy import app
import heutagogy.persistence

import os
from datetime import timedelta

app.config.from_object(__name__)
app.config.update(dict(
    USERS={
        'myuser': {'password': 'mypassword'},
        'user2': {'password': 'pass2'},
    },
    JWT_AUTH_URL_RULE='/api/v1/login',
    JWT_EXPIRATION_DELTA=timedelta(seconds=2592000),  # 1 month
    DATABASE=os.path.join(app.root_path, 'heutagogy.sqlite3'),
    DEBUG=True))
app.config.from_envvar('HEUTAGOGY_SETTINGS', silent=True)

if not app.config['SECRET_KEY']:
    app.config['SECRET_KEY'] = 'super-secret'


@app.cli.command('initdb')
def initdb_command():
    """Creates the database tables."""
    heutagogy.persistence.initialize()
from heutagogy import app
import heutagogy.persistence

import os
from datetime import timedelta

app.config.from_object(__name__)
app.config.update(dict(
    USERS={
        'myuser': {'password': 'mypassword'},
        'user2': {'password': 'pass2'},
    },
    JWT_AUTH_URL_RULE='/api/v1/login',
    JWT_EXPIRATION_DELTA=timedelta(seconds=2592000),  # 1 month
    DATABASE=os.path.join(app.root_path, 'heutagogy.sqlite3'),
    DEBUG=True))
app.config.from_envvar('HEUTAGOGY_SETTINGS', silent=True)

if not app.config['SECRET_KEY']:
    app.config['SECRET_KEY'] = 'super-secret'


@app.cli.command('initdb')
def initdb_command():
    """Creates the database tables."""
    heutagogy.persistence.initialize()


with app.app_context():
    if not os.path.isfile(app.config['DATABASE']):
        heutagogy.persistence.initialize()
Initialize database if it does not exist
Initialize database if it does not exist
Python
agpl-3.0
heutagogy/heutagogy-backend,heutagogy/heutagogy-backend
<INSERT> heutagogy.persistence.initialize()


with app.app_context():
    if not os.path.isfile(app.config['DATABASE']):
         <INSERT_END> <|endoftext|> from heutagogy import app
import heutagogy.persistence

import os
from datetime import timedelta

app.config.from_object(__name__)
app.config.update(dict(
    USERS={
        'myuser': {'password': 'mypassword'},
        'user2': {'password': 'pass2'},
    },
    JWT_AUTH_URL_RULE='/api/v1/login',
    JWT_EXPIRATION_DELTA=timedelta(seconds=2592000),  # 1 month
    DATABASE=os.path.join(app.root_path, 'heutagogy.sqlite3'),
    DEBUG=True))
app.config.from_envvar('HEUTAGOGY_SETTINGS', silent=True)

if not app.config['SECRET_KEY']:
    app.config['SECRET_KEY'] = 'super-secret'


@app.cli.command('initdb')
def initdb_command():
    """Creates the database tables."""
    heutagogy.persistence.initialize()


with app.app_context():
    if not os.path.isfile(app.config['DATABASE']):
        heutagogy.persistence.initialize()
Initialize database if it does not exist

from heutagogy import app
import heutagogy.persistence

import os
from datetime import timedelta

app.config.from_object(__name__)
app.config.update(dict(
    USERS={
        'myuser': {'password': 'mypassword'},
        'user2': {'password': 'pass2'},
    },
    JWT_AUTH_URL_RULE='/api/v1/login',
    JWT_EXPIRATION_DELTA=timedelta(seconds=2592000),  # 1 month
    DATABASE=os.path.join(app.root_path, 'heutagogy.sqlite3'),
    DEBUG=True))
app.config.from_envvar('HEUTAGOGY_SETTINGS', silent=True)

if not app.config['SECRET_KEY']:
    app.config['SECRET_KEY'] = 'super-secret'


@app.cli.command('initdb')
def initdb_command():
    """Creates the database tables."""
    heutagogy.persistence.initialize()