hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
4ecd621ab56bfd508e9835987ea7537a72ff3b56 | 1,093 | py | Python | fuc/cli/vcf_index.py | sbslee/fuc | f4eb5f6b95b533252ee877920278cd4e4c964bb8 | [
"MIT"
] | 17 | 2021-06-09T23:23:56.000Z | 2022-03-10T11:58:46.000Z | fuc/cli/vcf_index.py | sbslee/fuc | f4eb5f6b95b533252ee877920278cd4e4c964bb8 | [
"MIT"
] | 27 | 2021-04-21T06:25:22.000Z | 2022-03-30T23:25:36.000Z | fuc/cli/vcf_index.py | sbslee/fuc | f4eb5f6b95b533252ee877920278cd4e4c964bb8 | [
"MIT"
] | null | null | null | import sys
from .. import api
import pysam
description = """
Index a VCF file.
This command will create an index file (.tbi) for the input VCF.
"""
epilog = f"""
[Example] Index a compressed VCF file:
$ fuc {api.common._script_name()} in.vcf.gz
[Example] Index an uncompressed VCF file (will create a compressed VCF first):
$ fuc {api.common._script_name()} in.vcf
"""
| 25.418605 | 78 | 0.643184 |
4ecd71c762bc771fde1ea85f54d06c0a60939363 | 1,174 | py | Python | config.py | mazanax/identity-quiz | b9468b305b23701d027a3fc1cfd2536da8371a4e | [
"MIT"
] | null | null | null | config.py | mazanax/identity-quiz | b9468b305b23701d027a3fc1cfd2536da8371a4e | [
"MIT"
] | null | null | null | config.py | mazanax/identity-quiz | b9468b305b23701d027a3fc1cfd2536da8371a4e | [
"MIT"
] | null | null | null | import logging
import os
import sys
from peewee import SqliteDatabase, PostgresqlDatabase
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
if not os.getenv('POSTGRES_DB_NAME'):
logger.warning('[DB] using sqlite')
db = SqliteDatabase('quiz.db')
else:
logger.info('[DB] Connected to postgresql')
db_name = os.getenv('POSTGRES_DB_NAME')
db_user = os.getenv('POSTGRES_DB_USER')
db_pass = os.getenv('POSTGRES_DB_PASS')
db_host = os.getenv('POSTGRES_DB_HOST')
db_port = int(os.getenv('POSTGRES_DB_PORT', 5432))
db = PostgresqlDatabase(db_name, user=db_user, password=db_pass, host=db_host, port=db_port)
token_length = 64
site_host = os.getenv('APP_SITE_HOST')
# ---- SOCIAL NETWORKS CREDENTIALS ---- #
vk_client_id = os.getenv('VK_CLIENT_ID')
vk_client_secret = os.getenv('VK_CLIENT_SECRET')
fb_client_id = os.getenv('FB_CLIENT_ID')
fb_client_secret = os.getenv('FB_CLIENT_SECRET')
google_client_id = os.getenv('GOOGLE_CLIENT_ID')
google_client_secret = os.getenv('GOOGLE_CLIENT_SECRET')
# ---- END OF CREDENTIALS ---- #
| 30.102564 | 96 | 0.749574 |
4ece31e4c80ccf74cc98a1222f00653c142ee026 | 65 | py | Python | scuttlecrab/main.py | PUMBA-1997/scuttlecrab.py | 13e0074b7d94af81bf5c13feb5a3d036bc71f133 | [
"Apache-2.0"
] | 4 | 2022-01-05T14:16:07.000Z | 2022-01-09T07:29:08.000Z | scuttlecrab/main.py | Fabrizio1663/scuttlecrab.py | 13e0074b7d94af81bf5c13feb5a3d036bc71f133 | [
"Apache-2.0"
] | null | null | null | scuttlecrab/main.py | Fabrizio1663/scuttlecrab.py | 13e0074b7d94af81bf5c13feb5a3d036bc71f133 | [
"Apache-2.0"
] | null | null | null | from scuttlecrab.classes.bot import CustomBot
bot = CustomBot()
| 16.25 | 45 | 0.8 |
4ece3ece6512a9e1cbd43be4fb424a421b22f700 | 1,384 | py | Python | python/paddle_fl/split_learning/core/reader/reader_base.py | jhjiangcs/PaddleFL | debcc3809f634f696637e1fd8f15ca2430b0c1df | [
"Apache-2.0"
] | 2 | 2021-03-02T09:24:31.000Z | 2021-05-27T21:00:29.000Z | python/paddle_fl/split_learning/core/reader/reader_base.py | JedHong/PaddleFL | 4b10985f808511d63f2efc76e387103ccde14e32 | [
"Apache-2.0"
] | null | null | null | python/paddle_fl/split_learning/core/reader/reader_base.py | JedHong/PaddleFL | 4b10985f808511d63f2efc76e387103ccde14e32 | [
"Apache-2.0"
] | 1 | 2020-05-18T11:07:38.000Z | 2020-05-18T11:07:38.000Z | # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
| 39.542857 | 112 | 0.680636 |
4ecec912c81fe613d6387ded4c0e5121003a14a5 | 640 | py | Python | main.py | sergioyahni/captcha | f8235a4c3b64fadf71c00d9932fae7f1bf1962f5 | [
"MIT"
] | null | null | null | main.py | sergioyahni/captcha | f8235a4c3b64fadf71c00d9932fae7f1bf1962f5 | [
"MIT"
] | null | null | null | main.py | sergioyahni/captcha | f8235a4c3b64fadf71c00d9932fae7f1bf1962f5 | [
"MIT"
] | null | null | null | from captcha.image import ImageCaptcha
import random
check_captcha()
| 23.703704 | 83 | 0.6 |
4ed15e1a4c599c5f5acf73a58c9805ac84372eae | 4,045 | py | Python | openstates/openstates-master/openstates/mi/events.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/openstates/mi/events.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | openstates/openstates-master/openstates/mi/events.py | Jgorsick/Advocacy_Angular | 8906af3ba729b2303880f319d52bce0d6595764c | [
"CC-BY-4.0"
] | null | null | null | from openstates.utils import LXMLMixin
import datetime as dt
import re
from billy.scrape.events import Event, EventScraper
import lxml.html
import pytz
mi_events = "http://legislature.mi.gov/doc.aspx?CommitteeMeetings"
| 32.36 | 85 | 0.528307 |
4ed2ebd3e68752d3caa55e15dd92ce5cc345106b | 418 | py | Python | code/0190-reverseBits.py | RRRoger/LeetCodeExercise | 0019a048fcfac9ac9e6f37651b17d01407c92c7d | [
"MIT"
] | null | null | null | code/0190-reverseBits.py | RRRoger/LeetCodeExercise | 0019a048fcfac9ac9e6f37651b17d01407c92c7d | [
"MIT"
] | null | null | null | code/0190-reverseBits.py | RRRoger/LeetCodeExercise | 0019a048fcfac9ac9e6f37651b17d01407c92c7d | [
"MIT"
] | null | null | null |
if "__main__" == __name__:
solution = Solution()
res = solution.reverseBits(43261596)
print(res)
| 19.904762 | 69 | 0.5 |
4ed342ee0815f43f923a49b70459817dc28094de | 1,018 | py | Python | muria/db/preload.py | xakiy/muria | 0d16ae02f65d2a4b8cfe31419a4d9343ccbe6905 | [
"MIT"
] | 1 | 2020-02-10T00:12:27.000Z | 2020-02-10T00:12:27.000Z | muria/db/preload.py | xakiy/muria | 0d16ae02f65d2a4b8cfe31419a4d9343ccbe6905 | [
"MIT"
] | 8 | 2019-12-07T16:48:08.000Z | 2021-08-31T06:31:34.000Z | muria/db/preload.py | xakiy/muria | 0d16ae02f65d2a4b8cfe31419a4d9343ccbe6905 | [
"MIT"
] | null | null | null | """Some preloads of database content."""
tables = list()
roles = list()
roles.append({"id": 1, "name": "administrator"})
roles.append({"id": 2, "name": "contributor"})
roles.append({"id": 3, "name": "staff"})
roles.append({"id": 4, "name": "parent"})
roles.append({"id": 5, "name": "caretaker"})
roles.append({"id": 6, "name": "student"})
tables.append({"model": "Role", "data": roles})
responsibilities = list()
responsibilities.append({"id": 1, "name": "manager"})
responsibilities.append({"id": 2, "name": "user"})
responsibilities.append({"id": 3, "name": "journalist"})
tables.append({"model": "Responsibility", "data": responsibilities})
sets = list()
responsibility_role = [
(1, 1),
(1, 3),
(2, 1),
(2, 2),
(2, 3),
(2, 4),
(2, 5),
(2, 6),
(3, 2),
(3, 3),
(3, 6),
]
sets.append(
{
"parent": "Responsibility",
"rel": "roles",
"child": "Role",
"data": responsibility_role,
}
)
| 22.130435 | 69 | 0.52554 |
4ed3a96b67e22aff964a1489de5d4c55aa41991d | 6,689 | py | Python | src/meeting_timer/settings.py | andrewjrobinson/meeting_timer | cad3303f6925d2e8961b262c6cfbecf4a30a1ce5 | [
"MIT"
] | null | null | null | src/meeting_timer/settings.py | andrewjrobinson/meeting_timer | cad3303f6925d2e8961b262c6cfbecf4a30a1ce5 | [
"MIT"
] | null | null | null | src/meeting_timer/settings.py | andrewjrobinson/meeting_timer | cad3303f6925d2e8961b262c6cfbecf4a30a1ce5 | [
"MIT"
] | null | null | null | #
# MIT License
#
# Copyright (c) 2020 Andrew Robinson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import collections.abc
import json
import os
import tkinter as tk
## end class SettingsWrapper() ##
## end class Settings() ##
| 32.470874 | 87 | 0.575123 |
4ed623c2f06e37c570057cd2950ac913943aac09 | 651 | py | Python | python/191122.py | Xanonymous-GitHub/main | 53120110bd8dc9ab33424fa26d1a8ca5b9256ebe | [
"Apache-2.0"
] | 1 | 2019-09-27T17:46:41.000Z | 2019-09-27T17:46:41.000Z | python/191122.py | Xanonymous-GitHub/main | 53120110bd8dc9ab33424fa26d1a8ca5b9256ebe | [
"Apache-2.0"
] | null | null | null | python/191122.py | Xanonymous-GitHub/main | 53120110bd8dc9ab33424fa26d1a8ca5b9256ebe | [
"Apache-2.0"
] | 5 | 2019-09-30T16:41:14.000Z | 2019-10-25T11:13:39.000Z | from os import getcwd
if __name__ == '__main__':
main()
| 21.7 | 78 | 0.533026 |
4ed6b577e511cc21f5108b75969a300169a86b9c | 5,534 | py | Python | treeplotter/plotter.py | Luke-Poeppel/treeplotter | 940e08b02d30f69972b0df1a5668f3b2ade02027 | [
"MIT"
] | 7 | 2021-06-12T17:48:17.000Z | 2022-01-27T09:47:12.000Z | treeplotter/plotter.py | Luke-Poeppel/treeplotter | 940e08b02d30f69972b0df1a5668f3b2ade02027 | [
"MIT"
] | 36 | 2021-06-09T18:31:44.000Z | 2022-03-17T12:06:59.000Z | treeplotter/plotter.py | Luke-Poeppel/treeplotter | 940e08b02d30f69972b0df1a5668f3b2ade02027 | [
"MIT"
] | 2 | 2021-12-07T18:41:53.000Z | 2022-03-09T10:46:52.000Z | ####################################################################################################
# File: plotter.py
# Purpose: Plotting module.
#
# Author: Luke Poeppel
#
# Location: Kent, 2021
####################################################################################################
import logging
import os
import json
import sys
import subprocess
import shutil
import tempfile
from .style import (
write_index_html,
write_treant_css,
write_node_css
)
here = os.path.abspath(os.path.dirname(__file__))
treant_templates = here + "/templates"
def get_logger(name, print_to_console=True, write_to_file=None):
"""
A simple helper for logging. Copied from my `decitala` package.
"""
logger = logging.getLogger(name)
if not len(logger.handlers):
logger.setLevel(logging.INFO)
if write_to_file is not None:
file_handler = logging.FileHandler(write_to_file)
logger.addHandler(file_handler)
if print_to_console:
stdout_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stdout_handler)
return logger
def prepare_arrow(dict_in):
"""
Raphal's arrow formatting is a bit more involved. This parsing is done here.
"""
arrow_end = dict_in["arrow_end"]
arrow_width = dict_in["arrow_width"]
arrow_length = dict_in["arrow_length"]
return "-".join([arrow_end, arrow_width, arrow_length])
def create_tree_diagram(
tree,
background_color="#868DEE",
save_path=None,
webshot=False,
verbose=False
):
"""
This function creates a visualization of a given `tree.Tree` by wrapping the TreantJS library.
Parameters
----------
tree : tree.Tree
A `tree.Tree` object.
background_color : str
Color (given in Hex) of the desired background color of the visualization.
save_path : str
Optional path to the directory in which all the relevant files will be saved. Default is `None`.
webshot : bool
Whether or not to invoke Rs webshot library to create a high-res screenshot of the tree.
Default is `False`.
verbose : bool
Whether to print logging messages in the plotting process. Useful for debugging.
"""
if verbose:
logger = get_logger(name=__name__, print_to_console=True)
else:
logger = get_logger(name=__name__, print_to_console=False)
serialized = tree.serialize(for_treant=True)
logger.info("-> Creating directory and writing tree to JSON...")
if save_path:
if not(os.path.isdir(save_path)):
os.mkdir(save_path)
os.chdir(save_path)
_prepare_chart_config(tree=tree)
_prepare_docs_and_screenshot(
path=save_path,
tree=tree,
serialized_tree=serialized,
background_color=background_color,
webshot=webshot,
logger=logger
)
logger.info("Done ")
return save_path
else:
with tempfile.TemporaryDirectory() as tmpdir:
os.chdir(tmpdir)
_prepare_docs_and_screenshot(tmpdir, serialized_tree=serialized, logger=logger)
logger.info("Done ")
with tempfile.NamedTemporaryFile(delete=False) as tmpfile:
shutil.copyfile(tmpdir + "/shot.png", tmpfile.name)
return tmpfile.name | 28.091371 | 100 | 0.696061 |
4ed7072eb26c7d3dbe4f2527653e38fa3cf65c67 | 638 | py | Python | app/__init__.py | PabloEckardt/Flask-Login-Example | a230a6ce6678b52bb4c62b0b62b167edd927ebd0 | [
"MIT"
] | null | null | null | app/__init__.py | PabloEckardt/Flask-Login-Example | a230a6ce6678b52bb4c62b0b62b167edd927ebd0 | [
"MIT"
] | null | null | null | app/__init__.py | PabloEckardt/Flask-Login-Example | a230a6ce6678b52bb4c62b0b62b167edd927ebd0 | [
"MIT"
] | null | null | null | from flask import current_app, Flask, redirect, url_for
from flask_cors import CORS
from flask_sqlalchemy import SQLAlchemy
import config
from flask_login import LoginManager
app = Flask(__name__)
app.config.from_object(config) # load config.py
app.secret_key = 'super duper mega secret key'
login_manager = LoginManager() # Login manager for the application
login_manager.init_app(app) # apply login manager
login_manager.login_view = 'home' # set the default redirect page
db = SQLAlchemy(app)
# This imports are necessary for the scope of the directory structure
from app import views
from app import models
from app.views import *
| 31.9 | 69 | 0.80721 |
4ed70f9df4c3c063308c836d1a779ff6d33f1046 | 3,814 | py | Python | filewriter.py | FrederikBjorne/python-serial-logging | e553bc2421699a2bb38f21abffbb08ee70c81a21 | [
"MIT"
] | null | null | null | filewriter.py | FrederikBjorne/python-serial-logging | e553bc2421699a2bb38f21abffbb08ee70c81a21 | [
"MIT"
] | null | null | null | filewriter.py | FrederikBjorne/python-serial-logging | e553bc2421699a2bb38f21abffbb08ee70c81a21 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import logging
from threading import Thread, Event
from Queue import Queue, Empty as QueueEmpty
import codecs
| 40.574468 | 113 | 0.588621 |
4ed7b53a4e6b728656b2c884c550c9f3728497ff | 361 | py | Python | ejercicios/ejercicio4.py | Ironwilly/python | f6d42c685b4026b018089edb4ae8cc0ca9614e86 | [
"CC0-1.0"
] | null | null | null | ejercicios/ejercicio4.py | Ironwilly/python | f6d42c685b4026b018089edb4ae8cc0ca9614e86 | [
"CC0-1.0"
] | null | null | null | ejercicios/ejercicio4.py | Ironwilly/python | f6d42c685b4026b018089edb4ae8cc0ca9614e86 | [
"CC0-1.0"
] | null | null | null | #Dados dos nmeros, mostrar la suma, resta, divisin y multiplicacin de ambos.
a = int(input("Dime el primer nmero: "))
b = int(input("Dime el segundo nmero: "))
print("La suma de los dos nmeros es: ",a+b)
print("La resta de los dos nmeros es: ",a-b)
print("La multiplicacin de los dos nmeros es: ",a*b)
print("La divisin de los dos nmeros es: ",a/b)
| 40.111111 | 79 | 0.700831 |
4ed8c0b61feb32ca367f3590a99a8b047fcbbc95 | 610 | py | Python | adv/pipple.py | XenoXilus/dl | cdfce03835cd67aac553140d6d88bc4c5c5d60ff | [
"Apache-2.0"
] | null | null | null | adv/pipple.py | XenoXilus/dl | cdfce03835cd67aac553140d6d88bc4c5c5d60ff | [
"Apache-2.0"
] | null | null | null | adv/pipple.py | XenoXilus/dl | cdfce03835cd67aac553140d6d88bc4c5c5d60ff | [
"Apache-2.0"
] | null | null | null | from core.advbase import *
if __name__ == '__main__':
from core.simulate import test_with_argv
test_with_argv(None, *sys.argv) | 25.416667 | 64 | 0.568852 |
4edad0b70551d7b3c45fcd8cf2f69ef8cc0ea351 | 3,799 | py | Python | test/testFactorMethods.py | turkeydonkey/nzmath3 | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | [
"BSD-3-Clause"
] | 1 | 2021-05-26T19:22:17.000Z | 2021-05-26T19:22:17.000Z | test/testFactorMethods.py | turkeydonkey/nzmath3 | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | [
"BSD-3-Clause"
] | null | null | null | test/testFactorMethods.py | turkeydonkey/nzmath3 | a48ae9efcf0d9ad1485c2e9863c948a7f1b20311 | [
"BSD-3-Clause"
] | null | null | null | import unittest
import logging
import nzmath.factor.methods as mthd
try:
_log = logging.getLogger('test.testFactorMethod')
except:
try:
_log = logging.getLogger('nzmath.test.testFactorMethod')
except:
_log = logging.getLogger('testFactorMethod')
_log.setLevel(logging.INFO)
def suite(suffix = "Test"):
suite = unittest.TestSuite()
all_names = globals()
for name in all_names:
if name.endswith(suffix):
suite.addTest(unittest.makeSuite(all_names[name], "test"))
return suite
if __name__ == '__main__':
logging.basicConfig()
runner = unittest.TextTestRunner()
runner.run(suite())
| 39.164948 | 84 | 0.609108 |
14c1c00575d1e7a958fc95661cce6a81b4fbbd6f | 2,057 | py | Python | LeetCode/0151-reverse-words-in-a-string/solution.py | RyouMon/road-of-master | 02e18c2e524db9c7df4e6f8db56b3c8408a9fc6b | [
"Apache-2.0"
] | null | null | null | LeetCode/0151-reverse-words-in-a-string/solution.py | RyouMon/road-of-master | 02e18c2e524db9c7df4e6f8db56b3c8408a9fc6b | [
"Apache-2.0"
] | null | null | null | LeetCode/0151-reverse-words-in-a-string/solution.py | RyouMon/road-of-master | 02e18c2e524db9c7df4e6f8db56b3c8408a9fc6b | [
"Apache-2.0"
] | null | null | null | import collections
| 22.855556 | 62 | 0.427807 |
14c1f4a62cb93b24d14dc7d0ea4f4f2eb0f1a413 | 3,154 | py | Python | setup.py | Tiksagol/hype | 1485b80fe16a7678605afe209b2494a2a875df3f | [
"MIT"
] | 13 | 2021-07-31T12:07:06.000Z | 2022-03-24T15:00:50.000Z | setup.py | Tiksagol/hype | 1485b80fe16a7678605afe209b2494a2a875df3f | [
"MIT"
] | 2 | 2021-08-02T14:04:58.000Z | 2021-09-06T09:35:20.000Z | setup.py | Tiksagol/hype | 1485b80fe16a7678605afe209b2494a2a875df3f | [
"MIT"
] | 3 | 2021-08-07T13:23:54.000Z | 2022-01-24T13:23:08.000Z |
# Copyright (c) 2021, Serum Studio
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
from setuptools import setup, find_packages
from hype import __license__, __author__, __version__, __desc__
BASE_URL = "https://github.com/serumstudio/hype"
extras_require = {
'color': ['colorama==0.4.4'], #: Color support
'standard': ['colorama==0.4.4'], #: Standard installation with color support
'progress': ['alive-progress==1.6.2'], #: With progressbar support
'table': ['tabulate==0.8.9'] #: With Table support
}
setup(
name = "hypecli",
author = __author__,
description =__desc__,
long_description=get_long_description(),
long_description_content_type='text/markdown',
project_urls={
'Documentation': 'https://hype.serum.studio',
'Source': BASE_URL,
'Tracker': "%s/issues" % (BASE_URL)
},
version = __version__,
license = __license__,
url=BASE_URL,
keywords='cli,commandline-toolkit,command line toolkit,python cli,python 3'.split(','),
packages = [p for p in find_packages() if 'test' not in p],
extras_require = extras_require,
classifiers = [
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Programming Language :: Python",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development",
"Typing :: Typed",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"License :: OSI Approved :: MIT License"
],
) | 38.938272 | 91 | 0.679138 |
14c231909289d40787ee027c30489129b5c603c6 | 300 | py | Python | visualize_d_tree_generator/dummy_data.py | dan-silver/machine-learning-visualizer | 13e31b953dd24fbed5970f54487a9bb65d2e6cd4 | [
"MIT"
] | 2 | 2015-09-23T03:32:26.000Z | 2017-07-24T12:03:37.000Z | visualize_d_tree_generator/dummy_data.py | dan-silver/machine-learning-visualizer | 13e31b953dd24fbed5970f54487a9bb65d2e6cd4 | [
"MIT"
] | null | null | null | visualize_d_tree_generator/dummy_data.py | dan-silver/machine-learning-visualizer | 13e31b953dd24fbed5970f54487a9bb65d2e6cd4 | [
"MIT"
] | null | null | null | import sklearn
from sklearn import datasets | 42.857143 | 167 | 0.79 |
14c4fe8cfdba355e578ef806e7db3a2e2f8ba8db | 947 | py | Python | Recommender_System/algorithm/KGCN/main.py | Holldean/Recommender-System | 5c1508b4fb430dc06979353627c4cb873aad490c | [
"MIT"
] | 348 | 2019-11-12T12:20:08.000Z | 2022-03-31T12:34:45.000Z | Recommender_System/algorithm/KGCN/main.py | Runjeo/Recommender-System | 6a93e6ee970b32c76e2f71043383bf24a7e865d5 | [
"MIT"
] | 15 | 2019-12-04T15:16:15.000Z | 2021-07-21T06:27:38.000Z | Recommender_System/algorithm/KGCN/main.py | Runjeo/Recommender-System | 6a93e6ee970b32c76e2f71043383bf24a7e865d5 | [
"MIT"
] | 87 | 2019-11-24T10:26:26.000Z | 2022-03-11T05:35:39.000Z | if __name__ == '__main__':
import Recommender_System.utility.gpu_memory_growth
from Recommender_System.algorithm.KGCN.tool import construct_undirected_kg, get_adj_list
from Recommender_System.algorithm.KGCN.model import KGCN_model
from Recommender_System.algorithm.KGCN.train import train
from Recommender_System.data import kg_loader, data_process
import tensorflow as tf
n_user, n_item, n_entity, n_relation, train_data, test_data, kg, topk_data = data_process.pack_kg(kg_loader.ml1m_kg1m, negative_sample_threshold=4)
neighbor_size = 16
adj_entity, adj_relation = get_adj_list(construct_undirected_kg(kg), n_entity, neighbor_size)
model = KGCN_model(n_user, n_entity, n_relation, adj_entity, adj_relation, neighbor_size, iter_size=1, dim=16, l2=1e-7, aggregator='sum')
train(model, train_data, test_data, topk_data, optimizer=tf.keras.optimizers.Adam(0.01), epochs=10, batch=512)
| 55.705882 | 152 | 0.779303 |
14c57c94bb76c89fd6223c07cfaec40385ecbc9c | 1,133 | py | Python | setup.py | travisliu/data-spec-validator | 7ee0944ca9899d565ad04ed82ca26bb402970958 | [
"MIT"
] | 23 | 2021-08-11T08:53:15.000Z | 2022-02-14T04:44:13.000Z | setup.py | travisliu/data-spec-validator | 7ee0944ca9899d565ad04ed82ca26bb402970958 | [
"MIT"
] | 2 | 2021-09-11T08:59:12.000Z | 2022-03-29T00:40:42.000Z | setup.py | travisliu/data-spec-validator | 7ee0944ca9899d565ad04ed82ca26bb402970958 | [
"MIT"
] | 1 | 2022-01-04T07:45:22.000Z | 2022-01-04T07:45:22.000Z | import os
import setuptools
CUR_DIR = os.path.abspath(os.path.dirname(__file__))
about = {}
with open(os.path.join(CUR_DIR, "data_spec_validator", "__version__.py"), "r") as f:
exec(f.read(), about)
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setuptools.setup(
name="data-spec-validator",
version=about['__version__'],
author="CJHwong, falldog, HardCoreLewis, kilikkuo, xeonchen",
author_email="pypi@hardcoretech.co",
description="Simple validation tool for API",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/hardcoretech/data-spec-validator",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
package_dir={"data_spec_validator": "data_spec_validator"},
packages=setuptools.find_packages(),
install_requires=[
"python-dateutil",
],
extras_require={
'decorator': ['Django', 'djangorestframework'],
},
python_requires=">=3.6",
)
| 29.815789 | 84 | 0.672551 |
14c7234590ee0036166bb3c285dac3557145714c | 9,204 | py | Python | prisms_influxdb.py | VDL-PRISM/home-assistant-components | 2041d2a257aede70613ddf8fe1e76bcc1877ef2e | [
"Apache-2.0"
] | null | null | null | prisms_influxdb.py | VDL-PRISM/home-assistant-components | 2041d2a257aede70613ddf8fe1e76bcc1877ef2e | [
"Apache-2.0"
] | null | null | null | prisms_influxdb.py | VDL-PRISM/home-assistant-components | 2041d2a257aede70613ddf8fe1e76bcc1877ef2e | [
"Apache-2.0"
] | null | null | null | """
A component which allows you to send data to an Influx database.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/influxdb/
"""
from datetime import timedelta
import functools
import logging
import itertools
import json
from persistent_queue import PersistentQueue
import requests
import voluptuous as vol
from homeassistant.const import (EVENT_STATE_CHANGED, STATE_UNAVAILABLE,
STATE_UNKNOWN, EVENT_HOMEASSISTANT_STOP)
from homeassistant.helpers import state as state_helper
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_point_in_time
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DOMAIN = "prisms_influxdb"
DEPENDENCIES = []
DEFAULT_HOST = 'localhost'
DEFAULT_PORT = 8086
DEFAULT_DATABASE = 'home_assistant'
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = False
DEFAULT_BATCH_TIME = 10
DEFAULT_CHUNK_SIZE = 500
REQUIREMENTS = ['influxdb==3.0.0', 'python-persistent-queue==1.3.0']
CONF_HOST = 'host'
CONF_DEPLOYMENT_ID = 'home_id'
CONF_PORT = 'port'
CONF_DB_NAME = 'database'
CONF_USERNAME = 'username'
CONF_PASSWORD = 'password'
CONF_SSL = 'ssl'
CONF_VERIFY_SSL = 'verify_ssl'
CONF_BLACKLIST = 'blacklist'
CONF_WHITELIST = 'whitelist'
CONF_TAGS = 'tags'
CONF_BATCH_TIME = 'batch_time'
CONF_CHUNK_SIZE = 'chunk_size'
CONFIG_SCHEMA = vol.Schema({
DOMAIN: vol.Schema({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_DEPLOYMENT_ID): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.positive_int,
vol.Optional(CONF_DB_NAME, default=DEFAULT_DATABASE): cv.string,
vol.Optional(CONF_USERNAME, default=None): vol.Any(cv.string, None),
vol.Optional(CONF_PASSWORD, default=None): vol.Any(cv.string, None),
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL,
default=DEFAULT_VERIFY_SSL): cv.boolean,
vol.Optional(CONF_BLACKLIST, default=[]): cv.ensure_list,
vol.Optional(CONF_WHITELIST, default=[]): cv.ensure_list,
vol.Optional(CONF_TAGS, default={}): dict,
vol.Optional(CONF_BATCH_TIME,
default=DEFAULT_BATCH_TIME): cv.positive_int,
vol.Optional(CONF_CHUNK_SIZE,
default=DEFAULT_CHUNK_SIZE): cv.positive_int,
})
}, extra=vol.ALLOW_EXTRA)
RUNNING = True
# pylint: disable=too-many-locals
def setup(hass, config):
"""Setup the InfluxDB component."""
from influxdb import InfluxDBClient
conf = config[DOMAIN]
blacklist = conf[CONF_BLACKLIST]
whitelist = conf[CONF_WHITELIST]
tags = conf[CONF_TAGS]
batch_time = conf[CONF_BATCH_TIME]
chunk_size = conf[CONF_CHUNK_SIZE]
tags[CONF_DEPLOYMENT_ID] = conf[CONF_DEPLOYMENT_ID]
influx = InfluxDBClient(host=conf[CONF_HOST],
port=conf[CONF_PORT],
username=conf[CONF_USERNAME],
password=conf[CONF_PASSWORD],
database=conf[CONF_DB_NAME],
ssl=conf[CONF_SSL],
verify_ssl=conf[CONF_VERIFY_SSL])
events = PersistentQueue('prisms_influxdb.queue',
path=hass.config.config_dir)
render = functools.partial(get_json_body, hass=hass, tags=tags)
def influx_event_listener(event):
"""Listen for new messages on the bus and sends them to Influx."""
state = event.data.get('new_state')
if state is None or state.state in (
STATE_UNKNOWN, '', STATE_UNAVAILABLE) or \
state.entity_id in blacklist:
# The state is unknown or it is on the black list
return
if len(whitelist) > 0 and state.entity_id not in whitelist:
# It is not on the white list
return
if batch_time == 0:
# Since batch time hasn't been set, just upload as soon as an event
# occurs
try:
_LOGGER.debug("Since batch_time == 0, writing data")
json_body = render(event)
write_data(influx, json_body)
except ValueError as e:
_LOGGER.error("Something is wrong with the provided template: %s", e)
return
else:
# Convert object to pickle-able. Since State.attributes uses
# MappingProxyType, it is not pickle-able
if event.data['new_state']:
event.data['new_state'].attributes = dict(event.data['new_state'].attributes)
if event.data['old_state']:
event.data['old_state'].attributes = dict(event.data['old_state'].attributes)
# Store event to be uploaded later
events.push(event)
_LOGGER.debug("Saving event for later (%s)", len(events))
hass.bus.listen(EVENT_STATE_CHANGED, influx_event_listener)
if batch_time != 0:
# Set up task to upload batch data
_LOGGER.debug("Starting task to upload batch data")
write_batch_data(hass, events, influx, render, batch_time, chunk_size)
# Register to know when home assistant is stopping
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop)
return True
| 34.215613 | 94 | 0.624402 |
14c9c1f833fdc6508d89df41045c267b53031119 | 587 | py | Python | utils/callbacks/callbacks_weather.py | Chris1nexus/carla-data-collector | 333019622cb07dc53bbe8f1c07cfb12fbfaae60c | [
"MIT"
] | null | null | null | utils/callbacks/callbacks_weather.py | Chris1nexus/carla-data-collector | 333019622cb07dc53bbe8f1c07cfb12fbfaae60c | [
"MIT"
] | null | null | null | utils/callbacks/callbacks_weather.py | Chris1nexus/carla-data-collector | 333019622cb07dc53bbe8f1c07cfb12fbfaae60c | [
"MIT"
] | null | null | null | import numpy as np
import os
from ..helpers import save_json
| 29.35 | 79 | 0.722317 |
14cb05cd02a2460b30efdd3be4e6a69dc0d1eedd | 114 | py | Python | change_fw_name.py | maxgerhardt/gd32-bootloader-dfu-dapboot | fcb8c47e17b2bee813ca8c6b33cb52b547538719 | [
"ISC"
] | 1 | 2021-10-03T17:26:38.000Z | 2021-10-03T17:26:38.000Z | change_fw_name.py | maxgerhardt/gd32-bootloader-dfu-dapboot | fcb8c47e17b2bee813ca8c6b33cb52b547538719 | [
"ISC"
] | null | null | null | change_fw_name.py | maxgerhardt/gd32-bootloader-dfu-dapboot | fcb8c47e17b2bee813ca8c6b33cb52b547538719 | [
"ISC"
] | 1 | 2021-11-03T22:06:01.000Z | 2021-11-03T22:06:01.000Z | Import("env")
# original Makefile builds into dapboot.bin/elf, let's do the same
env.Replace(PROGNAME="dapboot")
| 22.8 | 66 | 0.754386 |
14cbaa1dbf623ab97aaa48323072de223e8374d1 | 1,393 | py | Python | exp2.py | advaithca/CG_LAB | 07c4424be2f37d21ed7af804361f0a992a8124ac | [
"MIT"
] | null | null | null | exp2.py | advaithca/CG_LAB | 07c4424be2f37d21ed7af804361f0a992a8124ac | [
"MIT"
] | null | null | null | exp2.py | advaithca/CG_LAB | 07c4424be2f37d21ed7af804361f0a992a8124ac | [
"MIT"
] | null | null | null | #drawing a line using DDA
from OpenGL.GL import *
from OpenGL.GLU import *
from OpenGL.GLUT import *
import sys
import math
x1 = 0
x2 = 0
y1 = 0
y2 = 0
if __name__ == "__main__":
main() | 21.106061 | 52 | 0.557789 |
14cc76852586183e306354dd7443e72f19468e4e | 4,884 | py | Python | landlab/io/netcdf/dump.py | clebouteiller/landlab | e6f47db76ea0814c4c5a24e695bbafb74c722ff7 | [
"MIT"
] | 1 | 2022-01-07T02:36:07.000Z | 2022-01-07T02:36:07.000Z | landlab/io/netcdf/dump.py | clebouteiller/landlab | e6f47db76ea0814c4c5a24e695bbafb74c722ff7 | [
"MIT"
] | 1 | 2021-11-11T21:23:46.000Z | 2021-11-11T21:23:46.000Z | landlab/io/netcdf/dump.py | clebouteiller/landlab | e6f47db76ea0814c4c5a24e695bbafb74c722ff7 | [
"MIT"
] | 2 | 2019-08-19T08:58:10.000Z | 2022-01-07T02:36:01.000Z | import pathlib
import numpy as np
import xarray as xr
def to_netcdf(
grid, path, include="*", exclude=None, time=None, format="NETCDF4", mode="w"
):
"""Write landlab a grid to a netcdf file.
Write the data and grid information for *grid* to *path* as NetCDF.
If the *append* keyword argument in True, append the data to an existing
file, if it exists. Otherwise, clobber an existing files.
Parameters
----------
grid : ModelGrid
Landlab grid object that holds a grid and field values.
path : str
Path to which to save this grid.
include : str or iterable of str, optional
A list of unix-style glob patterns of field names to include. Fully
qualified field names that match any of these patterns will be
written to the output file. A fully qualified field name is one that
that has a prefix that indicates what grid element is defined on
(e.g. "at_node:topographic__elevation"). The default is to include
all fields.
exclude : str or iterable of str, optional
Like the *include* keyword but, instead, fields matching these
patterns will be excluded from the output file.
format : {'NETCDF3_CLASSIC', 'NETCDF3_64BIT', 'NETCDF4_CLASSIC', 'NETCDF4'}
Format of output netcdf file.
attrs : dict
Attributes to add to netcdf file.
mode : {"w", "a"}, optional
Write ("w") or append ("a") mode. If mode="w", any existing file at
this location will be overwritten. If mode="a", existing variables
will be overwritten.
Parameters
----------
Examples
--------
>>> import numpy as np
>>> from landlab import RasterModelGrid
>>> from landlab.io.netcdf import to_netcdf
Create a uniform rectilinear grid with four rows and 3 columns, and add
some data fields to it.
>>> rmg = RasterModelGrid((4, 3))
>>> rmg.at_node["topographic__elevation"] = np.arange(12.0)
>>> rmg.at_node["uplift_rate"] = 2.0 * np.arange(12.0)
Create a temporary directory to write the netcdf file into.
>>> import tempfile, os
>>> temp_dir = tempfile.mkdtemp()
>>> os.chdir(temp_dir)
Write the grid to a netcdf3 file but only include the *uplift_rate*
data in the file.
>>> to_netcdf(
... rmg, "test.nc", format="NETCDF3_64BIT", include="at_node:uplift_rate"
... )
Read the file back in and check its contents.
>>> from scipy.io import netcdf
>>> fp = netcdf.netcdf_file('test.nc', 'r')
>>> 'at_node:uplift_rate' in fp.variables
True
>>> 'at_node:topographic__elevation' in fp.variables
False
>>> fp.variables['at_node:uplift_rate'][:].flatten()
array([ 0., 2., 4., 6., 8., 10., 12., 14., 16., 18., 20.,
22.])
>>> rmg.at_cell["air__temperature"] = np.arange(2.0)
>>> to_netcdf(
... rmg,
... "test-cell.nc",
... format="NETCDF3_64BIT",
... include="at_cell:*",
... # names="air__temperature", at="cell",
... )
"""
path = pathlib.Path(path)
if not path.is_file():
mode = "w"
if time is None and mode == "a":
time = np.nan
this_dataset = grid.as_dataset(include=include, exclude=exclude, time=time)
if format != "NETCDF4":
this_dataset["status_at_node"] = (
("node",),
this_dataset["status_at_node"].values.astype(dtype=int),
)
if mode == "a":
with xr.open_dataset(path) as that_dataset:
if "time" not in that_dataset.dims:
_add_time_dimension_to_dataset(that_dataset, time=np.nan)
new_vars = set(this_dataset.variables) - set(that_dataset.variables)
for var in new_vars:
that_dataset[var] = (
this_dataset[var].dims,
np.full_like(this_dataset[var].values, np.nan),
)
for var in list(that_dataset.variables):
if var.startswith("at_layer"):
del that_dataset[var]
this_dataset = xr.concat(
[that_dataset, this_dataset], dim="time", data_vars="minimal"
)
if np.isnan(this_dataset["time"][-1]):
this_dataset["time"].values[-1] = this_dataset["time"][-2] + 1.0
this_dataset.to_netcdf(path, format=format, mode="w", unlimited_dims=("time",))
def _add_time_dimension_to_dataset(dataset, time=0.0):
"""Add a time dimension to all variables except those at_layer."""
names = set(
[
name
for name in dataset.variables
if name.startswith("at_") and not name.startswith("at_layer")
]
)
for name in names:
dataset[name] = (("time",) + dataset[name].dims, dataset[name].values[None])
dataset["time"] = (("time",), [time])
| 33.682759 | 84 | 0.600328 |
14cccb90d3e5e893e8714d97f092815310280afd | 4,053 | py | Python | app.py | ethylomat/MathPhysTheoTS | 76144c3990d9511817cfaa007a75ec55bc8e7310 | [
"MIT"
] | 1 | 2019-04-29T22:23:22.000Z | 2019-04-29T22:23:22.000Z | app.py | ethylomat/MathPhysTheoTS | 76144c3990d9511817cfaa007a75ec55bc8e7310 | [
"MIT"
] | 2 | 2016-08-11T14:26:47.000Z | 2016-08-11T14:29:44.000Z | app.py | ethylomat/MathPhysTheoTS | 76144c3990d9511817cfaa007a75ec55bc8e7310 | [
"MIT"
] | null | null | null | from flask import request, url_for, g
from flask_api import FlaskAPI, status, exceptions
from flask_sqlalchemy import SQLAlchemy
import arrow
from flask_admin import Admin
from flask_admin.contrib.sqla import ModelView
from flask_cors import CORS
app = FlaskAPI(__name__)
cors = CORS(app, resources={r"/*": {"origins": "*"}})
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///tickets.db'
db = SQLAlchemy(app)
if __name__ == "__main__":
admin = Admin(app)
admin.add_view(ModelView(Ticket, db.session))
app.run(debug=True, host="0.0.0.0")
| 29.583942 | 100 | 0.635085 |
14cd52d75b110058b96680b7258b9682ab53013c | 592 | py | Python | python/0496.toy-factory.py | Ubastic/lintcode | 9f600eece075410221a24859331a810503c76014 | [
"MIT"
] | 6 | 2019-10-02T02:24:49.000Z | 2021-11-18T10:08:07.000Z | python/0496.toy-factory.py | Ubastic/lintcode | 9f600eece075410221a24859331a810503c76014 | [
"MIT"
] | 1 | 2020-02-28T03:42:36.000Z | 2020-03-07T09:26:00.000Z | src/0496.toy-factory/0496.toy-factory.py | jiangshanmeta/lintcode | 7d7003825b5a7b9fd5b0be57aa2d84391e0d1fa5 | [
"MIT"
] | 2 | 2020-07-25T08:42:38.000Z | 2021-05-07T06:16:46.000Z | """
Your object will be instantiated and called as such:
ty = ToyFactory()
toy = ty.getToy(type)
toy.talk()
"""
| 20.413793 | 73 | 0.584459 |
14cdf01dc867ab894916d46f7f85f97ee82b9f96 | 143 | py | Python | Dataflow/dimension.py | duseok/CNNDataflowAnalysis | a8e53ac1a1da47cfff16850efa365da9f9a72664 | [
"BSD-2-Clause"
] | 1 | 2021-04-02T07:17:15.000Z | 2021-04-02T07:17:15.000Z | Dataflow/dimension.py | duseok/CNNDataflowAnalysis | a8e53ac1a1da47cfff16850efa365da9f9a72664 | [
"BSD-2-Clause"
] | null | null | null | Dataflow/dimension.py | duseok/CNNDataflowAnalysis | a8e53ac1a1da47cfff16850efa365da9f9a72664 | [
"BSD-2-Clause"
] | null | null | null | from enum import IntEnum, unique
| 13 | 32 | 0.615385 |
14d350e4c24338a388b8fa1fb69e9c619ba5502a | 4,746 | py | Python | autohandshake/src/Pages/LoginPage.py | cedwards036/autohandshake | 7f57b242a612b0f0aad634bc111a3db3050c6597 | [
"MIT"
] | 3 | 2018-05-18T16:15:32.000Z | 2019-08-01T23:06:44.000Z | autohandshake/src/Pages/LoginPage.py | cedwards036/autohandshake | 7f57b242a612b0f0aad634bc111a3db3050c6597 | [
"MIT"
] | null | null | null | autohandshake/src/Pages/LoginPage.py | cedwards036/autohandshake | 7f57b242a612b0f0aad634bc111a3db3050c6597 | [
"MIT"
] | null | null | null | from autohandshake.src.Pages.Page import Page
from autohandshake.src.HandshakeBrowser import HandshakeBrowser
from autohandshake.src.exceptions import InvalidURLError, NoSuchElementError, \
InvalidEmailError, InvalidPasswordError
import re
| 46.529412 | 107 | 0.630004 |
14d5f7d082a22edb6ba40c486b8faa869556d8a1 | 2,649 | py | Python | simsiam/engine/supervised.py | tillaczel/simsiam | d4d03aae625314ac2f24155fac3ca5bfc31502c7 | [
"MIT"
] | null | null | null | simsiam/engine/supervised.py | tillaczel/simsiam | d4d03aae625314ac2f24155fac3ca5bfc31502c7 | [
"MIT"
] | null | null | null | simsiam/engine/supervised.py | tillaczel/simsiam | d4d03aae625314ac2f24155fac3ca5bfc31502c7 | [
"MIT"
] | null | null | null | from omegaconf import DictConfig
import pytorch_lightning as pl
import numpy as np
import torch
import wandb
from simsiam.models import get_resnet
from simsiam.metrics import get_accuracy
from simsiam.optimizer import get_optimizer, get_scheduler
| 33.531646 | 123 | 0.645527 |
14da4fb90332f13ce9a537a25767a0c5d2699a55 | 5,568 | py | Python | app/auth/routes.py | Jumballaya/save-energy-tx | 1aa75cfdabe169c05f845cd47e477560f5319883 | [
"FSFAP"
] | null | null | null | app/auth/routes.py | Jumballaya/save-energy-tx | 1aa75cfdabe169c05f845cd47e477560f5319883 | [
"FSFAP"
] | 7 | 2021-03-09T00:51:13.000Z | 2022-03-11T23:40:46.000Z | app/auth/routes.py | Jumballaya/save-energy-tx | 1aa75cfdabe169c05f845cd47e477560f5319883 | [
"FSFAP"
] | 1 | 2019-03-20T16:58:23.000Z | 2019-03-20T16:58:23.000Z | from flask import render_template, redirect, url_for, flash
from flask_login import current_user, login_user, logout_user
from sqlalchemy import func
import stripe
from app import db
from app.auth import bp
from app.auth.forms import LoginForm, RegistrationForm, ResetPasswordRequestForm, ResetPasswordForm
from app.models.user import User
from app.auth.email import send_password_reset_email, send_verification_email
# Login route
# Logout route
# Register
# Verify Email
# Reset Password Request
# Reset Password with token
| 36.155844 | 116 | 0.681214 |
14de090b5ed8c8188f4a83029df00bd8928fb8be | 607 | py | Python | rtcloud/ui.py | Brainiak/rtcloud | 43c7525c9a9be12d33426b24fac353dc4d92c35a | [
"Apache-2.0"
] | null | null | null | rtcloud/ui.py | Brainiak/rtcloud | 43c7525c9a9be12d33426b24fac353dc4d92c35a | [
"Apache-2.0"
] | 43 | 2017-11-16T22:05:42.000Z | 2017-12-12T16:20:04.000Z | rtcloud/ui.py | Brainiak/rtcloud | 43c7525c9a9be12d33426b24fac353dc4d92c35a | [
"Apache-2.0"
] | 1 | 2017-11-26T15:42:02.000Z | 2017-11-26T15:42:02.000Z | from nilearn import plotting
from IPython import display
| 30.35 | 63 | 0.642504 |
14de21cf53b113f2413b7d529932853ff2790fae | 2,420 | py | Python | demo.py | allenjhuang/rsys_api | 41bc05fbeda5b5c76232a548aa16d33d05bfa8e4 | [
"Unlicense"
] | null | null | null | demo.py | allenjhuang/rsys_api | 41bc05fbeda5b5c76232a548aa16d33d05bfa8e4 | [
"Unlicense"
] | null | null | null | demo.py | allenjhuang/rsys_api | 41bc05fbeda5b5c76232a548aa16d33d05bfa8e4 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
import config
import rsys_api
import secrets
import json
import logging
import sys
if __name__ == '__main__':
main()
| 30.632911 | 73 | 0.605372 |
14debfd1d4eddfbeadc1ea54fc7d19ccc2df866b | 4,377 | py | Python | algorithms/common/runner.py | Fluidy/twc2020 | 0c65ab3508675a81e3edc831e45d59729dab159d | [
"MIT"
] | 1 | 2021-09-05T01:56:45.000Z | 2021-09-05T01:56:45.000Z | algorithms/common/runner.py | Fluidy/twc2020 | 0c65ab3508675a81e3edc831e45d59729dab159d | [
"MIT"
] | null | null | null | algorithms/common/runner.py | Fluidy/twc2020 | 0c65ab3508675a81e3edc831e45d59729dab159d | [
"MIT"
] | null | null | null | from utils import save_params, load_params
from importlib import import_module
from environments.env import Env
def run(algorithm_name, exp_name, env_name, agent_params, train_params, use_ray, use_gpu, is_train,
num_runs=None, test_run_id=None, test_model_id=None):
"""
Runner for training or testing DRL algorithms
"""
exp_dir = 'experiments/' + exp_name
if use_ray:
try:
import ray
ray.init(num_cpus=train_params['num_cpus'], num_gpus=1)
except ImportError:
ray = None
use_ray = 0
print('Ray is not installed. I will run in serial training/testing mode.')
"""
Import DRL agent and training function according to algorithm_name
"""
if algorithm_name in ['ddpg', 'ddpg_pds', 'td3', 'td3_pds']:
train = import_module('algorithms.ddpg.train').train
if algorithm_name == 'ddpg':
Agent = import_module('algorithms.ddpg.agent').DDPGAgent
elif algorithm_name == 'ddpg_pds':
Agent = import_module('algorithms.ddpg_pds.agent').PDSDDPGAgent
elif algorithm_name == 'td3':
Agent = import_module('algorithms.td3.agent').TD3Agent
else:
Agent = import_module('algorithms.td3_pds.agent').PDSTD3Agent
elif algorithm_name in ['qprop', 'qprop_pds']:
train = import_module('algorithms.qprop.train').train
if algorithm_name == 'qprop':
Agent = import_module('algorithms.qprop.agent').QPropAgent
else:
Agent = import_module('algorithms.qprop_pds.agent').PDSQPropAgent
elif algorithm_name in ['preplan', 'perfect']:
train = None
Agent = import_module('algorithms.preplan.agent').PrePlanAgent
elif algorithm_name == 'non_predictive':
train = None
Agent = import_module('algorithms.non_predictive.agent').NonPredictiveAgent
else:
print('Unsupported algorithm')
return
if is_train:
"""
Training
"""
env_params = import_module('environments.' + env_name).env_params
# Save all the experiment settings to a json file
save_params([agent_params, train_params, env_params], exp_dir, 'exp_config')
# Create environment
env = Env(env_params)
if use_ray:
# Parallel training
train = ray.remote(train)
train_op = [train.remote(env, Agent, agent_params, train_params, exp_dir, run_id, use_gpu=use_gpu)
for run_id in range(num_runs)]
ray.get(train_op)
else:
# Serial training
[train(env, Agent, agent_params, train_params, exp_dir, run_id, use_gpu=use_gpu)
for run_id in range(num_runs)]
else:
"""
Testing
"""
# Get test set path
test_set_dir = 'data/' + env_name
# Load agent and env parameters from exp_dir
env_params = load_params('data/' + env_name, 'env_config')
if algorithm_name != 'perfect':
if algorithm_name == 'preplan':
env_params_train = load_params(exp_dir, 'env_config')
elif algorithm_name == 'non_predictive':
env_params_train = env_params
else:
agent_params, _, env_params_train = load_params(exp_dir, 'exp_config')
if env_params_train != env_params:
print('Warning: Testing and training env settings do not match!')
# Create environment
env = Env(env_params)
# Import testing function
test = import_module('algorithms.common.test').test
if use_ray:
# Parallel testing
test = ray.remote(test)
test_op = [test.remote(env, Agent, agent_params, exp_dir, run_id, model_id,
test_set_dir=test_set_dir, use_gpu=use_gpu)
for run_id in test_run_id for model_id in test_model_id]
ray.get(test_op)
else:
# Serial testing
[test(env, Agent, agent_params, exp_dir, run_id, model_id,
test_set_dir=test_set_dir, use_gpu=use_gpu)
for run_id in test_run_id for model_id in test_model_id]
| 39.432432 | 111 | 0.59767 |
14dfa0d9c76706f000826c67f074640fd5155034 | 679 | py | Python | src/database/conn.py | ninaamorim/sentiment-analysis-2018-president-election | a5c12f1b659186edbc2dfa916bc82a2cfa2dd67f | [
"MIT"
] | 39 | 2018-09-05T14:42:05.000Z | 2021-09-24T20:21:56.000Z | src/database/conn.py | ninaamorim/sentiment-analysis-2018-president-election | a5c12f1b659186edbc2dfa916bc82a2cfa2dd67f | [
"MIT"
] | null | null | null | src/database/conn.py | ninaamorim/sentiment-analysis-2018-president-election | a5c12f1b659186edbc2dfa916bc82a2cfa2dd67f | [
"MIT"
] | 11 | 2018-12-07T19:43:44.000Z | 2021-05-21T21:54:43.000Z | from decouple import config
from peewee import SqliteDatabase
from playhouse.pool import PooledSqliteExtDatabase, PooledPostgresqlExtDatabase
# db = SqliteDatabase(config('DATABASE_PATH', default='sentiment_analysis.db'))
db = PooledSqliteExtDatabase(
config('DATABASE_PATH', default='sentiment_analysis.db'),
pragmas=[('journal_mode', 'wal')],
max_connections=50,
stale_timeout=3600,
check_same_thread=False)
# Caso utilize-se do postgresql como banco de dados
# db = PooledPostgresqlExtDatabase(
# 'database',
# max_connections=32,
# stale_timeout=300, # 5 minutes.
# host='localhost',
# user='username',
# password='password')
| 30.863636 | 79 | 0.733432 |
14e0f7d00154bf2e7af79e4ad4be7d9c4b233cd5 | 347 | py | Python | src/server/main.py | IsaacLean/project-owl | ba1b995f28abe461d40af5884d974bee15e0625f | [
"MIT"
] | 1 | 2018-10-23T01:42:14.000Z | 2018-10-23T01:42:14.000Z | src/server/main.py | IsaacLean/project-owl | ba1b995f28abe461d40af5884d974bee15e0625f | [
"MIT"
] | 1 | 2015-10-03T18:26:42.000Z | 2015-10-03T18:26:42.000Z | src/server/main.py | IsaacLean/project-owl | ba1b995f28abe461d40af5884d974bee15e0625f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import webapp2
from pkg.controllers.transactionctrl import TransactionCtrl
from pkg.controllers.appctrl import AppCtrl
from pkg.controllers.debug import Debug
app = webapp2.WSGIApplication([
('/transaction', TransactionCtrl),
('/transaction/([0-9]+)', TransactionCtrl),
('/', AppCtrl),
('/debug', Debug)
], debug=True)
| 24.785714 | 59 | 0.746398 |
14e118dd6032aaabd75d35019107d6e409ebb6bc | 875 | py | Python | login/middleWare/auth.py | csk17k/WebPanel | fdb0ae1b2fd12d006fbca65c779369e2d3d62928 | [
"Apache-2.0"
] | null | null | null | login/middleWare/auth.py | csk17k/WebPanel | fdb0ae1b2fd12d006fbca65c779369e2d3d62928 | [
"Apache-2.0"
] | null | null | null | login/middleWare/auth.py | csk17k/WebPanel | fdb0ae1b2fd12d006fbca65c779369e2d3d62928 | [
"Apache-2.0"
] | 1 | 2021-06-24T13:38:23.000Z | 2021-06-24T13:38:23.000Z | import re
from django.conf import settings
from django.shortcuts import redirect
from django.http import HttpResponseRedirect
EXEMPT_URLS=[]
if hasattr(settings,'LOGIN_EXEMPT_URLS'):
EXEMPT_URLS+=[re.compile(url) for url in settings.LOGIN_EXEMPT_URLS]
| 31.25 | 69 | 0.76 |
14e28f82f57d04fe78acc078756343daa686d910 | 579 | py | Python | tests/domain/entities/metadata_test.py | keigohtr/autify-web-scraper | 007ed78c461b31007328b5560957278856908979 | [
"Apache-2.0"
] | null | null | null | tests/domain/entities/metadata_test.py | keigohtr/autify-web-scraper | 007ed78c461b31007328b5560957278856908979 | [
"Apache-2.0"
] | null | null | null | tests/domain/entities/metadata_test.py | keigohtr/autify-web-scraper | 007ed78c461b31007328b5560957278856908979 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime, timedelta, timezone
import freezegun
from autifycli.domain.entities.metadata import Metadata
JST = timezone(timedelta(hours=+9), "JST")
| 25.173913 | 55 | 0.716753 |
14e2f68640f152f69f9e7b649672501b2bacc025 | 128 | py | Python | demeter/admin/model/__load__.py | shemic/demeter | 01f91aac43c325c48001dda86af17da43fb8d6fe | [
"MIT"
] | 1 | 2017-12-05T08:17:53.000Z | 2017-12-05T08:17:53.000Z | demos/helloworld/model/__load__.py | shemic/demeter | 01f91aac43c325c48001dda86af17da43fb8d6fe | [
"MIT"
] | null | null | null | demos/helloworld/model/__load__.py | shemic/demeter | 01f91aac43c325c48001dda86af17da43fb8d6fe | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
demeter database
name:__load__.py
"""
from demeter.model import *
from demeter.core import * | 18.285714 | 27 | 0.640625 |
14e532fb903c9f210c1888329335296a3e6816c7 | 148 | py | Python | rheem-build-parent/rheem/rheem-api-python/pyrheem/graph/Visitor.py | DLUTLiuFengyi/rheem-integration | 34c437d73761ab44b4c6b7dbd5cab91875f0933a | [
"Apache-2.0"
] | null | null | null | rheem-build-parent/rheem/rheem-api-python/pyrheem/graph/Visitor.py | DLUTLiuFengyi/rheem-integration | 34c437d73761ab44b4c6b7dbd5cab91875f0933a | [
"Apache-2.0"
] | null | null | null | rheem-build-parent/rheem/rheem-api-python/pyrheem/graph/Visitor.py | DLUTLiuFengyi/rheem-integration | 34c437d73761ab44b4c6b7dbd5cab91875f0933a | [
"Apache-2.0"
] | null | null | null | import abc
| 21.142857 | 60 | 0.709459 |
14e6bab7bbc2ab9b9311ff6dc777217f71212f04 | 8,217 | py | Python | src/tie/__init__.py | shadowbq/opendxl-arctic-phase | 730a60d7e81c843115c341cb48225a30af001996 | [
"Apache-2.0"
] | 1 | 2019-07-24T14:48:06.000Z | 2019-07-24T14:48:06.000Z | src/tie/__init__.py | shadowbq/opendxl-arctic-phase | 730a60d7e81c843115c341cb48225a30af001996 | [
"Apache-2.0"
] | 1 | 2018-02-20T03:11:21.000Z | 2018-02-20T03:11:21.000Z | src/tie/__init__.py | shadowbq/opendxl-arctic-phase | 730a60d7e81c843115c341cb48225a30af001996 | [
"Apache-2.0"
] | null | null | null | # TIE Methods
import utils
from dxltieclient import TieClient
from dxltieclient.constants import HashType, ReputationProp, FileProvider, FileEnterpriseAttrib, \
CertProvider, CertEnterpriseAttrib, TrustLevel
# TIE Reputation Average Map
tiescoreMap = {0: 'Not Set', 1: 'Known Malicious', 15: 'Most Likely Malicious', 30: 'Might Be Malicious', 50: 'Unknown',
70: "Might Be Trusted", 85: "Most Likely Trusted", 99: "Known Trusted", 100: "Known Trusted Installer"}
# TIE Provider Map
providerMap = {1: 'GTI', 3: 'Enterprise Reputation', 5: 'ATD', 7: "MWG"}
#TODO: rename this to TieSample
## Debug functions
def __printTIE(reputations_dict):
# Display the Global Threat Intelligence (GTI) trust level for the file
if FileProvider.GTI in reputations_dict:
gti_rep = reputations_dict[FileProvider.GTI]
print "Global Threat Intelligence (GTI) trust level: " + \
str(gti_rep[ReputationProp.TRUST_LEVEL])
# Display the Enterprise reputation information
if FileProvider.ENTERPRISE in reputations_dict:
ent_rep = reputations_dict[FileProvider.ENTERPRISE]
print "Threat Intelligence Exchange (Local) trust level: " + \
str(ent_rep[ReputationProp.TRUST_LEVEL])
# Retrieve the enterprise reputation attributes
ent_rep_attribs = ent_rep[ReputationProp.ATTRIBUTES]
# Display prevalence (if it exists)
if FileEnterpriseAttrib.PREVALENCE in ent_rep_attribs:
print "Enterprise prevalence: " + \
ent_rep_attribs[FileEnterpriseAttrib.PREVALENCE]
# Display first contact date (if it exists)
if FileEnterpriseAttrib.FIRST_CONTACT in ent_rep_attribs:
print "First contact: " + \
FileEnterpriseAttrib.to_localtime_string(
ent_rep_attribs[FileEnterpriseAttrib.FIRST_CONTACT])
if FileProvider.ATD in reputations_dict:
atd_rep = reputations_dict[FileProvider.ATD]
print "ATD (sandbox) trust level: " + \
str(atd_rep[ReputationProp.TRUST_LEVEL])
if FileProvider.MWG in reputations_dict:
mwg_rep = reputations_dict[FileProvider.MWG]
print "MWG (WebGatewayy) trust level: " + \
str(mwg_rep[ReputationProp.TRUST_LEVEL])
| 41.5 | 120 | 0.643909 |
14e8b8ee0a1f85b70e2cc66661f3d254f3aee85e | 3,720 | py | Python | keepthis/KeepThis.py | puhoshville/keepthis | 70447ec367b78caba03c302470f591df2dcc1e7e | [
"MIT"
] | 4 | 2020-02-18T12:29:29.000Z | 2020-11-12T10:19:37.000Z | keepthis/KeepThis.py | puhoshville/keepthis | 70447ec367b78caba03c302470f591df2dcc1e7e | [
"MIT"
] | 79 | 2019-12-26T14:00:11.000Z | 2022-03-18T02:20:45.000Z | keepthis/KeepThis.py | puhoshville/keepthis | 70447ec367b78caba03c302470f591df2dcc1e7e | [
"MIT"
] | 3 | 2019-09-25T22:47:25.000Z | 2019-10-03T15:07:36.000Z | import hashlib
import json
import numpy as np
import pandas as pd
from pymemcache import serde
from pymemcache.client import base
from keepthis.MemcachedConnection import MemcachedConnection
from keepthis.exceptions import KeepThisValueError
| 32.631579 | 92 | 0.608065 |
14ec81da7a7909c65783eff82c284b4266341daf | 1,016 | py | Python | sbx_bgsvc_starterpack/sbx_cfg.py | parkssie/sbx-bgsvc-starterpack | 9f2cb80cc677b9ab73cbf085a910d30c40194449 | [
"MIT"
] | null | null | null | sbx_bgsvc_starterpack/sbx_cfg.py | parkssie/sbx-bgsvc-starterpack | 9f2cb80cc677b9ab73cbf085a910d30c40194449 | [
"MIT"
] | null | null | null | sbx_bgsvc_starterpack/sbx_cfg.py | parkssie/sbx-bgsvc-starterpack | 9f2cb80cc677b9ab73cbf085a910d30c40194449 | [
"MIT"
] | null | null | null | import json
from pathlib import Path
from sbx_bgsvc_starterpack.sbx_json_default import json_default
| 31.75 | 100 | 0.662402 |
14edc23ecedc5fce9202c1d0ece77446d5db16e6 | 7,804 | py | Python | lsp_shiloh/common/scan/aspscan/random_scan.py | internaru/Pinetree_P | 1f1525454c8b20c6c589529ff4bc159404611297 | [
"FSFAP"
] | null | null | null | lsp_shiloh/common/scan/aspscan/random_scan.py | internaru/Pinetree_P | 1f1525454c8b20c6c589529ff4bc159404611297 | [
"FSFAP"
] | null | null | null | lsp_shiloh/common/scan/aspscan/random_scan.py | internaru/Pinetree_P | 1f1525454c8b20c6c589529ff4bc159404611297 | [
"FSFAP"
] | null | null | null | #!/usr/bin/python
#
# ============================================================================
# Copyright (c) 2011 Marvell International, Ltd. All Rights Reserved
#
# Marvell Confidential
# ============================================================================
#
# Run a random scan. Random color/mono, random DPI, random area (subject to
# constraints).
# Written to do overnight testing.
# davep 6-Mar-2007
import sys
import random
import time
import getopt
import scan
dpi_range = ( 75, 1200 )
#dpi_choices= ( 75, 100, 150, 200, 300 )
dpi_choices= ( 300, 600, 1200 )
#valid_scan_types = ( "color", "mono" )
valid_scan_types = ( "rgbx", "xrgb", "rgb", "color", "mono" )
x_area_range = ( 0, 850 )
y_area_range = ( 0, 1169 )
#y_area_range = ( 0, 1100 )
area_min = 100
# fraction: scale = [0]/[1]
min_scale = ( 1, 16 )
max_scale = ( 8, 1 )
# davep 02-Apr-2009 ; allow option to disable random scaling for platforms that
# don't support scaler (e.g., ICE Lite color scaling broken)
use_random_scale = True
# return random.randint( dpi_range[0], dpi_range[1] )
if __name__ == '__main__' :
main()
| 29.673004 | 94 | 0.580984 |
14ededd86abda0dc6be68373dfe57be0e413a26e | 10,880 | py | Python | pyi_updater/client/patcher.py | rsumner31/PyUpdater1 | d9658000472e57453267ee8fa174ae914dd8d33c | [
"BSD-2-Clause"
] | null | null | null | pyi_updater/client/patcher.py | rsumner31/PyUpdater1 | d9658000472e57453267ee8fa174ae914dd8d33c | [
"BSD-2-Clause"
] | null | null | null | pyi_updater/client/patcher.py | rsumner31/PyUpdater1 | d9658000472e57453267ee8fa174ae914dd8d33c | [
"BSD-2-Clause"
] | null | null | null | # --------------------------------------------------------------------------
# Copyright 2014 Digital Sapphire Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
import logging
import os
try:
import bsdiff4
except ImportError:
bsdiff4 = None
from pyi_updater.client.downloader import FileDownloader
from pyi_updater.exceptions import PatcherError
from pyi_updater import settings
from pyi_updater.utils import (get_package_hashes,
EasyAccessDict,
lazy_import,
Version)
if bsdiff4 is None:
from pyi_updater.utils import bsdiff4_py as bsdiff4
log = logging.getLogger(__name__)
platform_ = jms_utils.system.get_system()
| 36.881356 | 79 | 0.583732 |
14eebabc8dc87995f7ccb85df841dd281ddfc7b5 | 649 | py | Python | app/recommendations/forms.py | ExiledNarwal28/glo-2005-project | 3b5b5f9cdcfe53d1e6e702609587068c4bd3310d | [
"MIT"
] | null | null | null | app/recommendations/forms.py | ExiledNarwal28/glo-2005-project | 3b5b5f9cdcfe53d1e6e702609587068c4bd3310d | [
"MIT"
] | null | null | null | app/recommendations/forms.py | ExiledNarwal28/glo-2005-project | 3b5b5f9cdcfe53d1e6e702609587068c4bd3310d | [
"MIT"
] | 1 | 2020-05-21T10:07:07.000Z | 2020-05-21T10:07:07.000Z | from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, SelectField
from wtforms.validators import DataRequired, Length
| 36.055556 | 99 | 0.707242 |
14eee5ee3d7b6b1d697c697b8f6b60cc9529087d | 3,090 | py | Python | tests/test_absort.py | MapleCCC/ABSort | fa020d7f2d6025603910c12fdfe775922d33afbc | [
"MIT"
] | null | null | null | tests/test_absort.py | MapleCCC/ABSort | fa020d7f2d6025603910c12fdfe775922d33afbc | [
"MIT"
] | null | null | null | tests/test_absort.py | MapleCCC/ABSort | fa020d7f2d6025603910c12fdfe775922d33afbc | [
"MIT"
] | null | null | null | from __future__ import annotations
import ast
import os
import re
import sys
from itertools import product
from pathlib import Path
import attr
from hypothesis import given, settings
from hypothesis.strategies import sampled_from
from absort.__main__ import (
CommentStrategy,
FormatOption,
NameRedefinition,
SortOrder,
absort_str,
)
from absort.ast_utils import ast_deep_equal
from absort.utils import constantfunc, contains
from .strategies import products
# Use third-party library hypothesmith to generate random valid Python source code, to
# conduct property-based testing on the absort*() interface.
# The guy who use such tool to test on black library and CPython stdlib and report issues is Zac-HD (https://github.com/Zac-HD).
STDLIB_DIR = Path(sys.executable).with_name("Lib")
# Reference: https://docs.travis-ci.com/user/environment-variables/#default-environment-variables
if os.getenv("CI") and os.getenv("TRAVIS"):
py_version = os.getenv("TRAVIS_PYTHON_VERSION")
assert py_version
# Reference: https://docs.travis-ci.com/user/languages/python/#python-versions
# Reference: https://docs.travis-ci.com/user/languages/python/#development-releases-support
py_version_num = re.fullmatch(r"(?P<num>[0-9.]+)(?:-dev)?", py_version).group("num")
STDLIB_DIR = Path(f"/opt/python/{py_version}/lib/python{py_version_num}/")
TEST_FILES = list(STDLIB_DIR.rglob("*.py"))
all_comment_strategies = list(CommentStrategy)
all_format_options = [
FormatOption(*p) # type: ignore
for p in product(*([(True, False)] * len(attr.fields(FormatOption))))
]
all_sort_orders = list(SortOrder)
arg_options = constantfunc(
products(all_comment_strategies, all_format_options, all_sort_orders).map(
Option.from_tuple
)
)
# TODO add unit test for absort_file()
# TODO add unit test for absort_files()
| 30 | 128 | 0.726537 |
14ef95586e2cc40aadbf1094d06743d8533ef65a | 4,593 | py | Python | BrickBreaker/brick_breaker.py | Urosh91/BrickBreaker | 527564eb7fbab31e215a60ca8d46843a5a13791b | [
"MIT"
] | null | null | null | BrickBreaker/brick_breaker.py | Urosh91/BrickBreaker | 527564eb7fbab31e215a60ca8d46843a5a13791b | [
"MIT"
] | null | null | null | BrickBreaker/brick_breaker.py | Urosh91/BrickBreaker | 527564eb7fbab31e215a60ca8d46843a5a13791b | [
"MIT"
] | null | null | null | import pygame
from BrickBreaker import *
from BrickBreaker.Scenes import *
from BrickBreaker.Shared import *
# Script entry point: construct the game and start its main loop.
if __name__ == '__main__':
    BrickBreaker().start()
| 28.886792 | 95 | 0.609188 |
14f0031f20c1d451293a9e4ffe1e1cb773cf31df | 57 | py | Python | flyeye/dynamics/__init__.py | sbernasek/flyeye | 95be4c6b52785d5ff3d0c68362308cb0fd1e8ae8 | [
"MIT"
] | 2 | 2020-02-22T09:53:17.000Z | 2020-02-24T19:02:01.000Z | flyeye/dynamics/__init__.py | sbernasek/flyeye | 95be4c6b52785d5ff3d0c68362308cb0fd1e8ae8 | [
"MIT"
] | 1 | 2019-11-20T17:11:07.000Z | 2019-11-20T17:11:07.000Z | flyeye/dynamics/__init__.py | sebastianbernasek/flyeye | 95be4c6b52785d5ff3d0c68362308cb0fd1e8ae8 | [
"MIT"
] | null | null | null | from .visualization import plot_mean, plot_mean_interval
| 28.5 | 56 | 0.877193 |
14f0fe0a265ae04fc3df046e751c6650ca481d2f | 2,188 | py | Python | mow/strong/phase2/predict.py | tychen5/Audio_Tagging_Challenge | 4602400433d37958d95ebf40a3c0798d17cc53c6 | [
"MIT"
] | 3 | 2019-01-22T03:14:32.000Z | 2019-08-17T02:22:06.000Z | mow/strong/phase2/predict.py | tychen5/Audio_Tagging_Challenge | 4602400433d37958d95ebf40a3c0798d17cc53c6 | [
"MIT"
] | null | null | null | mow/strong/phase2/predict.py | tychen5/Audio_Tagging_Challenge | 4602400433d37958d95ebf40a3c0798d17cc53c6 | [
"MIT"
] | null | null | null | '''
###################################
Modified from Mike's predict_acc.py
###################################
'''
import os
import sys
import random
import pickle
import numpy as np
import pandas as pd
from keras.utils import to_categorical
from keras.models import load_model
from sklearn.metrics import accuracy_score
# Class-name <-> integer-label mappings pickled during training.
with open('map.pkl', 'rb') as f:
    map_dict = pickle.load(f)
with open('map_reverse.pkl', 'rb') as f:
    map_reverse = pickle.load(f)

# Training labels: map each class name to its integer id.
Y_train = pd.read_csv('/tmp2/b03902110/phase2/data/train_label.csv')
Y_dict = Y_train['label'].map(map_dict)
Y_dict = np.array(Y_dict)
print(Y_dict.shape)
print(Y_dict)
Y_fname_train = Y_train['fname'].tolist()

# Test-set file names, taken from the sample submission template.
Y_test = pd.read_csv('./sample_submission.csv')
Y_fname_test = Y_test['fname'].tolist()

# One-hot encode the training labels (41 classes).
Y_all = []
for i in Y_dict:
    Y_all.append(to_categorical(i, num_classes=41))
Y_all = np.array(Y_all)
print(Y_all)
print(Y_all.shape)

# Standardise features using training-set statistics only (no test leakage).
X_train = np.load('/tmp2/b03902110/phase2/data/X_train.npy')
X_test = np.load('/tmp2/b03902110/phase2/data/X_test.npy')
mean = np.mean(X_train, axis=0)
std = np.std(X_train, axis=0)
X_train = (X_train - mean) / std
X_test = (X_test - mean) / std

# Command line: argv[1] = model file name, argv[2] = zero-based fold number.
base = '/tmp2/b03902110/newphase2'
modelbase = os.path.join(base, '10_fold_model')
name = sys.argv[1]
fold_num = int(sys.argv[2])
filename = os.path.join(modelbase, name)

# Validation fold for this model (fold files on disk are one-based).
X_val = np.load('/tmp2/b03902110/newphase1/data/X/X{}.npy'.format(fold_num+1))
X_val = (X_val - mean) / std
Y_val = np.load('/tmp2/b03902110/newphase1/data/y/y{}.npy'.format(fold_num+1))

# Output directories for raw (npy) and submission-style (csv) predictions.
npy_predict = os.path.join(base, 'npy_predict')
if not os.path.exists(npy_predict):
    os.makedirs(npy_predict)
csv_predict = os.path.join(base, 'csv_predict')
if not os.path.exists(csv_predict):
    os.makedirs(csv_predict)

# Evaluate on the held-out fold, then predict the test set and save results.
model = load_model(filename)
print('Evaluating {}'.format(name))
score = model.evaluate(X_val, Y_val)
print(score)
print('Predicting X_test...')
result = model.predict(X_test)
np.save(os.path.join(npy_predict, 'mow_cnn2d_semi_test_{}.npy'.format(fold_num+1)), result)
df = pd.DataFrame(result)
df.insert(0, 'fname', Y_fname_test)
df.to_csv(os.path.join(csv_predict, 'mow_cnn2d_semi_test_{}.csv'.format(fold_num+1)), index=False, header=True)
14f1a8447efc963a4a6ad15b82d5aee9bf59542f | 4,408 | py | Python | tests/test_date_utils.py | rob-blackbourn/aiofix | 2a07822e07414c1ea850708d7660c16a0564c21d | [
"Apache-2.0"
] | 1 | 2021-03-25T21:52:36.000Z | 2021-03-25T21:52:36.000Z | tests/test_date_utils.py | rob-blackbourn/jetblack-fixengine | 2a07822e07414c1ea850708d7660c16a0564c21d | [
"Apache-2.0"
] | null | null | null | tests/test_date_utils.py | rob-blackbourn/jetblack-fixengine | 2a07822e07414c1ea850708d7660c16a0564c21d | [
"Apache-2.0"
] | null | null | null | """Tests for date utils"""
from datetime import time, datetime
import pytz
from jetblack_fixengine.utils.date_utils import (
is_dow_in_range,
is_time_in_range,
delay_for_time_period
)
MONDAY = 0
TUESDAY = 1
WEDNESDAY = 2
THURSDAY = 3
FRIDAY = 4
SATURDAY = 5
SUNDAY = 6
def test_dow_range():
    """Check is_dow_in_range over plain, wrapping and single-day ranges."""
    # (range start, range end, day under test, expected membership) --
    # listed in the same order as the original individual assertions.
    cases = [
        (MONDAY, FRIDAY, MONDAY, True),
        (MONDAY, FRIDAY, WEDNESDAY, True),
        (MONDAY, FRIDAY, FRIDAY, True),
        (MONDAY, FRIDAY, SATURDAY, False),
        (TUESDAY, THURSDAY, MONDAY, False),
        (TUESDAY, THURSDAY, FRIDAY, False),
        (WEDNESDAY, WEDNESDAY, WEDNESDAY, True),
        (WEDNESDAY, WEDNESDAY, TUESDAY, False),
        (WEDNESDAY, WEDNESDAY, THURSDAY, False),
        (FRIDAY, TUESDAY, FRIDAY, True),
        (FRIDAY, TUESDAY, SUNDAY, True),
        (FRIDAY, TUESDAY, TUESDAY, True),
        (FRIDAY, TUESDAY, THURSDAY, False),
        (SATURDAY, SUNDAY, MONDAY, False),
    ]
    for start_dow, end_dow, dow, expected in cases:
        assert bool(is_dow_in_range(start_dow, end_dow, dow)) is expected
def test_time_range():
    """Check is_time_in_range at the boundaries, inside, and outside the window."""
    inside = [
        (time(0, 0, 0), time(17, 30, 0), time(0, 0, 0)),
        (time(0, 0, 0), time(17, 30, 0), time(12, 0, 0)),
        (time(0, 0, 0), time(17, 30, 0), time(17, 30, 0)),
    ]
    outside = [
        (time(0, 0, 0), time(17, 30, 0), time(20, 0, 0)),
        (time(9, 30, 0), time(17, 30, 0), time(0, 0, 0)),
    ]
    for start_time, end_time, value in inside:
        assert is_time_in_range(start_time, end_time, value)
    for start_time, end_time, value in outside:
        assert not is_time_in_range(start_time, end_time, value)
def test_seconds_for_period():
    """Check delay_for_time_period for windows that do and do not wrap
    midnight, and across a pytz daylight-saving transition."""
    # now=6am, start=8am, end=4pm
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 1, 1, 6, 0, 0),
        time(8, 0, 0),
        time(16, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 2
    assert end_datetime == datetime(2019, 1, 1, 16, 0, 0)
    # now=10am, start=8am, end=4pm (already inside the window: no wait)
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 1, 1, 10, 0, 0),
        time(8, 0, 0),
        time(16, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 0
    assert end_datetime == datetime(2019, 1, 1, 16, 0, 0)
    # now=6pm, start=8am, end=4pm (window missed: wait until tomorrow)
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 1, 1, 18, 0, 0),
        time(8, 0, 0),
        time(16, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 14
    assert end_datetime == datetime(2019, 1, 2, 16, 0, 0)
    # now=6pm, start=8pm, end=4am (window wraps midnight)
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 1, 1, 18, 0, 0),
        time(20, 0, 0),
        time(4, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 2
    assert end_datetime == datetime(2019, 1, 2, 4, 0, 0)
    # now=10pm, start=8pm, end=4am
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 1, 1, 22, 0, 0),
        time(20, 0, 0),
        time(4, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 0
    assert end_datetime == datetime(2019, 1, 2, 4, 0, 0)
    # now=6am, start=8pm, end=4am
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 1, 1, 6, 0, 0),
        time(20, 0, 0),
        time(4, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 14
    assert end_datetime == datetime(2019, 1, 2, 4, 0, 0)
    london = pytz.timezone('Europe/London')
    # NOTE(review): constructing datetimes with tzinfo=<pytz zone> attaches the
    # zone's base (LMT) offset rather than the correct local offset; pytz
    # recommends tz.localize(...).  The assertions below encode the current
    # behaviour of delay_for_time_period under this usage -- confirm before
    # changing either side.
    # now=6pm, start=8pm, end=4am, London clocks go forward this night.
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 3, 31, 18, 0, 0, tzinfo=london),
        time(20, 0, 0),
        time(4, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 2
    assert end_datetime == datetime(2019, 4, 1, 4, 0, 0, tzinfo=london)
    # now=10pm, start=8pm, end=4am
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 3, 31, 22, 0, 0, tzinfo=london),
        time(20, 0, 0),
        time(4, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 0
    assert end_datetime == datetime(2019, 4, 1, 4, 0, 0, tzinfo=london)
    # now=6am, start=8pm, end=4am
    time_to_wait, end_datetime = delay_for_time_period(
        datetime(2019, 3, 31, 6, 0, 0, tzinfo=london),
        time(20, 0, 0),
        time(4, 0, 0))
    assert time_to_wait.total_seconds() / 60 / 60 == 14
    assert end_datetime == datetime(2019, 4, 1, 4, 0, 0, tzinfo=london)
| 34.708661 | 79 | 0.639292 |
14f209a2e4864dbd925cb1c73c1a1f7110e0b62f | 400 | py | Python | xscratch/exceptions.py | gabaconrado/mainecoon | e8c9eb0c28ed874728315e386c9ec86dc06f1d7a | [
"Apache-2.0"
] | null | null | null | xscratch/exceptions.py | gabaconrado/mainecoon | e8c9eb0c28ed874728315e386c9ec86dc06f1d7a | [
"Apache-2.0"
] | null | null | null | xscratch/exceptions.py | gabaconrado/mainecoon | e8c9eb0c28ed874728315e386c9ec86dc06f1d7a | [
"Apache-2.0"
] | null | null | null | '''
xScratch exceptions
'''
| 16 | 69 | 0.6525 |
14f3c981162924e41ccbbaedac2e774e7979b26d | 2,267 | py | Python | environments/locomotion/scene_stadium.py | wx-b/unsup-3d-keypoints | 8a2e687b802d19b750aeadffa9bb6970f5956d4d | [
"MIT"
] | 28 | 2021-06-15T03:38:14.000Z | 2022-03-15T04:12:41.000Z | environments/locomotion/scene_stadium.py | wx-b/unsup-3d-keypoints | 8a2e687b802d19b750aeadffa9bb6970f5956d4d | [
"MIT"
] | 3 | 2021-12-25T17:57:47.000Z | 2022-03-24T09:52:43.000Z | environments/locomotion/scene_stadium.py | wx-b/unsup-3d-keypoints | 8a2e687b802d19b750aeadffa9bb6970f5956d4d | [
"MIT"
] | 5 | 2021-11-02T17:38:36.000Z | 2021-12-11T02:57:39.000Z | import os
import pybullet_data
from environments.locomotion.scene_abstract import Scene
import pybullet as p
| 42.773585 | 123 | 0.666961 |
14f46540bddbc3d9b12cae1ca8aeeee6d852e367 | 522 | py | Python | src/python/providers/movement/standard_move.py | daboross/dxnr | 8f73e9d5f4473b97dcfe05804a40c9a0826e51b6 | [
"MIT"
] | null | null | null | src/python/providers/movement/standard_move.py | daboross/dxnr | 8f73e9d5f4473b97dcfe05804a40c9a0826e51b6 | [
"MIT"
] | null | null | null | src/python/providers/movement/standard_move.py | daboross/dxnr | 8f73e9d5f4473b97dcfe05804a40c9a0826e51b6 | [
"MIT"
] | null | null | null | from defs import *
from utilities import warnings
| 27.473684 | 89 | 0.603448 |
14f8101f9071baa5ade2230825bde845717654bf | 4,817 | py | Python | pyshop/helpers/timeseries.py | sintef-energy/pyshop | 2991372f023e75c69ab83ece54a47fa9c3b73d60 | [
"MIT"
] | 1 | 2022-03-08T07:20:16.000Z | 2022-03-08T07:20:16.000Z | pyshop/helpers/timeseries.py | sintef-energy/pyshop | 2991372f023e75c69ab83ece54a47fa9c3b73d60 | [
"MIT"
] | 2 | 2022-02-09T13:53:16.000Z | 2022-03-16T14:36:21.000Z | pyshop/helpers/timeseries.py | sintef-energy/pyshop | 2991372f023e75c69ab83ece54a47fa9c3b73d60 | [
"MIT"
] | null | null | null | from typing import Dict, Sequence, Union
from .typing_annotations import DataFrameOrSeries
import pandas as pd
import numpy as np
def remove_consecutive_duplicates(df: DataFrameOrSeries) -> DataFrameOrSeries:
    """Keep only the first row of each run of consecutive duplicate rows.

    The timeseries is compared against a copy of itself shifted by one row,
    and a row is kept when at least one column differs from the previous row.
    The first row is always kept: comparing against the NaN row produced by
    ``shift`` never evaluates as equal.

    :param df: DataFrame or Series to compress; the original index is preserved.
    :return: the compressed DataFrame or Series.
    """
    if isinstance(df, pd.DataFrame):
        # axis=1: keep the row if *any* column changed relative to the previous
        # row.  Keyword form required: positional `axis` arguments were removed
        # from DataFrame.any in pandas 2.0.
        df = df.loc[(df.shift() != df).any(axis=1)]
    else:
        df = df.loc[df.shift() != df]
    return df
def resample_resolution(time: Dict, df: DataFrameOrSeries, delta: float, time_resolution: pd.Series) -> DataFrameOrSeries:
    """
    Resample timeseries when time resolution is non-constant.

    :param time: dict holding the 'starttime' and 'endtime' timestamps that
        bound the optimization horizon.
    :param df: datetime-indexed timeseries to resample; note its index is
        replaced with integer time-unit offsets as a side effect.
    :param delta: conversion factor from seconds to the model's time unit.
    :param time_resolution: per-time-unit resolution series returned from SHOP.
    :return: the resampled DataFrame or Series, indexed by time unit.
    """
    # Convert timeseries index to integers based on the time unit
    df.index = ((df.index - time['starttime']).total_seconds() * delta).astype(int)
    # Compress the time resolution returned from shop, by only keeping the
    # first of consecutive duplicate resolutions
    resolution_format = time_resolution.astype(int)
    compressed_resolution_format = remove_consecutive_duplicates(resolution_format)
    # Extract the different time resolutions and their respective time of
    # enactment.  Fix: Series.iteritems() was removed in pandas 2.0; .items()
    # is the long-standing equivalent.
    resolution_tuples = list(compressed_resolution_format.items())
    # Add a dummy time at the optimization end time to serve as a well defined bound
    resolution = resolution_tuples[-1][1]
    end_unit_index = int((time['endtime'] - time['starttime']).total_seconds() * delta)
    resolution_tuples.append((end_unit_index, resolution))
    # Build the resampled output, one segment per constant-resolution interval
    output_parts = []
    for i, res_tuple in enumerate(resolution_tuples[:-1]):
        unit_index, resolution = res_tuple
        next_unit_index = resolution_tuples[i+1][0]
        selection = df.iloc[unit_index:next_unit_index]
        # Normalize index
        # line below is commented out since it gives wrong result after concating output parts
        # selection.index = selection.index - unit_index
        # Resample by taking the mean of all datapoints in "resolution" sized windows
        selection = selection.rolling(window=resolution).mean().shift(-(resolution-1))
        # Extract the correct means from the rolling means
        selection = selection.iloc[::resolution]
        # Handle any remaining intervals that are less than "resolution" sized
        if (next_unit_index - unit_index) % resolution != 0:
            reduced_res = (next_unit_index - unit_index) % resolution
            last_selection_index = next_unit_index - reduced_res
            last_row = df.iloc[last_selection_index:next_unit_index].mean()
            if isinstance(df, pd.Series):
                last_row = pd.Series(index=[last_selection_index], data=[last_row])
            else:
                last_row = last_row.to_frame().T
                last_row.index = [last_selection_index]
            # Replace the last row, as this has been set to "nan" by the rolling mean
            selection = pd.concat([selection[:-1], last_row])
        output_parts.append(selection)
        # (A never-read `index` accumulator was removed here as dead code.)
    output_df = pd.concat(output_parts)
    return output_df
| 44.192661 | 166 | 0.686734 |
14f8a6f3308057e78995708d4b904e36cb6a06da | 841 | py | Python | hlsclt/classes.py | qarlosalberto/hlsclt | cc657b780aac3a617f48c1a80e263a6945f8b7c9 | [
"MIT"
] | 34 | 2017-07-03T09:56:11.000Z | 2022-03-22T02:03:27.000Z | hlsclt/classes.py | qarlosalberto/hlsclt | cc657b780aac3a617f48c1a80e263a6945f8b7c9 | [
"MIT"
] | 22 | 2017-06-18T03:49:02.000Z | 2021-10-06T12:41:09.000Z | hlsclt/classes.py | qarlosalberto/hlsclt | cc657b780aac3a617f48c1a80e263a6945f8b7c9 | [
"MIT"
] | 11 | 2018-06-02T04:38:26.000Z | 2021-06-10T11:57:27.000Z | # -*- coding: utf-8 -*-
""" Class definitions for the HLSCLT Command Line Tool.
Copyright (c) 2017 Ben Marshall
"""
# Generic error class
# Specific error class for local config file errors
# Class to hold application specific info within the Click context.
| 28.033333 | 88 | 0.699168 |
14f9e7e5dad9d0b30bd98785f713bf50cb29033e | 718 | py | Python | office_test_word/test_platform/core/BasePage.py | yag8009/office_test_team | edf06f3c0818b08ec39541bdcd04bcc537fc9ed1 | [
"MIT"
] | null | null | null | office_test_word/test_platform/core/BasePage.py | yag8009/office_test_team | edf06f3c0818b08ec39541bdcd04bcc537fc9ed1 | [
"MIT"
] | null | null | null | office_test_word/test_platform/core/BasePage.py | yag8009/office_test_team | edf06f3c0818b08ec39541bdcd04bcc537fc9ed1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
| 26.592593 | 58 | 0.739554 |
14f9fc6ca518d45dcde0a27042ff52217603dcff | 9,740 | py | Python | nex/parsing/utils.py | eddiejessup/nex | d61005aacb3b87f8cf1a1e2080ca760d757d5751 | [
"MIT"
] | null | null | null | nex/parsing/utils.py | eddiejessup/nex | d61005aacb3b87f8cf1a1e2080ca760d757d5751 | [
"MIT"
] | null | null | null | nex/parsing/utils.py | eddiejessup/nex | d61005aacb3b87f8cf1a1e2080ca760d757d5751 | [
"MIT"
] | null | null | null | import logging
from collections import deque
from ..tokens import BuiltToken
from ..utils import LogicError
from ..router import NoSuchControlSequence
from ..constants.instructions import Instructions
logger = logging.getLogger(__name__)
# Stuff specific to *my parsing*.
# Maps a character to the parser production name used for it when it appears
# as a non-active, case-irrelevant letter.
letter_to_non_active_uncased_type_map = {
    # For hex characters, need to look for the composite production, not the
    # terminal production, because could be, for example, 'A' or
    # 'NON_ACTIVE_UNCASED_A', so we should look for the composite production,
    # 'non_active_uncased_a'.
    'A': 'non_active_uncased_a',
    'B': 'non_active_uncased_b',
    'C': 'non_active_uncased_c',
    'D': 'non_active_uncased_d',
    'E': 'non_active_uncased_e',
    'F': 'non_active_uncased_f',
    # NOTE(review): lower-case 'a'-'f' use Instructions enum values while
    # upper-case 'A'-'F' above use literal production-name strings; presumably
    # the enum values resolve to the same strings, but confirm the asymmetry
    # is intentional.
    'a': Instructions.non_active_uncased_a.value,
    'b': Instructions.non_active_uncased_b.value,
    'c': Instructions.non_active_uncased_c.value,
    'd': Instructions.non_active_uncased_d.value,
    'e': Instructions.non_active_uncased_e.value,
    'f': Instructions.non_active_uncased_f.value,
    'g': Instructions.non_active_uncased_g.value,
    'h': Instructions.non_active_uncased_h.value,
    'i': Instructions.non_active_uncased_i.value,
    'j': Instructions.non_active_uncased_j.value,
    'k': Instructions.non_active_uncased_k.value,
    'l': Instructions.non_active_uncased_l.value,
    'm': Instructions.non_active_uncased_m.value,
    'n': Instructions.non_active_uncased_n.value,
    'o': Instructions.non_active_uncased_o.value,
    'p': Instructions.non_active_uncased_p.value,
    'q': Instructions.non_active_uncased_q.value,
    'r': Instructions.non_active_uncased_r.value,
    's': Instructions.non_active_uncased_s.value,
    't': Instructions.non_active_uncased_t.value,
    'u': Instructions.non_active_uncased_u.value,
    'v': Instructions.non_active_uncased_v.value,
    'w': Instructions.non_active_uncased_w.value,
    'x': Instructions.non_active_uncased_x.value,
    'y': Instructions.non_active_uncased_y.value,
    'z': Instructions.non_active_uncased_z.value,
    # Upper-case G-Z map to the same uncased productions as their lower-case
    # counterparts (case is irrelevant for these letters).
    'G': Instructions.non_active_uncased_g.value,
    'H': Instructions.non_active_uncased_h.value,
    'I': Instructions.non_active_uncased_i.value,
    'J': Instructions.non_active_uncased_j.value,
    'K': Instructions.non_active_uncased_k.value,
    'L': Instructions.non_active_uncased_l.value,
    'M': Instructions.non_active_uncased_m.value,
    'N': Instructions.non_active_uncased_n.value,
    'O': Instructions.non_active_uncased_o.value,
    'P': Instructions.non_active_uncased_p.value,
    'Q': Instructions.non_active_uncased_q.value,
    'R': Instructions.non_active_uncased_r.value,
    'S': Instructions.non_active_uncased_s.value,
    'T': Instructions.non_active_uncased_t.value,
    'U': Instructions.non_active_uncased_u.value,
    'V': Instructions.non_active_uncased_v.value,
    'W': Instructions.non_active_uncased_w.value,
    'X': Instructions.non_active_uncased_x.value,
    'Y': Instructions.non_active_uncased_y.value,
    'Z': Instructions.non_active_uncased_z.value,
}
# More generic utilities.
def wrap(pg, func, rule):
    """Register `func` as the handler for grammar `rule` on parser-generator `pg`.

    Equivalent to applying the `pg.production(rule)` decorator to `func` and
    returning the decorated result.
    """
    return pg.production(rule)(func)
end_tag = '$end'
def get_chunk(banisher, parser, initial=None):
    """Collect tokens from `banisher` until `parser` accepts a chunk.

    Input tokens are staged in a buffer, since a single banisher call may
    yield several tokens.  The terminal tokens consumed by the parse are
    stashed on the chunk as `_terminal_tokens`, so the composition done by
    the parser can be reversed later.  Tokens read past the chunk boundary
    are pushed back onto the banisher's input queue.
    """
    token_buffer = GetBuffer(getter=banisher.get_next_output_list,
                             initial=initial)
    # Delegate the actual parse-until-satisfied loop.
    chunk, consumed_terminals = _get_chunk(token_buffer, parser)
    # Keep the raw terminal tokens in case the composition must be undone.
    chunk._terminal_tokens = list(consumed_terminals)
    # Anything still buffered belongs to the next chunk: hand it back.
    if token_buffer.queue:
        logger.info(f"Cleaning up tokens on chunk grabber's buffer: {token_buffer.queue}")
        banisher.replace_tokens_on_input(token_buffer.queue)
    return chunk
def _get_chunk(input_queue, parser):
    """
    Return a chunk satisfying the objective of a `parser`, by collecting input
    tokens from `input_queue`.

    Returns a `(chunk, parse_queue)` tuple, where `parse_queue` is a deque of
    the terminal tokens consumed to build `chunk`.
    """
    # Get enough tokens to grab a parse-chunk. We know to stop adding tokens
    # when we see a switch from failing because we run out of tokens
    # (ExhaustedTokensError) to an actual syntax error (ParsingSyntaxError).
    # Want to extend the queue-to-be-parsed one token at a time,
    # so we can break as soon as we have all we need.
    parse_queue = deque()
    # We keep track of if we have parsed, just for checking for weird
    # situations.
    have_parsed = False
    while True:
        try:
            chunk = parser.parse(iter(parse_queue))
        # If we got a syntax error, this should mean we have spilled over
        # into parsing the next chunk.
        except ParsingSyntaxError as exc:
            # If we have already parsed a chunk, then we use this as our
            # result.
            if have_parsed:
                # We got one token of fluff due to extra read, to make the
                # parse queue not-parse. So put it back on the buffer.
                fluff_tok = parse_queue.pop()
                logger.debug(f'Replacing fluff token {fluff_tok} on to-parse queue.')
                input_queue.queue.appendleft(fluff_tok)
                logger.info(f'Got chunk "{chunk}", through failed parsing')
                return chunk, parse_queue
            # If we have not yet parsed, then something is wrong.
            else:
                # Annotate the exception with the offending token and chunk
                # so callers can report usefully.
                exc.bad_token = parse_queue[-1]
                exc.bad_chunk = parse_queue
                # Bug fix: this append was accidentally duplicated, adding the
                # token dump to the exception arguments twice.
                exc.args += (f'Tokens: {list(parse_queue)}',)
                raise
        except ExhaustedTokensError:
            # Carry on getting more tokens, because it seems we can.
            pass
        else:
            # In our modified version of rply, we annotate the
            # output token to indicate whether the only action from the
            # current parse state could be to end. In this case, we do not
            # bother adding another token, and just return the chunk.
            # This reduces the number of cases where we expand too far, and
            # must handle bad handling of the post-chunk tokens caused by
            # not acting on this chunk.
            if chunk._could_only_end:
                logger.info(f'Got chunk "{chunk}", through inevitability')
                return chunk, parse_queue
            have_parsed = True
        try:
            t = next(input_queue)
        except EOFError:
            # If we get an EOFError, and we have just started trying to
            # get a parse-chunk, we are done, so just propagate the
            # exception to wrap things up.
            if not parse_queue:
                raise
            # If we get an EOFError and we have already parsed, we need to
            # return this parse-chunk, then next time round we will be
            # done.
            elif have_parsed:
                logger.info(f'Got chunk "{chunk}", through end-of-file')
                return chunk, parse_queue
            # If we get to the end of the file and we have a chunk queue
            # that can't be parsed, something is wrong.
            else:
                raise ValueError(f'Got to end-of-file but still have '
                                 f'unparsed tokens: {parse_queue}')
        # If we get an expansion error, it might be because we need to
        # act on the chunk we have so far first.
        except NoSuchControlSequence:
            # This is only possible if we have already parsed the chunk-so-
            # far.
            if have_parsed:
                # This might always be fine, but log it anyway.
                logger.warning('Ignoring failed expansion in chunk grabber')
                logger.info(f'Got chunk "{chunk}", through failed expansion')
                return chunk, parse_queue
            # Otherwise, indeed something is wrong.
            else:
                raise
        parse_queue.append(t)
    # Defensive guard: the `while True` loop above only exits via return/raise.
    raise LogicError('Broke from command parsing loop unexpectedly')
| 37.751938 | 90 | 0.658316 |
14fbcce3feb4c4d3755700befad3fb8381ba83ea | 719 | py | Python | carnival/migrations/0011_auto_20191017_1045.py | farro4069/allez | c6ba374ee03cb01a494a4f6fe8ae0d0de5ce463c | [
"BSD-2-Clause"
] | null | null | null | carnival/migrations/0011_auto_20191017_1045.py | farro4069/allez | c6ba374ee03cb01a494a4f6fe8ae0d0de5ce463c | [
"BSD-2-Clause"
] | null | null | null | carnival/migrations/0011_auto_20191017_1045.py | farro4069/allez | c6ba374ee03cb01a494a4f6fe8ae0d0de5ce463c | [
"BSD-2-Clause"
] | null | null | null | # Generated by Django 2.1.4 on 2019-10-17 00:45
from django.db import migrations, models
| 24.793103 | 53 | 0.5758 |
14fbebb9df421f915a2a0442fc7bcdd045fbbef0 | 2,787 | py | Python | openweave/tlv/schema/tests/test_VENDOR.py | robszewczyk/openweave-tlv-schema | c0acbccce4fcaf213a09261f79d6a141ae94f7e8 | [
"Apache-2.0"
] | 1 | 2020-05-19T22:52:27.000Z | 2020-05-19T22:52:27.000Z | openweave/tlv/schema/tests/test_VENDOR.py | robszewczyk/openweave-tlv-schema | c0acbccce4fcaf213a09261f79d6a141ae94f7e8 | [
"Apache-2.0"
] | null | null | null | openweave/tlv/schema/tests/test_VENDOR.py | robszewczyk/openweave-tlv-schema | c0acbccce4fcaf213a09261f79d6a141ae94f7e8 | [
"Apache-2.0"
] | 1 | 2021-02-15T16:14:17.000Z | 2021-02-15T16:14:17.000Z | #!/usr/bin/env python3
#
# Copyright (c) 2020 Google LLC.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
# @file
# Unit tests for VENDOR definitions.
#
import unittest
from .testutils import TLVSchemaTestCase
# Run the VENDOR-definition unit tests when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 34.8375 | 76 | 0.614281 |
14fc3caa752fb624866d5cfe60083c14dfb17ed9 | 336 | py | Python | app/services/events.py | fufuok/FF.PyAdmin | 031fcafe70ecb78488876d0c61e30ca4fb4290af | [
"MIT"
] | 56 | 2019-11-26T15:42:29.000Z | 2022-03-10T12:28:07.000Z | app/services/events.py | fufuok/FF.PyAdmin | 031fcafe70ecb78488876d0c61e30ca4fb4290af | [
"MIT"
] | 4 | 2020-03-20T01:51:47.000Z | 2022-03-30T22:10:56.000Z | app/services/events.py | fufuok/FF.PyAdmin | 031fcafe70ecb78488876d0c61e30ca4fb4290af | [
"MIT"
] | 15 | 2019-11-26T15:42:33.000Z | 2022-03-09T05:41:44.000Z | # -*- coding:utf-8 -*-
"""
events.py
~~~~~~~~
,
:author: Fufu, 2019/12/20
"""
from blinker import signal
#
event_user_logined = signal('event_user_logined')
# (/)
event_sys_admin = signal('event_sys_admin')
# app
event_async_with_app_demo = signal('event_async_with_app_demo')
| 17.684211 | 63 | 0.690476 |
14fe3c3a6b1c35d2aade0c7366e77fd7418c122a | 1,181 | py | Python | oura_to_sqlite/utils.py | mfa/oura-to-sqlite | 724dab55e94df0c3a3e6e2faafa758cf20ea0792 | [
"Apache-2.0"
] | null | null | null | oura_to_sqlite/utils.py | mfa/oura-to-sqlite | 724dab55e94df0c3a3e6e2faafa758cf20ea0792 | [
"Apache-2.0"
] | 2 | 2021-10-31T15:16:34.000Z | 2021-10-31T15:22:17.000Z | oura_to_sqlite/utils.py | mfa/oura-to-sqlite | 724dab55e94df0c3a3e6e2faafa758cf20ea0792 | [
"Apache-2.0"
] | null | null | null | import datetime
import click
from oura import OuraClient
| 25.12766 | 81 | 0.628281 |
14fe677b2376deed69fc96644a350773e0c985ca | 1,635 | py | Python | ai_finger_counting.py | dnovai/advancedCVProject | de3e75247c7b7ae617a578800c51c42fadbdc844 | [
"MIT"
] | 1 | 2022-02-25T02:36:02.000Z | 2022-02-25T02:36:02.000Z | ai_finger_counting.py | dnovai/advancedCVProject | de3e75247c7b7ae617a578800c51c42fadbdc844 | [
"MIT"
] | null | null | null | ai_finger_counting.py | dnovai/advancedCVProject | de3e75247c7b7ae617a578800c51c42fadbdc844 | [
"MIT"
] | null | null | null | import cv2
import os
import time
import advancedcv.hand_tracking as htm
import numpy as np
import itertools

# All 2**5 = 32 possible up/down patterns of the five fingers, each a 0/1 row.
patterns = np.array(list(itertools.product([0, 1], repeat=5)))

p_time = 0  # timestamp of the previous frame, for the FPS display
cap = cv2.VideoCapture(0)  # default webcam
# w_cam, h_cam = 648, 480
# cap.set(3, w_cam)
# cap.set(4, h_cam)

# Load the overlay images (one per recognised count) in file-name order.
folder_path = "finger_images"
my_list = os.listdir(folder_path)
my_list.sort()
overlay_list = []
detector = htm.HandDetector()
for im_path in my_list:
    image = cv2.imread(f'{folder_path}/{im_path}')
    print(f'{folder_path}/{im_path}')
    overlay_list.append(image)

# Landmark ids of the five fingertips: thumb, index, middle, ring, pinky.
key_ids = [4, 8, 12, 16, 20]

# Main capture loop: detect the hand, build a 0/1 "finger up" vector, match it
# to the nearest pattern, and paste the matching overlay onto the frame.
while True:
    success, img = cap.read()
    img = detector.find_hands(img, draw=False)
    lm_list = detector.get_position(img, hand_number=0, draw=False)
    if len(lm_list) != 0:
        fingers = []
        # Thumb: compared on index 1 of the landmark entry (presumably the
        # x coordinate, since the thumb folds sideways -- confirm against
        # HandDetector.get_position).
        if lm_list[key_ids[0]][1] > lm_list[key_ids[0]-1][1]:
            fingers.append(1)
        else:
            fingers.append(0)
        # Other fingers: fingertip value at index 2 (presumably y) smaller
        # than the joint two landmarks below means the finger is raised.
        for idx in range(1, len(key_ids)):
            if lm_list[key_ids[idx]][2] < lm_list[key_ids[idx]-2][2]:
                fingers.append(1)
            else:
                fingers.append(0)
        # Nearest pattern by squared Euclidean distance over the 0/1 vectors.
        dist = (patterns - fingers)**2
        dist = np.sum(dist, axis=1)
        min_index = np.argmin(dist)
        print(min_index)
        # NOTE(review): indexing overlay_list[min_index+1] assumes the image
        # folder holds at least 33 files ordered to match `patterns` offset by
        # one -- confirm against the contents of finger_images/.
        h, w, c = overlay_list[min_index+1].shape
        img[0:h, 0:w] = overlay_list[min_index+1]
    # FPS counter (first frame divides by a large elapsed time since p_time=0).
    c_time = time.time()
    fps = 1/(c_time-p_time)
    p_time = c_time
    cv2.putText(img, f'FPS: {str(round(fps))}', (50, 70), cv2.FONT_HERSHEY_PLAIN, 5, (255, 0, 0), 3)
    cv2.imshow("Image", img)
    cv2.waitKey(1)
14ff6bd96aa976b58904b681f23b026afedef8de | 12,852 | py | Python | PaddleFSL/examples/image_classification/maml_image_classification.py | tianxin1860/FSL-Mate | 74dde9a3e1f789ec92710b9ecdf9c5b060d26fd3 | [
"MIT"
] | null | null | null | PaddleFSL/examples/image_classification/maml_image_classification.py | tianxin1860/FSL-Mate | 74dde9a3e1f789ec92710b9ecdf9c5b060d26fd3 | [
"MIT"
] | null | null | null | PaddleFSL/examples/image_classification/maml_image_classification.py | tianxin1860/FSL-Mate | 74dde9a3e1f789ec92710b9ecdf9c5b060d26fd3 | [
"MIT"
] | null | null | null | import paddle
import paddlefsl
from paddlefsl.model_zoo import maml
# Set computing device
paddle.set_device('gpu:0')
# """ ---------------------------------------------------------------------------------
# Config: MAML, Omniglot, MLP, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.Omniglot(mode='train', image_size=(28, 28))
VALID_DATASET = paddlefsl.datasets.Omniglot(mode='valid', image_size=(28, 28))
TEST_DATASET = paddlefsl.datasets.Omniglot(mode='test', image_size=(28, 28))
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.MLP(input_size=(28, 28), output_size=WAYS)
META_LR = 0.005
INNER_LR = 0.5
ITERATIONS = 60000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 1
TEST_INNER_ADAPT_STEPS = 3
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 1000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration60000.params'
# ----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Omniglot, MLP, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.Omniglot(mode='train', image_size=(28, 28))
VALID_DATASET = paddlefsl.datasets.Omniglot(mode='valid', image_size=(28, 28))
TEST_DATASET = paddlefsl.datasets.Omniglot(mode='test', image_size=(28, 28))
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.MLP(input_size=(28, 28), output_size=WAYS)
META_LR = 0.005
INNER_LR = 0.5
ITERATIONS = 20000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 1
TEST_INNER_ADAPT_STEPS = 3
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 1000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration20000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Omniglot, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.Omniglot(mode='train', image_size=(28, 28))
VALID_DATASET = paddlefsl.datasets.Omniglot(mode='valid', image_size=(28, 28))
TEST_DATASET = paddlefsl.datasets.Omniglot(mode='test', image_size=(28, 28))
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(1, 28, 28), output_size=WAYS, pooling=False)
META_LR = 0.005
INNER_LR = 0.5
ITERATIONS = 60000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 1
TEST_INNER_ADAPT_STEPS = 3
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 1000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration60000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Omniglot, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.Omniglot(mode='train', image_size=(28, 28))
VALID_DATASET = paddlefsl.datasets.Omniglot(mode='valid', image_size=(28, 28))
TEST_DATASET = paddlefsl.datasets.Omniglot(mode='test', image_size=(28, 28))
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(1, 28, 28), output_size=WAYS, pooling=False)
META_LR = 0.005
INNER_LR = 0.5
ITERATIONS = 20000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 1
TEST_INNER_ADAPT_STEPS = 3
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 1000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration20000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Mini-ImageNet, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.MiniImageNet(mode='train')
VALID_DATASET = paddlefsl.datasets.MiniImageNet(mode='valid')
TEST_DATASET = paddlefsl.datasets.MiniImageNet(mode='test')
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.002
INNER_LR = 0.03
ITERATIONS = 60000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration60000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Mini-ImageNet, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.MiniImageNet(mode='train')
VALID_DATASET = paddlefsl.datasets.MiniImageNet(mode='valid')
TEST_DATASET = paddlefsl.datasets.MiniImageNet(mode='test')
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.002
INNER_LR = 0.1
ITERATIONS = 30000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration30000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, CifarFS, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.CifarFS(mode='train')
VALID_DATASET = paddlefsl.datasets.CifarFS(mode='valid')
TEST_DATASET = paddlefsl.datasets.CifarFS(mode='test')
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(3, 32, 32), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.001
INNER_LR = 0.03
ITERATIONS = 30000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration30000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, CifarFS, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.CifarFS(mode='train')
VALID_DATASET = paddlefsl.datasets.CifarFS(mode='valid')
TEST_DATASET = paddlefsl.datasets.CifarFS(mode='test')
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(3, 32, 32), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.0015
INNER_LR = 0.15
ITERATIONS = 10000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration10000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, FC100, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.FC100(mode='train')
VALID_DATASET = paddlefsl.datasets.FC100(mode='valid')
TEST_DATASET = paddlefsl.datasets.FC100(mode='test')
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(3, 32, 32), output_size=WAYS)
META_LR = 0.002
INNER_LR = 0.05
ITERATIONS = 10000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 2000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration10000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, FC100, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.FC100(mode='train')
VALID_DATASET = paddlefsl.datasets.FC100(mode='valid')
TEST_DATASET = paddlefsl.datasets.FC100(mode='test')
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(3, 32, 32), output_size=WAYS)
META_LR = 0.003
INNER_LR = 0.08
ITERATIONS = 5000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 1000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration5000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, CubFS, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.CubFS(mode='train')
VALID_DATASET = paddlefsl.datasets.CubFS(mode='valid')
TEST_DATASET = paddlefsl.datasets.CubFS(mode='test')
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.002
INNER_LR = 0.03
ITERATIONS = 20000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration20000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, CubFS, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.CubFS(mode='train')
VALID_DATASET = paddlefsl.datasets.CubFS(mode='valid')
TEST_DATASET = paddlefsl.datasets.CubFS(mode='test')
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.003
INNER_LR = 0.1
ITERATIONS = 10000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 2000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration10000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Tiered-ImageNet, Conv, 5 Ways, 1 Shot
TRAIN_DATASET = paddlefsl.datasets.TieredImageNet(mode='train')
VALID_DATASET = paddlefsl.datasets.TieredImageNet(mode='valid')
TEST_DATASET = paddlefsl.datasets.TieredImageNet(mode='test')
WAYS = 5
SHOTS = 1
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.002
INNER_LR = 0.03
ITERATIONS = 15000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration15000.params'
----------------------------------------------------------------------------------"""
""" ---------------------------------------------------------------------------------
# Config: MAML, Tiered-ImageNet, Conv, 5 Ways, 5 Shots
TRAIN_DATASET = paddlefsl.datasets.TieredImageNet(mode='train')
VALID_DATASET = paddlefsl.datasets.TieredImageNet(mode='valid')
TEST_DATASET = paddlefsl.datasets.TieredImageNet(mode='test')
WAYS = 5
SHOTS = 5
MODEL = paddlefsl.backbones.Conv(input_size=(3, 84, 84), output_size=WAYS, conv_channels=[32, 32, 32, 32])
META_LR = 0.002
INNER_LR = 0.01
ITERATIONS = 30000
TEST_EPOCH = 10
META_BATCH_SIZE = 32
TRAIN_INNER_ADAPT_STEPS = 5
TEST_INNER_ADAPT_STEPS = 10
APPROXIMATE = True
REPORT_ITER = 10
SAVE_MODEL_ITER = 5000
SAVE_MODEL_ROOT = '~/trained_models'
TEST_PARAM_FILE = 'iteration30000.params'
----------------------------------------------------------------------------------"""
if __name__ == '__main__':
main()
| 35.502762 | 106 | 0.591581 |
14ff741d0e6a57229801a6e6be5e98e3344172dd | 3,085 | py | Python | convert.py | AndreiBaias/PAS | 8905f86db15806647ab7879fd32c9057a9b93868 | [
"MIT"
] | null | null | null | convert.py | AndreiBaias/PAS | 8905f86db15806647ab7879fd32c9057a9b93868 | [
"MIT"
] | 3 | 2022-03-30T15:43:12.000Z | 2022-03-30T15:43:41.000Z | convert.py | AndreiBaias/PAS | 8905f86db15806647ab7879fd32c9057a9b93868 | [
"MIT"
] | null | null | null |
import numpy as np
import collections, numpy
import glob
from PIL import Image
from matplotlib.pyplot import cm
nrImages = 1
imageSize = 449
finalImageSize = 449
ImageNumber = 0
sourceFolder = 'images'
# sourceFolder = "testInput"
destinationFolder = 'final_text_files_2'
# destinationFolder = "testOutput"
# image = Image.open("1570.png").convert("L")
# print(np.asarray(image))
index = 0
for filename in glob.glob(sourceFolder + '/*.png'):
image = Image.open(filename).convert("L")
imageArray = np.asarray(image)
imageArray = modifica(imageArray)
eliminaExtraCladiri(imageArray)
g = open("./" + destinationFolder + "/map" + str(index) + ".txt", "w")
g.write("")
g.close()
g = open("./" + destinationFolder + "/map" + str(index) + ".txt", "a")
g.write(str(len(imageArray)) + "\n" + str(len(imageArray)) + "\n")
for x in imageArray:
for y in x:
g.write(str(y) + " ")
g.write("\n")
index += 1
if index % 100 == 0:
print(index)
print(index)
# for i in range(nrImages):
# image = Image.open("./final_images/_2O7gRvMPVdPfW9Ql60S-w.png").convert("L")
# # image = image.resize((imageSize, imageSize), Image.ANTIALIAS)
#
# imageArray = np.asarray(image)
# print(imageArray.shape)
# imageArray = modifica(imageArray)
# eliminaExtraCladiri(imageArray)
# print(imageArray)
# g = open("map2.txt", "w")
# g.write("")
# g.close()
# g = open("map2.txt", "a")
# g.write(str(len(imageArray)) + "\n" + str(len(imageArray)) + "\n")
# for x in imageArray:
# for y in x:
# g.write(str(y) + " ")
# g.write("\n") | 24.68 | 83 | 0.491086 |
14ff9b4e350a6ca08c90a2722fd722026d991e51 | 1,857 | py | Python | ravager/housekeeping.py | CoolFool/Ravager | 3d647115689dc23a160255221aaa493f879406a5 | [
"MIT"
] | null | null | null | ravager/housekeeping.py | CoolFool/Ravager | 3d647115689dc23a160255221aaa493f879406a5 | [
"MIT"
] | 1 | 2022-03-15T06:55:48.000Z | 2022-03-15T15:38:20.000Z | ravager/housekeeping.py | CoolFool/Ravager | 3d647115689dc23a160255221aaa493f879406a5 | [
"MIT"
] | 2 | 2022-02-09T21:30:57.000Z | 2022-03-15T06:19:57.000Z | from ravager.database.tasks import Tasks
import logging
from ravager.database.helpers import setup_db
from ravager.config import DATABASE_URL, LOGS_DIR
from ravager.helpers.check_process import Process
from subprocess import check_call
logger = logging.getLogger(__file__)
setup_db.create_tables()
logger.info("Database setup at {}".format(DATABASE_URL))
logger.info(Tasks().clear())
logger.info(start_aria())
logger.info("aria2c started")
| 36.411765 | 74 | 0.726979 |
14ffa6b25312cc8be37c853fbf3300bd513054fa | 4,623 | py | Python | VOTA_Control/VOTAScopeHW/daq_do/daq_do_dev.py | fullerene12/VOTA | 3a5cfc1e210ac7ea274537a8d189b54660416599 | [
"MIT"
] | null | null | null | VOTA_Control/VOTAScopeHW/daq_do/daq_do_dev.py | fullerene12/VOTA | 3a5cfc1e210ac7ea274537a8d189b54660416599 | [
"MIT"
] | null | null | null | VOTA_Control/VOTAScopeHW/daq_do/daq_do_dev.py | fullerene12/VOTA | 3a5cfc1e210ac7ea274537a8d189b54660416599 | [
"MIT"
] | 1 | 2021-08-01T22:39:18.000Z | 2021-08-01T22:39:18.000Z | from PyDAQmx import *
from ctypes import byref, c_ulong,c_int32
import numpy as np | 26.722543 | 100 | 0.544884 |
14ffc8d1112cea4351881119362848071845aff2 | 1,603 | py | Python | test/examples/integrated/codec/testlib.py | rodrigomelo9/uvm-python | e3127eba2cc1519a61dc6f736d862a8dcd6fce20 | [
"Apache-2.0"
] | 140 | 2020-01-18T00:14:17.000Z | 2022-03-29T10:57:24.000Z | test/examples/integrated/codec/testlib.py | Mohsannaeem/uvm-python | 1b8768a1358d133465ede9cadddae651664b1d53 | [
"Apache-2.0"
] | 24 | 2020-01-18T18:40:58.000Z | 2021-03-25T17:39:07.000Z | test/examples/integrated/codec/testlib.py | Mohsannaeem/uvm-python | 1b8768a1358d133465ede9cadddae651664b1d53 | [
"Apache-2.0"
] | 34 | 2020-01-18T12:22:59.000Z | 2022-02-11T07:03:11.000Z | #//
#// -------------------------------------------------------------
#// Copyright 2011 Synopsys, Inc.
#// Copyright 2010-2011 Mentor Graphics Corporation
#// Copyright 2019-2020 Tuomas Poikela (tpoikela)
#// All Rights Reserved Worldwide
#//
#// Licensed under the Apache License, Version 2.0 (the
#// "License"); you may not use this file except in
#// compliance with the License. You may obtain a copy of
#// the License at
#//
#// http://www.apache.org/licenses/LICENSE-2.0
#//
#// Unless required by applicable law or agreed to in
#// writing, software distributed under the License is
#// distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
#// CONDITIONS OF ANY KIND, either express or implied. See
#// the License for the specific language governing
#// permissions and limitations under the License.
#// -------------------------------------------------------------
#//
#
#class hw_reset_test(test):
#
# `uvm_component_utils(hw_reset_test)
#
# def __init__(self, name, parent=None)
# super().__init__(name, parent)
# endfunction
#
# local bit once = 1
#async
# def main_phase(self, phase):
# if (once):
# once = 0
# phase.raise_objection(self)
# repeat (100 * 8) @(posedge env.vif.sclk)
# // This will clear the objection
# uvm_info("TEST", "Jumping back to reset phase", UVM_NONE)
# phase.jump(uvm_reset_phase::get())
# end
# endtask
#
from uvm.macros import *
#endclass
| 34.106383 | 71 | 0.559576 |
09000834339f325e00a15ac2eaa5bf2ddeeff627 | 1,110 | py | Python | MGTU-demo/arrange_signs.py | webkadiz/olympiad-problems | 620912815904c0f95b91ccd193ca3db0ea20e507 | [
"MIT"
] | null | null | null | MGTU-demo/arrange_signs.py | webkadiz/olympiad-problems | 620912815904c0f95b91ccd193ca3db0ea20e507 | [
"MIT"
] | null | null | null | MGTU-demo/arrange_signs.py | webkadiz/olympiad-problems | 620912815904c0f95b91ccd193ca3db0ea20e507 | [
"MIT"
] | null | null | null | from math import inf
nums = list(map(int, input().split()))
signs = { '+': 1, '-': 2, '*': 3, '/': 4, '%': 5, '=': 0 }
anss = []
comb(nums[1:], str(nums[0]), False)
print(anss)
min_v = inf
for ans in anss:
ves = 0
for char in ans:
ves += signs.get(char, 0)
if ves < min_v:
min_v = ves
real_ans = ans
real_ans = real_ans.replace('==', '=')
print(real_ans)
for char in real_ans:
if 48 <= ord(char) <= 57:
continue
print(char, end='')
print()
| 17.076923 | 58 | 0.544144 |
09003d15be83a3b390c12acd09219a14eb6cb09a | 15,291 | py | Python | kempnn/trainer.py | ttyhasebe/KEMPNN | d52ec0a82d758431120c0831738b104a535f2264 | [
"BSD-3-Clause"
] | 4 | 2022-01-14T08:43:52.000Z | 2022-03-02T11:06:03.000Z | kempnn/trainer.py | ttyhasebe/KEMPNN | d52ec0a82d758431120c0831738b104a535f2264 | [
"BSD-3-Clause"
] | null | null | null | kempnn/trainer.py | ttyhasebe/KEMPNN | d52ec0a82d758431120c0831738b104a535f2264 | [
"BSD-3-Clause"
] | null | null | null | #
# Copyright 2021 by Tatsuya Hasebe, Hitachi, Ltd.
# All rights reserved.
#
# This file is part of the KEMPNN package,
# and is released under the "BSD 3-Clause License". Please see the LICENSE
# file that should have been included as part of this package.
#
import datetime
import json
import os
import pickle
import time
import numpy as np
import torch
import torch.utils.data
from .loader import MoleculeCollater, loadDataset
from .utils import peason_r2_score, rmse_score
defaultMoleculeTrainConfig = {
"name": "",
"device": "cuda",
"optimizer": torch.optim.Adam,
"optimizer_args": {"lr": 0.001},
"optimize_schedule": None,
"optimize_schedule_args": {},
"loss": torch.nn.MSELoss(),
"save": True,
"save_path": "weights",
"batch_size": 16,
"epochs": 50,
"drop_last": True,
}
| 33.459519 | 80 | 0.511804 |
09003f8db6874b60bff5eb74103e02a1d139ecc6 | 222 | py | Python | ex052.py | almmessias/CursoPython | 4cec6946f32002cbd5d3b802df11ea1ba74169f5 | [
"MIT"
] | null | null | null | ex052.py | almmessias/CursoPython | 4cec6946f32002cbd5d3b802df11ea1ba74169f5 | [
"MIT"
] | null | null | null | ex052.py | almmessias/CursoPython | 4cec6946f32002cbd5d3b802df11ea1ba74169f5 | [
"MIT"
] | null | null | null | num = int (input ('Digite um nmero inteiro: '))
if num % 2 != 0 and num % 3 != 0 and num % 5 != 0 and num % 7 != 0:
print ('{} um nmero primo'.format(num))
else:
print ('{} no um nmero primo.'.format(num))
| 37 | 67 | 0.567568 |
090286670227babe5f029e77ab867cf49a3711a6 | 659 | py | Python | invenio_records_lom/records/systemfields/providers.py | martinobersteiner/invenio-records-lom | 545a78eeb056b3c88ed46f7fe345a699bf283895 | [
"MIT"
] | null | null | null | invenio_records_lom/records/systemfields/providers.py | martinobersteiner/invenio-records-lom | 545a78eeb056b3c88ed46f7fe345a699bf283895 | [
"MIT"
] | 18 | 2020-10-21T07:58:14.000Z | 2022-03-29T12:10:25.000Z | invenio_records_lom/records/systemfields/providers.py | martinobersteiner/invenio-records-lom | 545a78eeb056b3c88ed46f7fe345a699bf283895 | [
"MIT"
] | 7 | 2020-10-06T08:46:40.000Z | 2021-07-06T13:21:29.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2021 Graz University of Technology.
#
# invenio-records-lom is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Provider for LOM PID-fields."""
from invenio_drafts_resources.records.api import DraftRecordIdProviderV2
from invenio_pidstore.providers.recordid_v2 import RecordIdProviderV2
| 27.458333 | 80 | 0.760243 |
0902becf0cc85035e9f04cc78c468e09f7880261 | 1,127 | py | Python | discord_base/__manifest__.py | bishalgit/discord-addons | f7f36791734440cd0b37296f5f5132e91035b15f | [
"MIT"
] | 1 | 2020-10-02T23:22:44.000Z | 2020-10-02T23:22:44.000Z | discord_base/__manifest__.py | bishalgit/discord-addons | f7f36791734440cd0b37296f5f5132e91035b15f | [
"MIT"
] | null | null | null | discord_base/__manifest__.py | bishalgit/discord-addons | f7f36791734440cd0b37296f5f5132e91035b15f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Ygen. See LICENSE file for full copyright and licensing details.
{
'name': 'Discord - Base module for discord',
'summary': """
This module is a base module to provide foudation for building discord modules for Odoo.""",
'description': """
This module is a base module to provide foudation for building discord modules for Odoo.""",
'version': '12.0.1.0.0',
'license': 'OPL-1',
'author': 'Bishal Pun, '
'Ygen Software Pvt Ltd',
'website': 'https://ygen.io',
'price': 50.00,
'currency': 'EUR',
'depends': [
'mail',
],
'data': [
'security/discord_security.xml',
'security/ir.model.access.csv',
'data/ir_sequence.xml',
'data/ir_config_parameter.xml',
'data/ir_cron_data.xml',
'views/discord_guild_views.xml',
'views/discord_channel_views.xml',
'views/discord_member_views.xml',
'views/discord_message_views.xml',
'views/discord_menu_views.xml',
],
'installable': True,
'auto_install': False,
'application': True,
} | 32.2 | 100 | 0.598048 |
0903fb75c589ec651b3db5a68d90addf520bf4a1 | 696 | py | Python | app.py | KaceyHirth/Library-DBMS-System | 40b425ed5c7b46627b7c48724b2d20e7a64cf025 | [
"MIT"
] | null | null | null | app.py | KaceyHirth/Library-DBMS-System | 40b425ed5c7b46627b7c48724b2d20e7a64cf025 | [
"MIT"
] | null | null | null | app.py | KaceyHirth/Library-DBMS-System | 40b425ed5c7b46627b7c48724b2d20e7a64cf025 | [
"MIT"
] | null | null | null | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_bootstrap import Bootstrap
from flask_login import LoginManager, UserMixin, login_user, login_required, logout_user, current_user
import os
basePath = os.path.abspath(os.path.dirname(__file__))
template_dir = os.path.join(basePath, 'templates')
app = Flask(__name__, template_folder=template_dir)
app.config['SECRET_KEY'] = 'Thisissupposedtobesecret'
app.config['SQL_TRACK_MODIFICATION'] = False
app.config['SQL_COMMIT_ON_TEARDOWN'] = True
app.config['SQLALCHEMY_DATABASE_URI'] = ''
db = SQLAlchemy(app)
Bootstrap(app)
login_manager = LoginManager()
login_manager.init_app(app)
login_manager.login_view = 'login'
| 27.84 | 102 | 0.806034 |
09042b5650522c004e88f9bb356dd4258fbf0a37 | 78 | py | Python | python_resolutions/beginner/1070.py | DanielYe1/UriResolutions | 7140c4a7f37b95cc15d9c77612c4abde469d379f | [
"Apache-2.0"
] | null | null | null | python_resolutions/beginner/1070.py | DanielYe1/UriResolutions | 7140c4a7f37b95cc15d9c77612c4abde469d379f | [
"Apache-2.0"
] | null | null | null | python_resolutions/beginner/1070.py | DanielYe1/UriResolutions | 7140c4a7f37b95cc15d9c77612c4abde469d379f | [
"Apache-2.0"
] | null | null | null | x = int(input())
for i in range(12):
if (i+x) % 2 ==1:
print(i+x)
| 15.6 | 21 | 0.448718 |
09047cdff4106518ddb7312a4ad2e4fbacd7ac5f | 6,167 | py | Python | xdl/blueprints/chasm2.py | mcrav/xdl | c120a1cf50a9b668a79b118700930eb3d60a9298 | [
"MIT"
] | null | null | null | xdl/blueprints/chasm2.py | mcrav/xdl | c120a1cf50a9b668a79b118700930eb3d60a9298 | [
"MIT"
] | null | null | null | xdl/blueprints/chasm2.py | mcrav/xdl | c120a1cf50a9b668a79b118700930eb3d60a9298 | [
"MIT"
] | null | null | null | from ..constants import JSON_PROP_TYPE
from .base_blueprint import BaseProcedureBlueprint
from ..steps import placeholders
from ..reagents import Reagent
DEFAULT_VESSEL: str = 'reactor'
DEFAULT_SEPARATION_VESSEL: str = 'separator'
DEFAULT_EVAPORATION_VESSEL: str = 'rotavap'
converters = {
'addition1': chasm2_addition,
'addition2': chasm2_addition,
'addition3': chasm2_addition,
'addition4': chasm2_addition,
'addition5': chasm2_addition,
'addition6': chasm2_addition,
'addition7': chasm2_addition,
'addition8': chasm2_addition,
'addition9': chasm2_addition,
'addition10': chasm2_addition,
'reaction': chasm2_reaction,
'separation1': chasm2_separation,
'separation2': chasm2_separation,
'separation3': chasm2_separation,
'separation4': chasm2_separation,
'separation5': chasm2_separation,
'evaporation': chasm2_evaporation,
'purification': chasm2_purification,
}
| 31.304569 | 78 | 0.592995 |
09050f807c744801e59522d4a44d059ae276259e | 570 | py | Python | pandaharvester/harvestercore/plugin_base.py | tsulaiav/harvester | ca3f78348019dd616738f2da7d50e81700a8e6b9 | [
"Apache-2.0"
] | 11 | 2017-06-01T10:16:58.000Z | 2019-11-22T08:41:36.000Z | pandaharvester/harvestercore/plugin_base.py | tsulaiav/harvester | ca3f78348019dd616738f2da7d50e81700a8e6b9 | [
"Apache-2.0"
] | 34 | 2016-10-25T19:15:24.000Z | 2021-03-05T12:59:04.000Z | pandaharvester/harvestercore/plugin_base.py | tsulaiav/harvester | ca3f78348019dd616738f2da7d50e81700a8e6b9 | [
"Apache-2.0"
] | 17 | 2016-10-24T13:29:45.000Z | 2021-03-23T17:35:27.000Z | from future.utils import iteritems
from pandaharvester.harvestercore import core_utils
| 33.529412 | 96 | 0.685965 |
0905ad16307a5af70bde741ea4817b4a93ef0e8a | 1,762 | py | Python | preprocessing/MEG/filtering.py | athiede13/neural_sources | 3435f26a4b99b7f705c7ed6b43ab9c741fdd1502 | [
"MIT"
] | null | null | null | preprocessing/MEG/filtering.py | athiede13/neural_sources | 3435f26a4b99b7f705c7ed6b43ab9c741fdd1502 | [
"MIT"
] | null | null | null | preprocessing/MEG/filtering.py | athiede13/neural_sources | 3435f26a4b99b7f705c7ed6b43ab9c741fdd1502 | [
"MIT"
] | null | null | null | """
Filtering of MEG data
Created on 13.9.2017
@author: Anja Thiede <anja.thiede@helsinki.fi>
"""
import os
from os import walk
import datetime
import numpy as np
import mne
now = datetime.datetime.now()
# set up data paths
root_path = ('/media/cbru/SMEDY_SOURCES/DATA/MEG_prepro/')
f = []
for (dirpath, dirnames, filenames) in walk(root_path):
f.extend(filenames)
break
log_path = root_path+'logs/logs_filt_'+now.strftime("%Y-%m-%d")
log = open(log_path, 'w')
#sub = ['sme_028'] # for testing or filtering single files
i = 0
for subject in dirnames: #sub: #
subject_folder = root_path+subject+'/'
subject_files = os.listdir(subject_folder)
# filt_file_count = processedcount(subject_files)
# if filt_file_count == 2:
# continue
for pieces in subject_files:
if pieces[-11:] == 'ref_ssp.fif':
final_path = subject_folder+pieces
print(final_path)
i = i+1
raw = mne.io.read_raw_fif(final_path, preload=True) # read preprocessed data
# raw.set_eeg_reference()
order = np.arange(raw.info['nchan'])
# filter the data
raw.load_data()
hp = 0.5
lp = 25.0
raw.filter(hp, None, n_jobs=8, method='fir')
# high-pass filter, default hamming window is used
raw.filter(None, lp, n_jobs=8, method='fir') # low-pass filter
fsave = subject_folder+pieces[:-4]+'_filt.fif'
print(fsave)
raw.save(fsave, overwrite=True) # save filtered file to disk
log.write(subject+' processed\n')
log.close()
| 27.53125 | 88 | 0.611805 |
090815c73402b5617ea4e0affb9e020029701833 | 108 | py | Python | solutions/week-1/interval.py | bekbolsky/stepik-python | 91613178ef8401019fab01ad18f10ee84f2f4491 | [
"MIT"
] | 1 | 2022-02-23T09:05:47.000Z | 2022-02-23T09:05:47.000Z | solutions/week-1/interval.py | bekbolsky/stepik-python | 91613178ef8401019fab01ad18f10ee84f2f4491 | [
"MIT"
] | null | null | null | solutions/week-1/interval.py | bekbolsky/stepik-python | 91613178ef8401019fab01ad18f10ee84f2f4491 | [
"MIT"
] | null | null | null | x = int(input())
if (-15 < x <= 12) or (14 < x < 17) or x >= 19:
print("True")
else:
print("False")
| 18 | 47 | 0.472222 |
09098bec23281af47e835daa26b81dccca6d2e2c | 22,972 | py | Python | src/pds_doi_service/core/db/doi_database.py | NASA-PDS/pds-doi-service | b994381a5757700229865e8fe905553559684e0d | [
"Apache-2.0"
] | 2 | 2020-11-03T19:29:11.000Z | 2021-09-26T01:42:41.000Z | src/pds_doi_service/core/db/doi_database.py | NASA-PDS/pds-doi-service | b994381a5757700229865e8fe905553559684e0d | [
"Apache-2.0"
] | 222 | 2020-05-07T21:05:23.000Z | 2021-12-16T22:14:54.000Z | src/pds_doi_service/core/db/doi_database.py | NASA-PDS/pds-doi-service | b994381a5757700229865e8fe905553559684e0d | [
"Apache-2.0"
] | null | null | null | #
# Copyright 202021, by the California Institute of Technology. ALL RIGHTS
# RESERVED. United States Government Sponsorship acknowledged. Any commercial
# use must be negotiated with the Office of Technology Transfer at the
# California Institute of Technology.
#
"""
===============
doi_database.py
===============
Contains classes and functions for interfacing with the local transaction
database (SQLite3).
"""
import sqlite3
from collections import OrderedDict
from datetime import datetime
from datetime import timedelta
from datetime import timezone
from sqlite3 import Error
from pds_doi_service.core.entities.doi import DoiStatus
from pds_doi_service.core.entities.doi import ProductType
from pds_doi_service.core.util.config_parser import DOIConfigUtil
from pds_doi_service.core.util.general_util import get_logger
# Get the common logger and set the level for this file.
logger = get_logger(__name__)
| 36.176378 | 119 | 0.632596 |
0909f5e66b19795a40b888634a2cf23b87f0cd63 | 786 | py | Python | amnesia/modules/search/views/tag.py | silenius/amnesia | ba5e3ac79a89da599c22206ad1fd17541855f74c | [
"BSD-2-Clause"
] | 4 | 2015-05-08T10:57:56.000Z | 2021-05-17T04:32:11.000Z | amnesia/modules/search/views/tag.py | silenius/amnesia | ba5e3ac79a89da599c22206ad1fd17541855f74c | [
"BSD-2-Clause"
] | 6 | 2019-12-26T16:43:41.000Z | 2022-02-28T11:07:54.000Z | amnesia/modules/search/views/tag.py | silenius/amnesia | ba5e3ac79a89da599c22206ad1fd17541855f74c | [
"BSD-2-Clause"
] | 1 | 2019-09-23T14:08:11.000Z | 2019-09-23T14:08:11.000Z | # -*- coding: utf-8 -*-
# pylint: disable=E1101
from pyramid.view import view_config
from pyramid.httpexceptions import HTTPNotFound
from amnesia.modules.tag import Tag
from amnesia.modules.search import SearchResource
def includeme(config):
''' Pyramid includeme func'''
config.scan(__name__)
| 23.117647 | 70 | 0.683206 |
090a042fdb172133fc8a7549c6014b2194047447 | 12,410 | py | Python | compute_mainmodes.py | mehrdad-bm/mobility_shift | 242f12b60dc8e07e3da13b5f1199456fd0fd697e | [
"MIT"
] | 1 | 2020-06-24T12:49:49.000Z | 2020-06-24T12:49:49.000Z | compute_mainmodes.py | mehrdad-bm/mobility_shift | 242f12b60dc8e07e3da13b5f1199456fd0fd697e | [
"MIT"
] | null | null | null | compute_mainmodes.py | mehrdad-bm/mobility_shift | 242f12b60dc8e07e3da13b5f1199456fd0fd697e | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 27 00:17:55 2020
@author: mehrdad
"""
import json
import numpy as np
import pandas as pd
import time
import math
#import blist
import tslib.mining
import tslib.common
import tslib.trip_detection
import tslib.trip
# NOTE(review): presumably gates persisting computed results to disk (the
# output folder below is commented out); actual usage is outside this chunk —
# confirm before relying on it.
STORE_RESULTS = False
#output_folder = './data/output'
#all_modes = {'WALK':0, 'RUN':0, 'BUS': 0, 'TRAM':0, 'RAIL':0, 'FERRY':0,
# 'CAR':0, 'SUBWAY':0, 'BICYCLE':0, 'EBICYCLE':0}
#all_modes_df = pd.DataFrame(data=all_modes.values(), index=all_modes.keys())
#from pyfiles.common.modalchoice import ModalChoice
# ----------------------------------------------------------------------------------------
# -----------------------------------------------------------
# ======================================================
| 40.032258 | 135 | 0.652458 |
090a8d868906c9c97d5b54db58497792e9cd606d | 24,054 | py | Python | envs/env.py | CMU-Light-Curtains/SafetyEnvelopes | e2b32f99437ea36c8b22f97470c5a7f406d3ec78 | [
"BSD-3-Clause"
] | null | null | null | envs/env.py | CMU-Light-Curtains/SafetyEnvelopes | e2b32f99437ea36c8b22f97470c5a7f406d3ec78 | [
"BSD-3-Clause"
] | null | null | null | envs/env.py | CMU-Light-Curtains/SafetyEnvelopes | e2b32f99437ea36c8b22f97470c5a7f406d3ec78 | [
"BSD-3-Clause"
] | null | null | null | from abc import ABC, abstractmethod
import gym
import matplotlib.pyplot as plt
import numpy as np
from pathlib import Path
import time
from setcpp import SmoothnessDPL1Cost, SmoothnessDPPairGridCost, SmoothnessGreedy
import tqdm
from typing import Optional, Tuple, NoReturn
from data.synthia import Frame, append_xyz_to_depth_map
from devices.light_curtain import LCReturn
from lc_planner.planner import PlannerRT
from lc_planner.config import LCConfig
import utils
########################################################################################################################
# region Base Env class
########################################################################################################################
def augment_frame_data(self, frame: Frame) -> NoReturn:
"""Compute the gt safety envelope and add it to the frame"""
se_ranges = self.safety_envelope(frame) # (C,)
se_design_pts = utils.design_pts_from_ranges(se_ranges, self.thetas) # (C, 2)
frame.annos["se_ranges"] = se_ranges
frame.annos["se_design_pts"] = se_design_pts
####################################################################################################################
# region Env API functions
####################################################################################################################
    def reset(self,
              vid: Optional[int] = None,
              start: Optional[int] = None) -> np.ndarray:
        """Resets the state of the environment, returns the initial envelope and also initializes self.intensities.
        Abstract: concrete Env subclasses must override this.
        Args:
            vid (int): video id.
            start (int): start frame of video.
        Returns:
            init_envelope (np.ndarray, dtype=np.float32, shape=(C,)): the initial envelope.
        """
        raise NotImplementedError
    def step(self,
             action: Optional[np.ndarray],
             score: Optional[float] = None,
             get_gt: bool = False) -> Tuple[LCReturn, bool, dict]:
        """
        Compute the observations from the current step.
        This is derived by placing the light curtain computed from observations in the previous timestep,
        in the current frame.
        Args:
            action (np.ndarray, dtype=np.float32, shape=(C,)): Ranges of the light curtain.
                   This is optional; if None, then the ground truth action will be used instead (for behavior cloning).
            score (Optional[float]): the score of the front curtain that needs to be published.
            get_gt (bool): whether to compute gt_action or not
        Returns:
            observation (LCReturn): agent's observation of the current environment. This is the return from the front
                                    light curtain. Always returns a valid observation, even when end=True.
            end (bool): is True for the last valid observation in the episode. No further calls to step() should be
                        made after a end=True has been returned.
            info (dict): Contains auxiliary diagnostic information (helpful for debugging, and sometimes learning).
                {
                    'gt_action' (optional): (np.ndarray, dtype=float32, shape=(C,))
                                            the light curtain placement that should be considered `ground
                                            truth' for the previous timestep. This is what `action' should
                                            ideally be equal to.
                    'ss_action' (optional): (np.ndarray, dtype=np.float32, shape=(C,))
                                            partial ground truth self-supervision signal generated by random
                                            light curtains. note that the mask is equal to
                                            (ss_action < self.max_range).
                }
        """
        self.env_step_begin()
        info = {}
        ################################################################################################################
        # region Random curtain
        ################################################################################################################
        if self._use_random_curtain:
            # place random curtain and move f_curtain to wherever hits are observed
            r_curtain, r_hits = self.env_place_r_curtain()
            # compute self-supervision signal
            # these are the ranges of the random curtain for those camera rays where a hit was observed.
            # rays that did not observe a hit are masked out.
            ss_action = r_curtain.copy()  # (C,)
            ss_action[~r_hits] = self.max_range
            info['ss_action'] = ss_action
        # endregion
        ################################################################################################################
        # region Pre-processing forecasting curtain
        ################################################################################################################
        if action is not None:
            # clip curtain between min and max range
            f_curtain = action.clip(min=self.min_range, max=self.max_range)  # (C,)
            if self._use_random_curtain and self._random_curtain_updates_main_curtain:
                # update f_curtain by moving it to locations where the random curtain observed returns
                # update only those locations where the random curtain detected objects *closer* than the main curtain
                r_update_mask = r_hits & (r_curtain < f_curtain)  # (C,)
                # NOTE(review): subtracting _r_recession places the curtain slightly
                # in front of (closer than) the random-curtain hit; units not visible here.
                f_curtain[r_update_mask] = r_curtain[r_update_mask] - self._r_recession
                # since f_curtain is being updated, self.intensities must also be updated.
                # furthermore, the locations of random curtain hits should get the highest priority
                # (1.1 > any normalized intensity in [0, 1]).
                self.intensities[r_update_mask] = 1.1
        # endregion
        ################################################################################################################
        # region Smoothing forecasting curtain
        ################################################################################################################
        if action is not None:
            if self._pp_smoothing == "heuristic_global":
                # heuristic smoothing: difference between ranges on consecutive rays shouldn't exceed a threshold
                # global optimization: minimizes the sum of L1 differences across all rays using DP
                if self._use_random_curtain and self._random_curtain_updates_main_curtain:
                    # when using random curtains, the cost will be hierarchical:
                    # (sum of L1 costs over rays in r_update_mask, sum of L1 costs over rays outside r_update_mask)
                    # this priorities being close to the locations updated by r_curtain more than the other locations.
                    ranges = np.array(self._smoothnessDPPairGridCost.getRanges(), dtype=np.float32)  # (R,)
                    flat_cost = np.abs(ranges.reshape(-1, 1) - f_curtain)  # (R, C)
                    # hierarchical cost
                    #   - (L1cost, 0): if on ray in r_update_mask
                    #   - (0, L1cost): if on ray outside r_update_mask
                    pair_cost = np.zeros([len(ranges), self.C, 2], dtype=np.float32)  # (R, C, 2)
                    pair_cost[:, r_update_mask, 0] = flat_cost[:, r_update_mask]
                    pair_cost[:, ~r_update_mask, 1] = flat_cost[:, ~r_update_mask]
                    f_curtain = np.array(self._smoothnessDPPairGridCost.smoothedRanges(pair_cost), dtype=np.float32)  # (C,)
                else:
                    f_curtain = np.array(self._smoothnessDPL1Cost.smoothedRanges(f_curtain), dtype=np.float32)  # (C,)
            elif self._pp_smoothing == "heuristic_greedy":
                # heuristic smoothing: difference between ranges on consecutive rays shouldn't exceed a threshold
                # greedy optimization: greedily smoothes ranges while iterating over rays prioritized by largest weights
                f_curtain = np.array(self._smoothnessGreedy.smoothedRanges(f_curtain, self.intensities), dtype=np.float32)  # (C,)
            elif self._pp_smoothing == "planner_global":
                # create L1 cost function
                # (negated because the planner maximizes the cost map)
                ranges = self.plannerV2.ranges  # (R,)
                cmap = -np.abs(ranges.reshape(-1, 1) - f_curtain)  # (R, C)
                design_pts = self.plannerV2.get_design_points(cmap)  # (C, 2)
                assert design_pts.shape == (self.plannerV2.num_camera_angles, 2)
                f_curtain = np.linalg.norm(design_pts, axis=1)  # (C,)
            else:
                raise Exception(f"env.pp_smoothing must be " +
                                "\"heuristic_global\" or \"heuristic_greedy\" or \"planner_global\"")
        # endregion
        ################################################################################################################
        # region GT-action and placing forecasting curtain
        ################################################################################################################
        if (action is None) and (get_gt == False):
            raise Exception("Must compute gt_action in behavior cloning")
        # the next line gets the ground truth action for the previous timestep
        # in the ideal policy, `action' should match this `gt_action'
        if get_gt:
            info['gt_action'] = self.env_current_gt_action()  # (C,)
        # if action is set to None (for eg. in behavior cloning), use the ground truth action instead
        # (action is None implies get_gt is True -- enforced by the exception above --
        #  so f_curtain is always bound when we reach env_place_f_curtain below)
        if action is None:
            f_curtain = info['gt_action']
        # placing forecasting curtain
        obs: LCReturn = self.env_place_f_curtain(f_curtain, score=score)
        # the next line updates self.intensities
        self.intensities = obs.bev_intensities() / 255.0
        # the next line computes `end', which checks whether another env.step() call can be made
        end = self.env_end()
        time.sleep(0)  # interrupt, useful for RealEnv
        return obs, end, info
def done(self,
f_curtain: np.ndarray,
se_ranges: np.ndarray) -> bool:
"""
Whether the episode transitions to the terminal state or not.
Done is true when the curtain has moved too far away from the safety envelope on any camera ray i.e.
abs(f_curtain - se_ranges) > (atol + rtol * se_ranges) for any camera ray
Args:
f_curtain (np.ndarray, dtype=float32, shape=(C,)): curtain placement
se_ranges (np.ndarray, dtype=float32, shape=(C,)): ground truth safety envelope.
Returns:
done (bool): whether f_curtain is too far away from se_ranges on any camera ray.
"""
# the next line computes the mask over rays; only these rays should count towards termination
mask = se_ranges < self.max_range # (C,)
f_curtain = f_curtain[mask] # (C',)
se_ranges = se_ranges[mask] # (C',)
# bad_rays = np.abs(f_curtain - se_ranges) > self._atol + self._rtol * se_ranges # (C')
# frac_bad_rays = bad_rays.sum() / mask.sum().clip(min=1)
# return frac_bad_rays >= 0.5
return np.any(np.abs(f_curtain - se_ranges) > self._atol + self._rtol * se_ranges)
# endregion
####################################################################################################################
# region Env-specific helper functions for step()
####################################################################################################################
# endregion
####################################################################################################################
# region Legacy helper functions
####################################################################################################################
def _debug_visualize_curtains(self, f_curtain, r_curtain):
design_pts = utils.design_pts_from_ranges(f_curtain, self.thetas)
x, z = design_pts[:, 0], design_pts[:, 1]
plt.plot(x, z, c='b')
design_pts = utils.design_pts_from_ranges(r_curtain, self.thetas)
x, z = design_pts[:, 0], design_pts[:, 1]
plt.plot(x, z, c='r')
plt.ylim(0, 21)
plt.show()
def _random_curtain(self,
r_type: str = "linear") -> np.ndarray:
"""Computes a random curtain across the entire scene
Args:
r_type (str): type of the random curtain. Options are (1) "uniform", (2) "linear".
Returns:
curtain (np.ndarray, dtype=np.float32, shape=(C,)): range per camera ray that may not correpsond to a
valid curtain.
"""
limits_lo = np.ones(self.C, dtype=np.float32) * 0.5 * self.min_range # (C,)
limits_hi = np.ones(self.C, dtype=np.float32) * self.max_range # (C,)
if r_type == "uniform":
curtain = np.random.uniform(low=limits_lo, high=limits_hi) # (C,)
elif r_type == "linear":
curtain = np.sqrt(np.random.uniform(low=np.square(limits_lo), high=np.square(limits_hi))) # (C,)
else:
raise Exception("r_type must be one of [uniform/linear]")
return curtain
# endregion
####################################################################################################################
# endregion
########################################################################################################################
# region Random curtain generator class
########################################################################################################################
# endregion
########################################################################################################################
| 46.436293 | 130 | 0.547435 |
090ac8191b92a41692dec58a6457de7f58261791 | 17,884 | py | Python | pw_console/py/pw_console/plugins/clock_pane.py | octml/pigweed | e273d46024ef7b5a7c7ec584e4aaada41c541fc4 | [
"Apache-2.0"
] | 86 | 2021-03-09T23:49:40.000Z | 2022-03-30T08:14:51.000Z | pw_console/py/pw_console/plugins/clock_pane.py | octml/pigweed | e273d46024ef7b5a7c7ec584e4aaada41c541fc4 | [
"Apache-2.0"
] | 4 | 2021-07-27T20:32:03.000Z | 2022-03-08T10:39:07.000Z | pw_console/py/pw_console/plugins/clock_pane.py | octml/pigweed | e273d46024ef7b5a7c7ec584e4aaada41c541fc4 | [
"Apache-2.0"
] | 22 | 2021-03-11T15:15:47.000Z | 2022-02-09T06:16:36.000Z | # Copyright 2021 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
"""Example Plugin that displays some dynamic content (a clock) and examples of
text formatting."""
from datetime import datetime
from prompt_toolkit.filters import Condition, has_focus
from prompt_toolkit.formatted_text import (
FormattedText,
HTML,
merge_formatted_text,
)
from prompt_toolkit.key_binding import KeyBindings, KeyPressEvent
from prompt_toolkit.layout import FormattedTextControl, Window, WindowAlign
from prompt_toolkit.mouse_events import MouseEvent, MouseEventType
from pw_console.plugin_mixin import PluginMixin
from pw_console.widgets import ToolbarButton, WindowPane, WindowPaneToolbar
from pw_console.get_pw_console_app import get_pw_console_app
# Helper class used by the ClockPane plugin for displaying dynamic text,
# handling key bindings and mouse input. See the ClockPane class below for the
# beginning of the plugin implementation.
| 41.207373 | 132 | 0.597238 |
090c37be215b0f5ca5159ca3a58646804fc96e15 | 163 | py | Python | workshop_material/superfast/test/test_stuff.py | nrupatunga/pyimageconf2018 | 2f4c83a78206106b50835730749028a03fbbc565 | [
"BSL-1.0"
] | 106 | 2018-08-30T01:45:38.000Z | 2021-06-03T11:05:15.000Z | workshop_material/superfast/test/test_stuff.py | nrupatunga/pyimageconf2018 | 2f4c83a78206106b50835730749028a03fbbc565 | [
"BSL-1.0"
] | 3 | 2019-04-12T02:03:25.000Z | 2019-05-07T00:16:55.000Z | workshop_material/superfast/test/test_stuff.py | nrupatunga/pyimageconf2018 | 2f4c83a78206106b50835730749028a03fbbc565 | [
"BSL-1.0"
] | 36 | 2018-08-30T04:08:31.000Z | 2021-05-18T07:02:10.000Z | import numpy as np
import superfast
| 23.285714 | 55 | 0.656442 |
090c9c265a0fb2fb0dad7a18bd49965eaa38157a | 3,816 | py | Python | mono/model/mono_autoencoder/layers.py | Jenaer/FeatDepth | 64128b03873b27ffa5e99a5cb1712dd8aa15cb0d | [
"MIT"
] | 179 | 2020-08-21T08:57:22.000Z | 2022-03-26T21:55:20.000Z | mono/model/mono_autoencoder/layers.py | sconlyshootery/feature_metric_depth | 550420b3fb51a027549716b74c6fbce41651d3a5 | [
"MIT"
] | 84 | 2020-08-30T14:25:19.000Z | 2022-03-08T12:29:37.000Z | mono/model/mono_autoencoder/layers.py | sconlyshootery/feature_metric_depth | 550420b3fb51a027549716b74c6fbce41651d3a5 | [
"MIT"
] | 31 | 2020-10-01T12:12:19.000Z | 2022-03-06T08:04:18.000Z | from __future__ import absolute_import, division, print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
def compute_depth_errors(gt, pred):
    """Standard monocular-depth evaluation metrics between ground truth and prediction.

    Returns a 7-tuple of scalar tensors:
    (abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3), where a1/a2/a3 are the
    fractions of pixels whose max(gt/pred, pred/gt) ratio is below
    1.25, 1.25**2 and 1.25**3 respectively.
    """
    ratio = torch.max(gt / pred, pred / gt)
    a1, a2, a3 = ((ratio < 1.25 ** k).float().mean() for k in (1, 2, 3))
    diff = gt - pred
    rmse = torch.sqrt((diff ** 2).mean())
    log_diff = torch.log(gt) - torch.log(pred)
    rmse_log = torch.sqrt((log_diff ** 2).mean())
    abs_rel = torch.mean(diff.abs() / gt)
    sq_rel = torch.mean((diff ** 2) / gt)
    return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
090caec635d034601a9a45a30d2d0ee7c652da16 | 2,413 | py | Python | pdnn/helpers/the_graveyard.py | alamorre/pdnn-experiment | b07b509e8610c324b11aa81204cfca06b8437f16 | [
"BSD-2-Clause-FreeBSD"
] | 17 | 2017-06-14T16:36:12.000Z | 2021-01-31T18:16:10.000Z | pdnn/helpers/the_graveyard.py | alamorre/pdnn-experiment | b07b509e8610c324b11aa81204cfca06b8437f16 | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2018-02-26T16:04:48.000Z | 2018-03-01T06:42:57.000Z | pdnn/helpers/the_graveyard.py | alamorre/pdnn-experiment | b07b509e8610c324b11aa81204cfca06b8437f16 | [
"BSD-2-Clause-FreeBSD"
] | 5 | 2017-09-12T13:20:02.000Z | 2019-02-06T08:41:58.000Z | import numpy as np
from plato.core import as_floatx, create_shared_variable, symbolic, add_update
from theano import tensor as tt
| 29.426829 | 86 | 0.61293 |
090cd84d945d6fe0adc3e503a0af7a8286c3e451 | 5,603 | py | Python | shop/views/cart_views.py | cuescience/cuescience-shop | bf5ea159f9277d1d6ab7acfcad3f2517723a225c | [
"MIT"
] | null | null | null | shop/views/cart_views.py | cuescience/cuescience-shop | bf5ea159f9277d1d6ab7acfcad3f2517723a225c | [
"MIT"
] | null | null | null | shop/views/cart_views.py | cuescience/cuescience-shop | bf5ea159f9277d1d6ab7acfcad3f2517723a225c | [
"MIT"
] | null | null | null | import logging
from cart import Cart
from django.conf import settings
from django.contrib.sites.models import get_current_site
from django.utils import translation
from mailtemplates.models import EMailTemplate
from payment.models import PrePayment
from payment.services.paypal import paypal
from shop.checkout_wizard import condition_step_3, CheckoutWizardBase
from shop.models import Product, Order
from django.http import Http404, HttpResponseNotAllowed
from django.shortcuts import redirect, render_to_response, render
from django.template import RequestContext
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
logger = logging.getLogger(__name__)
class CheckoutWizard(CheckoutWizardBase):
    """Cart checkout wizard; step logic lives in CheckoutWizardBase."""

    # Django template used to render the wizard.
    template_name = "cuescience_shop/cart/wizard.html"
| 35.916667 | 116 | 0.641442 |
090e30708a609fe03c64ef91c96c83167bd3b51a | 4,178 | py | Python | niftypad/api/__init__.py | AMYPAD/NiftyPAD | 80bc005ca409f503a8df3a13a071d2f3f413553f | [
"Apache-2.0"
] | null | null | null | niftypad/api/__init__.py | AMYPAD/NiftyPAD | 80bc005ca409f503a8df3a13a071d2f3f413553f | [
"Apache-2.0"
] | 2 | 2021-09-06T21:38:43.000Z | 2021-10-05T11:07:08.000Z | niftypad/api/__init__.py | AMYPAD/NiftyPAD | 80bc005ca409f503a8df3a13a071d2f3f413553f | [
"Apache-2.0"
] | null | null | null | """Clean API"""
import logging
from pathlib import Path
from . import readers
log = logging.getLogger(__name__)
def kinetic_model(src, dst=None, params=None, model='srtmb_basis', input_interp_method='linear',
                  w=None, r1=1, k2p=0.000250, beta_lim=None, n_beta=40, linear_phase_start=500,
                  linear_phase_end=None, km_outputs=None, thr=0.1, fig=False):
    """Fit a kinetic model voxel-wise to a 4D PET image and save parametric maps.

    Loads the first ``*.nii`` file found under ``src`` (or ``src`` itself if it
    is a file), reads frame timing (``dt``) and the reference TAC (``ref``)
    from the matching config, fits ``model`` to every voxel, and writes one
    NIfTI per kinetic parameter plus the fitted image to ``dst``.

    Args:
        src (Path or str): input patient directory or filename
        dst (Path or str): output directory (default: `src` directory)
        params (Path or str): config (relative to `src` directory)
        model (str): any model from `niftypad.models` (see `niftypad.models.NAMES`)
        input_interp_method (str): the interpolation method for getting reference input:
            linear, cubic, exp_1, exp_2, feng_srtm
        w (ndarray): weights for weighted model fitting
        r1 (float): a pre-chosen value between 0 and 1 for r1, used in srtmb_asl_basis
        k2p (float): a pre-chosen value for k2p, in second^-1, used in
            srtmb_k2p_basis, logan_ref_k2p, mrtm_k2p
        beta_lim (list[int]): [beta_min, beta_max] for setting the lower and upper limits
            of beta values in basis functions, used in srtmb_basis, srtmb_k2p_basis, srtmb_asl_basis
        n_beta (int): number of beta values/basis functions, used in
            srtmb_basis, srtmb_k2p_basis, srtmb_asl_basis
        linear_phase_start (int): start time of linear phase in seconds, used in logan_ref,
            logan_ref_k2p, mrtm, mrtm_k2p
        linear_phase_end (int): end time of linear phase in seconds, used in logan_ref,
            logan_ref_k2p, mrtm, mrtm_k2p
        km_outputs (list[str]): the kinetic parameters to save, e.g. ['R1', 'k2', 'BP']
        thr (float): threshold value between 0 and 1. Used to mask out voxels with mean value
            over time exceeding `thr * max(image value)`
        fig (bool): whether to show a figure to check model fitting
    """
    # heavy deps imported lazily so importing this module stays cheap
    import nibabel as nib
    import numpy as np
    from niftypad import basis
    from niftypad.image_process.parametric_image import image_to_parametric
    from niftypad.models import get_model_inputs
    from niftypad.tac import Ref
    # resolve input file: first *.nii under a directory, or the file itself
    src_path = Path(src)
    if src_path.is_dir():
        fpath = next(src_path.glob('*.nii'))
    else:
        fpath = src_path
        src_path = fpath.parent
    log.debug("file:%s", fpath)
    if dst is None:
        dst_path = src_path
    else:
        dst_path = Path(dst)
        assert dst_path.is_dir()
    # NOTE(review): assumes the config provides 'dt' (frame timing) and 'ref'
    # (reference-region TAC) -- confirm against readers.find_meta.
    meta = readers.find_meta(src_path, filter(None, [params, fpath.stem]))
    dt = np.asarray(meta['dt'])
    ref = np.asarray(meta['ref'])
    ref = Ref(ref, dt)
    # change ref interpolation to selected method
    ref.run_interp(input_interp_method=input_interp_method)
    # NOTE(review): this log message describes the file search that already
    # happened above -- misleading placement.
    log.debug("looking for first `*.nii` file in %s", src_path)
    img = nib.load(fpath)
    # pet_image = img.get_fdata(dtype=np.float32)
    pet_image = np.asanyarray(img.dataobj)
    # basis functions
    if beta_lim is None:
        beta_lim = [0.01 / 60, 0.3 / 60]
    # change ref.inputf1cubic -> ref.input_interp_1
    b = basis.make_basis(ref.input_interp_1, dt, beta_lim=beta_lim, n_beta=n_beta, w=w, k2p=k2p)
    if km_outputs is None:
        km_outputs = ['R1', 'k2', 'BP']
    # change ref.inputf1cubic -> ref.input_interp_1
    # bundle everything a model might need; get_model_inputs picks the subset
    # relevant for the chosen `model`
    user_inputs = {
        'dt': dt, 'ref': ref, 'inputf1': ref.input_interp_1, 'w': w, 'r1': r1, 'k2p': k2p,
        'beta_lim': beta_lim, 'n_beta': n_beta, 'b': b, 'linear_phase_start': linear_phase_start,
        'linear_phase_end': linear_phase_end, 'fig': fig}
    model_inputs = get_model_inputs(user_inputs, model)
    # log.debug("model_inputs:%s", model_inputs)
    parametric_images_dict, pet_image_fit = image_to_parametric(pet_image, dt, model, model_inputs,
                                                                km_outputs, thr=thr)
    # one output NIfTI per kinetic parameter, plus the fitted 4D image.
    # NOTE(review): fpath.suffix includes the leading dot, so filenames come out
    # as e.g. "<stem>_<model>_R1_.nii" (trailing underscore) -- confirm intended.
    for kp in parametric_images_dict:
        nib.save(nib.Nifti1Image(parametric_images_dict[kp], img.affine),
                 f"{dst_path / fpath.stem}_{model}_{kp}_{fpath.suffix}")
    nib.save(nib.Nifti1Image(pet_image_fit, img.affine),
             f"{dst_path / fpath.stem}_{model}_fit_{fpath.suffix}")
09112b983864e08dcf3260b85da5bc6f69581ccc | 1,360 | py | Python | cli/src/accretion_cli/_commands/raw/__init__.py | mattsb42/accretion | 7cce5f4ed6d290bd9314b116be91417ded6b0f64 | [
"Apache-2.0"
] | 1 | 2019-10-19T11:18:17.000Z | 2019-10-19T11:18:17.000Z | cli/src/accretion_cli/_commands/raw/__init__.py | mattsb42/accretion | 7cce5f4ed6d290bd9314b116be91417ded6b0f64 | [
"Apache-2.0"
] | 13 | 2019-06-10T07:03:26.000Z | 2019-11-06T01:09:38.000Z | cli/src/accretion_cli/_commands/raw/__init__.py | mattsb42/accretion | 7cce5f4ed6d290bd9314b116be91417ded6b0f64 | [
"Apache-2.0"
] | null | null | null | """Raw CLI commands."""
from typing import IO
import click
from ..._templates import artifact_builder, replication_listener, source_region_core
from ..._util.workers_zip import build_and_write_workers
from .add import add_to_deployment
from .init import init_project
_TEMPLATES = {"builder": artifact_builder, "listener": replication_listener, "core-source": source_region_core}
raw_cli.add_command(add_to_deployment)
raw_cli.add_command(init_project)
| 27.2 | 111 | 0.733088 |
0913490c7e8b8a24f711e0b9a27fb487a58be19f | 77 | py | Python | recipes/stages/_base_/optimizers/adam.py | openvinotoolkit/model_preparation_algorithm | 8d36bf5944837b7a3d22fc2c3a4cb93423619fc2 | [
"Apache-2.0"
] | null | null | null | recipes/stages/_base_/optimizers/adam.py | openvinotoolkit/model_preparation_algorithm | 8d36bf5944837b7a3d22fc2c3a4cb93423619fc2 | [
"Apache-2.0"
] | null | null | null | recipes/stages/_base_/optimizers/adam.py | openvinotoolkit/model_preparation_algorithm | 8d36bf5944837b7a3d22fc2c3a4cb93423619fc2 | [
"Apache-2.0"
] | null | null | null | _base_ = './optimizer.py'
# Adam optimizer configuration (learning rate 3e-3).
optimizer = {
    'type': 'Adam',
    'lr': 0.003,
}
| 11 | 25 | 0.571429 |
091385b6dc5d31a226660eaa47c59dfda0a2329a | 5,998 | py | Python | .virtual_documents/project_workbook.ipynb.py | manolaz/bordeaux-data-mining-2021-workbook | bdb2b1418d20921878d9d74afcb6eac54c474061 | [
"MIT"
] | null | null | null | .virtual_documents/project_workbook.ipynb.py | manolaz/bordeaux-data-mining-2021-workbook | bdb2b1418d20921878d9d74afcb6eac54c474061 | [
"MIT"
] | null | null | null | .virtual_documents/project_workbook.ipynb.py | manolaz/bordeaux-data-mining-2021-workbook | bdb2b1418d20921878d9d74afcb6eac54c474061 | [
"MIT"
] | null | null | null | from IPython.display import display
from IPython.display import HTML
import IPython.core.display as di
# This line will hide code by default when the notebook is exported as HTML
di.display_html('<script>jQuery(function() {if (jQuery("body.notebook_app").length == 0) { jQuery(".input_area").toggle(); jQuery(".prompt").toggle();}});</script>', raw=True)
# This line will add a button to toggle visibility of code blocks, for use with the HTML export version
di.display_html('''<button onclick="jQuery('.input_area').toggle(); jQuery('.prompt').toggle();">Show/hide code</button>''', raw=True)
di.display_html("""
<style>
#customers {
font-family: "Trebuchet MS", Arial, Helvetica, sans-serif;
border-collapse: collapse;
width: 100get_ipython().run_line_magic(";", "")
}
#customers td, #customers th {
border: 1px solid #ddd;
padding: 8px;
text-align: center;
}
.content:nth-child(even){background-color: #f2f2f2;}
.content:hover{background-color:#C7C9C7;}
#customers th {
padding-top: 12px;
padding-bottom: 12px;
text-align: center;
color: white;
}
.first{
background-color: #4B6D80;
font-size:20px;
}
.second{
background-color: #71A4BF;
}
.third{
background-color: #B1D0E8;
color: white;
}
#customers a {
color: black;
padding: 10px 20px;
text-align: center;
text-decoration: none;
text-decoration-line: none;
text-decoration-style: solid;
text-decoration-color: currentcolor;
text-decoration-thickness: auto;
display: inline-block;
font-size: 16px;
margin-left: 20px;
}
</style>
""", raw=True)
di.display_html("""
<table id="customers">
<thead class="first">
<th colspan=5>Table of contents</th>
<tbody>
<tr>
<td colspan=5 class="cell"><a href='#Importing-Require'>Importing Require Libraries"</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#DataLoad'>Load</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#DataInsights'>Exploration Data - Data Insights</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#SummaryStatistics'>Exploration Data - Summary Statistics</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#DataLoad'>Data Cleaning</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#DataVisualization'>Data Visualization</a></td>
</tr>
<tr>
<td class="cell"><a href='#missing-value'>check missing values</a></td>
<td class="cell"><a href='#correlation'>correlation</a></td>
<td class="cell"><a href='#'>Correlation Heat Maps - Seaborn</a></td>
<td class="cell"><a href='#Outliers'>Outliers</a></td>
<td class="cell"><a href='#distribution-Skewness'>distribution-Skewness</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#Prediction'>Prediction Age and pay - Linear Regression</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#Comments-on-results'>Comments on results</a></td>
</tr>
<tr>
<td colspan=5 class="cell"><a href='#References'>References</a></td>
</tr>
</tbody>
</table>
""", raw=True)
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
import statsmodels.api as sm # Predict
import statsmodels.formula.api as smf #Predict
from sklearn import datasets, linear_model #Learn
from sklearn.metrics import mean_squared_error #Learn
get_ipython().run_line_magic("matplotlib", " inline")
# Load the credit-card dataset (path is relative to the working directory).
df = pd.read_csv('dataset/credit_cards_dataset.csv',sep=',')
# NOTE(review): the bare expressions below (head/shape/info/...) only display
# output inside a notebook; run as a plain script they are no-ops.
df.head()
df.shape
df.columns.values
df.info()
df.describe()
df.AGE.unique()
df.LIMIT_BAL.unique()
df.MARRIAGE.value_counts()
# - This tells us count of each MARRIAGE score in descending order.
# - "MARRIAGE" has most values concentrated in the categories 2, 1 .
# - Only a few observations made for the categories 3 & 0
## DATA CLEANING
### On the Dataset description , we don't have "MARRIAGE Status" = 0, so we need to clean up these values
# keep only the documented MARRIAGE categories (1 = married, 2 = single per
# the dataset description); rows with 0 or 3 are dropped.
df = df.loc[df["MARRIAGE"].isin([1,2])]
df
# Data Visualization
# Missing-value map: any colored band would indicate nulls in that column.
sns.heatmap(df.isnull(),cbar=False,yticklabels=False,cmap = 'viridis')
# Full feature-correlation heatmaps (without and with numeric annotations).
plt.figure(figsize=(6,4))
sns.heatmap(df.corr(),cmap='Blues',annot=False)
plt.figure(figsize=(6,4))
sns.heatmap(df.corr(),cmap='Blues',annot=True)
#Quality correlation matrix
k = 12 #number of variables for heatmap
# the k features most correlated with LIMIT_BAL
cols = df.corr().nlargest(k, 'LIMIT_BAL')['LIMIT_BAL'].index
cm = df[cols].corr()
plt.figure(figsize=(10,6))
sns.heatmap(cm, annot=True, cmap = 'viridis')
# Per-column box plots and distribution plots arranged in a grid of
# `number_of_columns` subplots per row.
l = df.columns.values
number_of_columns=12
# Fix: the original `len(l)-1/number_of_columns` parses as
# `len(l) - (1/number_of_columns)` due to operator precedence, yielding a
# value near len(l) instead of the intended row count -- which inflated the
# figure height ~12x and passed a non-integer row count to plt.subplot()
# (rejected by modern Matplotlib). Integer division restores the intent;
# together with the `+ 1` below it provides enough rows for all columns.
number_of_rows = (len(l) - 1) // number_of_columns
plt.figure(figsize=(number_of_columns,5*number_of_rows))
for i in range(0,len(l)):
    plt.subplot(number_of_rows + 1,number_of_columns,i+1)
    sns.set_style('whitegrid')
    sns.boxplot(df[l[i]],color='green',orient='v')
    plt.tight_layout()
plt.figure(figsize=(2*number_of_columns,5*number_of_rows))
for i in range(0,len(l)):
    plt.subplot(number_of_rows + 1,number_of_columns,i+1)
    sns.distplot(df[l[i]],kde=True)
# Fit an OLS summary model plus a scikit-learn linear regression predicting
# AGE from the four repayment-status features, then report train/test MSE.
from sklearn.model_selection import train_test_split
train, test = train_test_split(df, test_size=0.2, random_state=4)
results1 = smf.ols('AGE ~ PAY_0 + PAY_2 + PAY_3 + PAY_4 ', data=df).fit()
print(results1.summary())
y = train["AGE"]
cols = ["PAY_0","PAY_2","PAY_3","PAY_4"]
X=train[cols]
regr = linear_model.LinearRegression()
regr.fit(X,y)
ytrain_pred = regr.predict(X)
# Fix: the notebook-to-script conversion mangled the "%.2f" printf specifier
# in both print statements into get_ipython().run_line_magic(".2f"", ""),
# which is a syntax error; the original format strings are restored.
print("In-sample Mean squared error: %.2f"
      % mean_squared_error(y, ytrain_pred))
ytest = test["AGE"]
cols = ["PAY_0","PAY_2","PAY_3","PAY_4"]
Xtest=test[cols]
ypred = regr.predict(Xtest)
print("Out-of-sample Mean squared error: %.2f"
      % mean_squared_error(ytest, ypred))
| 24.283401 | 175 | 0.649883 |
0913a064947d243f614c619b7153c8fd3f692bd6 | 228 | py | Python | benders-decomposition/src/input/facility.py | grzegorz-siekaniec/benders-decomposition-gurobi | 5435e82c7ef4fe14fc53ff07b8eaa1516208b57c | [
"MIT"
] | 6 | 2021-05-31T10:23:18.000Z | 2022-02-15T08:45:30.000Z | benders-decomposition/src/input/facility.py | grzegorz-siekaniec/benders-decomposition-gurobi | 5435e82c7ef4fe14fc53ff07b8eaa1516208b57c | [
"MIT"
] | null | null | null | benders-decomposition/src/input/facility.py | grzegorz-siekaniec/benders-decomposition-gurobi | 5435e82c7ef4fe14fc53ff07b8eaa1516208b57c | [
"MIT"
] | null | null | null | from dataclasses import dataclass
from typing import Dict
| 19 | 56 | 0.723684 |
0913ad0ca10d6662285de519bc20eb0bf251e5c6 | 3,834 | py | Python | B2G/gecko/toolkit/crashreporter/client/certdata2pem.py | wilebeast/FireFox-OS | 43067f28711d78c429a1d6d58c77130f6899135f | [
"Apache-2.0"
] | 3 | 2015-08-31T15:24:31.000Z | 2020-04-24T20:31:29.000Z | B2G/gecko/toolkit/crashreporter/client/certdata2pem.py | wilebeast/FireFox-OS | 43067f28711d78c429a1d6d58c77130f6899135f | [
"Apache-2.0"
] | null | null | null | B2G/gecko/toolkit/crashreporter/client/certdata2pem.py | wilebeast/FireFox-OS | 43067f28711d78c429a1d6d58c77130f6899135f | [
"Apache-2.0"
] | 3 | 2015-07-29T07:17:15.000Z | 2020-11-04T06:55:37.000Z | #!/usr/bin/python
# vim:set et sw=4:
#
# Originally from:
# http://cvs.fedoraproject.org/viewvc/F-13/ca-certificates/certdata2pem.py?revision=1.1&content-type=text%2Fplain&view=co
#
# certdata2pem.py - converts certdata.txt into PEM format.
#
# Copyright (C) 2009 Philipp Kern <pkern@debian.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301,
# USA.
# NOTE: this is Python 2 code (see the `raise NotImplementedError, ...`
# statement below); it will not parse under Python 3.
import base64
import os.path
import re
import sys
import textwrap

# Parsed records from certdata.txt on stdin: one dict of CKA_* fields per object.
objects = []
# Dirty file parser.
# Parser state: past the BEGINDATA header / inside a MULTILINE_OCTAL value /
# inside an object record.
in_data, in_multiline, in_obj = False, False, False
field, type, value, obj = None, None, None, dict()
for line in sys.stdin:
    # Ignore the file header.
    if not in_data:
        if line.startswith('BEGINDATA'):
            in_data = True
        continue
    # Ignore comment lines.
    if line.startswith('#'):
        continue
    # Empty lines are significant if we are inside an object.
    if in_obj and len(line.strip()) == 0:
        # Blank line terminates the current record; commit it and reset.
        objects.append(obj)
        obj = dict()
        in_obj = False
        continue
    if len(line.strip()) == 0:
        continue
    if in_multiline:
        if not line.startswith('END'):
            if type == 'MULTILINE_OCTAL':
                # Decode backslash-escaped octal bytes (e.g. \101) into chars.
                line = line.strip()
                for i in re.finditer(r'\\([0-3][0-7][0-7])', line):
                    value += chr(int(i.group(1), 8))
            else:
                value += line
            continue
        # 'END' marker: commit the accumulated multiline value.
        obj[field] = value
        in_multiline = False
        continue
    if line.startswith('CKA_CLASS'):
        # A CKA_CLASS attribute starts a new object record.
        in_obj = True
    # Attribute lines have the form "FIELD TYPE [VALUE...]".
    line_parts = line.strip().split(' ', 2)
    if len(line_parts) > 2:
        field, type = line_parts[0:2]
        value = ' '.join(line_parts[2:])
    elif len(line_parts) == 2:
        field, type = line_parts
        value = None
    else:
        # Python 2 raise syntax (equivalent to raise NotImplementedError('...')).
        raise NotImplementedError, 'line_parts < 2 not supported.'
    if type == 'MULTILINE_OCTAL':
        # Value follows on subsequent lines until 'END'.
        in_multiline = True
        value = ""
        continue
    obj[field] = value
# Flush the last object if the input did not end with a blank line.
if len(obj.items()) > 0:
    objects.append(obj)
# Build up trust database.
# Maps CKA_LABEL -> True for certs trusted as SSL server-auth delegators.
trust = dict()
for obj in objects:
    if obj['CKA_CLASS'] != 'CKO_NETSCAPE_TRUST':
        continue
    # For some reason, OpenSSL on Maemo has a bug where if we include
    # this certificate, and it winds up as the last certificate in the file,
    # then OpenSSL is unable to verify the server certificate. For now,
    # we'll just omit this particular CA cert, since it's not one we need
    # for crash reporting.
    # This is likely to be fragile if the NSS certdata.txt changes.
    # The bug is filed upstream:
    # https://bugs.maemo.org/show_bug.cgi?id=10069
    if obj['CKA_LABEL'] == '"ACEDICOM Root"':
        continue
    # We only want certs that are trusted for SSL server auth
    if obj['CKA_TRUST_SERVER_AUTH'] == 'CKT_NETSCAPE_TRUSTED_DELEGATOR':
        trust[obj['CKA_LABEL']] = True
# Emit every trusted certificate as a PEM block (base64 wrapped at 64 cols).
for obj in objects:
    if obj['CKA_CLASS'] == 'CKO_CERTIFICATE':
        if not obj['CKA_LABEL'] in trust or not trust[obj['CKA_LABEL']]:
            continue
        sys.stdout.write("-----BEGIN CERTIFICATE-----\n")
        sys.stdout.write("\n".join(textwrap.wrap(base64.b64encode(obj['CKA_VALUE']), 64)))
        sys.stdout.write("\n-----END CERTIFICATE-----\n\n")
| 34.232143 | 121 | 0.639541 |
0913cfe9fc0c893e99396e709cf82a81dfd1ee9b | 56 | py | Python | pandaharvester/commit_timestamp.py | PalNilsson/harvester | dab4f388c6d1f33291b44c1a8d656d210330e767 | [
"Apache-2.0"
] | null | null | null | pandaharvester/commit_timestamp.py | PalNilsson/harvester | dab4f388c6d1f33291b44c1a8d656d210330e767 | [
"Apache-2.0"
] | null | null | null | pandaharvester/commit_timestamp.py | PalNilsson/harvester | dab4f388c6d1f33291b44c1a8d656d210330e767 | [
"Apache-2.0"
] | null | null | null | timestamp = "02-03-2022 13:12:15 on flin (by mightqxc)"
| 28 | 55 | 0.696429 |
0914e1a28a157c416a8a2c605a43e0263d7aefd4 | 701 | py | Python | distil/utils/config_helper.py | ansunsujoe/distil | cf6cae2b88ef129d09c159aae0569978190e9f98 | [
"MIT"
] | 83 | 2021-01-06T06:50:30.000Z | 2022-03-31T05:16:32.000Z | distil/utils/config_helper.py | ansunsujoe/distil | cf6cae2b88ef129d09c159aae0569978190e9f98 | [
"MIT"
] | 30 | 2021-02-27T06:09:47.000Z | 2021-12-23T11:03:36.000Z | distil/utils/config_helper.py | ansunsujoe/distil | cf6cae2b88ef129d09c159aae0569978190e9f98 | [
"MIT"
] | 13 | 2021-03-05T18:26:58.000Z | 2022-03-12T01:53:17.000Z | import json
import os
def read_config_file(filename):
"""
Loads and returns a configuration from the supplied filename / path.
Parameters
----------
filename: string
The name/path of the config file to load.
Returns
----------
config: object
The resulting configuration laoded from the JSON file
"""
print(filename.split('.')[-1])
if filename.split('.')[-1] not in ['json']:
raise IOError('Only json type are supported now!')
if not os.path.exists(filename):
raise FileNotFoundError('Config file does not exist!')
with open(filename, 'r') as f:
config = json.load(f)
return config | 24.172414 | 72 | 0.600571 |
0915536721dec4fcf77cccd8a1e6caa20567b01f | 1,944 | py | Python | Easy/26.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | 6 | 2017-09-25T18:05:50.000Z | 2019-03-27T00:23:15.000Z | Easy/26.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | 1 | 2017-10-29T12:04:41.000Z | 2018-08-16T18:00:37.000Z | Easy/26.py | Hellofafar/Leetcode | 7a459e9742958e63be8886874904e5ab2489411a | [
"CNRI-Python"
] | null | null | null | # ------------------------------
# 26. Remove Duplicates from Sorted Array
#
# Description:
# Given a sorted array, remove the duplicates in place such that each element appear only once and return the new length.
# Do not allocate extra space for another array, you must do this in place with constant memory.
#
# For example,
# Given input array nums = [1,1,2],
#
# Your function should return length = 2, with the first two elements of nums being 1 and 2 respectively. It doesn't matter what you leave beyond the new length.
#
# Version: 1.0
# 09/17/17 by Jianfa
# ------------------------------
# Used for test
if __name__ == "__main__":
    # NOTE(review): `Solution` is not defined in this excerpt — it is expected
    # to be defined earlier in the original file (standard LeetCode layout);
    # as shown here this would raise NameError. TODO confirm.
    test = Solution()
    # Sorted input with duplicates; expected new length is 4 ([1,2,3,4]).
    nums = [1,1,1,2,3,4,4,4,4]
    print(test.removeDuplicates(nums))
# ------------------------------
# Good idea from other solution:
# Actually there is no need to really remove value from the list. As the last sentence said
# "It doesn't matter what you leave beyond the new length." So we can just modify the first several
# numbers which is the length of unique values, but leave other values behind unchanged. We set two
# runner: a fast runner and a slow runner. As long as a different value is met, modify the corresponding
# value in position of slow runner, otherwise move the fast runner.
# Here is a link for reference:
# https://leetcode.com/problems/remove-duplicates-from-sorted-array/solution/ | 34.714286 | 161 | 0.587963 |
091779d4a3220139baaa8d0f21ee1690811fd3bf | 197 | py | Python | enote/__init__.py | tkjacobsen/enote | b8150885733599016b2d0b1d36f03e62ca8e3cdc | [
"MIT"
] | 16 | 2015-04-30T22:36:57.000Z | 2021-04-29T16:38:17.000Z | enote/__init__.py | tkjacobsen/enote | b8150885733599016b2d0b1d36f03e62ca8e3cdc | [
"MIT"
] | 1 | 2017-02-18T18:42:31.000Z | 2017-02-18T18:48:47.000Z | enote/__init__.py | tkjacobsen/enote | b8150885733599016b2d0b1d36f03e62ca8e3cdc | [
"MIT"
] | 2 | 2017-06-03T08:00:28.000Z | 2017-07-15T16:50:47.000Z | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright (c) 2016 Troels Agergaard Jacobsen
__version__ = '0.2.0'
__description__ = 'Command line utility to backup Evernote notes and notebooks.'
| 32.833333 | 80 | 0.725888 |
0919fdf2ce825bd818ddbc07ef5fd14d15e3d623 | 6,373 | py | Python | ncpsort/cluster_synthetic_data/inference_plot_synthetic.py | yueqiw/ncp-sort | 045361d93bc9d8ef2596cdda7c485b6ffd77dd81 | [
"MIT"
] | 2 | 2019-08-06T10:10:37.000Z | 2020-09-30T12:11:28.000Z | ncpsort/cluster_synthetic_data/inference_plot_synthetic.py | yueqiw/ncp-sort | 045361d93bc9d8ef2596cdda7c485b6ffd77dd81 | [
"MIT"
] | 1 | 2021-04-14T12:09:02.000Z | 2021-07-19T04:06:05.000Z | ncpsort/cluster_synthetic_data/inference_plot_synthetic.py | yueqiw/ncp-sort | 045361d93bc9d8ef2596cdda7c485b6ffd77dd81 | [
"MIT"
] | null | null | null |
"""Plot clustered spikes
Usage:
python ncpsort.cluster_synthetic_data.inference_plot_synthetic \
--inference_dir ./inference_synthetic_N-1000/cluster_S-150-beam_NCP-10000 \
--min_cls_size 50 --plot_type overlay
or --inference_dir --min_cls_size 50 --plot_type tsne
"""
import numpy as np
import torch
import time
import json
import argparse
import os
from ncpsort.utils.spike_utils import get_chan_nbrs, select_template_channels, template_window
from ncpsort.utils.plotting import DEFAULT_COLORS
from ncpsort.utils.plotting import plot_spike_clusters_and_gt_in_rows
from ncpsort.utils.plotting import plot_spike_clusters_and_templates_overlay
from ncpsort.utils.plotting import plot_raw_and_encoded_spikes_tsne
# Command-line interface for the plotting script.
parser = argparse.ArgumentParser(description='Plot inference results.')
parser.add_argument('--inference_dir', type=str,
                    help='Directory containing the clustering results and infer_params.json.')
parser.add_argument('--min_cls_size', type=int, default=0,
                    help='Skip clusters smaller than this size when plotting.')
parser.add_argument('--topn', type=int, default=1,
                    help='Number of top cluster configurations to plot.')
# FIX (idiom): action="store_true" is the idiomatic, behavior-identical form of
# the original action="store_const", const=True, default=False.
parser.add_argument('--plot_mfm', action="store_true", default=False,
                    help='Also load and overlay MFM cluster assignments.')
parser.add_argument('--plot_type', type=str, default="overlay",
                    help='Plot style: "overlay" or "tsne".')
if __name__ == "__main__":
    # Main entry point: load inference results from --inference_dir and write
    # overlay (or t-SNE) figures for each clustered recording.
    args = parser.parse_args()
    do_corner_padding = True
    output_dir = args.inference_dir
    # Parameters saved by the inference step alongside its results.
    with open(os.path.join(output_dir, "infer_params.json"), "r") as f:
        infer_params = json.load(f)
    min_cls_size = args.min_cls_size
    templates = None
    templates_use = None
    templates_name = None
    # Override the neighbourhood geometry parameters used for plotting.
    infer_params['nbr_dist'] = 70
    infer_params['n_nbr'] = 7
    print("parameters:\n", json.dumps(infer_params, indent=2))
    # Hard-coded 7-channel probe geometry (x, y coordinates) for this dataset.
    geom = np.array([
        [-585.0, 270.0],
        [-645.0, 270.0],
        [-525.0, 270.0],
        [-615.0, 210.0],
        [-555.0, 210.0],
        [-615.0, 330.0],
        [-555.0, 330.0]]
    )
    chans_with_nbrs, chan_to_nbrs = get_chan_nbrs(
        geom, infer_params['nbr_dist'], infer_params['n_nbr'], keep_less_nbrs=False)
    print("{} channels used:".format(len(chans_with_nbrs)))
    print(chans_with_nbrs)
    topn = args.topn
    data_dir = os.path.join(output_dir, "data_ncp")
    # fig_dir_by_row = os.path.join(output_dir, "figures_by_row")
    # if not os.path.isdir(fig_dir_by_row): os.mkdir(fig_dir_by_row)
    fig_dir_overlay = os.path.join(
        output_dir, "figs_overlay_min-cls-{}_temp-{}".format(min_cls_size, templates_name))
    if not os.path.isdir(fig_dir_overlay):
        os.mkdir(fig_dir_overlay)
    fig_dir_vert_overlay = os.path.join(
        output_dir, "figs_overlay_vertical_min-cls-{}_temp-{}".format(min_cls_size, templates_name))
    if not os.path.isdir(fig_dir_vert_overlay):
        os.mkdir(fig_dir_vert_overlay)
    if args.plot_mfm:
        mfm_dir = os.path.join(infer_params['data_name'], "cluster_mfm", "data_mfm")
    input_dir = infer_params['data_name']
    # FIX: `x.rstrip(".npz")` strips any trailing '.', 'n', 'p', 'z' characters
    # (rstrip takes a character set, not a suffix) and can mangle file stems;
    # slice the literal suffix off instead.
    fnames_list = [x[:-len(".npz")]
                   for x in os.listdir(os.path.join(input_dir, "data_input"))
                   if x.endswith(".npz")]
    fnames_list = sorted(fnames_list)
    # FIX: hoisted out of the loop below — the original re-wrapped
    # templates_name every iteration, compounding to "X templates templates ..."
    # whenever templates_name was not None.
    templates_display = "{} templates".format(templates_name) if templates_name else None
    for fname in fnames_list:
        if args.plot_mfm:
            mfm_fname = [x for x in os.listdir(mfm_dir) if fname in x and x.endswith(".npy")]
            # FIX: slice off the literal ".npy" suffix (see rstrip note above).
            mfm_fname = mfm_fname[0][:-len(".npy")]
            npy_fname = os.path.join(mfm_dir, "{}.npy".format(mfm_fname))
            mfm_clusters = np.load(npy_fname)
            mfm_name = "MFM"
        else:
            mfm_clusters = None
            mfm_name = None
        print("Plotting {}:".format(fname))
        npz_fname = os.path.join(data_dir, "{}_ncp.npz".format(fname))
        npz = np.load(npz_fname)
        clusters, nll, data_arr, gt_labels = npz['clusters'], npz['nll'], npz['data_arr'], npz['gt_labels']
        # plot_spike_clusters_and_gt_in_rows(
        #     css, nll, data_arr, gt_labels, topn=topn,
        #     figdir=fig_dir_by_row, fname_postfix=fname,
        #     plot_params={"spacing":1.25, "width":0.9, "vscale":1.5, "subplot_adj":0.9},
        #     downsample=3)
        temp_in_ch = None
        nbr_channels = np.arange(len(geom))
        if args.plot_type == 'overlay':
            plot_spike_clusters_and_templates_overlay(
                clusters, nll, data_arr, geom, nbr_channels, DEFAULT_COLORS, topn=topn,
                extra_clusters=mfm_clusters, extra_name=mfm_name, gt_labels=gt_labels,
                min_cls_size=min_cls_size, templates=temp_in_ch, template_name=templates_display,
                figdir=fig_dir_overlay, fname_postfix=fname, size_single=(9, 6),
                plot_params={"time_scale": 1.1, "scale": 8., "alpha_overlay": 0.1})
            # Same plot on a synthetic vertical layout (channels in one column).
            n_ch = len(nbr_channels)
            vertical_geom = np.stack([np.zeros(n_ch), - np.arange(n_ch) * 12 * 7]).T
            plot_spike_clusters_and_templates_overlay(
                clusters, nll, data_arr, vertical_geom, np.arange(n_ch), DEFAULT_COLORS, topn=topn,
                extra_clusters=mfm_clusters, extra_name=mfm_name, gt_labels=gt_labels,
                min_cls_size=min_cls_size, templates=temp_in_ch, template_name=templates_display,
                figdir=fig_dir_vert_overlay, fname_postfix=fname, size_single=(2.5, 18), vertical=True,
                plot_params={"time_scale": 1.1, "scale": 8., "alpha_overlay": 0.1})
        elif args.plot_type == 'tsne':
            fig_dir_tsne = os.path.join(output_dir, "figs_tsne_min-cls-{}".format(min_cls_size))
            if not os.path.isdir(fig_dir_tsne):
                os.mkdir(fig_dir_tsne)
            tsne_dir = os.path.join(infer_params['data_name'], "spike_encoder_it-18600/data_encoder")
            enc_matches = [x for x in os.listdir(tsne_dir) if fname in x and x.endswith(".npz")]
            data_encoded = np.load(os.path.join(tsne_dir, "{}".format(enc_matches[0])))
            data_encoded = data_encoded['encoded_spikes']
            # FIX: slice off the literal suffix; the original
            # rstrip("_encoded_spikes.npz") stripped any trailing characters
            # from that set and could corrupt the figure name.
            fname = enc_matches[0][:-len("_encoded_spikes.npz")]
            plot_raw_and_encoded_spikes_tsne(
                clusters, nll, data_arr, data_encoded, DEFAULT_COLORS, topn=topn,
                extra_clusters=mfm_clusters, extra_name=mfm_name, gt_labels=gt_labels,
                min_cls_size=min_cls_size, sort_by_count=True,
                figdir=fig_dir_tsne, fname_postfix=fname, size_single=(6, 6),
                tsne_params={'seed': 0, 'perplexity': 30},
                plot_params={'pt_scale': 1}, show=False
            )
| 45.198582 | 132 | 0.661541 |