hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
fd349e3352814cffd9d5b6c0c4f84624bb4c6bc6 | 1,868 | py | Python | app/services/aggregator/aggr.py | maestro-server/report-app | 0bf9014400f2979c51c1c544347d5134c73facdf | [
"Apache-2.0"
] | 1 | 2020-05-19T20:18:05.000Z | 2020-05-19T20:18:05.000Z | app/services/aggregator/aggr.py | maestro-server/report-app | 0bf9014400f2979c51c1c544347d5134c73facdf | [
"Apache-2.0"
] | 2 | 2019-10-21T14:56:04.000Z | 2020-03-27T12:48:26.000Z | app/services/aggregator/aggr.py | maestro-server/report-app | 0bf9014400f2979c51c1c544347d5134c73facdf | [
"Apache-2.0"
] | null | null | null | import pandas as pd
from pydash.objects import get
| 23.64557 | 77 | 0.556745 |
fd375dd0458ad06acf734748313eaba69b115090 | 310 | py | Python | email.py | zhaoxiaoyunok/python-library-fuzzers | 83db496f75280795415821097802a96fbf72f50f | [
"MIT"
] | 4 | 2019-07-03T06:01:08.000Z | 2019-07-29T08:07:54.000Z | email.py | zhaoxiaoyunok/python-library-fuzzers | 83db496f75280795415821097802a96fbf72f50f | [
"MIT"
] | null | null | null | email.py | zhaoxiaoyunok/python-library-fuzzers | 83db496f75280795415821097802a96fbf72f50f | [
"MIT"
] | 4 | 2019-07-03T03:24:56.000Z | 2021-12-11T12:30:31.000Z | from email.parser import BytesParser, Parser
from email.policy import default, HTTP
| 28.181818 | 81 | 0.696774 |
fd37bfae595f1bdb3cabeb62c9beddb408f90236 | 315 | py | Python | tests/conftest.py | calpt/flask-filealchemy | b3575299f0230d5a64865af8066122c2e0c485ec | [
"MIT"
] | 16 | 2018-10-16T03:32:39.000Z | 2020-09-04T02:05:37.000Z | tests/conftest.py | calpt/flask-filealchemy | b3575299f0230d5a64865af8066122c2e0c485ec | [
"MIT"
] | 8 | 2019-02-25T10:59:15.000Z | 2019-03-11T08:36:57.000Z | tests/conftest.py | calpt/flask-filealchemy | b3575299f0230d5a64865af8066122c2e0c485ec | [
"MIT"
] | 3 | 2019-11-22T23:46:16.000Z | 2020-06-05T19:17:23.000Z | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import pytest
| 18.529412 | 64 | 0.742857 |
fd3bd480b9c0a1b8e0dc9e02d722d288943bec44 | 357 | py | Python | DataStructuresAndAlgorithms/sorting algorithms/SelectionSort.py | armaan2k/Training-Exercises | 6dd94efb6cd6e0dc6c24e2b7d5e74588a74d190d | [
"MIT"
] | null | null | null | DataStructuresAndAlgorithms/sorting algorithms/SelectionSort.py | armaan2k/Training-Exercises | 6dd94efb6cd6e0dc6c24e2b7d5e74588a74d190d | [
"MIT"
] | null | null | null | DataStructuresAndAlgorithms/sorting algorithms/SelectionSort.py | armaan2k/Training-Exercises | 6dd94efb6cd6e0dc6c24e2b7d5e74588a74d190d | [
"MIT"
] | null | null | null |
A = [3, 5, 8, 9, 6, 2]
print('Original Array: ', A)
selection_sort(A)
print('Sorted Array: ', A)
| 21 | 34 | 0.473389 |
fd3cb5b3e208b581d3cf014077d7e88f0727e79e | 3,465 | py | Python | books/models.py | MattRijk/finance-ebook-site | c564d4bc9578f0a6f47efa53f0c81893fbee08f7 | [
"MIT"
] | 1 | 2020-05-16T12:48:02.000Z | 2020-05-16T12:48:02.000Z | books/models.py | MattRijk/finance-ebook-site | c564d4bc9578f0a6f47efa53f0c81893fbee08f7 | [
"MIT"
] | null | null | null | books/models.py | MattRijk/finance-ebook-site | c564d4bc9578f0a6f47efa53f0c81893fbee08f7 | [
"MIT"
] | 3 | 2017-12-06T11:18:10.000Z | 2020-05-16T12:49:32.000Z | from django.db import models
from django.core.urlresolvers import reverse
from django.utils import timezone
from django.utils.text import slugify
| 33 | 72 | 0.610967 |
fd3e69529a275a604f79403141c9d3a32f7b625b | 341 | py | Python | bucketlist_django/bucketlist_django/settings/development.py | andela-tadesanya/django-bucketlist-application | 315ceb77e635fe051b5600ada460af938c140af1 | [
"MIT"
] | null | null | null | bucketlist_django/bucketlist_django/settings/development.py | andela-tadesanya/django-bucketlist-application | 315ceb77e635fe051b5600ada460af938c140af1 | [
"MIT"
] | null | null | null | bucketlist_django/bucketlist_django/settings/development.py | andela-tadesanya/django-bucketlist-application | 315ceb77e635fe051b5600ada460af938c140af1 | [
"MIT"
] | null | null | null | # load defaults and override with devlopment settings
from defaults import *
DEBUG = True
WSGI_APPLICATION = 'bucketlist_django.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'bucketlist',
'USER': 'bucketlist',
'PASSWORD': 'bucketlist'
}
}
| 21.3125 | 59 | 0.653959 |
fd45eaa50a88cd4355e2753ab9dc9b6e727d52ec | 1,198 | py | Python | storyboard/tests/plugin/scheduler/test_base.py | Sitcode-Zoograf/storyboard | 5833f87e20722c524a1e4a0b8e1fb82206fb4e5c | [
"Apache-2.0"
] | null | null | null | storyboard/tests/plugin/scheduler/test_base.py | Sitcode-Zoograf/storyboard | 5833f87e20722c524a1e4a0b8e1fb82206fb4e5c | [
"Apache-2.0"
] | null | null | null | storyboard/tests/plugin/scheduler/test_base.py | Sitcode-Zoograf/storyboard | 5833f87e20722c524a1e4a0b8e1fb82206fb4e5c | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2015 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing permissions and
# limitations under the License.
from storyboard.plugin.scheduler.base import SchedulerPluginBase
import storyboard.tests.base as base
| 29.219512 | 78 | 0.717863 |
fd464df7fbbebbe26bc4d827bb8cf980aecbe03a | 13,019 | py | Python | src/model/build_models.py | VinGPan/classification_model_search | fab7ce6fc131b858f1b79633e0f7b86d1446c93d | [
"MIT"
] | null | null | null | src/model/build_models.py | VinGPan/classification_model_search | fab7ce6fc131b858f1b79633e0f7b86d1446c93d | [
"MIT"
] | null | null | null | src/model/build_models.py | VinGPan/classification_model_search | fab7ce6fc131b858f1b79633e0f7b86d1446c93d | [
"MIT"
] | null | null | null | import os
import os.path
import pickle
from shutil import copyfile
import numpy as np
import pandas as pd
import xgboost as xgb
from sklearn.decomposition import KernelPCA
from sklearn.decomposition import PCA
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.exceptions import ConvergenceWarning
from sklearn.externals import joblib
from sklearn.impute import SimpleImputer
from sklearn.linear_model import LogisticRegression, LinearRegression
from sklearn.manifold import Isomap
from sklearn.manifold import LocallyLinearEmbedding
from sklearn.metrics import accuracy_score, balanced_accuracy_score, r2_score, mean_absolute_error, mean_squared_error
from sklearn.metrics import make_scorer
from sklearn.model_selection import GridSearchCV
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.svm import SVC
from sklearn.utils.testing import ignore_warnings
from src.model.utils import makedir
from src.utils.logging import logger
#####################################################################
# HERE IS LIST OF VARIES LIBRARIES WE STUDIED DURING SCS_3253_024 Machine Learning COURSE that are
# relevant to classification problem. We will tray use as many ideas as possible for this project.
#####################################################################
| 47.688645 | 118 | 0.541132 |
fd4aa50f6950d8285cebb403be0898f64adbb857 | 2,495 | py | Python | gleague/gleague/frontend/seasons.py | Nuqlear/genkstaleague | 664ed1d3ebea9c43053546fc2d658083cc16526b | [
"MIT"
] | 7 | 2015-08-18T01:21:48.000Z | 2021-04-30T03:10:38.000Z | gleague/gleague/frontend/seasons.py | Nuqlear/genkstaleague | 664ed1d3ebea9c43053546fc2d658083cc16526b | [
"MIT"
] | 1 | 2019-04-28T10:02:39.000Z | 2019-05-06T08:11:56.000Z | gleague/gleague/frontend/seasons.py | Nuqlear/genkstaleague | 664ed1d3ebea9c43053546fc2d658083cc16526b | [
"MIT"
] | 3 | 2015-08-14T09:42:25.000Z | 2018-11-08T07:07:58.000Z | from flask import Blueprint
from flask import abort
from flask import render_template
from flask import request
from flask import current_app
from sqlalchemy import desc
from gleague.core import db
from gleague.models import Match
from gleague.models import Season
from gleague.models import SeasonStats
from gleague.models.queries import season_analytic
seasons_bp = Blueprint("seasons", __name__)
| 34.652778 | 83 | 0.710621 |
fd4d6ed01b3decd5927f1d836a338350d16f500c | 941 | py | Python | LC_problems/822.py | Howardhuang98/Blog | cf58638d6d0bbf55b95fe08e43798e7dd14219ac | [
"MIT"
] | null | null | null | LC_problems/822.py | Howardhuang98/Blog | cf58638d6d0bbf55b95fe08e43798e7dd14219ac | [
"MIT"
] | null | null | null | LC_problems/822.py | Howardhuang98/Blog | cf58638d6d0bbf55b95fe08e43798e7dd14219ac | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- encoding: utf-8 -*-
"""
@File : 822.py
@Contact : huanghoward@foxmail.com
@Modify Time : 2022/3/29 13:19
------------
"""
from typing import List
if __name__ == '__main__':
s = Solution()
print(s.flipgame([1],[1])) | 27.676471 | 67 | 0.420829 |
fd4d784f79a128a2168a7d3f9c317a2fb64d12f1 | 22,795 | py | Python | result/analyze.py | kuriatsu/PIE_RAS | 8dd33b4d4f7b082337a2645c0a72082374768b52 | [
"Apache-2.0"
] | null | null | null | result/analyze.py | kuriatsu/PIE_RAS | 8dd33b4d4f7b082337a2645c0a72082374768b52 | [
"Apache-2.0"
] | null | null | null | result/analyze.py | kuriatsu/PIE_RAS | 8dd33b4d4f7b082337a2645c0a72082374768b52 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python3
# -*- coding: utf-8 -*-
import pickle
import pandas as pd
import xml.etree.ElementTree as ET
import math
import seaborn as sns
import matplotlib.pyplot as plt
import numpy as np
import csv
import glob
import scikit_posthocs as sp
from scipy import stats
import os
from scipy import stats
import scikit_posthocs as sp
sns.set(context='paper', style='whitegrid')
hue_order = ["traffic light", "crossing intention", "trajectory"]
eps=0.01
tl_black_list = [
"3_3_96tl",
"3_3_102tl",
"3_4_107tl",
"3_4_108tl",
"3_5_112tl",
"3_5_113tl",
"3_5_116tl",
"3_5_117tl",
"3_5_118tl",
"3_5_119tl",
"3_5_122tl",
"3_5_123tl",
"3_5_126tl",
"3_5_127tl",
"3_6_128tl",
"3_6_137tl",
"3_7_142tl",
"3_8_153tl",
"3_8_160tl",
"3_9_173tl",
"3_9_174tl",
"3_9_179tl",
"3_10_185tl",
"3_10_188tl",
"3_11_205tl",
"3_12_218tl",
"3_12_221tl",
"3_15_241tl",
"3_16_256tl",
"3_16_257tl",
]
opposite_anno_list = ["3_16_259tl", "3_16_258tl", "3_16_249tl"]
log_data = None
data_path = "/home/kuriatsu/Dropbox/data/pie202203"
for file in glob.glob(os.path.join(data_path, "log*.csv")):
buf = pd.read_csv(file)
filename =file.split("/")[-1]
count = int(filename.replace("log_data_", "").split("_")[-1].replace(".csv", ""))
print("{}".format(filename))
if count in [0, 1, 2]:
print("skipped")
continue
trial = filename.split("_")[-1].replace(".csv", "")
buf["subject"] = filename.replace("log_data_", "").split("_")[0]
buf["task"] = filename.replace("log_data_", "").split("_")[1]
correct_list = []
response_list = []
for idx, row in buf.iterrows():
if row.id in tl_black_list:
row.last_state = -2
if row.last_state == -1: # no intervention
correct_list.append(-1)
response_list.append(-1)
elif int(row.last_state) == int(row.state):
if row.id in opposite_anno_list:
correct_list.append(1)
if row.last_state == 1:
response_list.append(3)
elif row.last_state == 0:
response_list.append(0)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
else:
correct_list.append(0)
if row.last_state == 1:
response_list.append(1)
elif row.last_state == 0:
response_list.append(2)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
else:
if row.id in opposite_anno_list:
correct_list.append(0)
if row.last_state == 1:
response_list.append(1)
elif row.last_state == 0:
response_list.append(2)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
else:
correct_list.append(1)
if row.last_state == 1:
response_list.append(3)
elif row.last_state == 0:
response_list.append(0)
else:
print(f"last_state{row.last_state}, state{row.state}")
response_list.append(4) # ignored=4
buf["correct"] = correct_list
buf["response"] = response_list
len(correct_list)
if log_data is None:
log_data = buf
else:
log_data = log_data.append(buf, ignore_index=True)
task_list = {"int": "crossing intention", "tl": "traffic light", "traj":"trajectory"}
subject_data = pd.DataFrame(columns=["subject", "task", "acc", "int_length", "missing"])
for subject in log_data.subject.drop_duplicates():
for task in log_data.task.drop_duplicates():
for length in log_data.int_length.drop_duplicates():
target = log_data[(log_data.subject == subject) & (log_data.task == task) & (log_data.int_length == length)]
# acc = len(target[target.correct == 1])/(len(target))
acc = len(target[target.correct == 1])/(len(target[target.correct == 0]) + len(target[target.correct == 1])+eps)
missing = len(target[target.correct == -1])/(len(target[target.correct != -2])+eps)
buf = pd.DataFrame([(subject, task_list.get(task), acc, length, missing)], columns=subject_data.columns)
subject_data = pd.concat([subject_data, buf])
subject_data.acc = subject_data.acc * 100
subject_data.missing = subject_data.missing * 100
# sns.barplot(x="task", y="acc", hue="int_length", data=subject_data, ci="sd")
# sns.barplot(x="task", y="acc", data=subject_data, ci="sd")
################################################
print("check intervene acc")
################################################
for length in subject_data.int_length.drop_duplicates():
print(f"acc : length={length}")
target_df = subject_data[subject_data.int_length == length]
_, norm_p = stats.shapiro(target_df.acc.dropna())
_, var_p = stats.levene(
target_df[target_df.task == 'trajectory'].acc.dropna(),
target_df[target_df.task == 'crossing intention'].acc.dropna(),
target_df[target_df.task == 'traffic light'].acc.dropna(),
center='median'
)
# if norm_p < 0.05 or var_p < 0.05:
# print(f"norm:{norm_p}, var:{var_p}")
# print('steel-dwass\n', sp.posthoc_dscf(target_df, val_col='acc', group_col='task'))
# else:
# multicomp_result = multicomp.MultiComparison(np.array(target_df.dropna(how='any').acc, dtype="float64"), target_df.dropna(how='any').type)
# print(f"norm:{norm_p}, var:{var_p}")
# print('levene', multicomp_result.tukeyhsd().summary())
if norm_p < 0.05 or var_p < 0.05:
_, anova_p = stats.friedmanchisquare(
target_df[target_df.task == "trajectory"].acc,
target_df[target_df.task == "crossing intention"].acc,
target_df[target_df.task == "traffic light"].acc,
)
print(f"norm:{norm_p}, var:{var_p}")
print("anova(friedman test)", anova_p)
if anova_p < 0.05:
print('conover\n', sp.posthoc_conover(target_df, val_col="acc", group_col="task"))
print('steel-dwass\n', sp.posthoc_dscf(target_df, val_col='acc', group_col='task'))
else:
# melted_df = pd.melt(target_df, id_vars=["subject", "acc", "int_length"], var_name="task", value_name="rate")
aov = stats_anova.AnovaRM(melted_df, "missing", "subject", ["task"])
print(f"norm:{norm_p}, var:{var_p}")
print("reperted anova: ", aov.fit())
multicomp_result = multicomp.MultiComparison(melted_df[length], nasa_df.task)
print(melted_df.tukeyhsd().summary())
fig, ax = plt.subplots()
sns.pointplot(x="int_length", y="acc", data=subject_data, hue="task", hue_order=hue_order, ax=ax, capsize=0.1, ci="sd")
ax.set_ylim(0.0, 100.0)
ax.set_xlabel("intervention time [s]", fontsize=18)
ax.set_ylabel("intervention accuracy [%]", fontsize=18)
ax.tick_params(labelsize=14)
ax.legend(fontsize=14)
plt.show()
################################################
print("check miss rate")
################################################
for length in subject_data.int_length.drop_duplicates():
print(f"miss : length={length}")
target_df = subject_data[subject_data.int_length == length]
_, norm_p = stats.shapiro(target_df.missing.dropna())
_, var_p = stats.levene(
target_df[target_df.task == 'trajectory'].missing.dropna(),
target_df[target_df.task == 'crossing intention'].missing.dropna(),
target_df[target_df.task == 'traffic light'].missing.dropna(),
center='median'
)
# if norm_p < 0.05 or var_p < 0.05:
# print(f"norm:{norm_p}, var:{var_p}")
# print('steel-dwass\n', sp.posthoc_dscf(target_df, val_col='missing', group_col='task'))
# else:
# multicomp_result = multicomp.MultiComparison(np.array(target_df.dropna(how='any').missing, dtype="float64"), target_df.dropna(how='any').type)
# print(f"norm:{norm_p}, var:{var_p}")
# print('levene', multicomp_result.tukeyhsd().summary())
if norm_p < 0.05 or var_p < 0.05:
_, anova_p = stats.friedmanchisquare(
target_df[target_df.task == "trajectory"].missing,
target_df[target_df.task == "crossing intention"].missing,
target_df[target_df.task == "traffic light"].missing,
)
print(f"norm:{norm_p}, var:{var_p}")
print("anova(friedman test)", anova_p)
if anova_p < 0.05:
print('steel-dwass\n', sp.posthoc_dscf(target_df, val_col='missing', group_col='task'))
print('conover\n', sp.posthoc_conover(target_df, val_col="missing", group_col="task"))
else:
# melted_df = pd.melt(target_df, id_vars=["subject", "acc", "int_length"], var_name="task", value_name="rate")
aov = stats_anova.AnovaRM(melted_df, "missing", "subject", ["task"])
print(f"norm:{norm_p}, var:{var_p}")
print("reperted anova: ", aov.fit())
multicomp_result = multicomp.MultiComparison(melted_df[length], nasa_df.task)
print(melted_df.tukeyhsd().summary())
fig, ax = plt.subplots()
sns.pointplot(x="int_length", y="missing", data=subject_data, hue="task", hue_order=hue_order, ax = ax, capsize=0.1, ci=95)
ax.set_ylim(0.0, 100.0)
ax.set_xlabel("intervention time [s]", fontsize=18)
ax.set_ylabel("intervention missing rate [%]", fontsize=18)
ax.tick_params(labelsize=14)
ax.legend(fontsize=14)
plt.show()
#####################################
# mean val show
#####################################
target = subject_data[subject_data.task == "crossing intention"]
print("int acc mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].acc.mean(),
target[target.int_length == 3.0].acc.mean(),
target[target.int_length == 5.0].acc.mean(),
target[target.int_length == 8.0].acc.mean(),
target[target.int_length == 1.0].acc.std(),
target[target.int_length == 3.0].acc.std(),
target[target.int_length == 5.0].acc.std(),
target[target.int_length == 8.0].acc.std(),
))
target = subject_data[subject_data.task == "trajectory"]
print("traj acc mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].acc.mean(),
target[target.int_length == 3.0].acc.mean(),
target[target.int_length == 5.0].acc.mean(),
target[target.int_length == 8.0].acc.mean(),
target[target.int_length == 1.0].acc.std(),
target[target.int_length == 3.0].acc.std(),
target[target.int_length == 5.0].acc.std(),
target[target.int_length == 8.0].acc.std(),
))
target = subject_data[subject_data.task == "traffic light"]
print("tl acc mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].acc.mean(),
target[target.int_length == 3.0].acc.mean(),
target[target.int_length == 5.0].acc.mean(),
target[target.int_length == 8.0].acc.mean(),
target[target.int_length == 1.0].acc.std(),
target[target.int_length == 3.0].acc.std(),
target[target.int_length == 5.0].acc.std(),
target[target.int_length == 8.0].acc.std(),
))
target = subject_data[subject_data.task == "crossing intention"]
print("int missing mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].missing.mean(),
target[target.int_length == 3.0].missing.mean(),
target[target.int_length == 5.0].missing.mean(),
target[target.int_length == 8.0].missing.mean(),
target[target.int_length == 1.0].missing.std(),
target[target.int_length == 3.0].missing.std(),
target[target.int_length == 5.0].missing.std(),
target[target.int_length == 8.0].missing.std(),
))
target = subject_data[subject_data.task == "trajectory"]
print("traj missing mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].missing.mean(),
target[target.int_length == 3.0].missing.mean(),
target[target.int_length == 5.0].missing.mean(),
target[target.int_length == 8.0].missing.mean(),
target[target.int_length == 1.0].missing.std(),
target[target.int_length == 3.0].missing.std(),
target[target.int_length == 5.0].missing.std(),
target[target.int_length == 8.0].missing.std(),
))
target = subject_data[subject_data.task == "traffic light"]
print("tl missing mean: 1.0:{}, 3.0:{}, 5.0:{}, 8.0:{}\n std {} {} {} {}".format(
target[target.int_length == 1.0].missing.mean(),
target[target.int_length == 3.0].missing.mean(),
target[target.int_length == 5.0].missing.mean(),
target[target.int_length == 8.0].missing.mean(),
target[target.int_length == 1.0].missing.std(),
target[target.int_length == 3.0].missing.std(),
target[target.int_length == 5.0].missing.std(),
target[target.int_length == 8.0].missing.std(),
))
###########################################
# collect wrong intervention ids
###########################################
task_list = {"int": "crossing intention", "tl": "traffic light", "traj":"trajectory"}
id_data = pd.DataFrame(columns=["id", "task", "false_rate", "missing", "total"])
for id in log_data.id.drop_duplicates():
for task in log_data.task.drop_duplicates():
for length in log_data.int_length.drop_duplicates():
target = log_data[(log_data.id == id) & (log_data.task == task) & (log_data.int_length == length)]
# acc = len(target[target.correct == 1])/(len(target))
total = len(target)
name = id.replace("tl","")+task+"_"+str(length)
if len(target) > 0:
false_rate = len(target[target.correct == 0])/len(target)
else:
false_rate = 0.0
missing = len(target[target.correct == -1])
buf = pd.DataFrame([(name, task, false_rate, missing, total)], columns=id_data.columns)
id_data = pd.concat([id_data, buf])
pd.set_option("max_rows", None)
sort_val = id_data.sort_values(["false_rate","total"], ascending=False)
false_playlist = sort_val[(sort_val.false_rate>0.0)&(sort_val.total>1)]
print(false_playlist)
false_playlist.to_csv("/home/kuriatsu/Dropbox/data/pie202203/false_playlist.csv")
# sns.barplot(x="id", y="acc", hue="int_length", data=id_data)
###############################################################################################
print("response rate stacked bar plot")
###############################################################################################
response_summary_pred = pd.DataFrame(columns=["int_length", "task", "response", "count"])
for int_length in log_data.int_length.drop_duplicates():
for task in log_data.task.drop_duplicates():
for response in [0, 1, 2, 3, -1]:
buf = pd.Series([int_length, task, response,
len(log_data[(log_data.int_length==int_length) & (log_data.task==task) & (log_data.response <= response)])/len(log_data[(log_data.int_length==int_length) & (log_data.task==task) & (log_data.response!=4)])],
index=response_summary_pred.columns)
response_summary_pred = response_summary_pred.append(buf, ignore_index=True)
fig, axes = plt.subplots()
cr = sns.barplot(x="task", y="count", hue="int_length", data=response_summary_pred[response_summary_pred.response==3], ax=axes, palette=sns.color_palette(["turquoise"]*6), edgecolor="0.2", order=["tl", "int", "traj"])
fa = sns.barplot(x="task", y="count", hue="int_length", data=response_summary_pred[response_summary_pred.response==2], ax=axes, palette=sns.color_palette(["orangered"]*6), edgecolor="0.2", order=["tl", "int", "traj"])
miss = sns.barplot(x="task", y="count", hue="int_length", data=response_summary_pred[response_summary_pred.response==1], ax=axes, palette=sns.color_palette(["lightsalmon"]*6), edgecolor="0.2", order=["tl", "int", "traj"])
hit = sns.barplot(x="task", y="count", hue="int_length", data=response_summary_pred[response_summary_pred.response==0], ax=axes, palette=sns.color_palette(["teal"]*6), edgecolor="0.2", order=["tl", "int", "traj"])
no_int = sns.barplot(x="task", y="count", hue="int_length", data=response_summary_pred[response_summary_pred.response==-1], ax=axes, palette=sns.color_palette(["gray"]*6), edgecolor="0.2", order=["tl", "int", "traj"])
axes.set_xticks([-0.3, -0.1, 0.1, 0.3, 0.7, 0.9, 1.1, 1.3, 1.7, 1.9, 2.1, 2.3])
axes.set_xticklabels(["1.0", "3.0", "5.0", "8.0", "1.0", "3.0", "5.0", "8.0", "1.0", "3.0", "5.0", "8.0"], fontsize=14)
# axes.set_yticklabels(fontsize=14)
ax_pos = axes.get_position()
fig.text(ax_pos.x1-0.75, ax_pos.y1-0.84, "traffic light", fontsize=14)
fig.text(ax_pos.x1-0.55, ax_pos.y1-0.84, "crossing intention", fontsize=14)
fig.text(ax_pos.x1-0.25, ax_pos.y1-0.84, "trajectory", fontsize=14)
axes.tick_params(labelsize=14)
axes.set_ylabel("Response Rate", fontsize=18)
axes.set_xlabel("")
handles, labels = axes.get_legend_handles_labels()
axes.legend(handles[::4], ["CR", "FA", "miss", "hit", "no_int"], bbox_to_anchor=(1.0, 1.0), loc='upper left', fontsize=14)
plt.show()
###############################################
# Workload
###############################################
workload = pd.read_csv("{}/workload.csv".format(data_path))
workload.satisfy = 10-workload.satisfy
workload_melted = pd.melt(workload, id_vars=["subject", "type"], var_name="scale", value_name="score")
#### nasa-tlx ####
for item in workload_melted.scale.drop_duplicates():
print(item)
_, norm_p1 = stats.shapiro(workload[workload.type == "trajectory"][item])
_, norm_p2 = stats.shapiro(workload[workload.type == "crossing intention"][item])
_, norm_p3 = stats.shapiro(workload[workload.type == "traffic light"][item])
_, var_p = stats.levene(
workload[workload.type == "trajectory"][item],
workload[workload.type == "crossing intention"][item],
workload[workload.type == "traffic light"][item],
center='median'
)
if norm_p1 < 0.05 or norm_p2 < 0.05 or norm_p3 < 0.05 or norm_p4 < 0.05:
_, anova_p = stats.friedmanchisquare(
workload[workload.type == "trajectory"][item],
workload[workload.type == "crossing intention"][item],
workload[workload.type == "traffic light"][item],
)
print("anova(friedman test)", anova_p)
if anova_p < 0.05:
print(sp.posthoc_conover(workload, val_col=item, group_col="type"))
else:
melted_df = pd.melt(nasa_df, id_vars=["name", "experiment_type"], var_name="type", value_name="score")
aov = stats_anova.AnovaRM(workload_melted[workload_melted.type == item], "score", "subject", ["type"])
print("reperted anova: ", aov.fit())
multicomp_result = multicomp.MultiComparison(workload_melted[item], nasa_df.type)
print(multicomp_result.tukeyhsd().summary())
fig, ax = plt.subplots()
sns.barplot(x="scale", y="score", data=workload_melted, hue="type", hue_order=hue_order, ax=ax)
ax.set_ylim(0, 10)
ax.legend(bbox_to_anchor=(0.0, 1.0), loc='lower left', fontsize=14)
ax.set_xlabel("scale", fontsize=18)
ax.set_ylabel("score (lower is better)", fontsize=18)
ax.tick_params(labelsize=14)
plt.show()
###############################################
# necessary time
###############################################
time = pd.read_csv("/home/kuriatsu/Dropbox/documents/subjective_time.csv")
fig, ax = plt.subplots()
# mean_list = [
# time[time.type=="crossing intention"].ideal_time.mean(),
# time[time.type=="trajectory"].ideal_time.mean(),
# time[time.type=="traffic light"].ideal_time.mean(),
# ]
# sem_list = [
# time[time.type=="crossing intention"].ideal_time.sem(),
# time[time.type=="trajectory"].ideal_time.sem(),
# time[time.type=="traffic light"].ideal_time.sem(),
# ]
_, norm_p = stats.shapiro(time.ideal_time.dropna())
_, var_p = stats.levene(
time[time.type == 'crossing intention'].ideal_time.dropna(),
time[time.type == 'trajectory'].ideal_time.dropna(),
time[time.type == 'traffic light'].ideal_time.dropna(),
center='median'
)
if norm_p < 0.05 or var_p < 0.05:
print('steel-dwass\n', sp.posthoc_dscf(time, val_col='ideal_time', group_col='type'))
else:
multicomp_result = multicomp.MultiComparison(np.array(time.dropna(how='any').ideal_time, dtype="float64"), time.dropna(how='any').type)
print('levene', multicomp_result.tukeyhsd().summary())
sns.pointplot(x="type", y="ideal_time", hue="type", hue_order=hue_order, data=time, join=False, ax=ax, capsize=0.1, ci=95)
ax.set_ylim(0.5,3.5)
plt.yticks([1, 2, 3, 4], ["<3", "3-5", "5-8", "8<"])
plt.show()
###############################################
# compare prediction and intervention
###############################################
with open("/home/kuriatsu/Dropbox/data/pie202203/database.pkl", "rb") as f:
database = pickle.load(f)
tl_result = pd.read_csv("/home/kuriatsu/Dropbox/data/pie202203/tlr_result.csv")
overall_result = pd.DataFrame(columns=["id", "task", "subject", "gt", "int", "prediction"])
log_data = None
data_path = "/home/kuriatsu/Dropbox/data/pie202203"
for file in glob.glob(os.path.join(data_path, "log*.csv")):
buf = pd.read_csv(file)
filename =file.split("/")[-1]
count = float(filename.replace("log_data_", "").split("_")[-1].replace(".csv", ""))
print("{}".format(filename))
if count in [0, 1, 2]:
print("skipped")
continue
subject = filename.replace("log_data_", "").split("_")[0]
task = filename.replace("log_data_", "").split("_")[1]
for idx, row in buf.iterrows():
if task != "tl":
database_id = row.id+task+"_"+str(float(row.int_length))
prediction = (database[database_id].get("likelihood") <= 0.5)
gt = False if row.state else True
else:
database_id = row.id+"_"+str(float(row.int_length))
prediction = 1 if float(tl_result[tl_result.id == row.id].result) == 2 else 0
gt = False if row.state else True
if row.id in tl_black_list:
intervention = -2
if row.last_state == -1: # no intervention
intervention = -1
else:
if row.id in opposite_anno_list:
intervention = False if row.last_state else True
else:
intervention = row.last_state
buf = pd.DataFrame([(row.id, task, subject, int(gt), int(intervention), int(prediction))], columns = overall_result.columns)
overall_result = pd.concat([overall_result, buf])
overall_result.to_csv("/home/kuriatsu/Dropbox/data/pie202203/acc.csv")
| 44.696078 | 222 | 0.612547 |
fd4df1f5acd3eb66e203334228aa56f68ab7a4a9 | 302 | py | Python | tests/huge.py | nbdy/podb | 684ed6b8330c0d18a2b89d6521cb15586d1f95a4 | [
"MIT"
] | null | null | null | tests/huge.py | nbdy/podb | 684ed6b8330c0d18a2b89d6521cb15586d1f95a4 | [
"MIT"
] | null | null | null | tests/huge.py | nbdy/podb | 684ed6b8330c0d18a2b89d6521cb15586d1f95a4 | [
"MIT"
] | null | null | null | import unittest
from podb import DB
from tqdm import tqdm
from . import HugeDBItem
db = DB("huge")
if __name__ == '__main__':
unittest.main()
| 17.764706 | 42 | 0.682119 |
fd5095688e3adf6f9ca25f40240ff9d7e4246e41 | 153 | py | Python | moto/sts/__init__.py | pll/moto | e49e67aba5d108b03865bdb42124206ea7e572ea | [
"Apache-2.0"
] | null | null | null | moto/sts/__init__.py | pll/moto | e49e67aba5d108b03865bdb42124206ea7e572ea | [
"Apache-2.0"
] | null | null | null | moto/sts/__init__.py | pll/moto | e49e67aba5d108b03865bdb42124206ea7e572ea | [
"Apache-2.0"
] | null | null | null | from .models import sts_backend
from ..core.models import base_decorator
sts_backends = {"global": sts_backend}
mock_sts = base_decorator(sts_backends)
| 25.5 | 40 | 0.810458 |
fd518544ef8c44c965453eb8925336fcec4f3ee3 | 3,005 | py | Python | convert_nbrId_to_orgnr.py | obtitus/barnehagefakta_osm | 4539525f6defcc67a087cc57baad996f8d76b8bd | [
"Apache-2.0"
] | 1 | 2018-10-05T17:00:23.000Z | 2018-10-05T17:00:23.000Z | convert_nbrId_to_orgnr.py | obtitus/barnehagefakta_osm | 4539525f6defcc67a087cc57baad996f8d76b8bd | [
"Apache-2.0"
] | 6 | 2016-05-29T09:33:06.000Z | 2019-12-18T20:24:50.000Z | convert_nbrId_to_orgnr.py | obtitus/barnehagefakta_osm | 4539525f6defcc67a087cc57baad996f8d76b8bd | [
"Apache-2.0"
] | null | null | null | # Database switched from having nsrId to using orgnr, this script helps with this conversion.
import os
import re
import json
import subprocess
from glob import glob
from utility_to_osm import file_util
if __name__ == '__main__':
data_dir = 'data' #'barnehagefakta_osm_data/data'
nsrId_to_orgnr_filename = 'nsrId_to_orgnr.json'
if False:
# Done once, on a old dump of the database, to get mapping from nsrId to orgnr
nsrId_to_orgnr = dict()
for kommune_nr in os.listdir(data_dir):
folder = os.path.join(data_dir, kommune_nr)
if os.path.isdir(folder):
print(folder)
count = 0
for filename in glob(os.path.join(folder, 'barnehagefakta_no_nbrId*.json')):
content = file_util.read_file(filename)
if content == '404':
# cleanup
os.remove(filename)
continue
dct = json.loads(content)
nsrId = dct['nsrId']
orgnr = dct['orgnr']
if nsrId in nsrId_to_orgnr and nsrId_to_orgnr[nsrId] != orgnr:
raise ValueError('Duplicate key %s, %s != %s' % (nsrId, nsrId_to_orgnr[nsrId], orgnr))
nsrId_to_orgnr[nsrId] = orgnr
count += 1
print('Found', count)
with open(nsrId_to_orgnr_filename, 'w') as f:
json.dump(nsrId_to_orgnr, f)
content = file_util.read_file(nsrId_to_orgnr_filename)
nsrId_to_orgnr = json.loads(content)
if True:
# Rename files
for kommune_nr in os.listdir(data_dir):
folder = os.path.join(data_dir, kommune_nr)
if os.path.isdir(folder):
print(folder)
count = 0
for filename in glob(os.path.join(folder, 'barnehagefakta_no_nbrId*.json')):
reg = re.search('barnehagefakta_no_nbrId(\d+)', filename)
if reg:
nbrId = reg.group(1)
try:
orgnr = nsrId_to_orgnr[nbrId]
except KeyError as e:
content = file_util.read_file(filename)
print('ERROR', repr(e), filename, content)
if content == '404':
os.remove(filename)
continue
new_filename = filename.replace('barnehagefakta_no_nbrId%s' % nbrId,
'barnehagefakta_no_orgnr%s' % orgnr)
subprocess.run(['git', 'mv', filename, new_filename])
# if the file is still there, probably not version controlled
if os.path.exists(filename):
os.rename(filename, new_filename)
| 40.608108 | 110 | 0.509151 |
fd51dbb2cb05ef487bcf83c509336b681bc19872 | 769 | py | Python | regcore/tests/layer_tests.py | navigo/regulations-core | 0b2a2034baacfa1cc5ff87f14db7d1aaa8d260c3 | [
"CC0-1.0"
] | 17 | 2016-06-14T19:06:02.000Z | 2021-10-03T23:46:00.000Z | regcore/tests/layer_tests.py | navigo/regulations-core | 0b2a2034baacfa1cc5ff87f14db7d1aaa8d260c3 | [
"CC0-1.0"
] | 42 | 2016-04-06T22:34:26.000Z | 2020-04-14T22:00:24.000Z | regcore/tests/layer_tests.py | navigo/regulations-core | 0b2a2034baacfa1cc5ff87f14db7d1aaa8d260c3 | [
"CC0-1.0"
] | 20 | 2016-05-04T06:04:34.000Z | 2020-10-07T16:16:03.000Z | from django.test import TestCase
from regcore.layer import standardize_params
| 33.434783 | 55 | 0.669701 |
fd5213aa3c0233313738c5ac6fd68800d2601767 | 459 | py | Python | asal.py | kedigucuk01/asal-sayi-bulucu | dffc81cec5c4bbd6b4423d8991a5559a79f26f92 | [
"MIT"
] | 2 | 2021-06-10T16:27:42.000Z | 2021-06-11T10:54:24.000Z | asal.py | kedigucuk01/asal-sayi-bulucu | dffc81cec5c4bbd6b4423d8991a5559a79f26f92 | [
"MIT"
] | 1 | 2021-06-15T11:08:58.000Z | 2021-08-10T20:23:11.000Z | asal.py | kedigucuk01/asal-sayi-bulucu | dffc81cec5c4bbd6b4423d8991a5559a79f26f92 | [
"MIT"
] | null | null | null | while True:
try:
i = int(input("Say giriniz: ")) # 2
except ValueError:
print("Hata Kodu: 5786, \nAklama: Ltfen bir \"tam say\" giriniz.")
else:
for s in range(2, i, 1):
if i%s == 0:
print(f"{i} says, asal deildir.")
break
else:
if s == i - 1:
print(f"{i} says, asal bir saydr.")
if i < 2:
print(f"{i} says, asal deildir.")
elif i == 2:
print(f"{i} says, asal bir saydr.")
| 20.863636 | 73 | 0.542484 |
fd543b58f8ff3f846e998d58939fe4d5bc4acf05 | 5,859 | py | Python | main.py | MO-RISE/crowsnest-connector-cluon-n2k | 11eaefd8ebe76829ec8fe91f99da9acbc84e5187 | [
"Apache-2.0"
] | null | null | null | main.py | MO-RISE/crowsnest-connector-cluon-n2k | 11eaefd8ebe76829ec8fe91f99da9acbc84e5187 | [
"Apache-2.0"
] | null | null | null | main.py | MO-RISE/crowsnest-connector-cluon-n2k | 11eaefd8ebe76829ec8fe91f99da9acbc84e5187 | [
"Apache-2.0"
] | null | null | null | """Main entrypoint for this application"""
from pathlib import Path
from math import degrees
from datetime import datetime
import logging
import warnings
from environs import Env
from streamz import Stream
from paho.mqtt.client import Client as MQTT
from pycluon import OD4Session, Envelope as cEnvelope
from pycluon.importer import import_odvd
from marulc import NMEA2000Parser
from marulc.utils import filter_on_pgn, deep_get
from marulc.exceptions import MultiPacketInProcessError
from brefv.envelope import Envelope
from brefv.messages.observations.rudder import Rudder
from brefv.messages.observations.propeller import Propeller
# Reading config from environment variables
env = Env()
CLUON_CID = env.int("CLUON_CID", 111)
MQTT_BROKER_HOST = env("MQTT_BROKER_HOST")
MQTT_BROKER_PORT = env.int("MQTT_BROKER_PORT", 1883)
MQTT_CLIENT_ID = env("MQTT_CLIENT_ID", None)
MQTT_TRANSPORT = env("MQTT_TRANSPORT", "tcp")
MQTT_TLS = env.bool("MQTT_TLS", False)
MQTT_USER = env("MQTT_USER", None)
MQTT_PASSWORD = env("MQTT_PASSWORD", None)
MQTT_BASE_TOPIC = env("MQTT_BASE_TOPIC", "/test/test")
RUDDER_CONFIG = env.dict("RUDDER_CONFIG", default={})
PROPELLER_CONFIG = env.dict("PROPELLER_CONFIG", default={})
LOG_LEVEL = env.log_level("LOG_LEVEL", logging.WARNING)
## Import and generate code for message specifications
THIS_DIR = Path(__file__).parent
memo = import_odvd(THIS_DIR / "memo" / "memo.odvd")
# Setup logger
logging.basicConfig(level=LOG_LEVEL)
logging.captureWarnings(True)
warnings.filterwarnings("once")
LOGGER = logging.getLogger("crowsnest-connector-cluon-n2k")
mq = MQTT(client_id=MQTT_CLIENT_ID, transport=MQTT_TRANSPORT)
# Not empty filter
not_empty = lambda x: x is not None
## Main entrypoint for N2k frames
entrypoint = Stream()
parser = NMEA2000Parser()
def unpack_n2k_frame(envelope: cEnvelope):
"""Extract an n2k frame from an envelope and unpack it using marulc"""
LOGGER.info("Got envelope from pycluon")
try:
frame = memo.memo_raw_NMEA2000()
frame.ParseFromString(envelope.serialized_data)
LOGGER.debug("Frame: %s", frame.data)
msg = parser.unpack(frame.data)
LOGGER.debug("Unpacked: %s", msg)
msg["timestamp"] = envelope.sampled
return msg
except MultiPacketInProcessError:
LOGGER.debug("Multi-packet currently in process")
return None
except Exception: # pylint: disable=broad-except
LOGGER.exception("Exception when unpacking a frame")
return None
unpacked = entrypoint.map(unpack_n2k_frame).filter(not_empty)
## Rudder
def pgn127245_to_brefv(msg):
"""Converting a marulc dict to a brefv messages and packaging it into a a brefv construct"""
n2k_id = str(deep_get(msg, "Fields", "instance"))
if sensor_id := RUDDER_CONFIG.get(n2k_id):
crowsnest_id = list(RUDDER_CONFIG.keys()).index(n2k_id)
rud = Rudder(
sensor_id=sensor_id, angle=degrees(-1 * msg["Fields"]["angleOrder"])
) # Negating to adhere to brefv conventions
envelope = Envelope(
sent_at=datetime.utcfromtimestamp(msg["timestamp"]).isoformat(),
message_type="https://mo-rise.github.io/brefv/0.1.0/messages/observations/rudder.json",
message=rud.dict(
exclude_none=True, exclude_unset=True, exclude_defaults=True
),
)
LOGGER.info("Brefv envelope with Rudder message assembled")
LOGGER.debug("Envelope:\n%s", envelope)
return f"/observations/rudder/{crowsnest_id}", envelope
warnings.warn(f"No Rudder config found for N2k instance id: {n2k_id}")
return None
brefv_rudder = (
unpacked.filter(filter_on_pgn(127245)).map(pgn127245_to_brefv).filter(not_empty)
)
## Propeller (Using engine data for now...)
def pgn127488_to_brefv(msg):
"""Converting a marulc dict to a brefv messages and packaging it into a a brefv construct"""
n2k_id = str(deep_get(msg, "Fields", "instance"))
if sensor_id := PROPELLER_CONFIG.get(n2k_id):
crowsnest_id = list(PROPELLER_CONFIG.keys()).index(n2k_id)
prop = Propeller(sensor_id=sensor_id, rpm=msg["Fields"]["speed"])
envelope = Envelope(
sent_at=datetime.utcfromtimestamp(msg["timestamp"]).isoformat(),
message_type="https://mo-rise.github.io/brefv/0.1.0/messages/observations/propeller.json", # pylint: disable=line-too-long
message=prop.dict(
exclude_none=True, exclude_unset=True, exclude_defaults=True
),
)
LOGGER.info("Brefv envelope with Propeller message assembled")
LOGGER.debug("Envelope:\n%s", envelope)
return f"/observations/propeller/{crowsnest_id}", envelope
warnings.warn(f"No Propeller config found for {n2k_id}")
return None
brefv_propeller = (
unpacked.filter(filter_on_pgn(127488)).map(pgn127488_to_brefv).filter(not_empty)
)
# Finally, publish to mqtt
def to_mqtt(data):
"""Push data to a mqtt topic"""
subtopic, envelope = data
topic = f"{MQTT_BASE_TOPIC}{subtopic}"
LOGGER.debug("Publishing on %s", topic)
try:
mq.publish(
topic,
envelope.json(),
)
except Exception: # pylint: disable=broad-except
LOGGER.exception("Failed publishing to broker!")
if __name__ == "__main__":
print("All setup done, lets start processing messages!")
# Connect remaining pieces
brefv_rudder.latest().rate_limit(0.1).sink(to_mqtt)
brefv_propeller.latest().rate_limit(0.1).sink(to_mqtt)
# Connect to broker
mq.username_pw_set(MQTT_USER, MQTT_PASSWORD)
if MQTT_TLS:
mq.tls_set()
mq.connect(MQTT_BROKER_HOST, MQTT_BROKER_PORT)
# Register triggers
session = OD4Session(CLUON_CID)
session.add_data_trigger(10002, entrypoint.emit)
mq.loop_forever()
| 30.046154 | 135 | 0.70524 |
fd54b2677eda2400e60664de51925feee4550c09 | 7,569 | py | Python | cocapi/client/api.py | bim-ba/coc-api | 69ff957803cb991dfad8df3af752d193171f2ef0 | [
"Unlicense"
] | 1 | 2022-03-29T12:39:36.000Z | 2022-03-29T12:39:36.000Z | cocapi/client/api.py | bim-ba/coc-api | 69ff957803cb991dfad8df3af752d193171f2ef0 | [
"Unlicense"
] | null | null | null | cocapi/client/api.py | bim-ba/coc-api | 69ff957803cb991dfad8df3af752d193171f2ef0 | [
"Unlicense"
] | null | null | null | from typing import Any, Optional
from dataclasses import dataclass, field
import aiohttp
from ..types import aliases
from ..types import exceptions
# not used, but can be
| 34.880184 | 143 | 0.62069 |
fd56674cc383ba9fa6321e89c2463e251d94abf2 | 28,594 | py | Python | ratings.py | struct-rgb/ratings | 40d56455406cfee9731c564e54ed7610b5a9641c | [
"MIT"
] | null | null | null | ratings.py | struct-rgb/ratings | 40d56455406cfee9731c564e54ed7610b5a9641c | [
"MIT"
] | null | null | null | ratings.py | struct-rgb/ratings | 40d56455406cfee9731c564e54ed7610b5a9641c | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import re
import json
import random
from pathlib import Path
from datetime import date
from typing import Any, Callable, Set, Tuple
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from tags import Filter, Box, CompilationError, escape, enum_subject_parser_factory, tagset, PredicateDefinitions, DEFAULT_BOOL_PARSER, highlighting
from model import Search, Sort, Score, Status, Page, Tag, Rating, Model
#
# filter setup
#
parser_score = enum_subject_parser_factory(Score)
parser_status = enum_subject_parser_factory(Status)
####################
# RANDOM PREDICATE #
####################
PREDICATES = PredicateDefinitions(
action=lambda tag, rating: tag in rating.tags
)
###################
# COUNT PREDICATE #
###################
count_pattern = re.compile(r"^(\d+)\s+of\s+(.*)$")
##################
# EVAL PREDICATE #
##################
###################
# DATE PREDICATES #
###################
(PREDICATES
.define("tag",
readme="filter for ratings with the specified tag",
action=lambda tag, rating: tag in rating.tags,
parser=lambda x: x,
)
.define("score",
readme="filter for ratings with the specified score",
action=lambda score, rating: score == rating.score,
parser=parser_score,
)
.define("minimum score",
readme="filter for ratings with at least a certain score",
action=lambda score, rating: rating.score >= score,
parser=parser_score,
)
.define("maximum score",
readme="filter for ratings with at most a certain score",
action=lambda score, rating: rating.score <= score,
parser=parser_score,
)
.define("status",
readme="filter for ratings with the specified status",
action=lambda status, rating: status == rating.status,
parser=parser_status,
)
.define("minimum status",
readme="filter for ratings with at least a certain status",
action=lambda status, rating: rating.status >= status,
parser=parser_status,
)
.define("maximum status",
readme="filter for ratings with at most a certain status",
action=lambda status, rating: rating.status <= status,
parser=parser_status,
)
.define("tags",
readme="filter for ratings with a specific number of tags",
action=lambda number, rating: len(rating.tags) == number,
parser=int,
)
.define("minimum tags",
readme="filter for ratings with a specific number of tags",
action=lambda number, rating: len(rating.tags) >= number,
parser=int,
)
.define("maximum tags",
readme="filter for ratings with a specific number of tags",
action=lambda number, rating: len(rating.tags) <= number,
parser=int,
)
.define("random",
readme="filter ratings with a percent chance to include each",
action=action_random,
parser=parser_random,
pure=False
)
.define("count", # TODO possibly remove
readme="filter for a certain number of results at most",
action=action_count,
parser=parser_count,
pure=False
)
.define("eval", # TODO possibly remove
readme="evaluate a string as an expression",
action=lambda function, rating: function(rating),
parser=parser_eval,
pure=False
)
.define("title",
readme="filter for ratings with certain text in the title (case insensitive)",
action=lambda string, rating: rating.title.lower().find(string) != -1,
parser=parser_lower_str,
)
.define("comment",
readme="filter for ratings with certain text in the comments (case insensitive)",
action=lambda string, rating: rating.comments.lower().find(string) != -1,
parser=parser_lower_str,
)
.define("text",
readme="filter for ratings with certain text in the title or the comments (case insensitive)",
action=lambda string, rating: (
rating.title.lower().find(string) != -1 or rating.comments.lower().find(string) != -1
),
parser=parser_lower_str,
)
.define("commented",
readme="filter for ratings that either have or lack a comment",
action=lambda boolean, rating: bool(rating.comments) == boolean,
parser=DEFAULT_BOOL_PARSER,
)
.define("value",
readme="a literal boolean value; true or false",
action=lambda boolean, rating: boolean,
parser=DEFAULT_BOOL_PARSER,
)
.define("modified",
readme="ratings modified on YYYY-MM-DD",
action=lambda day, rating: rating.modified == day,
parser=parser_date,
)
.define("modified after",
readme="ratings modified after YYYY-MM-DD",
action=lambda day, rating: rating.modified > day,
parser=parser_date,
)
.define("modified before",
readme="ratings modified before YYYY-MM-DD",
action=lambda day, rating: rating.modified < day,
parser=parser_date,
)
.define("created",
readme="ratings created on YYYY-MM-DD",
action=lambda day, rating: rating.created == day,
parser=parser_date,
)
.define("created after",
readme="ratings created after YYYY-MM-DD",
action=lambda day, rating: rating.created > day,
parser=parser_date,
)
.define("created before",
readme="ratings created before YYYY-MM-DD",
action=lambda day, rating: rating.created < day,
parser=parser_date,
)
# alias definitions
.alias("minimum score", "min score")
.alias("maximum score", "max score")
.alias("minimum status", "min status")
.alias("maximum status", "max status")
.alias("minimum tags", "min tags")
.alias("maximum tags", "max tags")
.alias("commented", "has comment")
)
class FilesTab(object):
def update_path(self):
self.path = self._chooser.get_filename()
class TaggingTab(object):
def main():
rater = Rater()
Gtk.main()
if __name__ == '__main__':
main() | 26.305428 | 148 | 0.708435 |
fd567ff8b78d041903de62043964d3c66a7450a4 | 10,218 | py | Python | K64F Python Interfacing Testing/Loop_Read.py | Marnold212/CamLab-K64F | 20689b4be38aa329990dbfe13eec43d74b3ae27a | [
"Apache-2.0"
] | null | null | null | K64F Python Interfacing Testing/Loop_Read.py | Marnold212/CamLab-K64F | 20689b4be38aa329990dbfe13eec43d74b3ae27a | [
"Apache-2.0"
] | null | null | null | K64F Python Interfacing Testing/Loop_Read.py | Marnold212/CamLab-K64F | 20689b4be38aa329990dbfe13eec43d74b3ae27a | [
"Apache-2.0"
] | null | null | null | import numpy as np
from serial.serialutil import SerialException
import serial.tools.list_ports as port_list
import serial
import time
# def List_All_Mbed_USB_Devices(self):
# def Reverse_4byte_hex(input):
# reverse = ""
# if(len(input) == 4*2):
# reverse += input[6:8] + input[4:6] + input[2:4] + input[0:2]
# return reverse
# Assumes Data recieved is
# Testing
if __name__ == "__main__":
mbed_USB_info = List_All_Mbed_USB_Devices()
for i in range(5):
print(mbed_USB_info[i])
# serial_port = serial.Serial(port=mbed_USB_info[1][0], baudrate=115200, bytesize=8, timeout=1, stopbits=serial.STOPBITS_ONE)
# for x in range(1000):
# raw_data = ADC_8x_16_Raw_Read(serial_port)
# # raw_data = serial_port.read(1)
# data = []
# for x in range(8):
# data.append(Convert_ADC_Raw(raw_data[1][x], 16, 5))
# # print(raw_data)
# print(data, raw_data [0])
Bytes_Per_Sample = 32
Number_Samples = 300
Serial_Baudrate = 230400 # 962100
serial_port = serial.Serial(port=mbed_USB_info[1][0], baudrate=Serial_Baudrate, bytesize=8, timeout=1, stopbits=serial.STOPBITS_ONE)
data = []
for x in range(Number_Samples):
raw = serial_port.read(Bytes_Per_Sample).hex()
data.append(raw)
# print(data)
# print(data)
Formatted = Decode_Raw_Data(data)
print(Formatted[0], Formatted[Number_Samples - 1])
# print(Results[0:2])
#
# Serial_device = serial.Serial(port="COM4", baudrate=9600, bytesize=8, timeout=1, stopbits=serial.STOPBITS_ONE)
# Target_Register = "0x40048024"
# Received_String = Read_K64F_Hex_Register(Serial_device, Target_Register, 4)
# print("READ COMMAND (0x30): Requested Register = %s; Contents of Register(Hex) = %s" % (Target_Register , Received_String[:-2])) | 47.305556 | 224 | 0.641124 |
fd57c568a71d49ca653bf0ce40af26241330267b | 190 | py | Python | geosolver/text/generate_rules.py | mhrmm/geosolver | 13ae2972c58d5ba4c4878576f9fba8569cc99629 | [
"Apache-2.0"
] | 83 | 2015-09-14T13:50:42.000Z | 2022-03-12T10:24:38.000Z | geosolver/text/generate_rules.py | nehamjadhav/geosolver | 13ae2972c58d5ba4c4878576f9fba8569cc99629 | [
"Apache-2.0"
] | 8 | 2021-07-21T09:55:42.000Z | 2022-02-15T02:31:47.000Z | geosolver/text/generate_rules.py | nehamjadhav/geosolver | 13ae2972c58d5ba4c4878576f9fba8569cc99629 | [
"Apache-2.0"
] | 33 | 2015-06-16T18:52:43.000Z | 2021-12-16T08:58:27.000Z | from geosolver.ontology.ontology_definitions import FunctionSignature, signatures
from geosolver.text.rule import TagRule
from geosolver.utils.num import is_number
__author__ = 'minjoon'
| 23.75 | 81 | 0.847368 |
fd5911cef504b719bc6cc6d734809ba588ffa54f | 1,433 | py | Python | fhir_dataframes/store.py | Tiro-health/fhir-dataframes | 57086b7bb385ffbce55f57747903eca7a7f84665 | [
"MIT"
] | 1 | 2022-02-09T08:16:09.000Z | 2022-02-09T08:16:09.000Z | fhir_dataframes/store.py | Tiro-health/fhir-dataframes | 57086b7bb385ffbce55f57747903eca7a7f84665 | [
"MIT"
] | null | null | null | fhir_dataframes/store.py | Tiro-health/fhir-dataframes | 57086b7bb385ffbce55f57747903eca7a7f84665 | [
"MIT"
] | null | null | null | from __future__ import annotations
from itertools import tee
from typing import Iterable, Optional, Sequence, Union
import pandas as pd
from tiro_fhir import Resource
from fhir_dataframes import code_accessor
| 30.489362 | 87 | 0.686671 |
fd5ed16b310aacd62d38f7ed79f88685cc24b454 | 1,189 | py | Python | senlerpy/senler.py | tezmen/SenlerPy | ce8ab8512ed795e8e6f1e7ff76f54c6aa2d3cd82 | [
"Apache-2.0"
] | 2 | 2019-03-19T08:46:27.000Z | 2020-11-12T10:55:59.000Z | senlerpy/senler.py | tezmen/SenlerPy | ce8ab8512ed795e8e6f1e7ff76f54c6aa2d3cd82 | [
"Apache-2.0"
] | 1 | 2021-03-30T16:55:09.000Z | 2021-03-30T16:55:09.000Z | senlerpy/senler.py | tezmen/SenlerPy | ce8ab8512ed795e8e6f1e7ff76f54c6aa2d3cd82 | [
"Apache-2.0"
] | 7 | 2019-03-19T08:47:35.000Z | 2021-08-24T11:47:41.000Z | # -*- coding: utf-8 -*-
import json
import logging
from .request import RequestApi
from .exceptions import ApiError, WrongId, HttpError
logger = logging.getLogger(__name__)
| 25.847826 | 81 | 0.735071 |
fd5f3466a377d682676cf2f35cddaec4567f59df | 11,354 | py | Python | robinhoodbot/main.py | bpk9/Robinhood-Stock-Trading-Bot | c2ab0dd58f5236ee051ad38277c8ba5c46bd0aa4 | [
"MIT"
] | null | null | null | robinhoodbot/main.py | bpk9/Robinhood-Stock-Trading-Bot | c2ab0dd58f5236ee051ad38277c8ba5c46bd0aa4 | [
"MIT"
] | null | null | null | robinhoodbot/main.py | bpk9/Robinhood-Stock-Trading-Bot | c2ab0dd58f5236ee051ad38277c8ba5c46bd0aa4 | [
"MIT"
] | null | null | null | import pyotp
import robin_stocks as r
import pandas as pd
import numpy as np
import ta as ta
from pandas.plotting import register_matplotlib_converters
from ta import *
from misc import *
from tradingstats import *
from config import *
#Log in to Robinhood
#Put your username and password in a config.py file in the same directory (see sample file)
totp = pyotp.TOTP(rh_2fa_code).now()
login = r.login(rh_username,rh_password, totp)
#Safe divide by zero division function
def get_spy_symbols():
"""
Returns: the symbol for each stock in the S&P 500 as a list of strings
"""
symbols = pd.read_html('https://en.wikipedia.org/wiki/List_of_S%26P_500_companies')[0]['Symbol']
return list(symbols.values.flatten())
def get_watchlist_symbols():
"""
Returns: the symbol for each stock in your watchlist as a list of strings
"""
my_list_names = []
symbols = []
for name in r.get_all_watchlists(info='name'):
my_list_names.append(name)
for name in my_list_names:
list = r.get_watchlist_by_name(name)
for item in list:
instrument_data = r.get_instrument_by_url(item.get('instrument'))
symbol = instrument_data['symbol']
symbols.append(symbol)
return symbols
def get_portfolio_symbols():
"""
Returns: the symbol for each stock in your portfolio as a list of strings
"""
symbols = []
holdings_data = r.get_open_stock_positions()
for item in holdings_data:
if not item:
continue
instrument_data = r.get_instrument_by_url(item.get('instrument'))
symbol = instrument_data['symbol']
symbols.append(symbol)
return symbols
def get_position_creation_date(symbol, holdings_data):
"""Returns the time at which we bought a certain stock in our portfolio
Args:
symbol(str): Symbol of the stock that we are trying to figure out when it was bought
holdings_data(dict): dict returned by r.get_open_stock_positions()
Returns:
A string containing the date and time the stock was bought, or "Not found" otherwise
"""
instrument = r.get_instruments_by_symbols(symbol)
url = instrument[0].get('url')
for dict in holdings_data:
if(dict.get('instrument') == url):
return dict.get('created_at')
return "Not found"
def get_modified_holdings():
""" Retrieves the same dictionary as r.build_holdings, but includes data about
when the stock was purchased, which is useful for the read_trade_history() method
in tradingstats.py
Returns:
the same dict from r.build_holdings, but with an extra key-value pair for each
position you have, which is 'bought_at': (the time the stock was purchased)
"""
holdings = r.build_holdings()
holdings_data = r.get_open_stock_positions()
for symbol, dict in holdings.items():
bought_at = get_position_creation_date(symbol, holdings_data)
bought_at = str(pd.to_datetime(bought_at))
holdings[symbol].update({'bought_at': bought_at})
return holdings
def golden_cross(stockTicker, n1, n2, direction=""):
"""Determine if a golden/death cross has occured for a specified stock in the last X trading days
Args:
stockTicker(str): Symbol of the stock we're querying
n1(int): Specifies the short-term indicator as an X-day moving average.
n2(int): Specifies the long-term indicator as an X-day moving average.
(n1 should be smaller than n2 to produce meaningful results, e.g n1=50, n2=200)
direction(str): "above" if we are searching for an upwards cross, "below" if we are searching for a downwaords cross. Optional, used for printing purposes
Returns:
1 if the short-term indicator crosses above the long-term one
0 if the short-term indicator crosses below the long-term one
price(float): last listed close price
"""
history = get_historicals(stockTicker)
closingPrices = []
dates = []
for item in history:
closingPrices.append(float(item['close_price']))
dates.append(item['begins_at'])
price = pd.Series(closingPrices)
dates = pd.Series(dates)
dates = pd.to_datetime(dates)
ema1 = ta.trend.EMAIndicator(price, int(n1)).ema_indicator()
ema2 = ta.trend.EMAIndicator(price, int(n2)).ema_indicator()
if plot:
show_plot(price, ema1, ema2, dates, symbol=stockTicker, label1=str(n1)+" day EMA", label2=str(n2)+" day EMA")
return ema1.iat[-1] > ema2.iat[-1], closingPrices[len(closingPrices) - 1]
def get_rsi(symbol, days):
"""Determine the relative strength index for a specified stock in the last X trading days
Args:
symbol(str): Symbol of the stock we're querying
days(int): Specifies the maximum number of days that the cross can occur by
Returns:
rsi(float): Relative strength index value for a specified stock in the last X trading days
"""
history = get_historicals(symbol)
closingPrices = [ float(item['close_price']) for item in history ]
price = pd.Series(closingPrices)
rsi = ta.momentum.RSIIndicator(close=price, window=int(days), fillna=False).rsi()
return rsi.iat[-1]
def get_macd(symbol):
"""Determine the Moving Average Convergence/Divergence for a specified stock
Args:
symbol(str): Symbol of the stock we're querying
Returns:
rsi(float): Moving Average Convergence/Divergence value for a specified stock
"""
history = get_historicals(symbol)
closingPrices = [ float(item['close_price']) for item in history ]
price = pd.Series(closingPrices)
macd = ta.trend.MACD(price).macd_diff()
return macd.iat[-1]
def get_buy_rating(symbol):
"""Determine the listed investor rating for a specified stock
Args:
symbol(str): Symbol of the stock we're querying
Returns:
rating(int): 0-100 rating of a particular stock
"""
ratings = r.get_ratings(symbol=symbol)['summary']
if ratings:
return ratings['num_buy_ratings'] / (ratings['num_buy_ratings'] + ratings['num_hold_ratings'] + ratings['num_sell_ratings']) * 100
return 0
def sell_holdings(symbol, holdings_data):
""" Place an order to sell all holdings of a stock.
Args:
symbol(str): Symbol of the stock we want to sell
holdings_data(dict): dict obtained from get_modified_holdings() method
"""
shares_owned = int(float(holdings_data[symbol].get("quantity")))
if not debug:
r.order_sell_market(symbol, shares_owned)
print("####### Selling " + str(shares_owned) + " shares of " + symbol + " #######")
def buy_holdings(potential_buys, profile_data, holdings_data):
""" Places orders to buy holdings of stocks. This method will try to order
an appropriate amount of shares such that your holdings of the stock will
roughly match the average for the rest of your portfoilio. If the share
price is too high considering the rest of your holdings and the amount of
buying power in your account, it will not order any shares.
Args:
potential_buys(list): List of strings, the strings are the symbols of stocks we want to buy
symbol(str): Symbol of the stock we want to sell
holdings_data(dict): dict obtained from r.build_holdings() or get_modified_holdings() method
"""
cash = float(profile_data.get('cash'))
portfolio_value = float(profile_data.get('equity')) - cash
ideal_position_size = (safe_division(portfolio_value, len(holdings_data))+cash/len(potential_buys))/(2 * len(potential_buys))
prices = r.get_latest_price(potential_buys)
for i in range(0, len(potential_buys)):
stock_price = float(prices[i])
if(ideal_position_size < stock_price < ideal_position_size*1.5):
num_shares = int(ideal_position_size*1.5/stock_price)
elif (stock_price < ideal_position_size):
num_shares = int(ideal_position_size/stock_price)
else:
print("####### Tried buying shares of " + potential_buys[i] + ", but not enough buying power to do so#######")
break
print("####### Buying " + str(num_shares) + " shares of " + potential_buys[i] + " #######")
if not debug:
r.order_buy_market(potential_buys[i], num_shares)
def scan_stocks():
""" The main method. Sells stocks in your portfolio if their 50 day moving average crosses
below the 200 day, and buys stocks in your watchlist if the opposite happens.
###############################################################################################
WARNING: Comment out the sell_holdings and buy_holdings lines if you don't actually want to execute the trade.
###############################################################################################
If you sell a stock, this updates tradehistory.txt with information about the position,
how much you've earned/lost, etc.
"""
if debug:
print("----- DEBUG MODE -----\n")
print("----- Starting scan... -----\n")
register_matplotlib_converters()
spy_symbols = get_spy_symbols()
portfolio_symbols = get_portfolio_symbols()
holdings_data = get_modified_holdings()
potential_buys = []
sells = []
stock_data = []
print("Current Portfolio: " + str(portfolio_symbols) + "\n")
# print("Current Watchlist: " + str(watchlist_symbols) + "\n")
print("----- Scanning portfolio for stocks to sell -----\n")
print()
print("PORTFOLIO")
print("-------------------")
print()
print ("{}\t{}\t\t{}\t{}\t{}\t{}".format('SYMBOL', 'PRICE', 'RSI', 'MACD', 'RATING', 'EMA'))
print()
for symbol in portfolio_symbols:
cross, price = golden_cross(symbol, n1=50, n2=200, direction="below")
data = {'symbol': symbol, 'price': price, 'cross': cross, 'rsi': get_rsi(symbol=symbol, days=14), 'macd': get_macd(symbol=symbol), 'buy_rating': get_buy_rating(symbol=symbol)}
stock_data.append(data)
print ("{}\t${:.2f}\t\t{}\t{}\t{}\t{}".format(data['symbol'], data['price'], rsi_to_str(data['rsi']), macd_to_str(data['macd']), rating_to_str(data['buy_rating']), cross_to_str(data['cross'])))
if(cross == False):
sell_holdings(symbol, holdings_data)
sells.append(symbol)
profile_data = r.build_user_profile()
print("\n----- Scanning S&P 500 for stocks to buy -----\n")
for symbol in spy_symbols:
if(symbol not in portfolio_symbols):
cross, price = golden_cross(symbol, n1=50, n2=200, direction="above")
stock_data.append({'symbol': symbol, 'price': price, 'cross': cross, 'rsi': get_rsi(symbol=symbol, days=14), 'macd': get_macd(symbol=symbol), 'buy_rating': get_buy_rating(symbol=symbol)})
if(cross == True):
potential_buys.append(symbol)
if(len(potential_buys) > 0):
buy_holdings(potential_buys, profile_data, holdings_data)
if(len(sells) > 0):
update_trade_history(sells, holdings_data, "tradehistory.txt")
print("----- Scan over -----\n")
print_table(stock_data)
if debug:
print("----- DEBUG MODE -----\n")
# Entry point: run the full buy/sell portfolio scan when this script executes.
scan_stocks()
| 42.365672 | 201 | 0.656068 |
fd60adf005e921981d0393064770bc769120bb9d | 3,401 | py | Python | slivka/server/forms/file_proxy.py | warownia1/Slivca | 5491afec63c8cd41d6f1389a5dd0ba9877b888a1 | [
"Apache-2.0"
] | 5 | 2016-09-01T15:30:46.000Z | 2019-07-15T12:26:46.000Z | slivka/server/forms/file_proxy.py | warownia1/Slivca | 5491afec63c8cd41d6f1389a5dd0ba9877b888a1 | [
"Apache-2.0"
] | 75 | 2016-08-31T11:32:49.000Z | 2021-05-12T14:33:17.000Z | slivka/server/forms/file_proxy.py | warownia1/Slivca | 5491afec63c8cd41d6f1389a5dd0ba9877b888a1 | [
"Apache-2.0"
] | 3 | 2017-06-01T10:21:04.000Z | 2020-06-12T10:32:49.000Z | import io
import os
import shutil
from base64 import urlsafe_b64decode
from bson import ObjectId
from slivka.db.documents import UploadedFile, JobRequest
| 31.490741 | 76 | 0.612173 |
fd618c3a159e9f99d7c6ca6d044db4a500817e13 | 1,160 | py | Python | debug_toolbar/panels/profiling.py | chrismaille/fastapi-debug-toolbar | 76d1e78eda4a23fc2b3e3d3c978ee9d8dbf025ae | [
"BSD-3-Clause"
] | 36 | 2021-07-22T08:11:31.000Z | 2022-01-31T13:09:26.000Z | debug_toolbar/panels/profiling.py | chrismaille/fastapi-debug-toolbar | 76d1e78eda4a23fc2b3e3d3c978ee9d8dbf025ae | [
"BSD-3-Clause"
] | 10 | 2021-07-21T19:39:38.000Z | 2022-02-26T15:35:35.000Z | debug_toolbar/panels/profiling.py | chrismaille/fastapi-debug-toolbar | 76d1e78eda4a23fc2b3e3d3c978ee9d8dbf025ae | [
"BSD-3-Clause"
] | 2 | 2021-07-28T09:55:13.000Z | 2022-02-18T11:29:25.000Z | import typing as t
from fastapi import Request, Response
from pyinstrument import Profiler
from starlette.concurrency import run_in_threadpool
from debug_toolbar.panels import Panel
from debug_toolbar.types import Stats
from debug_toolbar.utils import is_coroutine, matched_endpoint
| 30.526316 | 82 | 0.70431 |
fd63367d2463bae216c32c0f3162ba07be04c060 | 3,003 | py | Python | test/system/auto/simple/compaction.py | marciosilva/accumulo | 70404cbd1e0a2d2b7c2235009e158979abeef35f | [
"Apache-2.0"
] | 3 | 2021-11-11T05:18:23.000Z | 2021-11-11T05:18:43.000Z | test/system/auto/simple/compaction.py | jatrost/accumulo | 6be40f2f3711aaa7d0b68b5b6852b79304af3cff | [
"Apache-2.0"
] | 1 | 2021-06-22T09:52:37.000Z | 2021-06-22T09:52:37.000Z | test/system/auto/simple/compaction.py | isabella232/accumulo-1 | 70404cbd1e0a2d2b7c2235009e158979abeef35f | [
"Apache-2.0"
] | 1 | 2021-06-22T09:33:38.000Z | 2021-06-22T09:33:38.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import logging
import unittest
from simple.bulk import SimpleBulkTest
N = 100000
COUNT = 5
log = logging.getLogger('test.auto')
| 33.741573 | 109 | 0.656011 |
fd651bee99ddf05837b752023ba0b7975d179d63 | 660 | py | Python | src/featurehub/problems/__init__.py | vishalbelsare/FeatureHub | dca517338b6e1359faa47ba309f05691cb96e8f8 | [
"MIT"
] | 85 | 2017-09-11T22:37:34.000Z | 2022-02-26T09:07:05.000Z | src/featurehub/problems/__init__.py | HDI-Project/FeatureFactory | dca517338b6e1359faa47ba309f05691cb96e8f8 | [
"MIT"
] | 5 | 2018-08-08T15:34:36.000Z | 2018-11-15T04:52:10.000Z | src/featurehub/problems/__init__.py | HDI-Project/FeatureFactory | dca517338b6e1359faa47ba309f05691cb96e8f8 | [
"MIT"
] | 18 | 2017-11-01T04:14:16.000Z | 2021-09-27T00:53:32.000Z | import imp
import sys
from sqlalchemy.exc import ProgrammingError
from featurehub.user.session import Session
from featurehub.admin.admin import Commands
try:
for _problem in Commands().get_problems():
# Create a session for each problem and make it importable
_commands = Session(_problem)
_module = imp.new_module(_problem)
_module.__dict__['commands'] = _commands
sys.modules['featurehub.problems.' + _problem] = _module
except ProgrammingError:
print("Competition not initialized properly. User commands "
"unavailable. Please contact the competition administrator.",
file=sys.stderr)
| 33 | 71 | 0.725758 |
b5b97c67425c6b42d928076e5a8d8cb8fc8a23c8 | 12,107 | py | Python | python/lexical_analysis.py | Compiler-Construction-Uni-Freiburg/lecture-notes-2021 | 56300e6649e32f0594bbbd046a2e19351c57dd0c | [
"BSD-3-Clause"
] | 1 | 2022-01-05T07:11:01.000Z | 2022-01-05T07:11:01.000Z | python/lexical_analysis.py | Compiler-Construction-Uni-Freiburg/lecture-notes-2021 | 56300e6649e32f0594bbbd046a2e19351c57dd0c | [
"BSD-3-Clause"
] | null | null | null | python/lexical_analysis.py | Compiler-Construction-Uni-Freiburg/lecture-notes-2021 | 56300e6649e32f0594bbbd046a2e19351c57dd0c | [
"BSD-3-Clause"
] | null | null | null | from dataclasses import dataclass
from functools import reduce
from typing import Callable, Iterable, Iterator
'''
The first phase of a compiler is called `lexical analysis` implemented by a `scanner` or `lexer`.
It breaks a program into a sequence `lexemes`:
meaningful substrings of the input.
It also transforms lexemes into `tokens`:
symbolic representations of lexemes with some internalized information.
The classic, state-of-the-art method to specify lexemes is by regular expressions.
'''
'''
1. Representation of regular expressions.
'''
## smart constructors for regular expressions
## goal: construct regexps in "normal form"
## * avoid Null() subexpressions
## * Epsilon() subexpressions as much as possible
## * nest concatenation and alternative to the right
null = Null()
epsilon = Epsilon()
symbol = Symbol
## utilities to construct regular expressions
def optional(r : Regexp) -> Regexp:
'construct r?'
return alternative(r, epsilon)
def repeat_one(r : Regexp) -> Regexp:
'construct r+'
return concat(r, repeat(r))
## a few examples for regular expressions (taken from JavaScript definition)
'''
digit ::= 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
hexdigit ::= digit | A | B | C | D | E | F | a | b | c | d | e | f
hexprefix ::= 0x | 0X
sign ::= empty | -
empty ::=
integer-literal ::= sign digit+ | sign hexprefix hexdigit+
letter ::= A | B | C | ...| Z | a | b | c | ...| z
identifier-start ::= letter | $ | _
identifier-part ::= identifier-start | digit
identifier ::= identifier-start identifier-part*
'''
def class_regexp(s: str) -> Regexp:
    """Build a regexp matching any one character drawn from `s` (a character class)."""
    return alternative_list(symbol(ch) for ch in s)
def string_regexp(s: str) -> Regexp:
    """Build a regexp matching exactly the literal string `s`."""
    return concat_list(symbol(ch) for ch in s)
# --- Lexeme definitions assembled from the combinators above ---

# Decimal digits, hex digits, and the two hex prefixes ("0x" / "0X").
digit = class_regexp("0123456789")
hexdigit = alternative(digit, class_regexp("ABCDEFabcdef"))
hexprefix = alternative(string_regexp("0x"), string_regexp("0X"))
# Optional leading minus sign.
sign = optional(symbol('-'))
# Plain decimal integer: sign digit+.
integer_literal = concat(sign, repeat_one(digit))
# JavaScript-style integer: decimal form or hex-prefixed form.
integer_literal_js = alternative( concat(sign, repeat_one(digit)),
                                 concat_list([sign, hexprefix, repeat_one(hexdigit)]))
# ASCII letters as alternatives over the character ranges a-z and A-Z.
lc_letter = alternative_list(map(symbol, map(chr, range(ord('a'), ord('z')+1))))
uc_letter = alternative_list(map(symbol, map(chr, range(ord('A'), ord('Z')+1))))
letter = alternative(lc_letter, uc_letter)
# Identifiers start with a letter, '$' or '_' and continue with those or digits.
identifier_start = alternative_list([letter, symbol('$'), symbol('_')])
identifier_part = alternative(identifier_start, digit)
identifier = concat(identifier_start, repeat(identifier_part))
# One or more blanks/tabs/newlines form a white-space lexeme.
blank_characters = "\t "
line_end_characters = "\n\r"
white_space = repeat_one(class_regexp(blank_characters + line_end_characters))
'''
2. Executing regular expressions
The standard method to 'execute' regular expressions is to transform them into finite automata.
Here we use a different method to execute them directly using `derivatives`.
This method uses regular expressions themselves as states of an automaton without constructing it.
We consider a regexp a final state if it accepts the empty word "".
This condition can be checked by a simple function on the regexp.
'''
def accepts_empty(r : Regexp) -> bool:
'check if r accepts the empty word'
match r:
case Null() | Symbol(_):
return False
case Epsilon() | Repeat(_):
return True
case Concat(r1, r2):
return accepts_empty(r1) and accepts_empty(r2)
case Alternative(r1, r2):
return accepts_empty(r1) or accepts_empty(r2)
'''
The transition function of a (deterministic) finite automaton maps
state `r0` and symbol `s` to the next state, say, `r1`.
If the state `r0` recognizes any words `w` that start with `s` (w[0] == s),
then state `r1` recognizes all those words `w` with the first letter removed (w[1:]).
This construction is called the `derivative` of a language by symbol `s`:
derivative(L, s) = { w[1:] | w in L and w[0] == s }
If L is the language recognized by regular expression `r0`,
then we can effectively compute a regular expression for derivative(L, s)!
As follows:
'''
def after_symbol(s : str, r : Regexp) -> Regexp:
'produces regexp after r consumes symbol s'
match r:
case Null() | Epsilon():
return null
case Symbol(s_expected):
return epsilon if s == s_expected else null
case Alternative(r1, r2):
return alternative(after_symbol(s, r1), after_symbol(s, r2))
case Concat(r1, r2):
return alternative(concat(after_symbol(s, r1), r2),
after_symbol(s, r2) if accepts_empty(r1) else null)
case Repeat(r1):
return concat(after_symbol(s, r1), Repeat(r1))
## matching against a regular expression
########################################################################
'''
3. Lexer descriptions
A lexer (scanner) is different from a finite automaton in several aspects.
1. The lexer must classify the next lexeme from a choice of several regular expressions.
It cannot match a single regexp, but it has to keep track and manage matching for
several regexps at the same time.
2. The lexer follows the `maximum munch` rule, which says that the next lexeme is
the longest prefix that matches one of the regular expressions.
3. Once a lexeme is identified, the lexer must turn it into a token and attribute.
Re maximum munch consider this input:
ifoundsalvationinapubliclavatory
Suppose that `if` is a keyword, why should the lexer return <identifier> for this input?
Similarly:
returnSegment
would count as an identifier even though starting with the keyword `return`.
These requirements motivate the following definitions.
A lex_action
* takes some (s : str, i : int position in s, j : int pos in s)
* consumes the lexeme sitting at s[i:j]
* returns (token for s[i:j], some k >= j)
'''
Position = int # input position
lex_result = tuple[Token, Position]
lex_action = Callable[[str, Position, Position], lex_result]
# a lexer rule attaches a lex_action to a regular expression
# a lexer tries to match its input to a list of lex rules
Lex_state = list[Lex_rule]
# reading a symbol advances the regular expression of each lex rule
#####################################################################
def make_scanner(scan_one: Callable[[str, Position], lex_result], ss: str) -> Iterator[Token]:
    """Lift a single-lexeme scanner into an iterator over all tokens of `ss`."""
    pos = 0
    end = len(ss)
    while pos < end:
        # scan_one consumes the lexeme at `pos` and reports the new position
        token, pos = scan_one(ss, pos)
        yield token
## example: excerpt from JavaScript scanner
escaped_char = concat(symbol('\\'), alternative(symbol('\\'), symbol('"')))
content_char = alternative_list([symbol(chr(a))
for a in range(ord(' '), 128)
if a not in [ord('\\'), ord('"')]])
string_literal = concat_list([symbol('"'), repeat(alternative(escaped_char, content_char)), symbol('"')])
string_spec: Lex_state = [
Lex_rule(escaped_char, lambda ss, i, j: (ss[i+1], j)),
Lex_rule(content_char, lambda ss, i, j: (ss[i], j))
]
string_token = Scan(string_spec).scan_one()
def strlit(ss: str) -> Strlit:
    """Build a Strlit token, decoding escapes via the subsidiary string scanner."""
    decoded_chars = make_scanner(string_token, ss)
    return Strlit("".join(decoded_chars))
js_spec: Lex_state = [
Lex_rule(string_regexp("return"), lambda ss, i, j: (Return(), j)),
Lex_rule(integer_literal, lambda ss, i, j: (Intlit(int(ss[i:j])), j)),
Lex_rule(identifier, lambda ss, i, j: (Ident(ss[i:j]), j)),
Lex_rule(white_space, lambda ss, i, j: js_token(ss, j)),
Lex_rule(symbol("("), lambda ss, i, j: (Lparen(), j)),
Lex_rule(symbol(")"), lambda ss, i, j: (Rparen(), j)),
Lex_rule(symbol("/"), lambda ss, i, j: (Slash(), j)),
Lex_rule(string_literal, lambda ss, i, j: (strlit(ss[i+1:j-1]), j))
]
js_token = Scan(js_spec).scan_one() | 32.810298 | 105 | 0.641943 |
b5ba7ba10498502b304fe0e8be303cfbec8a9050 | 179 | py | Python | tapiriik/web/views/dashboard.py | prohfesor/tapiriik | 0c476f8bb6b3d51674f0117b054777405ff2ee0d | [
"Apache-2.0"
] | 1,445 | 2015-01-01T21:43:31.000Z | 2022-03-17T13:40:23.000Z | tapiriik/web/views/dashboard.py | prohfesor/tapiriik | 0c476f8bb6b3d51674f0117b054777405ff2ee0d | [
"Apache-2.0"
] | 441 | 2015-01-02T03:37:49.000Z | 2022-03-31T18:18:03.000Z | tapiriik/web/views/dashboard.py | prohfesor/tapiriik | 0c476f8bb6b3d51674f0117b054777405ff2ee0d | [
"Apache-2.0"
] | 333 | 2015-01-06T12:14:15.000Z | 2022-03-27T19:58:48.000Z | from django.shortcuts import render
from django.views.decorators.csrf import ensure_csrf_cookie
| 22.375 | 59 | 0.810056 |
b5bab08cb20bbb7d8d1adf2e537dc3cd96869fbf | 2,830 | py | Python | rcommander/src/rcommander/trigger_tool.py | rummanwaqar/rcommander-core | 7106d5868db76c47dea6ad11118a54351a8bd390 | [
"BSD-3-Clause"
] | 4 | 2015-04-08T09:57:43.000Z | 2021-08-12T01:44:37.000Z | rcommander/src/rcommander/trigger_tool.py | jhu-lcsr-forks/rcommander-core | 1a0350e9b93687eff6a4407f72b5250be5f56919 | [
"BSD-3-Clause"
] | 1 | 2015-03-12T09:10:27.000Z | 2015-03-12T09:10:27.000Z | rcommander/src/rcommander/trigger_tool.py | jhu-lcsr-forks/rcommander-core | 1a0350e9b93687eff6a4407f72b5250be5f56919 | [
"BSD-3-Clause"
] | 3 | 2015-03-12T10:59:17.000Z | 2021-06-21T02:13:57.000Z | import tool_utils as tu
import PyQt4.QtGui as qtg
import PyQt4.QtCore as qtc
from PyQt4.QtGui import *
from PyQt4.QtCore import *
import smach
import rospy
from msg import Trigger
TRIGGER_TOPIC = 'trigger'
| 29.479167 | 99 | 0.639929 |
b5bace535ed77ddf8b3b03e2ed93c9e75ae9c3a6 | 1,488 | py | Python | tests/helpers/fake_tunnel.py | intdata-bsc/idact | 54cb65a711c145351e205970c27c83e6393cccf5 | [
"MIT"
] | 5 | 2018-12-06T15:40:34.000Z | 2019-06-19T11:22:58.000Z | tests/helpers/fake_tunnel.py | garstka/idact | b9c8405c94db362c4a51d6bfdf418b14f06f0da1 | [
"MIT"
] | 9 | 2018-12-06T16:35:26.000Z | 2019-04-28T19:01:40.000Z | tests/helpers/fake_tunnel.py | intdata-bsc/idact | 54cb65a711c145351e205970c27c83e6393cccf5 | [
"MIT"
] | 2 | 2019-04-28T19:18:58.000Z | 2019-06-17T06:56:28.000Z | from contextlib import contextmanager
from idact import ClusterConfig
from idact.detail.nodes.node_impl import NodeImpl
from idact.detail.tunnel.tunnel_internal import TunnelInternal
| 22.892308 | 68 | 0.625672 |
b5bbdd0d4bc19356fb3ff4442955d7c4c889b2e9 | 3,226 | py | Python | app/airtable/base_school_db/educators_schools.py | WildflowerSchools/wf-airtable-api | 963021e5108462d33efa222fedb00890e1788ad6 | [
"MIT"
] | null | null | null | app/airtable/base_school_db/educators_schools.py | WildflowerSchools/wf-airtable-api | 963021e5108462d33efa222fedb00890e1788ad6 | [
"MIT"
] | null | null | null | app/airtable/base_school_db/educators_schools.py | WildflowerSchools/wf-airtable-api | 963021e5108462d33efa222fedb00890e1788ad6 | [
"MIT"
] | null | null | null | from datetime import date
from typing import Optional, Union
from pydantic import BaseModel, Field, validator
from . import educators as educators_models
from . import schools as schools_models
from app.airtable.response import AirtableResponse
from app.airtable.validators import get_first_or_default_none
| 32.26 | 119 | 0.712027 |
b5bc0b82b561c3ccd0c214272db1e77e19243f08 | 4,003 | py | Python | rad/rest/client/cli/zpool/cmd_zpool_list.py | guillermomolina/rad-rest-client | c22528764bdf9dddc5ff7d269d7465d34878a7e3 | [
"Apache-2.0"
] | 1 | 2021-09-17T13:40:13.000Z | 2021-09-17T13:40:13.000Z | rad/rest/client/cli/zpool/cmd_zpool_list.py | guillermomolina/rad-rest-client | c22528764bdf9dddc5ff7d269d7465d34878a7e3 | [
"Apache-2.0"
] | null | null | null | rad/rest/client/cli/zpool/cmd_zpool_list.py | guillermomolina/rad-rest-client | c22528764bdf9dddc5ff7d269d7465d34878a7e3 | [
"Apache-2.0"
] | 1 | 2021-09-17T16:26:32.000Z | 2021-09-17T16:26:32.000Z | # Copyright 2021, Guillermo Adrin Molina
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import json
import yaml
from rad.rest.client.util import print_table, print_parsable
from rad.rest.client.api.authentication_1 import Session
from rad.rest.client.api.zfsmgr_1 import Zpool
from rad.rest.client.api.zfsmgr_1.zpool_resource import ZpoolResource
LOG = logging.getLogger(__name__)
| 43.043011 | 105 | 0.572321 |
b5bcf620df665e14fd0ade4b0917ffe41b1ea768 | 3,736 | py | Python | Sofware/main.py | Mark-MDO47/PiPod | 990042ff5ad69d9fc93d1bd5bd684db730156222 | [
"MIT"
] | 63 | 2018-08-02T20:50:41.000Z | 2022-03-02T02:42:48.000Z | Sofware/main.py | Mark-MDO47/PiPod | 990042ff5ad69d9fc93d1bd5bd684db730156222 | [
"MIT"
] | 2 | 2018-08-30T16:31:48.000Z | 2021-12-02T01:28:23.000Z | Sofware/main.py | Mark-MDO47/PiPod | 990042ff5ad69d9fc93d1bd5bd684db730156222 | [
"MIT"
] | 14 | 2018-08-05T04:45:07.000Z | 2022-02-18T10:56:20.000Z | #!/usr/bin/python3
import playback
import display
import navigation
import device
import pygame
done = False

# Wire up the subsystems: audio playback, screen rendering, menu
# navigation, and the device/hardware interface.
music = playback.music()
view = display.view()
menu = navigation.menu()
PiPod = device.PiPod()

menu.loadMetadata()
status = PiPod.getStatus()
songMetadata = music.getStatus()

# Timer event so the screen repaints twice a second even while idle.
displayUpdate = pygame.USEREVENT + 1
pygame.time.set_timer(displayUpdate, 500)

view.update(status, menu.menuDict, songMetadata)

while not done:
    music.loop()
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        if event.type == pygame.KEYDOWN:
            # Reset the pending menu action for *this* key press.  Without
            # this, the K_RIGHT/K_RETURN handlers below would re-read the
            # action left over from a previous event (replaying e.g. a stale
            # "play"), or raise NameError on the very first key press.
            action = None
            if event.key == pygame.K_ESCAPE:
                PiPod.toggleSleep()
            elif event.key == pygame.K_u:
                music.volumeUp()
            elif event.key == pygame.K_d:
                music.volumeDown()
            elif event.key == pygame.K_UP:
                # status[2] appears to be a "display asleep" flag; while set,
                # UP/DOWN adjust volume instead of the menu -- TODO confirm.
                if status[2]:
                    music.volumeUp()
                elif menu.menuDict["current"] == "musicController":
                    menu.gotomenu()
                else:
                    action = menu.up()
            elif event.key == pygame.K_DOWN:
                if status[2]:
                    music.volumeDown()
                elif menu.menuDict["current"] == "musicController":
                    music.shuffle()
                    menu.menuDict["Queue"] = music.playlist
                else:
                    action = menu.down()
            elif event.key == pygame.K_LEFT:
                if status[2] or menu.menuDict["current"] == "musicController":
                    music.prev()
                else:
                    action = menu.left()
            elif event.key == pygame.K_RIGHT:
                if status[2] or menu.menuDict["current"] == "musicController":
                    music.next()
                else:
                    action = menu.right()
                if action == "updateList":
                    music.updateList(menu.menuDict["Queue"])
            elif event.key == pygame.K_RETURN:
                if status[2] or menu.menuDict["current"] == "musicController":
                    music.playPause()
                else:
                    action = menu.select()
                # Dispatch whatever command the menu selection produced.
                if action == "play":
                    music.loadList(menu.menuDict["Queue"])
                    music.play()
                elif action == "clearQueue":
                    menu.menuDict["Queue"] = []
                    music.clearQueue()
                elif action == "updateLibrary":
                    if music.updateLibrary():
                        done = True
                elif action == "toggleSleep":
                    PiPod.toggleSleep()
                elif action == "shutdown":
                    while not PiPod.shutdown():
                        view.popUp("Shutdown")
                elif action == "reboot":
                    while not PiPod.reboot():
                        view.popUp("Reboot")
                elif action == "playAtIndex":
                    if menu.menuDict["selectedItem"] == 0:
                        music.clearQueue()
                        menu.menuDict["Queue"] = []
                    else:
                        music.playAtIndex(menu.menuDict["selectedItem"]-1)
        # Refresh device/song state and repaint after every event.
        status = PiPod.getStatus()
        songMetadata = music.getStatus()
        view.update(status, menu.menuDict, songMetadata)
    # Cap the loop at 30 FPS so the player doesn't burn a whole CPU core.
    pygame.time.Clock().tick(30)
| 34.592593 | 103 | 0.482869 |
b5bd68a22d06b0793abe8bb8a40789d31dac7150 | 3,250 | py | Python | cloudshell/cli/session/telnet_session.py | test-gh-org-workflow/probable-garbanzo | c6b8a0dbc573a2a0073b5ab7c8619c4d0baf7088 | [
"Apache-2.0"
] | 4 | 2017-01-31T14:05:19.000Z | 2019-04-10T16:35:44.000Z | cloudshell/cli/session/telnet_session.py | test-gh-org-workflow/probable-garbanzo | c6b8a0dbc573a2a0073b5ab7c8619c4d0baf7088 | [
"Apache-2.0"
] | 89 | 2016-05-25T14:17:38.000Z | 2022-03-17T13:09:59.000Z | cloudshell/cli/session/telnet_session.py | test-gh-org-workflow/probable-garbanzo | c6b8a0dbc573a2a0073b5ab7c8619c4d0baf7088 | [
"Apache-2.0"
] | 6 | 2016-07-21T12:24:10.000Z | 2022-02-21T06:33:18.000Z | import socket
import telnetlib
from collections import OrderedDict
from cloudshell.cli.session.connection_params import ConnectionParams
from cloudshell.cli.session.expect_session import ExpectSession
from cloudshell.cli.session.session_exceptions import (
SessionException,
SessionReadEmptyData,
SessionReadTimeout,
)
| 27.542373 | 88 | 0.614462 |
b5bdd10944be47a0eef70a2d5c3fc45fddcfaaf6 | 5,698 | py | Python | src/contentbase/auditor.py | ClinGen/clincoded | 5624c74546ce2a44eda00ee632a8de8c2099da10 | [
"MIT"
] | 30 | 2015-09-23T20:38:57.000Z | 2021-03-10T03:12:46.000Z | src/contentbase/auditor.py | ClinGen/clincoded | 5624c74546ce2a44eda00ee632a8de8c2099da10 | [
"MIT"
] | 2,132 | 2015-06-08T21:50:35.000Z | 2022-02-15T22:44:18.000Z | src/contentbase/auditor.py | ClinGen/clincoded | 5624c74546ce2a44eda00ee632a8de8c2099da10 | [
"MIT"
] | 10 | 2015-09-25T20:11:25.000Z | 2020-12-09T02:58:44.000Z | """ Cross-object data auditing
Schema validation allows for checking values within a single object.
We also need to perform higher order checking between linked objects.
"""
from past.builtins import basestring
import logging
import venusian
logger = logging.getLogger(__name__)
# Bidirectional level table modelled on logging's (old) _levelNames dict,
# mapping both numeric level -> name and name -> numeric level.  The audit
# severities DCC_ACTION (30) and NOT_COMPLIANT (50) are spliced into the
# standard ladder, which pushes ERROR up to 60.
_levelNames = {
    0: 'NOTSET',
    10: 'DEBUG',
    20: 'INFO',
    30: 'DCC_ACTION',
    40: 'WARNING',
    50: 'NOT_COMPLIANT',
    60: 'ERROR',
    'DEBUG': 10,
    'ERROR': 60,
    'INFO': 20,
    'NOTSET': 0,
    'WARNING': 40,
    'NOT_COMPLIANT': 50,
    'DCC_ACTION': 30,
}
# Imperative configuration
def add_audit_checker(config, checker, item_type, condition=None, frame='embedded'):
    """Imperative (Pyramid-style) configuration: register an audit checker."""
    registered_auditor = config.registry['auditor']
    config.action(
        None,
        registered_auditor.add_audit_checker,
        (checker, item_type, condition, frame),
    )
# Declarative configuration
def audit_checker(item_type, condition=None, frame='embedded'):
""" Register an audit checker
"""
return decorate
| 32.56 | 92 | 0.551071 |
b5be5d0470828bf2d8483755e027514f357777f6 | 1,694 | py | Python | PyTrinamic/ic/TMC2130/TMC2130.py | trinamic-AA/PyTrinamic | b054f4baae8eb6d3f5d2574cf69c232f66abb4ee | [
"MIT"
] | 37 | 2019-01-13T11:08:45.000Z | 2022-03-25T07:18:15.000Z | PyTrinamic/ic/TMC2130/TMC2130.py | trinamic-AA/PyTrinamic | b054f4baae8eb6d3f5d2574cf69c232f66abb4ee | [
"MIT"
] | 56 | 2019-02-25T02:48:27.000Z | 2022-03-31T08:45:34.000Z | PyTrinamic/ic/TMC2130/TMC2130.py | trinamic-AA/PyTrinamic | b054f4baae8eb6d3f5d2574cf69c232f66abb4ee | [
"MIT"
] | 26 | 2019-01-14T05:20:16.000Z | 2022-03-08T13:27:35.000Z | '''
Created on 14.10.2019
@author: JM
'''
from PyTrinamic.ic.TMC2130.TMC2130_register import TMC2130_register
from PyTrinamic.ic.TMC2130.TMC2130_register_variant import TMC2130_register_variant
from PyTrinamic.ic.TMC2130.TMC2130_fields import TMC2130_fields
from PyTrinamic.helpers import TMC_helpers
| 32.576923 | 183 | 0.707202 |
b5be6e33c1957ff7fe4d9e1d181d17faa43d7603 | 319 | py | Python | setup.py | space-cadet/tncontract | a5503951e218a91e9ba03e11c601b95b6bfcb72a | [
"MIT"
] | 39 | 2016-09-19T01:22:43.000Z | 2022-01-12T07:26:29.000Z | setup.py | space-cadet/tncontract | a5503951e218a91e9ba03e11c601b95b6bfcb72a | [
"MIT"
] | 9 | 2016-09-25T22:51:35.000Z | 2019-07-14T16:56:12.000Z | setup.py | space-cadet/tncontract | a5503951e218a91e9ba03e11c601b95b6bfcb72a | [
"MIT"
] | 12 | 2017-02-14T11:55:30.000Z | 2021-02-01T01:09:31.000Z | from setuptools import setup, find_packages
# Get version from tncontract/version.py
exec(open("tncontract/version.py").read())
setup(
name = "tncontract",
version = __version__,
packages = find_packages(),
author = "Andrew Darmawan",
license = "MIT",
install_requires = ["numpy", "scipy"],
)
| 22.785714 | 43 | 0.677116 |
b5bf153601a744508ecc99c7f24b1fb9627883ce | 150 | py | Python | exampleb.py | JFletcher94/tBot | 051281c81b5712f7ecdb4355b7ea7f6551dec7c7 | [
"MIT"
] | null | null | null | exampleb.py | JFletcher94/tBot | 051281c81b5712f7ecdb4355b7ea7f6551dec7c7 | [
"MIT"
] | null | null | null | exampleb.py | JFletcher94/tBot | 051281c81b5712f7ecdb4355b7ea7f6551dec7c7 | [
"MIT"
] | null | null | null | #exampleb generates a full tweet
#examplet only calls get_string()
def get_string():
'''generate full tweet text'''
return 'example #text'
| 18.75 | 34 | 0.7 |
b5bf895845b26e76fb4d05e08f9ee6d0b182cce7 | 37 | py | Python | reto_numeros_nones.py | Naxred/PensamientoComputacionalPython | a19fe394fd8b6265d486d432bbc5774d0cf35368 | [
"Unlicense"
] | null | null | null | reto_numeros_nones.py | Naxred/PensamientoComputacionalPython | a19fe394fd8b6265d486d432bbc5774d0cf35368 | [
"Unlicense"
] | null | null | null | reto_numeros_nones.py | Naxred/PensamientoComputacionalPython | a19fe394fd8b6265d486d432bbc5774d0cf35368 | [
"Unlicense"
] | null | null | null | for x in range(1,100,2):
print(x) | 18.5 | 24 | 0.594595 |
b5c11f56555074149df0acc7544e0c995e6baf54 | 3,213 | py | Python | gryphon/data_service/auditors/trades_volume_auditor.py | qiquanzhijia/gryphon | 7bb2c646e638212bd1352feb1b5d21536a5b918d | [
"Apache-2.0"
] | 1,109 | 2019-06-20T19:23:27.000Z | 2022-03-20T14:03:43.000Z | gryphon/data_service/auditors/trades_volume_auditor.py | qiquanzhijia/gryphon | 7bb2c646e638212bd1352feb1b5d21536a5b918d | [
"Apache-2.0"
] | 63 | 2019-06-21T05:36:17.000Z | 2021-05-26T21:08:15.000Z | gryphon/data_service/auditors/trades_volume_auditor.py | qiquanzhijia/gryphon | 7bb2c646e638212bd1352feb1b5d21536a5b918d | [
"Apache-2.0"
] | 181 | 2019-06-20T19:42:05.000Z | 2022-03-21T13:05:13.000Z | # -*- coding: utf-8 -*-
from datetime import timedelta
from delorean import Delorean
from sqlalchemy import and_
from twisted.internet import defer
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from twisted.internet.task import LoopingCall
from twisted.python import log
from gryphon.data_service.auditors.auditor import Auditor
import gryphon.data_service.util as util
from gryphon.lib.models.emeraldhavoc.base import EmeraldHavocBase
from gryphon.lib.twistedbitcoinwisdom import TwistedBitcoinWisdom
metadata = EmeraldHavocBase.metadata
trades = metadata.tables['trade']
EXCHANGES = ['KRAKEN', 'BITSTAMP', 'BITFINEX', 'CAVIRTEX', 'VAULTOFSATOSHI']
| 32.13 | 91 | 0.591659 |
b5c68bd329a9e17d20d1b6dbb51e72b824cb0447 | 370 | py | Python | shablbot/__main__.py | Blackgard/vk-bot-python | 5d1eb269d76567a8e31dec47c0ea3c5cc1bcbc3c | [
"MIT"
] | 5 | 2019-11-12T05:15:07.000Z | 2022-01-20T06:26:55.000Z | shablbot/__main__.py | Blackgard/vk-bot-python | 5d1eb269d76567a8e31dec47c0ea3c5cc1bcbc3c | [
"MIT"
] | 1 | 2021-06-02T00:33:47.000Z | 2021-06-02T00:33:47.000Z | shablbot/__main__.py | Blackgard/vk-bot-python | 5d1eb269d76567a8e31dec47c0ea3c5cc1bcbc3c | [
"MIT"
] | 2 | 2021-12-18T17:03:10.000Z | 2022-01-29T17:08:35.000Z | """
Shablbot manager commands
"""
import sys
if __name__ == '__main__':
main()
| 18.5 | 86 | 0.654054 |
b5c6fd5f851fb29a3b78c943cdd438c87f4e64cf | 726 | py | Python | Patterns_Custom_Row-Column_PYTHON.py | sukantadas194/Patters.Python | 14e62b61defca2e1f192f6ac8b1484c0a9745cfb | [
"BSD-2-Clause"
] | null | null | null | Patterns_Custom_Row-Column_PYTHON.py | sukantadas194/Patters.Python | 14e62b61defca2e1f192f6ac8b1484c0a9745cfb | [
"BSD-2-Clause"
] | null | null | null | Patterns_Custom_Row-Column_PYTHON.py | sukantadas194/Patters.Python | 14e62b61defca2e1f192f6ac8b1484c0a9745cfb | [
"BSD-2-Clause"
] | null | null | null |
#Print Custom Row-Column Patterns..
#e.g. '''@ @ @ @
# @ @ @ @
# @ @ @ @'''
w = print("What do you want to print?")
wa = str(input("Answer : "))
try:
m1 = print("How many rows do you want to print?")
n1 = int(input("Answer : "))
m2 = print("How many columns do you want to print?")
n2 = int(input("Answer : "))
if n1 <= 0 or n2 <= 0:
print("Wrong Input")
print("Input should be positive & greater than '0'")
print("Start over again..")
for k in range(n1):
for i in range(n2):
print(wa, end=" ")
print()
except:
print("Wrong Input")
print("Only numbers are accepted")
print("Start over again..") | 26.888889 | 61 | 0.506887 |
b5c737a61480861fc56685c1832b7805d5bbd65b | 17,116 | py | Python | app/cluegame.py | dabreese00/clue-solver | 3b99778075882974459e1c75792e7d051b6fe20a | [
"MIT"
] | null | null | null | app/cluegame.py | dabreese00/clue-solver | 3b99778075882974459e1c75792e7d051b6fe20a | [
"MIT"
] | null | null | null | app/cluegame.py | dabreese00/clue-solver | 3b99778075882974459e1c75792e7d051b6fe20a | [
"MIT"
] | null | null | null | """cluegame.py -- Classes to track Clue game events and make inferences
The board game Clue is also known as Cluedo. This module contains classes that
make it possible to record specific knowledge-generating events that a Clue
player may observe during the course of a game (such as, for example, that
Player A showed Player B one of either Card X, Card Y, or Card Z).
More to the point, Clue is a game about building knowledge through logical
inference. As such, these classes are designed to track not only these events
themselves, but also the sum total of game knowledge that can be logically
inferred from them.
Classes:
ClueCardType -- an Enum of possible card types in the game
ClueRelationType -- an Enum of possible types of Player-Card relation
Player -- a player in the Clue game
Card -- a card in the Clue game
ClueRelation -- an individual Player-Card relation that is known
Game -- a tracker and inference engine for total game knowledge
Functions:
normalize_to_list -- matches an object (or its name) to a list of objects
"""
from app.objectfilter import ObjectFilter
import enum
import pickle
import os
import collections
Player = collections.namedtuple('Player', 'name hand_size')
Player.__doc__ += ': A player in the Clue game'
Player.name.__doc__ = 'A name by which this player is identified'
Player.hand_size.__doc__ = 'Number of cards in hand of this player'
Card = collections.namedtuple('Card', 'name card_type')
Card.__doc__ += ': A card in the Clue game'
Card.name.__doc__ = 'A name by which this card is identified'
Card.card_type.__doc__ = 'Which ClueCardType this card belongs to'
def normalize_to_list(obj, lst):
    """Returns a matching member of a Player or Card list, if possible.

    Assumes names of objects in the list are unique, for match by name.

    Arguments:
    obj -- a Player, Card, or a name (string) representing one
    lst -- a list of Players or Cards

    Returns:
    a Player or Card from the list, matching obj
    """
    if obj in lst:
        return obj
    # Fall back to a lookup by (unique) name.
    for member in lst:
        if member.name == obj:
            return member
    raise ValueError("No such Player/Card {} in list {}".format(
        obj, lst))
Game.load = classmethod(Game.load)
Game.delete = classmethod(Game.delete)
| 37.535088 | 79 | 0.62649 |
b5cdf29e6b6b8257a8b1c9b388ba9bf3693defbc | 726 | py | Python | config.py | adesolagbenga0052/web-app | c6d6ca3f998897986ac25a1e93477af0a8bfacf6 | [
"Apache-2.0"
] | null | null | null | config.py | adesolagbenga0052/web-app | c6d6ca3f998897986ac25a1e93477af0a8bfacf6 | [
"Apache-2.0"
] | null | null | null | config.py | adesolagbenga0052/web-app | c6d6ca3f998897986ac25a1e93477af0a8bfacf6 | [
"Apache-2.0"
] | null | null | null | """Flask configuration."""
from os import environ, path
basedir = path.abspath(path.dirname(__file__))
| 27.923077 | 106 | 0.698347 |
b5cff4fdd46f8909e02bbf2707f338423530762f | 691 | py | Python | tests/test_metrics.py | tolmanam/python-nomad-alt | f93d3f6553cdb1ee16dadabd385208b5cc550024 | [
"MIT"
] | null | null | null | tests/test_metrics.py | tolmanam/python-nomad-alt | f93d3f6553cdb1ee16dadabd385208b5cc550024 | [
"MIT"
] | null | null | null | tests/test_metrics.py | tolmanam/python-nomad-alt | f93d3f6553cdb1ee16dadabd385208b5cc550024 | [
"MIT"
] | null | null | null | from nomad_alt import Nomad
import json
import uuid
from pprint import pformat
import os
import pytest
import nomad_alt.exceptions
import tests.common as common
| 24.678571 | 97 | 0.732272 |
b5d0213de62ed3ea48e3a10bf0cc5d6b41c2e553 | 5,979 | py | Python | djproject/pictureupload/views.py | missingDown/webForUpload | fbd5ed9e8cfcd4ad906913f4a31c24e87919f9a3 | [
"MIT"
] | null | null | null | djproject/pictureupload/views.py | missingDown/webForUpload | fbd5ed9e8cfcd4ad906913f4a31c24e87919f9a3 | [
"MIT"
] | null | null | null | djproject/pictureupload/views.py | missingDown/webForUpload | fbd5ed9e8cfcd4ad906913f4a31c24e87919f9a3 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.http import HttpResponse
import logging
import json
import base64
import time
# Create your views here.
logger = logging.getLogger(__name__)
# form-data/Multipart POST
# BodyJson POST
#
# def writeLog(text):
# with open('/mnt/testlog.txt', 'a+') as fp:
# fp.write(text+'\n')
# form-data/Multipart PUT | 38.082803 | 85 | 0.632882 |
b5d05429ba74cb0c817d19e6c37641ec569991cf | 2,191 | py | Python | logparser/logs/logs.py | rkorte/rticonnextdds-logparser | e8d0446c8d1318e68886a58e95c3f1ba4a1fa455 | [
"Apache-2.0"
] | 11 | 2016-06-28T13:26:01.000Z | 2021-06-07T09:18:32.000Z | logparser/logs/logs.py | rkorte/rticonnextdds-logparser | e8d0446c8d1318e68886a58e95c3f1ba4a1fa455 | [
"Apache-2.0"
] | 27 | 2016-10-26T19:57:16.000Z | 2019-04-12T16:48:11.000Z | logparser/logs/logs.py | rkorte/rticonnextdds-logparser | e8d0446c8d1318e68886a58e95c3f1ba4a1fa455 | [
"Apache-2.0"
] | 7 | 2016-08-28T17:24:15.000Z | 2021-12-10T11:28:20.000Z | # Log Parser for RTI Connext.
#
# Copyright 2016 Real-Time Innovations, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create the global list of regular expressions and functions.
Functions:
+ add_regex: Compile the regex and add it to the list.
+ create_regex_list: Create the list of regular expressions and functions.
"""
from __future__ import absolute_import
import re
from logparser.logs.custom.logs import get_regex_list as custom_regex
from logparser.logs.debug.logs import get_regex_list as debug_regex
from logparser.logs.events.logs import get_regex_list as events_regex
from logparser.logs.micro.logs import get_regex_list as micro_regex
from logparser.logs.micro.micro import init as init_micro
from logparser.logs.network.logs import get_regex_list as network_regex
from logparser.logs.routing.logs import get_regex_list as routing_regex
def add_regex(log_list, method, regex):
    """Append a (method, compiled-pattern) tuple for *regex* to *log_list*."""
    compiled = re.compile(regex)
    log_list.append((method, compiled))
def create_regex_list(state):
    """Create the list of regular expressions and functions.

    Arguments:
    state -- parser state dictionary; the 'debug' key controls whether the
        debug expressions are included.

    Returns:
    list of (function, compiled_regex) tuples. Registration order is
    preserved: micro, network, events, routing, custom, then (optionally)
    debug -- the same order as the original implementation.
    """
    init_micro(state)
    expressions = []
    # Plain loops replace the original side-effect list comprehensions,
    # which required a "# pylint: disable=W0106" suppression.
    sources = [micro_regex, network_regex, events_regex, routing_regex,
               custom_regex]
    if state['debug']:
        sources.append(debug_regex)
    for get_regex_list in sources:
        for method, regex in get_regex_list():
            add_regex(expressions, method, regex)
    return expressions
| 40.574074 | 76 | 0.748517 |
b5d0c034f7242aa14fa3baca13d703e86f187f17 | 276 | py | Python | torrents/tests/test_file.py | noahgoldman/torwiz | 213be5cf3b62d2c18c09e2fe4b869c549c263f32 | [
"MIT"
] | 1 | 2015-03-09T01:58:23.000Z | 2015-03-09T01:58:23.000Z | torrents/tests/test_file.py | noahgoldman/torwiz | 213be5cf3b62d2c18c09e2fe4b869c549c263f32 | [
"MIT"
] | 3 | 2015-04-01T22:49:58.000Z | 2015-05-01T19:09:11.000Z | torrents/tests/test_file.py | noahgoldman/torwiz | 213be5cf3b62d2c18c09e2fe4b869c549c263f32 | [
"MIT"
] | null | null | null | from bson.objectid import ObjectId
from torrents.file import TorrentFile
| 25.090909 | 82 | 0.695652 |
b5d13876f65729d4efb83ad2b61955efd49a0d23 | 2,444 | py | Python | google/cloud/storage/benchmarks/storage_throughput_plots.py | millerantonio810/google-cloud-cpp | 71582d922bc22b0dcbc58234f36c726ea3b7c171 | [
"Apache-2.0"
] | 1 | 2021-01-16T02:43:50.000Z | 2021-01-16T02:43:50.000Z | google/cloud/storage/benchmarks/storage_throughput_plots.py | millerantonio810/google-cloud-cpp | 71582d922bc22b0dcbc58234f36c726ea3b7c171 | [
"Apache-2.0"
] | null | null | null | google/cloud/storage/benchmarks/storage_throughput_plots.py | millerantonio810/google-cloud-cpp | 71582d922bc22b0dcbc58234f36c726ea3b7c171 | [
"Apache-2.0"
] | 1 | 2020-05-09T20:12:05.000Z | 2020-05-09T20:12:05.000Z | #!/usr/bin/env python3
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Summarize the results from running storage_throughput_benchmark."""
# %%
import argparse
import pandas as pd
import plotnine as p9
from scipy.stats import mannwhitneyu
# %%
pd.set_option("precision", 2)
# %%
def load_benchmark_output(file):
    """Load storage_throughput_benchmark output into a DataFrame.

    Parses the CSV (lines starting with '#' are treated as comments) and
    adds two derived columns: MiB (payload size) and MiBs (throughput).
    """
    columns = ["Op", "Api", "Bytes", "ElapsedMs"]
    frame = pd.read_csv(file, comment="#", names=columns)
    frame["MiB"] = frame["Bytes"] / 1024 / 1024
    frame["MiBs"] = frame["MiB"] * 1000 / frame["ElapsedMs"]
    return frame
# %%
# %%
parser = argparse.ArgumentParser()
parser.add_argument(
"--input-file",
type=argparse.FileType("r"),
required=True,
help="the benchmark output file to load",
)
parser.add_argument(
"--output-file", type=str, required=True, help="the name for the output plot"
)
args = parser.parse_args()
# %%
data = load_benchmark_output(args.input_file)
# %%
print(data.head())
# %%
print(data.describe())
# %%
(
p9.ggplot(
data=data[(data.Op != "CREATE") & (data.Op != "DELETE")],
mapping=p9.aes(x="Op", y="MiBs", color="Api"),
)
+ p9.facet_wrap(facets="Op", labeller="label_both", scales="free")
+ p9.geom_boxplot()
).save(args.output_file)
# %%
compare_api(data, "READ")
compare_api(data, "WRITE")
| 26.857143 | 86 | 0.657529 |
b5d20cd2d199da5465fadfa36c7ff94c0bda75f4 | 711 | py | Python | djAidESILV/products/migrations/0002_auto_20200522_1921.py | Kulumbaf/AidESILV | 04dad828048edffdd3662b24c415edce22fd3ea3 | [
"MIT"
] | null | null | null | djAidESILV/products/migrations/0002_auto_20200522_1921.py | Kulumbaf/AidESILV | 04dad828048edffdd3662b24c415edce22fd3ea3 | [
"MIT"
] | null | null | null | djAidESILV/products/migrations/0002_auto_20200522_1921.py | Kulumbaf/AidESILV | 04dad828048edffdd3662b24c415edce22fd3ea3 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.12 on 2020-05-22 19:21
from django.db import migrations, models
| 29.625 | 186 | 0.590717 |
b5d2438e72ede4149becee229525d2ab304971e9 | 939 | py | Python | vietocr/train.py | lzmisscc/vietocr | df0d9a53e714d08d6b0b4ee52ab46fbc0b991bf3 | [
"Apache-2.0"
] | null | null | null | vietocr/train.py | lzmisscc/vietocr | df0d9a53e714d08d6b0b4ee52ab46fbc0b991bf3 | [
"Apache-2.0"
] | null | null | null | vietocr/train.py | lzmisscc/vietocr | df0d9a53e714d08d6b0b4ee52ab46fbc0b991bf3 | [
"Apache-2.0"
] | null | null | null | import argparse
import logging
from vietocr.model.trainer import Trainer
from vietocr.tool.config import Cfg
import sys
sys.path.insert(0, './')
from char import character
logging.basicConfig(level=logging.INFO, )
if __name__ == '__main__':
main()
| 28.454545 | 68 | 0.707135 |
b5d34bad89b324ae2f55b466eea757d21d9ed3d6 | 363 | py | Python | django_eveonline_connector/migrations/0018_remove_evetoken_primary.py | KryptedGaming/django-eveonline-connector | 95fa146f4fcdf6bce84548b5cac1e5bf09cd72a0 | [
"MIT"
] | 3 | 2020-03-07T13:58:45.000Z | 2021-02-06T20:16:50.000Z | django_eveonline_connector/migrations/0018_remove_evetoken_primary.py | KryptedGaming/django-eveonline-connector | 95fa146f4fcdf6bce84548b5cac1e5bf09cd72a0 | [
"MIT"
] | 66 | 2019-12-17T20:54:22.000Z | 2021-06-10T20:39:04.000Z | django_eveonline_connector/migrations/0018_remove_evetoken_primary.py | KryptedGaming/django-eveonline-connector | 95fa146f4fcdf6bce84548b5cac1e5bf09cd72a0 | [
"MIT"
] | 2 | 2020-01-17T20:04:52.000Z | 2021-07-11T22:11:42.000Z | # Generated by Django 2.2.10 on 2020-02-27 18:21
from django.db import migrations
| 20.166667 | 78 | 0.630854 |
b5d4e05a5e5fe08d9de941f7f2c1980a53f27d2a | 598 | py | Python | Plug-and-play module/SematicEmbbedBlock.py | riciche/SimpleCVReproduction | 4075de39f9c61f1359668a413f6a5d98903fcf97 | [
"Apache-2.0"
] | 923 | 2020-01-11T06:36:53.000Z | 2022-03-31T00:26:57.000Z | Plug-and-play module/SematicEmbbedBlock.py | riciche/SimpleCVReproduction | 4075de39f9c61f1359668a413f6a5d98903fcf97 | [
"Apache-2.0"
] | 25 | 2020-02-27T08:35:46.000Z | 2022-01-25T08:54:19.000Z | Plug-and-play module/SematicEmbbedBlock.py | riciche/SimpleCVReproduction | 4075de39f9c61f1359668a413f6a5d98903fcf97 | [
"Apache-2.0"
] | 262 | 2020-01-02T02:19:40.000Z | 2022-03-23T04:56:16.000Z | import torch.nn as nn
"""
https://zhuanlan.zhihu.com/p/76378871
arxiv: 1804.03821
ExFuse
""" | 29.9 | 67 | 0.688963 |
b5d543bdac2737ffc8b7efa718d7cd3c1a92a7cd | 1,539 | py | Python | utilities.py | Fredrik-Oberg/volkswagencarnet | 877123f4053c66d11d1f99abfc1dc4bbc74effde | [
"MIT"
] | null | null | null | utilities.py | Fredrik-Oberg/volkswagencarnet | 877123f4053c66d11d1f99abfc1dc4bbc74effde | [
"MIT"
] | null | null | null | utilities.py | Fredrik-Oberg/volkswagencarnet | 877123f4053c66d11d1f99abfc1dc4bbc74effde | [
"MIT"
] | null | null | null | from datetime import date, datetime
from base64 import b64encode
from string import ascii_letters as letters, digits
from sys import argv
from os import environ as env
from os.path import join, dirname, expanduser
from itertools import product
import json
import logging
import re
_LOGGER = logging.getLogger(__name__)
def find_path(src, path):
    """Navigate a hierarchical dict structure using XPATH-like syntax.

    *path* may be a dotted string, a list of keys, or empty/None (in which
    case *src* itself is returned). Missing keys raise KeyError.

    >>> find_path(dict(a=1), 'a')
    1
    >>> find_path(dict(a=1), '')
    {'a': 1}
    >>> find_path(dict(a=None), 'a')
    >>> find_path(dict(a=1), 'b')
    Traceback (most recent call last):
    ...
    KeyError: 'b'
    >>> find_path(dict(a=dict(b=1)), 'a.b')
    1
    >>> find_path(dict(a=dict(b=1)), 'a')
    {'b': 1}
    >>> find_path(dict(a=dict(b=1)), 'a.c')
    Traceback (most recent call last):
    ...
    KeyError: 'c'
    """
    if not path:
        return src
    # Iterative descent instead of the original tail recursion.
    parts = path.split(".") if isinstance(path, str) else path
    node = src
    for key in parts:
        node = node[key]
    return node
def is_valid_path(src, path):
    """Return True when *path* resolves inside *src*, False otherwise.

    >>> is_valid_path(dict(a=1), 'a')
    True
    >>> is_valid_path(dict(a=1), '')
    True
    >>> is_valid_path(dict(a=1), None)
    True
    >>> is_valid_path(dict(a=1), 'b')
    False
    """
    try:
        find_path(src, path)
    except KeyError:
        return False
    return True
def camel2slug(s):
    """Convert camelCase to camel_case.

    >>> camel2slug('fooBar')
    'foo_bar'
    """
    # Prefix every ASCII uppercase letter with '_', then lowercase the whole
    # string and drop any leading underscore.
    underscored = re.sub("([A-Z])", "_\\1", s)
    return underscored.lower().lstrip("_")
| 19.481013 | 82 | 0.587394 |
b5d85732ed11a9abee1adac3c37bfb5f5d7fe0c2 | 9,874 | py | Python | nslsii/__init__.py | ke-zhang-rd/nslsii | d3f942cda8eac713ac625dbcf4285e108c04f154 | [
"BSD-3-Clause"
] | null | null | null | nslsii/__init__.py | ke-zhang-rd/nslsii | d3f942cda8eac713ac625dbcf4285e108c04f154 | [
"BSD-3-Clause"
] | null | null | null | nslsii/__init__.py | ke-zhang-rd/nslsii | d3f942cda8eac713ac625dbcf4285e108c04f154 | [
"BSD-3-Clause"
] | null | null | null | from IPython import get_ipython
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
def configure_base(user_ns, broker_name, *,
                   bec=True, epics_context=False, magics=True, mpl=True,
                   ophyd_logging=True, pbar=True):
    """
    Perform base setup and instantiation of important objects.

    This factory function instantiates essential objects for data collection
    environments at NSLS-II and adds them to the current namespace. In some
    cases (documented below), it will check whether certain variables already
    exist in the user name space, and will avoid creating them if so. The
    following are added:

    * ``RE`` -- a RunEngine
        This is created only if an ``RE`` instance does not currently exist in
        the namespace.
    * ``db`` -- a Broker (from "databroker"), subscribed to ``RE``
    * ``bec`` -- a BestEffortCallback, subscribed to ``RE``
    * ``peaks`` -- an alias for ``bec.peaks``
    * ``sd`` -- a SupplementalData preprocessor, added to ``RE.preprocessors``
    * ``pbar_manager`` -- a ProgressBarManager, set as the ``RE.waiting_hook``

    And it performs some low-level configuration:

    * creates a context in ophyd's control layer (``ophyd.setup_ophyd()``)
    * turns on interactive plotting (``matplotlib.pyplot.ion()``)
    * bridges the RunEngine and Qt event loops
      (``bluesky.utils.install_kicker()``)

    Parameters
    ----------
    user_ns: dict
        a namespace --- for example, ``get_ipython().user_ns``
    broker_name : Union[str, Broker]
        Name of databroker configuration or a Broker instance.
    bec : boolean, optional
        True by default. Set False to skip BestEffortCallback.
    epics_context : boolean, optional
        False by default. Set True to run ``setup_ophyd()``.
    magics : boolean, optional
        True by default. Set False to skip registration of custom IPython
        magics.
    mpl : boolean, optional
        True by default. Set False to skip matplotlib ``ion()`` and event-loop
        bridging.
    ophyd_logging : boolean, optional
        True by default. When False, a plain ERROR-level ``StreamHandler``
        is attached to the ophyd logger (see NOTE in the body).
    pbar : boolean, optional
        True by default. Set False to skip ProgressBarManager.

    Returns
    -------
    names : list
        list of names added to the namespace

    Examples
    --------
    Configure IPython for CHX.

    >>>> configure_base(get_ipython().user_ns, 'chx');
    """
    ns = {}  # We will update user_ns with this at the end.
    # Set up a RunEngine and use metadata backed by a sqlite file.
    from bluesky import RunEngine
    from bluesky.utils import get_history
    # if RunEngine already defined grab it
    # useful when users make their own custom RunEngine
    if 'RE' in user_ns:
        RE = user_ns['RE']
    else:
        RE = RunEngine(get_history())
        ns['RE'] = RE
    # Set up SupplementalData.
    # (This is a no-op until devices are added to it,
    # so there is no need to provide a 'skip_sd' switch.)
    from bluesky import SupplementalData
    sd = SupplementalData()
    RE.preprocessors.append(sd)
    ns['sd'] = sd
    if isinstance(broker_name, str):
        # Set up a Broker.
        from databroker import Broker
        db = Broker.named(broker_name)
        ns['db'] = db
    else:
        db = broker_name
    # Every document emitted by the RunEngine is inserted into the Broker.
    RE.subscribe(db.insert)
    if pbar:
        # Add a progress bar.
        from bluesky.utils import ProgressBarManager
        pbar_manager = ProgressBarManager()
        RE.waiting_hook = pbar_manager
        ns['pbar_manager'] = pbar_manager
    if magics:
        # Register bluesky IPython magics.
        from bluesky.magics import BlueskyMagics
        get_ipython().register_magics(BlueskyMagics)
    if bec:
        # Set up the BestEffortCallback.
        from bluesky.callbacks.best_effort import BestEffortCallback
        _bec = BestEffortCallback()
        RE.subscribe(_bec)
        ns['bec'] = _bec
        ns['peaks'] = _bec.peaks  # just as alias for less typing
    if mpl:
        # Import matplotlib and put it in interactive mode.
        import matplotlib.pyplot as plt
        ns['plt'] = plt
        plt.ion()
        # Make plots update live while scans run.
        from bluesky.utils import install_kicker
        install_kicker()
    if epics_context:
        # Create a context in the underlying EPICS client.
        from ophyd import setup_ophyd
        setup_ophyd()
    if not ophyd_logging:
        # NOTE(review): despite the parameter's name, this manual handler is
        # installed when ophyd_logging is *False* -- i.e. it acts as a
        # fallback when ophyd's own logging setup is disabled. Confirm the
        # intended semantics before relying on the docstring alone.
        # Turn on error-level logging, particularly useful for knowing when
        # pyepics callbacks fail.
        import logging
        import ophyd.ophydobj
        ch = logging.StreamHandler()
        ch.setLevel(logging.ERROR)
        ophyd.ophydobj.logger.addHandler(ch)
    # convenience imports
    # some of the * imports are for 'back-compatibility' of a sort -- we have
    # taught BL staff to expect LiveTable and LivePlot etc. to be in their
    # namespace
    # NOTE(review): 'import_star' is not defined in this excerpt; presumably
    # a module-level helper that copies a module's public names into 'ns'.
    import numpy as np
    ns['np'] = np
    import bluesky.callbacks
    ns['bc'] = bluesky.callbacks
    import_star(bluesky.callbacks, ns)
    import bluesky.plans
    ns['bp'] = bluesky.plans
    import_star(bluesky.plans, ns)
    import bluesky.plan_stubs
    ns['bps'] = bluesky.plan_stubs
    import_star(bluesky.plan_stubs, ns)
    # special-case the commonly-used mv / mvr and its aliases mov / movr4
    ns['mv'] = bluesky.plan_stubs.mv
    ns['mvr'] = bluesky.plan_stubs.mvr
    ns['mov'] = bluesky.plan_stubs.mov
    ns['movr'] = bluesky.plan_stubs.movr
    import bluesky.preprocessors
    ns['bpp'] = bluesky.preprocessors
    import bluesky.callbacks.broker
    import_star(bluesky.callbacks.broker, ns)
    import bluesky.simulators
    import_star(bluesky.simulators, ns)
    # Publish everything collected above into the caller's namespace.
    user_ns.update(ns)
    return list(ns)
def configure_olog(user_ns, *, callback=None, subscribe=True):
    """
    Set up a callback that publishes some RunEngine metadata to the Olog.

    Also adds the public contents of pyOlog.ophyd_tools to the namespace.

    This is expected to be run after :func:`configure_base`. It expects to
    find an instance of RunEngine named ``RE`` in the user namespace.
    Additionally, if the user namespace contains the name ``logbook``, that
    is expected to be an instance of ``pyOlog.SimpleOlogClient``.

    Parameters
    ----------
    user_ns: dict
        a namespace --- for example, ``get_ipython().user_ns``
    callback : callable, optional
        a hook for customizing the logbook_cb_factory; if None a default is
        used
    subscribe : boolean, optional
        True by default. Set to False to skip the subscription. (You still
        get pyOlog.ophyd_tools.)

    Returns
    -------
    names : list
        list of names added to the namespace

    Examples
    --------
    Configure the Olog.

    >>>> configure_olog(get_ipython().user_ns);
    """
    # Conceptually our task is simple: add a subscription to the RunEngine that
    # publishes to the Olog using the Python wrapper of its REST API, pyOlog.
    # In practice this is messy because we have deal with the many-layered API
    # of pyOlog and, more importantly, ensure that slowness or errors from the
    # Olog do not affect the run. Historically the Olog deployment has not been
    # reliable, so it is important to be robust against these issues. Of
    # course, by ignoring Olog errors, we leave gaps in the log, which is not
    # great, but since all data is saved to a databroker anyway, we can always
    # re-generate them later.
    ns = {}  # We will update user_ns with this at the end.
    from bluesky.callbacks.olog import logbook_cb_factory
    from functools import partial
    from pyOlog import SimpleOlogClient
    import queue
    import threading
    from warnings import warn
    # This is for pyOlog.ophyd_tools.get_logbook, which simply looks for
    # a variable called 'logbook' in the global IPython namespace.
    if 'logbook' in user_ns:
        simple_olog_client = user_ns['logbook']
    else:
        simple_olog_client = SimpleOlogClient()
        ns['logbook'] = simple_olog_client
    if subscribe:
        if callback is None:
            # list of logbook names to publish to
            LOGBOOKS = ('Data Acquisition',)
            generic_logbook_func = simple_olog_client.log
            configured_logbook_func = partial(generic_logbook_func,
                                              logbooks=LOGBOOKS)
            callback = logbook_cb_factory(configured_logbook_func)
        # NOTE(review): 'submit_to_olog' and 'send_to_olog_queue' are not
        # defined in this excerpt -- presumably helper functions (likely
        # nested definitions stripped from this view) that drain the queue
        # on the worker thread and enqueue 'start' documents respectively.
        # Confirm against the full module.
        olog_queue = queue.Queue(maxsize=100)
        olog_thread = threading.Thread(target=submit_to_olog,
                                       args=(olog_queue, callback),
                                       daemon=True)
        olog_thread.start()
        RE = user_ns['RE']
        RE.subscribe(send_to_olog_queue, 'start')
    import pyOlog.ophyd_tools
    import_star(pyOlog.ophyd_tools, ns)
    # Publish everything collected above into the caller's namespace.
    user_ns.update(ns)
    return list(ns)
| 34.404181 | 79 | 0.645331 |
b5d8aec11bfc5cc12bac4a3e909d08cecced6658 | 6,260 | py | Python | utils/depot.py | Nikronic/Optimized-MDVRP | 92587bf4c110c7e6597cc3120dd0556a6e170ce2 | [
"MIT"
] | 16 | 2019-09-08T13:04:10.000Z | 2022-03-04T06:52:34.000Z | utils/depot.py | zhangruijuan/Optimized-MDVRP | 92587bf4c110c7e6597cc3120dd0556a6e170ce2 | [
"MIT"
] | 6 | 2019-09-19T20:38:19.000Z | 2019-10-14T17:35:54.000Z | utils/depot.py | Nikronic/Optimized-MDVRP | 92587bf4c110c7e6597cc3120dd0556a6e170ce2 | [
"MIT"
] | 4 | 2021-01-15T11:45:16.000Z | 2021-12-18T14:14:54.000Z | import numpy as np
from typing import List
from copy import deepcopy
from utils.customer import Customer
| 35.367232 | 118 | 0.585304 |
b5d915f6cc267b773bbe24b2332fae333a3982c5 | 714 | py | Python | fake.py | Wsky51/dfs-node-restapi | bab7605c609d4b53cd11686a576b74c1ae2871b7 | [
"Apache-2.0"
] | null | null | null | fake.py | Wsky51/dfs-node-restapi | bab7605c609d4b53cd11686a576b74c1ae2871b7 | [
"Apache-2.0"
] | null | null | null | fake.py | Wsky51/dfs-node-restapi | bab7605c609d4b53cd11686a576b74c1ae2871b7 | [
"Apache-2.0"
] | null | null | null | """create fake data to the db file"""
from config import data_nodes, get_db
from type import DataNodeStatus, DataNode
from datetime import timedelta
from config import get_second_datetime
if __name__ == '__main__':
create_fake_data()
| 24.62069 | 49 | 0.648459 |
b5d9899c07fca487f770f0c61e19c1fd8ac3a831 | 78 | py | Python | config.py | NormanLo4319/Project-1 | a7b6bf6adc681a94cc23be5934ddbed1cf7ab6a6 | [
"MIT"
] | 1 | 2020-07-19T07:10:01.000Z | 2020-07-19T07:10:01.000Z | config.py | NormanLo4319/Food-Enviroment-Project | a7b6bf6adc681a94cc23be5934ddbed1cf7ab6a6 | [
"MIT"
] | null | null | null | config.py | NormanLo4319/Food-Enviroment-Project | a7b6bf6adc681a94cc23be5934ddbed1cf7ab6a6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
gkey="Enter Your Key Here"
| 8.666667 | 26 | 0.602564 |
b5da3aa622fc6ae31ee50b900da3081886393cc7 | 215 | py | Python | category_tree/helper.py | bharathramh92/shop | 0c5800b2d36fbe1bfffaf555c3dc741d020aa5d7 | [
"MIT"
] | 1 | 2016-05-27T22:13:37.000Z | 2016-05-27T22:13:37.000Z | category_tree/helper.py | bharathramh92/shop | 0c5800b2d36fbe1bfffaf555c3dc741d020aa5d7 | [
"MIT"
] | null | null | null | category_tree/helper.py | bharathramh92/shop | 0c5800b2d36fbe1bfffaf555c3dc741d020aa5d7 | [
"MIT"
] | null | null | null | from category_tree.categories import data
| 26.875 | 56 | 0.75814 |
b5db0b0b72cf05ff56cc67988018bcfa4797221d | 371 | py | Python | tests/pull_keys.py | patleeman/geckoboard_push | 52c05db22b3c630d326a9650551720f583f0168f | [
"MIT"
] | null | null | null | tests/pull_keys.py | patleeman/geckoboard_push | 52c05db22b3c630d326a9650551720f583f0168f | [
"MIT"
] | null | null | null | tests/pull_keys.py | patleeman/geckoboard_push | 52c05db22b3c630d326a9650551720f583f0168f | [
"MIT"
] | null | null | null | '''
Module to pull keys from test geckoboard widgets.
'''
import os
import json
if __name__ == '__main__':
print(get_keys()) | 23.1875 | 71 | 0.692722 |
b5db74f8420d00fdc906f19f599f41aad18c69af | 2,596 | py | Python | pajbot/web/common/menu.py | JoachimFlottorp/pajbot | 4fb88c403dedb20d95be80e38da72be1ed064901 | [
"MIT"
] | 128 | 2015-12-28T01:02:30.000Z | 2019-05-24T21:20:50.000Z | pajbot/web/common/menu.py | JoachimFlottorp/pajbot | 4fb88c403dedb20d95be80e38da72be1ed064901 | [
"MIT"
] | 277 | 2015-05-03T18:48:57.000Z | 2019-05-23T17:41:28.000Z | pajbot/web/common/menu.py | JoachimFlottorp/pajbot | 4fb88c403dedb20d95be80e38da72be1ed064901 | [
"MIT"
] | 96 | 2015-08-07T18:49:50.000Z | 2019-05-20T19:49:27.000Z | from __future__ import annotations
from typing import Any, Dict, List, Union
import logging
from pajbot.web.utils import get_cached_enabled_modules
log = logging.getLogger(__name__)
| 35.081081 | 108 | 0.571649 |
b5db8ac1529ed13c3cad056d88e711f36bbfbbe1 | 611 | py | Python | Python/463.py | FlyAndNotDown/LeetCode | 889819ff7f64819e966fc6f9dd80110cf2bf6d3c | [
"MIT"
] | 4 | 2018-06-18T05:39:25.000Z | 2022-01-04T07:35:52.000Z | Python/463.py | FlyAndNotDown/LeetCode | 889819ff7f64819e966fc6f9dd80110cf2bf6d3c | [
"MIT"
] | 20 | 2019-11-30T03:42:40.000Z | 2020-05-17T03:25:43.000Z | Python/463.py | FlyAndNotDown/LeetCode | 889819ff7f64819e966fc6f9dd80110cf2bf6d3c | [
"MIT"
] | 2 | 2020-02-08T14:10:42.000Z | 2021-09-23T13:51:36.000Z | """
@no 463
@name Island Perimeter
"""
| 30.55 | 80 | 0.392799 |
b5dde242388a3c0b90abd4420143d4c4d72acbeb | 914 | py | Python | docker_retag/utils/auth_helper.py | aiopsclub/docker_retag | 0019917b0cdd7860c7ff79afdb78101878f5c1b1 | [
"MIT"
] | null | null | null | docker_retag/utils/auth_helper.py | aiopsclub/docker_retag | 0019917b0cdd7860c7ff79afdb78101878f5c1b1 | [
"MIT"
] | null | null | null | docker_retag/utils/auth_helper.py | aiopsclub/docker_retag | 0019917b0cdd7860c7ff79afdb78101878f5c1b1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import requests
| 26.114286 | 80 | 0.682713 |
b5de2f232c7693a7a9e178d8efeaacaaaf172cb4 | 1,081 | py | Python | app/__init__.py | SomeoneLixin/api-dock | 3958a3a3286ae7f8802df9aba5ece2908ca4361e | [
"MIT"
] | 4 | 2018-05-07T15:39:17.000Z | 2019-07-03T21:28:10.000Z | app/__init__.py | SomeoneLixin/api-dock | 3958a3a3286ae7f8802df9aba5ece2908ca4361e | [
"MIT"
] | 4 | 2020-09-05T10:57:19.000Z | 2021-05-09T16:01:22.000Z | app/__init__.py | SomeoneLixin/api-dock | 3958a3a3286ae7f8802df9aba5ece2908ca4361e | [
"MIT"
] | 1 | 2018-05-09T07:57:03.000Z | 2018-05-09T07:57:03.000Z | from flask import Flask, g
from flask_cors import CORS
from flask_jwt_extended import JWTManager
from config import config
from app.models import db, ma
from app.models.RevokedToken import RevokedToken
| 29.216216 | 80 | 0.719704 |
b5df02ad3bc4934c674cd77a38e8acef0d4d0b9f | 730 | py | Python | Snippets/auto_scroll.py | ColinShark/Pyrogram-Snippets | 50ede9ca9206bd6d66c6877217b4a80b4f845294 | [
"WTFPL"
] | 59 | 2021-01-07T16:19:48.000Z | 2022-02-22T06:56:36.000Z | Snippets/auto_scroll.py | Mrvishal2k2/Pyrogram-Snippets | d4e66876f6aff1252dfb88423fedd66e18057446 | [
"WTFPL"
] | 4 | 2019-10-14T14:02:38.000Z | 2020-11-06T11:47:03.000Z | Snippets/auto_scroll.py | ColinShark/Pyrogram-Snippets | 50ede9ca9206bd6d66c6877217b4a80b4f845294 | [
"WTFPL"
] | 26 | 2021-03-02T14:31:51.000Z | 2022-03-23T21:19:14.000Z | # Send .autoscroll in any chat to automatically read all sent messages until you call
# .autoscroll again. This is useful if you have Telegram open on another screen.
from pyrogram import Client, filters
from pyrogram.types import Message
app = Client("my_account")
f = filters.chat([])
app.run()
| 25.172414 | 85 | 0.710959 |
b5e13346685449cfbebc7876faf4f41723fbe5c9 | 2,977 | py | Python | _demos/paint.py | imdaveho/intermezzo | 3fe4824a747face996e301ca5190caec0cb0a6fd | [
"MIT"
] | 8 | 2018-02-26T16:24:07.000Z | 2021-06-30T07:40:52.000Z | _demos/paint.py | imdaveho/intermezzo | 3fe4824a747face996e301ca5190caec0cb0a6fd | [
"MIT"
] | null | null | null | _demos/paint.py | imdaveho/intermezzo | 3fe4824a747face996e301ca5190caec0cb0a6fd | [
"MIT"
] | null | null | null | from intermezzo import Intermezzo as mzo
curCol = [0]
curRune = [0]
backbuf = []
bbw, bbh = 0, 0
runes = [' ', '', '', '', '']
colors = [
mzo.color("Black"),
mzo.color("Red"),
mzo.color("Green"),
mzo.color("Yellow"),
mzo.color("Blue"),
mzo.color("Magenta"),
mzo.color("Cyan"),
mzo.color("White"),
]
if __name__ == "__main__":
try:
main()
finally:
mzo.close()
| 29.186275 | 88 | 0.518979 |
b5e16df4333ead8fee7050f33874cfa2a8d52eb0 | 1,896 | py | Python | amt/media_reader_cli.py | lsxta/amt | 7dcff9b1ce570abe103d0d8c50fd334f2c93af7d | [
"MIT"
] | 5 | 2021-12-22T08:49:23.000Z | 2022-02-22T12:38:40.000Z | amt/media_reader_cli.py | lsxta/amt | 7dcff9b1ce570abe103d0d8c50fd334f2c93af7d | [
"MIT"
] | 1 | 2022-01-30T00:51:05.000Z | 2022-02-03T04:59:42.000Z | amt/media_reader_cli.py | lsxta/amt | 7dcff9b1ce570abe103d0d8c50fd334f2c93af7d | [
"MIT"
] | 1 | 2022-01-29T09:38:16.000Z | 2022-01-29T09:38:16.000Z | import logging
from .media_reader import MediaReader
from .util.media_type import MediaType
| 38.693878 | 188 | 0.642405 |
b5e250ffeccc9fb9e0d710d9d521ebecc7097405 | 1,272 | py | Python | src/webapi/libs/deps/__init__.py | VisionTale/StreamHelper | 29a5e5d5c68401f2c1d1b9cf54a7c68fb41d623a | [
"MIT"
] | null | null | null | src/webapi/libs/deps/__init__.py | VisionTale/StreamHelper | 29a5e5d5c68401f2c1d1b9cf54a7c68fb41d623a | [
"MIT"
] | 37 | 2020-12-16T06:30:22.000Z | 2022-03-28T03:04:28.000Z | src/webapi/libs/deps/__init__.py | VisionTale/StreamHelper | 29a5e5d5c68401f2c1d1b9cf54a7c68fb41d623a | [
"MIT"
] | null | null | null | """
Dependency management package.
"""
def debug_print(message: str, verbose: bool):
    """
    Emit *message* on stdout when verbose output is enabled.

    :param message: text to emit
    :param verbose: if False the call is a no-op
    :return: None
    """
    if not verbose:
        return
    print(message)
def download_and_unzip_archive(url: str, zip_file_fp: str, static_folder: str, remove: bool = True, verbose: bool = True):
    """
    Downloads and unzips an archive.

    :param url: url to request
    :param zip_file_fp: filepath to save the downloaded zip archive to
    :param static_folder: folder for flasks static files (extraction target)
    :param remove: whether to remove the zip after unpacking, defaults to true.
    :param verbose: whether to print information, defaults to true.
    :exception OSError: os.remove, requests.get, open, TextIOWrapper.write, ZipFile, ZipFile.extractall
    """
    from requests import get

    response = get(url)
    debug_print("Saving archive..", verbose)
    with open(zip_file_fp, 'wb') as f:
        f.write(response.content)
    debug_print("Extracting..", verbose)
    from zipfile import ZipFile
    with ZipFile(zip_file_fp, 'r') as zip_file:
        zip_file.extractall(static_folder)
    if remove:
        debug_print("Removing archive..", verbose)
        # Alias the import so it does not shadow (and rebind) the boolean
        # 'remove' parameter, which the original code did.
        from os import remove as remove_file
        remove_file(zip_file_fp)
b5e3ba2877ce6a63efd56ee6ed3e28f80e3fe47d | 1,096 | py | Python | fixture/soap.py | DiastroniX/python_training_mantis | 86f145285bea716246788d7967e1de7c23661bae | [
"Apache-2.0"
] | null | null | null | fixture/soap.py | DiastroniX/python_training_mantis | 86f145285bea716246788d7967e1de7c23661bae | [
"Apache-2.0"
] | null | null | null | fixture/soap.py | DiastroniX/python_training_mantis | 86f145285bea716246788d7967e1de7c23661bae | [
"Apache-2.0"
] | null | null | null | from suds.client import Client
from suds import WebFault
from model.project import Project
| 34.25 | 117 | 0.588504 |
b5e50a13752cec91e8412a4602fb057eaceaa6b0 | 1,113 | py | Python | demos/runner/validate.py | Tanbobobo/DL-starter | be4678171bd51ae9e4f61079fa6422e3378d7ce4 | [
"Apache-2.0"
] | null | null | null | demos/runner/validate.py | Tanbobobo/DL-starter | be4678171bd51ae9e4f61079fa6422e3378d7ce4 | [
"Apache-2.0"
] | null | null | null | demos/runner/validate.py | Tanbobobo/DL-starter | be4678171bd51ae9e4f61079fa6422e3378d7ce4 | [
"Apache-2.0"
] | null | null | null | import torch
import wandb
def val(
        criterion=None,
        metric=None,
        loader=None,
        model=None,
        device=None
):
    r'''
    Run one validation pass over ``loader`` with gradient tracking disabled.

    Args:
        criterion: a differentiable function used to compute the loss
            (gradients are not needed here since no backward pass is run)
        metric: an accumulator exposing ``accumulate(pred, gt)`` and a
            ``value`` property, used to score and save the best model
        loader: a data iterator yielding dicts with ``'img'`` and ``'gt'``
        model: model
        device: calculation device, cpu or cuda.
    Returns:
        tuple of (model, metric score on the unseen dataset, mean validation loss)
    '''
    model.eval()
    model.to(device)
    loss_value_mean = 0
    with torch.no_grad():
        # The loop index from the original ``enumerate`` was unused,
        # so iterate the loader directly.
        for data in loader:
            img = data['img'].to(device)
            gt = data['gt'].to(device)
            pred = model(img)
            loss_value = criterion(pred, gt)
            loss_value_mean += loss_value
            metric.accumulate(pred, gt)
            wandb.log({'val_loss': loss_value})
    metric_value = metric.value
    loss_value_mean = loss_value_mean / len(loader)
    return model, metric_value, loss_value_mean
| 27.146341 | 98 | 0.574124 |
b5e65e7ea71fdd5c4688f420edd49d985bd3eb75 | 89 | py | Python | coding/calculate-5-6/code.py | mowshon/python-quiz | 215fb23dbb0fa42b438f988e49172b87b48bade3 | [
"MIT"
] | 2 | 2020-07-17T21:08:26.000Z | 2020-08-16T03:12:07.000Z | coding/calculate-5-6/code.py | mowshon/python-quiz | 215fb23dbb0fa42b438f988e49172b87b48bade3 | [
"MIT"
] | 2 | 2021-06-08T22:04:35.000Z | 2022-01-13T03:03:32.000Z | coding/calculate-5-6/code.py | mowshon/python-quiz | 215fb23dbb0fa42b438f988e49172b87b48bade3 | [
"MIT"
] | null | null | null |
print(calculate(5, 6)) | 17.8 | 28 | 0.629213 |
b5e76e091ee3230443db9902e3df57b4dbeb04c4 | 4,428 | py | Python | plot_fig07e_varying.py | victorcroisfelt/cf-ra-spatial-separability | 60611c85079dd13848c70e3192331ea2a9f55138 | [
"MIT"
] | null | null | null | plot_fig07e_varying.py | victorcroisfelt/cf-ra-spatial-separability | 60611c85079dd13848c70e3192331ea2a9f55138 | [
"MIT"
] | null | null | null | plot_fig07e_varying.py | victorcroisfelt/cf-ra-spatial-separability | 60611c85079dd13848c70e3192331ea2a9f55138 | [
"MIT"
] | 2 | 2022-01-08T12:18:43.000Z | 2022-02-23T07:59:18.000Z | ########################################
# plot_fig07d_anaa_practical.py
#
# Description. Script used to actually plot Fig. 07 (d) of the paper.
#
# Author. @victorcroisfelt
#
# Date. December 29, 2021
#
# This code is part of the code package used to generate the numeric results
# of the paper:
#
# Croisfelt, V., Abro, T., and Marinello, J. C., User-Centric Perspective in
# Random Access Cell-Free Aided by Spatial Separability, arXiv e-prints, 2021.
#
# Available on:
#
# https://arxiv.org/abs/2107.10294
#
# Comment. Please, make sure that you have the required data files. They are
# obtained by running the scripts:
#
# - data_fig07_08_bcf.py
# - data_fig07_08_cellular.py
# - data_fig07_08_cellfree.py
#
########################################
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import warnings
########################################
# Preamble
########################################
# Comment the line below to see possible warnings related to python version
# issues
warnings.filterwarnings("ignore")
axis_font = {'size':'12'}
plt.rcParams.update({'font.size': 12})
matplotlib.rc('xtick', labelsize=12)
matplotlib.rc('ytick', labelsize=12)
matplotlib.rc('text', usetex=True)
matplotlib.rcParams['text.latex.preamble']=[r"\usepackage{amsmath}"]
########################################
# Loading data
########################################
data_bcf = np.load('data/fig07e_bcf.npz')
data_cellfree_est1 = np.load('data/fig07e_cellfree_est1.npz')
data_cellfree_est2 = np.load('data/fig07e_cellfree_est2.npz')
data_cellfree_est3 = np.load('data/fig07e_cellfree_est3.npz')
# Extract x-axis
L_range = data_cellfree_est1["L_range"]
N_range = data_cellfree_est1["N_range"]
# Extract ANAA
anaa_bcf = data_bcf["anaa"]
anaa_cellfree_est1 = data_cellfree_est1["anaa"]
anaa_cellfree_est2 = data_cellfree_est2["anaa"]
anaa_cellfree_est3 = data_cellfree_est3["anaa"]
########################################
# Plot
########################################
# Fig. 07e
fig, ax = plt.subplots(figsize=(4/3 * 3.15, 2))
#fig, ax = plt.subplots(figsize=(1/3 * (6.30), 3))
# Go through all values of N
for nn, N in enumerate(N_range):
plt.gca().set_prop_cycle(None)
if N == 1:
# BCF
ax.plot(L_range[:-2], anaa_bcf[:-2], linewidth=1.5, linestyle=(0, (3, 1, 1, 1, 1, 1)), color='black', label='BCF')
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=1.5, linestyle='--', color='black', label='CF-SUCRe: Est. 1')
ax.plot(L_range[:-2], anaa_cellfree_est2[:-2, nn], linewidth=1.5, linestyle='-.', color='black', label='CF-SUCRe: Est. 2')
ax.plot(L_range[:-2], anaa_cellfree_est3[:-2, nn], linewidth=1.5, linestyle=':', color='black', label='CF-SUCRe: Est. 3')
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=1.5, linestyle='--')
ax.plot(L_range[:-2], anaa_cellfree_est2[:-2, nn], linewidth=1.5, linestyle='-.')
ax.plot(L_range[:-2], anaa_cellfree_est3[:-2, nn], linewidth=1.5, linestyle=':')
elif N == 8:
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=1.5, linestyle='--')
ax.plot(L_range[:-2], anaa_cellfree_est2[:-2, nn], linewidth=1.5, linestyle='-.')
ax.plot(L_range[:-2], anaa_cellfree_est3[:-2, nn], linewidth=1.5, linestyle=':')
plt.gca().set_prop_cycle(None)
if N == 1:
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=0.0, marker='^', color='black', label='$N=1$')
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=0.0, marker='^')
ax.plot(L_range[:-2], anaa_cellfree_est2[:-2, nn], linewidth=0.0, marker='^')
ax.plot(L_range[:-2], anaa_cellfree_est3[:-2, nn], linewidth=0.0, marker='^')
elif N == 8:
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=0.0, marker='v', color='black', label='$N=8$')
ax.plot(L_range[:-2], anaa_cellfree_est1[:-2, nn], linewidth=0.0, marker='v')
ax.plot(L_range[:-2], anaa_cellfree_est2[:-2, nn], linewidth=0.0, marker='v')
ax.plot(L_range[:-2], anaa_cellfree_est3[:-2, nn], linewidth=0.0, marker='v')
ax.set_xscale('function', functions=(forward, inverse))
ax.set_xticks(L_range[:-2])
ax.set_yticks(np.array([1, 3, 5, 7, 9, 10]))
ax.grid(visible=True, alpha=0.25, linestyle='--')
ax.set_xlabel(r'number of APs $L$')
ax.set_ylabel('ANAA')
ax.legend(fontsize='xx-small', markerscale=.5)
plt.show()
| 30.537931 | 124 | 0.630759 |
b5e97f4578877e1fcf5bd928b8d18930e062681c | 6,697 | py | Python | Meters/IEC/Datasets/get_time.py | Runamook/PyCharmProjects | 1b1a063345e052451f00e3fdea82e31bdd2a0cae | [
"MIT"
] | null | null | null | Meters/IEC/Datasets/get_time.py | Runamook/PyCharmProjects | 1b1a063345e052451f00e3fdea82e31bdd2a0cae | [
"MIT"
] | null | null | null | Meters/IEC/Datasets/get_time.py | Runamook/PyCharmProjects | 1b1a063345e052451f00e3fdea82e31bdd2a0cae | [
"MIT"
] | null | null | null | import datetime
from time import sleep
import re
import pytz
# try:
# from .emhmeter import MeterBase, create_input_vars, logger
# except ModuleNotFoundError:
# from emhmeter import MeterBase, create_input_vars, logger
# TODO: Not working
if __name__ == "__main__":
meter = {
"meterNumber": "04180616",
"Manufacturer": "",
"ip": "10.124.2.48",
"InstallationDate": "2018-10-10T10:00:00",
"IsActive": True,
"voltageRatio": 200,
"currentRatio": 10,
"totalFactor": 210
}
meter = {
"meterNumber": "05296170",
"Manufacturer": "EMH",
"ip": "10.124.2.120",
"InstallationDate": "2019-02-20T09:00:00",
"IsActive": True,
"voltageRatio": 200,
"currentRatio": 15,
"totalFactor": 215
}
variables = {"port": MeterBase.get_port(meter["ip"]),
"timestamp": MeterBase.get_dt(),
"data_handler": "P.01",
"exporter": "Zabbix",
"server": "192.168.33.33",
"meter": meter
}
logger.setLevel("DEBUG")
m = GetTime(variables)
data = m.get()
print(m.parse(data))
| 31.441315 | 102 | 0.551441 |
b5ea159a84e98d9a3984e6fe5b31678efa676891 | 143 | py | Python | References/Geovana Neves/TCC_Geovana_Neves_GitHub/SUAVE_modifications/SUAVE-feature-constant_throttle_EAS/trunk/SUAVE/Analyses/Results.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | References/Geovana Neves/TCC_Geovana_Neves_GitHub/SUAVE_modifications/SUAVE-feature-constant_throttle_EAS/trunk/SUAVE/Analyses/Results.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | References/Geovana Neves/TCC_Geovana_Neves_GitHub/SUAVE_modifications/SUAVE-feature-constant_throttle_EAS/trunk/SUAVE/Analyses/Results.py | Vinicius-Tanigawa/Undergraduate-Research-Project | e92372f07882484b127d7affe305eeec2238b8a9 | [
"MIT"
] | null | null | null | # Results.py
#
# Created: Jan 2015, T. Lukacyzk
# Modified: Feb 2016, T. MacDonald
from SUAVE.Core import Data | 15.888889 | 34 | 0.699301 |
b5ea1cb63e2208d12c4791c91ece989cd820bf44 | 3,889 | py | Python | instagrapi/direct.py | chaulaode1257/instagrapi | cfb8cb53d3a63092c0146f3a0b7a086c760908c9 | [
"MIT"
] | 11 | 2021-01-09T22:52:30.000Z | 2022-03-22T18:33:38.000Z | instagrapi/direct.py | chaulaode1257/instagrapi | cfb8cb53d3a63092c0146f3a0b7a086c760908c9 | [
"MIT"
] | null | null | null | instagrapi/direct.py | chaulaode1257/instagrapi | cfb8cb53d3a63092c0146f3a0b7a086c760908c9 | [
"MIT"
] | 4 | 2020-12-26T06:14:53.000Z | 2022-01-05T05:00:16.000Z | import re
from typing import List
from .utils import dumps
from .types import DirectThread, DirectMessage
from .exceptions import ClientNotFoundError, DirectThreadNotFound
from .extractors import extract_direct_thread, extract_direct_message
| 35.678899 | 108 | 0.558498 |
b5eee5ae8e8ac24bba961d0d4420546bd6f06e1d | 26,090 | py | Python | src/main/python/cybercaptain/visualization/bar.py | FHNW-CyberCaptain/CyberCaptain | 07c989190e997353fbf57eb7a386947d6ab8ffd5 | [
"MIT"
] | 1 | 2018-10-01T10:59:55.000Z | 2018-10-01T10:59:55.000Z | src/main/python/cybercaptain/visualization/bar.py | FHNW-CyberCaptain/CyberCaptain | 07c989190e997353fbf57eb7a386947d6ab8ffd5 | [
"MIT"
] | null | null | null | src/main/python/cybercaptain/visualization/bar.py | FHNW-CyberCaptain/CyberCaptain | 07c989190e997353fbf57eb7a386947d6ab8ffd5 | [
"MIT"
] | 1 | 2021-11-01T00:09:00.000Z | 2021-11-01T00:09:00.000Z | """
This module contains the visualization bar class.
"""
import glob
import os
import re
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import matplotlib.cm as cmx
from mpl_toolkits.mplot3d import Axes3D
from matplotlib.ticker import FuncFormatter
from cybercaptain.utils.exceptions import ValidationError
from cybercaptain.visualization.base import visualization_base
from cybercaptain.utils.jsonFileHandler import json_file_reader
from cybercaptain.utils.helpers import str2bool | 41.086614 | 175 | 0.617555 |
b5f0389774cedeaa041026bfccf255de23607efa | 3,560 | py | Python | app/profiles/schemas/update.py | MrPeker/acikkaynak-service | 21c3f2faaa84342d2fa95709293bc84d1e2a23ae | [
"Apache-2.0"
] | 5 | 2021-02-28T22:29:13.000Z | 2021-11-29T00:24:28.000Z | app/profiles/schemas/update.py | MrPeker/acikkaynak-service | 21c3f2faaa84342d2fa95709293bc84d1e2a23ae | [
"Apache-2.0"
] | null | null | null | app/profiles/schemas/update.py | MrPeker/acikkaynak-service | 21c3f2faaa84342d2fa95709293bc84d1e2a23ae | [
"Apache-2.0"
] | 3 | 2021-03-03T19:56:30.000Z | 2021-03-06T22:10:35.000Z | import graphene
from app.common.library import graphql
from app.common.models import City
from ..models import Profile
from .queries import ProfileNode
# queries
# mutations
| 31.504425 | 85 | 0.601404 |
b5f1bcd8c2a8c9268b813650480c225371c73233 | 7,401 | py | Python | kubevirt/models/v1_generation_status.py | ansijain/client-python | 444ab92a68371c1ccd89314753fa7ab5c4ac9bbe | [
"Apache-2.0"
] | 21 | 2018-02-21T23:59:28.000Z | 2021-12-08T05:47:37.000Z | kubevirt/models/v1_generation_status.py | ansijain/client-python | 444ab92a68371c1ccd89314753fa7ab5c4ac9bbe | [
"Apache-2.0"
] | 47 | 2018-02-01T15:35:01.000Z | 2022-02-11T07:45:54.000Z | kubevirt/models/v1_generation_status.py | ansijain/client-python | 444ab92a68371c1ccd89314753fa7ab5c4ac9bbe | [
"Apache-2.0"
] | 19 | 2018-04-03T09:20:52.000Z | 2021-06-01T06:07:28.000Z | # coding: utf-8
"""
KubeVirt API
This is KubeVirt API an add-on for Kubernetes.
OpenAPI spec version: 1.0.0
Contact: kubevirt-dev@googlegroups.com
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
def to_str(self):
    """
    Pretty-printed string form of the model's attribute dictionary.
    """
    model_as_dict = self.to_dict()
    return pformat(model_as_dict)
def __repr__(self):
    """
    Debug representation used by `print` and `pprint`; delegates to to_str().
    """
    return self.to_str()
def __eq__(self, other):
    """
    Two objects are equal when the other is also a V1GenerationStatus
    and all attributes match.
    """
    return (isinstance(other, V1GenerationStatus)
            and self.__dict__ == other.__dict__)
def __ne__(self, other):
    """
    Inverse of equality: true when the objects are not equal.
    """
    return not (self == other)
| 27.411111 | 125 | 0.580327 |
b5f230d3037e9e1528cdc347b55ec3805c78a481 | 3,352 | py | Python | scripts/plot_fits.py | trichter/robust_earthquake_spectra | ef816e30944293e27c0d5da4d31ec2184e6d187b | [
"MIT"
] | 8 | 2021-07-23T13:01:29.000Z | 2022-03-27T17:57:36.000Z | scripts/plot_fits.py | trichter/robust_earthquake_spectra | ef816e30944293e27c0d5da4d31ec2184e6d187b | [
"MIT"
] | null | null | null | scripts/plot_fits.py | trichter/robust_earthquake_spectra | ef816e30944293e27c0d5da4d31ec2184e6d187b | [
"MIT"
] | null | null | null | # Copyright 2021 Tom Eulenfeld, MIT license
import matplotlib as mpl
import matplotlib.gridspec as gridspec
import matplotlib.pyplot as plt
import numpy as np
import pickle
from qopen.core import get_pair, Gsmooth
from qopen.rt import G as G_func
if __name__ == '__main__':
fname = '../qopen/01_go/fits_20186784_04.00Hz-08.00Hz.pkl'
with open(fname, 'rb') as f:
tup = pickle.load(f)
plot_fits(*tup)
plt.savefig('../figs/qopen_fits_20186784_4-8Hz.pdf', bbox_inches='tight')
| 34.204082 | 77 | 0.568019 |
b5f35bed476c5278cc37b5eb93da2b3545e9bfe8 | 957 | py | Python | magmango/tests/test_potcar.py | nimalec/Magno | 016bed1c2fb8275ac76ece3d0b7f39c4ebc45551 | [
"MIT"
] | 1 | 2021-01-08T18:22:13.000Z | 2021-01-08T18:22:13.000Z | magmango/tests/test_potcar.py | nimalec/Magno | 016bed1c2fb8275ac76ece3d0b7f39c4ebc45551 | [
"MIT"
] | null | null | null | magmango/tests/test_potcar.py | nimalec/Magno | 016bed1c2fb8275ac76ece3d0b7f39c4ebc45551 | [
"MIT"
] | null | null | null | import unittest
import os
import numpy as np
from pymatgen import Structure
from magmango.calculation.potcar import PotcarSettings
#
# class PotcarSettingsTest(unittest.TestCase):
# def setUp(self):
# self.potcar_file_path = "data/potcar_pto"
# #self.structure = Structure.from_file(self.poscar_file_path)
#
# def test_from_input(self):
# #poscar_sett = PoscarSettings(self.structure, self.poscar_file_path)
# #self.assertEqual(poscar_sett._structure, self.structure)
#
# # def test_from_file(self):
# # poscar_infile_sett = PoscarSettings()
# # poscar_infile_sett.poscar_from_file(self.poscar_file_path)
# # struct = poscar_infile_sett._structure
# # self.assertEqual(struct, self.structure)
#
# def test_update_settings(self):
# poscar_infile_sett = PoscarSettings()
# poscar_infile_sett.poscar_from_file(self.poscar_file_path)
# poscar_sett = poscar_infile_sett._structure
| 35.444444 | 76 | 0.736677 |
b5f407423805cba0b85dc8b97c1c27b8ba3da9b6 | 225 | py | Python | answers/Aryan Goyal/Day 10/Que 1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 22 | 2021-03-16T14:07:47.000Z | 2021-08-13T08:52:50.000Z | answers/Aryan Goyal/Day 10/Que 1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 174 | 2021-03-16T21:16:40.000Z | 2021-06-12T05:19:51.000Z | answers/Aryan Goyal/Day 10/Que 1.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 135 | 2021-03-16T16:47:12.000Z | 2021-06-27T14:22:38.000Z | # main
string1 = input()
# A predicate result is used directly; comparing `== True` is redundant
# and non-idiomatic (PEP 8 explicitly discourages it).
if pangram(string1):
    print("Yes")
else:
    print("No")
| 17.307692 | 35 | 0.6 |
b5f4eae105a3ccda0bbf32f61e4d9bc409056d85 | 773 | py | Python | website/addons/dropbox/tests/test_serializer.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | website/addons/dropbox/tests/test_serializer.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | 13 | 2020-03-24T15:29:41.000Z | 2022-03-11T23:15:28.000Z | website/addons/dropbox/tests/test_serializer.py | DanielSBrown/osf.io | 98dda2ac237377197acacce78274bc0a4ce8f303 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Serializer tests for the Dropbox addon."""
from nose.tools import * # noqa (PEP8 asserts)
from website.addons.base.testing.serializers import StorageAddonSerializerTestSuiteMixin
from website.addons.dropbox.tests.utils import MockDropbox
from website.addons.dropbox.tests.factories import DropboxAccountFactory
from website.addons.dropbox.serializer import DropboxSerializer
from tests.base import OsfTestCase
mock_client = MockDropbox()
| 32.208333 | 88 | 0.798189 |
b5f7ed8a0664870db210f6051f62a7c08134ae57 | 9,357 | py | Python | tumblrlikes.py | cesarmiquel/Tumblr-Likes | 3a96e979dbb420553535dd73320f3e7206bcbbfc | [
"MIT"
] | 1 | 2017-03-09T23:47:19.000Z | 2017-03-09T23:47:19.000Z | tumblrlikes.py | cesarmiquel/Tumblr-Likes | 3a96e979dbb420553535dd73320f3e7206bcbbfc | [
"MIT"
] | null | null | null | tumblrlikes.py | cesarmiquel/Tumblr-Likes | 3a96e979dbb420553535dd73320f3e7206bcbbfc | [
"MIT"
] | null | null | null | import os
import urllib
import json
import pprint
from google.appengine.api import users
from google.appengine.ext import ndb
from google.appengine.api import urlfetch
import jinja2
import webapp2
JINJA_ENVIRONMENT = jinja2.Environment(
loader = jinja2.FileSystemLoader(os.path.dirname(__file__) + '/templates'),
extensions = ['jinja2.ext.autoescape'])
# Blogs
# Blog Post Image
# Blog post entity
# Get blog likes and add them to queue
# Update blog stats and information
# Retrieve the list of available blogs
# Retrieve posts for a blog
# Retrieve posts for a blog
application = webapp2.WSGIApplication([
('/', MainPage),
('/blogs', GetBlogList),
('/posts', GetBlogPosts),
('/postshtml', GetBlogPostsHtml),
('/process', ProcessBlogLikes),
('/updatestats', UpdateBlogInfo),
], debug=True)
| 36.267442 | 167 | 0.578284 |
b5f8afd3209dc9c313d59f605ef9e611cf525951 | 9,348 | py | Python | tests/test_reliable_redis_backend.py | thread/django-lightweight-queue | 2c67eb13a454fa1a02f8445c26915b6e9261fdad | [
"BSD-3-Clause"
] | 23 | 2015-04-29T04:47:02.000Z | 2022-03-11T12:43:01.000Z | tests/test_reliable_redis_backend.py | thread/django-lightweight-queue | 2c67eb13a454fa1a02f8445c26915b6e9261fdad | [
"BSD-3-Clause"
] | 23 | 2015-02-27T14:30:47.000Z | 2021-12-02T14:18:34.000Z | tests/test_reliable_redis_backend.py | thread/django-lightweight-queue | 2c67eb13a454fa1a02f8445c26915b6e9261fdad | [
"BSD-3-Clause"
] | 1 | 2015-08-18T12:27:08.000Z | 2015-08-18T12:27:08.000Z | import datetime
import unittest
import contextlib
import unittest.mock
from typing import Any, Dict, Tuple, Mapping, Iterator, Optional
import fakeredis
from django_lightweight_queue.job import Job
from django_lightweight_queue.types import QueueName
from django_lightweight_queue.backends.reliable_redis import (
ReliableRedisBackend,
)
from . import settings
from .mixins import RedisCleanupMixin
| 28.5 | 81 | 0.578947 |
b5f91ae2a0e4966e6263d4fa5ec3616c068ac79a | 653 | py | Python | src/waldur_slurm/migrations/0019_fill_allocation_user_usage.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 26 | 2017-10-18T13:49:58.000Z | 2021-09-19T04:44:09.000Z | src/waldur_slurm/migrations/0019_fill_allocation_user_usage.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 14 | 2018-12-10T14:14:51.000Z | 2021-06-07T10:33:39.000Z | src/waldur_slurm/migrations/0019_fill_allocation_user_usage.py | geant-multicloud/MCMS-mastermind | 81333180f5e56a0bc88d7dad448505448e01f24e | [
"MIT"
] | 32 | 2017-09-24T03:10:45.000Z | 2021-10-16T16:41:09.000Z | from django.db import migrations
| 28.391304 | 79 | 0.715161 |
b5fd2934ba1f4d9447596711eac5fb882a9d016a | 2,430 | py | Python | SBGCobraTools.py | dsanleo/SBGCobraTools | 2cc3a012e1d398ec9185de6ed0d6fa94526afc85 | [
"MIT"
] | null | null | null | SBGCobraTools.py | dsanleo/SBGCobraTools | 2cc3a012e1d398ec9185de6ed0d6fa94526afc85 | [
"MIT"
] | null | null | null | SBGCobraTools.py | dsanleo/SBGCobraTools | 2cc3a012e1d398ec9185de6ed0d6fa94526afc85 | [
"MIT"
] | null | null | null | # Get all carbon sources and return the objective flux. It can be normalized by the carbon input
| 69.428571 | 168 | 0.637037 |
b5fe08cd114c3ed382e1d1703c6401c43f46dc9b | 17,970 | py | Python | Testing/test_StableMotifs.py | jcrozum/StableMotifs | 8a9d640d3e8b074e0f05e9b45b8ef8bef8d8b5c7 | [
"MIT"
] | 9 | 2020-04-03T14:18:06.000Z | 2021-05-18T12:08:20.000Z | Testing/test_StableMotifs.py | jcrozum/StableMotifs | 8a9d640d3e8b074e0f05e9b45b8ef8bef8d8b5c7 | [
"MIT"
] | 30 | 2020-04-06T16:08:45.000Z | 2021-06-14T15:15:41.000Z | Testing/test_StableMotifs.py | jcrozum/StableMotifs | 8a9d640d3e8b074e0f05e9b45b8ef8bef8d8b5c7 | [
"MIT"
] | 2 | 2021-01-14T15:21:51.000Z | 2021-05-18T12:04:17.000Z | import sys
sys.path.append('../')
import unittest
import sys
sys.path.insert(0,"C:/Users/jcroz/github/StableMotifs")
import pystablemotifs as sm
import pyboolnet.file_exchange
if __name__ == '__main__':
unittest.main()
| 63.723404 | 134 | 0.420701 |
b5ffeb36473c0df68ff9596c309080a9ed5b0766 | 4,584 | py | Python | environments/env_locust.py | jwallnoefer/projectivesimulation | b8f7b3d7d492b5d5f6df7f9f0802bead33c946ca | [
"Apache-2.0"
] | 14 | 2018-02-13T17:39:58.000Z | 2021-07-06T18:09:28.000Z | environments/env_locust.py | jwallnoefer/projectivesimulation | b8f7b3d7d492b5d5f6df7f9f0802bead33c946ca | [
"Apache-2.0"
] | null | null | null | environments/env_locust.py | jwallnoefer/projectivesimulation | b8f7b3d7d492b5d5f6df7f9f0802bead33c946ca | [
"Apache-2.0"
] | 8 | 2018-03-22T04:12:31.000Z | 2021-01-31T19:14:28.000Z | # -*- coding: utf-8 -*-
"""
Copyright 2018 Alexey Melnikov and Katja Ried.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License.
Please acknowledge the authors when re-using this code and maintain this notice intact.
Code written by Katja Ried, implementing ideas from
'Modelling collective motion based on the principle of agency'
Katja Ried, Thomas Muller & Hans J. Briegel
arXiv:1712.01334 (2017)
"""
import numpy as np
def dist_mod(num1, num2, mod):
    """Shortest circular distance (absolute value) between num1 and num2.

    The values are taken modulo an integer ``mod``, i.e. between zero and
    mod, and the distance along the shorter arc is returned. Also works if
    num1 is an array (not a list) and num2 a number or vice versa.
    """
    forward = np.remainder(num1 - num2, mod)
    return np.minimum(forward, mod - forward)
| 49.290323 | 128 | 0.695681 |
bd0162bf0a28c31d37370edf04366759674e96cb | 1,174 | py | Python | masktools/superskims/slit.py | adwasser/masktools | c96c8f375f0e94ee2791466d0ce6d31007f58022 | [
"MIT"
] | null | null | null | masktools/superskims/slit.py | adwasser/masktools | c96c8f375f0e94ee2791466d0ce6d31007f58022 | [
"MIT"
] | null | null | null | masktools/superskims/slit.py | adwasser/masktools | c96c8f375f0e94ee2791466d0ce6d31007f58022 | [
"MIT"
] | null | null | null | from __future__ import (absolute_import, division,
print_function, unicode_literals)
| 39.133333 | 98 | 0.581772 |
bd0183d07de9ad7a1f13f37bb28f41e2ff5b5a7b | 1,940 | py | Python | gemmforge/instructions/builders/alloctor_builder.py | ravil-mobile/gemmforge | 6381584c2d1ce77eaa938de02bc4f130f19cb2e4 | [
"MIT"
] | null | null | null | gemmforge/instructions/builders/alloctor_builder.py | ravil-mobile/gemmforge | 6381584c2d1ce77eaa938de02bc4f130f19cb2e4 | [
"MIT"
] | 2 | 2021-02-01T16:31:22.000Z | 2021-05-05T13:44:43.000Z | gemmforge/instructions/builders/alloctor_builder.py | ravil-mobile/gemmforge | 6381584c2d1ce77eaa938de02bc4f130f19cb2e4 | [
"MIT"
] | null | null | null | from .abstract_builder import AbstractBuilder
from gemmforge.symbol_table import SymbolType, Symbol
from gemmforge.basic_types import RegMemObject, ShrMemObject
from gemmforge.instructions import RegisterAlloc, ShrMemAlloc
from gemmforge.basic_types import GeneralLexicon
from abc import abstractmethod
| 28.955224 | 72 | 0.723196 |
bd04f09ba2aeaba23212f09a5a18c36cfe707aa2 | 1,104 | py | Python | solutions/LeetCode/Python3/1049.py | timxor/leetcode-journal | 5f1cb6bcc44a5bc33d88fb5cdb4126dfc6f4232a | [
"MIT"
] | 854 | 2018-11-09T08:06:16.000Z | 2022-03-31T06:05:53.000Z | solutions/LeetCode/Python3/1049.py | timxor/leetcode-journal | 5f1cb6bcc44a5bc33d88fb5cdb4126dfc6f4232a | [
"MIT"
] | 29 | 2019-06-02T05:02:25.000Z | 2021-11-15T04:09:37.000Z | solutions/LeetCode/Python3/1049.py | timxor/leetcode-journal | 5f1cb6bcc44a5bc33d88fb5cdb4126dfc6f4232a | [
"MIT"
] | 347 | 2018-12-23T01:57:37.000Z | 2022-03-12T14:51:21.000Z | __________________________________________________________________________________________________
sample 32 ms submission
__________________________________________________________________________________________________
sample 36 ms submission
__________________________________________________________________________________________________
sample 40 ms submission | 39.428571 | 104 | 0.673007 |
bd0555b1790f397fc8d762146f856a6acab0847d | 3,043 | py | Python | Python3/809.expressive-words.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | Python3/809.expressive-words.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | Python3/809.expressive-words.py | 610yilingliu/leetcode | 30d071b3685c2131bd3462ba77c6c05114f3f227 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=809 lang=python3
#
# [809] Expressive Words
#
# https://leetcode.com/problems/expressive-words/description/
#
# algorithms
# Medium (46.84%)
# Likes: 320
# Dislikes: 823
# Total Accepted: 45.2K
# Total Submissions: 96.2K
# Testcase Example: '"heeellooo"\n["hello", "hi", "helo"]'
#
# Sometimes people repeat letters to represent extra feeling, such as "hello"
# -> "heeellooo", "hi" -> "hiiii". In these strings like "heeellooo", we have
# groups of adjacent letters that are all the same: "h", "eee", "ll", "ooo".
#
# For some given string S, a query word is stretchy if it can be made to be
# equal to S by anynumber ofapplications of the following extension
# operation: choose a group consisting ofcharacters c, and add some number of
# characters c to the group so that the size of the group is 3 or more.
#
# For example, starting with "hello", we could do an extension on the group "o"
# to get "hellooo", but we cannot get "helloo" since the group "oo" has size
# less than 3. Also, we could do another extension like "ll" -> "lllll" to get
# "helllllooo". If S = "helllllooo", then the query word "hello" would be
# stretchy because of these two extension operations:query = "hello" ->
# "hellooo" ->"helllllooo" = S.
#
# Given a list of query words, return the number of words that are
# stretchy.
#
#
#
#
# Example:
# Input:
# S = "heeellooo"
# words = ["hello", "hi", "helo"]
# Output: 1
# Explanation:
# We can extend "e" and "o" in the word "hello" to get "heeellooo".
# We can't extend "helo" to get "heeellooo" because the group "ll" is not size
# 3 or more.
#
#
#
# Constraints:
#
#
# 0 <= len(S) <= 100.
# 0 <= len(words) <= 100.
# 0 <= len(words[i]) <= 100.
# S and all words in wordsconsist only oflowercase letters
#
#
#
# @lc code=start
# @lc code=end
| 29.833333 | 141 | 0.57049 |
bd068843b439a58814f27d16075e43744d08bd52 | 1,601 | py | Python | settings/Microscope_settings.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | null | null | null | settings/Microscope_settings.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 1 | 2019-10-22T21:28:31.000Z | 2019-10-22T21:39:12.000Z | settings/Microscope_settings.py | bopopescu/Lauecollect | 60ae2b05ea8596ba0decf426e37aeaca0bc8b6be | [
"MIT"
] | 2 | 2019-06-06T15:06:46.000Z | 2020-07-20T02:03:22.000Z | Size = (1255, 1160)
Position = (39, 26)
ScaleFactor = 1.0
ZoomLevel = 32.0
Orientation = 0
Mirror = False
NominalPixelSize = 0.125
filename = 'Z:\\All Projects\\Crystallization\\2018.08.27.caplilary with crystals inspection\\2018.08.27 CypA 2.jpg'
ImageWindow.Center = (649, 559)
ImageWindow.ViewportCenter = (2.41796875, 2.0)
ImageWindow.crosshair_color = (255, 0, 255)
ImageWindow.boxsize = (0.04, 0.04)
ImageWindow.box_color = (255, 0, 0)
ImageWindow.show_box = False
ImageWindow.Scale = [[0.21944444444444444, -0.0763888888888889], [0.46944444444444444, -0.075]]
ImageWindow.show_scale = True
ImageWindow.scale_color = (255, 0, 0)
ImageWindow.crosshair_size = (0.05, 0.05)
ImageWindow.show_crosshair = False
ImageWindow.show_profile = False
ImageWindow.show_FWHM = False
ImageWindow.show_center = False
ImageWindow.calculate_section = False
ImageWindow.profile_color = (255, 0, 255)
ImageWindow.FWHM_color = (0, 0, 255)
ImageWindow.center_color = (0, 0, 255)
ImageWindow.ROI = [[-0.5194444444444445, -0.3458333333333333], [0.225, 0.19305555555555556]]
ImageWindow.ROI_color = (255, 255, 0)
ImageWindow.show_saturated_pixels = False
ImageWindow.mask_bad_pixels = False
ImageWindow.saturation_threshold = 233
ImageWindow.saturated_color = (255, 0, 0)
ImageWindow.linearity_correction = False
ImageWindow.bad_pixel_threshold = 233
ImageWindow.bad_pixel_color = (30, 30, 30)
ImageWindow.show_grid = False
ImageWindow.grid_type = 'xy'
ImageWindow.grid_color = (0, 0, 255)
ImageWindow.grid_x_spacing = 0.3
ImageWindow.grid_x_offset = 0.0
ImageWindow.grid_y_spacing = 0.5
ImageWindow.grid_y_offset = 0.0
| 37.232558 | 116 | 0.775141 |
bd07434502bfcaa7d1b29853452ba88cedddad3e | 3,259 | py | Python | model_rocke3d.py | projectcuisines/gcm_ana | cd9f7d47dd4a9088bcd7556b4955d9b8e09b9741 | [
"MIT"
] | 1 | 2021-09-29T18:03:56.000Z | 2021-09-29T18:03:56.000Z | model_rocke3d.py | projectcuisines/thai_trilogy_code | cd9f7d47dd4a9088bcd7556b4955d9b8e09b9741 | [
"MIT"
] | null | null | null | model_rocke3d.py | projectcuisines/thai_trilogy_code | cd9f7d47dd4a9088bcd7556b4955d9b8e09b9741 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Utilities for the ROCKE3D output."""
import dask.array as da
import xarray as xr
from grid import reverse_along_dim, roll_da_to_pm180
from model_um import calc_um_rel
from names import rocke3d
__all__ = ("adjust_rocke3d_grid", "calc_rocke3d_rei", "calc_rocke3d_rel")
calc_rocke3d_rel = calc_um_rel
def adjust_rocke3d_grid(darr, lon_name="lon", lat_name="lat"):
    """
    Adjust the grid of a ROCKE3D data array.

    Reverse the latitude dimension and shift the substellar coordinate
    from -180 degrees to 0 degree in longitude. Dimensions absent from
    the array are left untouched.
    """
    result = darr
    if lat_name in result.dims:
        result = reverse_along_dim(result, lat_name)
    if lon_name in result.dims:
        # Shift data along the longitude to center the substellar at (0,0)
        recentred = result.assign_coords(**{lon_name: result[lon_name] + 180})
        result = roll_da_to_pm180(recentred, lon_name=lon_name)
    return result
def calc_rocke3d_rei(ds):
"""
Aggregate parametrization based on effective dimension.
In the initial form, the same approach is used for stratiform
and convective cloud.
The fit provided here is based on Stephan Havemann's fit of
Dge with temperature, consistent with David Mitchell's treatment
of the variation of the size distribution with temperature. The
parametrization of the optical properties is based on De
(=(3/2)volume/projected area), whereas Stephan's fit gives Dge
(=(2*SQRT(3)/3)*volume/projected area), which explains the
conversion factor. The fit to Dge is in two sections, because
Mitchell's relationship predicts a cusp at 216.208 K. Limits
of 8 and 124 microns are imposed on Dge: these are based on this
relationship and should be reviewed if it is changed. Note also
that the relationship given here is for polycrystals only.
Parameters
----------
ds: xarray.Dataset
ROCKE-3D data set
These are the parameters used in the temperature dependent
parameterizations for ice cloud particle sizes below.
Parameters for the aggregate parametrization
a0_agg_cold = 7.5094588E-04,
b0_agg_cold = 5.0830326E-07,
a0_agg_warm = 1.3505403E-04,
b0_agg_warm = 2.6517429E-05,
t_switch = 216.208,
t0_agg = 279.5,
s0_agg = 0.05,
Returns
-------
rei: xarray.DataArray
Ice effective radius [um].
"""
a0_agg_cold = 7.5094588e-04
b0_agg_cold = 5.0830326e-07
a0_agg_warm = 1.3505403e-04
b0_agg_warm = 2.6517429e-05
t_switch = 216.208
t0_agg = 279.5
s0_agg = 0.05
# Air temperature in ROCKE-3D
air_temp = ds[rocke3d.temp]
# Calculate the R_eff
rei = xr.where(
air_temp < t_switch,
a0_agg_cold * da.exp(s0_agg * (air_temp - t0_agg)) + b0_agg_cold,
a0_agg_warm * da.exp(s0_agg * (air_temp - t0_agg)) + b0_agg_warm,
)
# Limit of the parameterization
rei = (
(3 / 2)
* (3 / (2 * da.sqrt(3)))
* xr.ufuncs.minimum(1.24e-04, xr.ufuncs.maximum(8.0e-06, rei))
)
rei = rei.rename("ice_cloud_condensate_effective_radius")
rei.attrs.update(
{
"long_name": "ice_cloud_condensate_effective_radius",
"units": "micron",
}
)
return rei
| 30.745283 | 83 | 0.666769 |
bd080979389c4fa7ca1e77a7f150acdec97764c3 | 4,090 | py | Python | models/wordcloud.py | mcxwx123/RecGFI | 6e872c3b8c5398959b119e5ba14e665bbb45c56b | [
"MIT"
] | 9 | 2022-01-28T14:24:35.000Z | 2022-01-30T05:05:03.000Z | models/wordcloud.py | mcxwx123/RecGFI | 6e872c3b8c5398959b119e5ba14e665bbb45c56b | [
"MIT"
] | null | null | null | models/wordcloud.py | mcxwx123/RecGFI | 6e872c3b8c5398959b119e5ba14e665bbb45c56b | [
"MIT"
] | 1 | 2022-01-28T14:24:41.000Z | 2022-01-28T14:24:41.000Z | from wordcloud import WordCloud,STOPWORDS
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
import multidict as multidict
from collections import Counter
import json
import datetime
import os
plt.switch_backend('agg')
| 29.854015 | 138 | 0.596822 |
bd08ddc4c6e6b83523aa9e949593219788ab5e5c | 2,996 | py | Python | favorites_updater.py | techonerd/moepoi | 6440f39653bc3560e39429570bd25b7c564b7f54 | [
"MIT"
] | 36 | 2020-07-21T16:19:48.000Z | 2022-03-21T15:31:02.000Z | favorites_updater.py | gaesant/moepoi | cd478ca00afa5140bb8057c7d37b1ccb2fcbe3b6 | [
"MIT"
] | 1 | 2022-02-18T07:41:14.000Z | 2022-02-18T07:41:14.000Z | favorites_updater.py | gaesant/moepoi | cd478ca00afa5140bb8057c7d37b1ccb2fcbe3b6 | [
"MIT"
] | 176 | 2020-07-22T19:24:14.000Z | 2022-03-30T23:42:58.000Z | from python_graphql_client import GraphqlClient
import pathlib
import re
import os
root = pathlib.Path(__file__).parent.resolve()
client = GraphqlClient(endpoint="https://graphql.anilist.co")
TOKEN = os.environ.get("ANILIST_TOKEN", "")
if __name__ == "__main__":
readme = root / "README.md"
readme_contents = readme.open().read()
# Favorites Anime
data = fetch_favorites(TOKEN, types='anime')
res = "\n".join(
[
"* [{title}]({url})".format(**x)
for x in data
]
)
print (res)
rewritten = replace_chunk(readme_contents, "favorites_anime", res)
# Favorites Manga
data = fetch_favorites(TOKEN, types='manga')
res = "\n".join(
[
"* [{title}]({url})".format(**x)
for x in data
]
)
print (res)
rewritten = replace_chunk(readme_contents, "favorites_manga", res)
# Favorites Characters
data = fetch_favorites(TOKEN, types='characters')
res = "\n".join(
[
"* [{title}]({url})".format(**x)
for x in data
]
)
print (res)
rewritten = replace_chunk(readme_contents, "favorites_characters", res)
readme.open("w").write(rewritten)
| 23.046154 | 94 | 0.502003 |
bd0a67b7badc84d9a3a79ed71754a0226bee9e55 | 844 | py | Python | moistmaster/analytics/migrations/0001_initial.py | benjohnsonnlp/robosquirt | f96c58421532f9b956cec2277b7978022c7c1d80 | [
"BSD-3-Clause"
] | null | null | null | moistmaster/analytics/migrations/0001_initial.py | benjohnsonnlp/robosquirt | f96c58421532f9b956cec2277b7978022c7c1d80 | [
"BSD-3-Clause"
] | 7 | 2020-02-12T00:56:32.000Z | 2022-02-10T09:57:40.000Z | moistmaster/analytics/migrations/0001_initial.py | benjohnsonnlp/robosquirt | f96c58421532f9b956cec2277b7978022c7c1d80 | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 2.0.6 on 2019-07-10 03:56
from django.db import migrations, models
| 28.133333 | 99 | 0.535545 |
bd0c339764aca9d1b1dc4bb3784afbd33f7e553d | 30,324 | py | Python | stcloud/api/apps_api.py | sematext/sematext-api-client-python | 16e025cd3d32aa58deb70fc5930ae4165afebe97 | [
"Apache-2.0"
] | 1 | 2020-05-01T12:15:52.000Z | 2020-05-01T12:15:52.000Z | stcloud/api/apps_api.py | sematext/sematext-api-client-python | 16e025cd3d32aa58deb70fc5930ae4165afebe97 | [
"Apache-2.0"
] | null | null | null | stcloud/api/apps_api.py | sematext/sematext-api-client-python | 16e025cd3d32aa58deb70fc5930ae4165afebe97 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Sematext Cloud API
API Explorer provides access and documentation for Sematext REST API. The REST API requires the API Key to be sent as part of `Authorization` header. E.g.: `Authorization : apiKey e5f18450-205a-48eb-8589-7d49edaea813`. # noqa: E501
OpenAPI spec version: v3
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from stcloud.api_client import ApiClient
| 38.777494 | 236 | 0.614991 |