hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1f9508b579771bc7e41b7b6de9c4a49ddf05f51e | 3,368 | py | Python | models/generatorUnet.py | ctyler9/cartoon-gan | 48ec80cfcf23c6f30c5d1c446c12ff6f9c81afc8 | [
"MIT"
] | 177 | 2020-01-31T08:32:07.000Z | 2022-03-28T02:20:29.000Z | models/generatorUnet.py | ctyler9/cartoon-gan | 48ec80cfcf23c6f30c5d1c446c12ff6f9c81afc8 | [
"MIT"
] | 10 | 2020-06-26T04:46:26.000Z | 2022-02-01T18:17:10.000Z | models/generatorUnet.py | ctyler9/cartoon-gan | 48ec80cfcf23c6f30c5d1c446c12ff6f9c81afc8 | [
"MIT"
] | 44 | 2020-03-11T17:21:51.000Z | 2022-03-16T16:09:22.000Z | import torch
import torch.nn as nn
import torch.nn.functional as F
def single_conv(in_channels, out_channels, ks=3):
return nn.Sequential(
nn.ReflectionPad2d(ks//2),
nn.Conv2d(in_channels, out_channels, 3, bias=False),
nn.ReLU(inplace=True)
) | 32.384615 | 101 | 0.561758 |
1f964a207f38c7145c92fc77855d4848bb25de63 | 1,716 | py | Python | app/calc/utility.py | sajeeshen/WebCalculatorAPI | d951e688e84741cc594877914d292fbddb4e9542 | [
"MIT"
] | null | null | null | app/calc/utility.py | sajeeshen/WebCalculatorAPI | d951e688e84741cc594877914d292fbddb4e9542 | [
"MIT"
] | null | null | null | app/calc/utility.py | sajeeshen/WebCalculatorAPI | d951e688e84741cc594877914d292fbddb4e9542 | [
"MIT"
] | null | null | null | import math
from datetime import datetime
AVAILABLE_ACTIONS = [{'action': 'add', 'admin_required': False,
'operator': '+'},
{'action': 'subtract', 'admin_required': False,
'operator': '-'},
{'action': 'multiply', 'admin_required': False,
'operator': '*'},
{'action': 'divide', 'admin_required': False,
'operator': '/'},
{'action': 'power', 'admin_required': True,
'operator': '**'},
{'action': 'sqrt', 'admin_required': True,
'operator': 'sqrt'},
]
def get_available_options(action):
"""
Go through the available options and find it, then return that object
:param action: string
:return: list
"""
return [obj for obj in AVAILABLE_ACTIONS
if obj['action'] == action.lower()]
def do_calculation(action, x, y):
"""
This function does all the calculation thig
:param action: string
:param x: int
:param y: int
:return: int ( the result )
"""
operator = get_available_options((action))[0]['operator']
ops = {
'+': lambda x, y: x + y,
'-': lambda x, y: x - y,
'*': lambda x, y: x * y,
'/': lambda x, y: x / y if y else 0,
'**': lambda x, y: x ** y,
'sqrt': lambda x, y: math.sqrt(int(x))
}
return ops[operator](int(x), int(y))
| 28.131148 | 73 | 0.501166 |
2f06bad44169797de0c1276f26ece53ea110fad2 | 6,009 | py | Python | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/commerce/api/v1/models.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 3 | 2021-12-15T04:58:18.000Z | 2022-02-06T12:15:37.000Z | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/commerce/api/v1/models.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | null | null | null | Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/commerce/api/v1/models.py | osoco/better-ways-of-thinking-about-software | 83e70d23c873509e22362a09a10d3510e10f6992 | [
"MIT"
] | 1 | 2019-01-02T14:38:50.000Z | 2019-01-02T14:38:50.000Z | """ API v1 models. """
import logging
from itertools import groupby
from django.db import transaction
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from common.djangoapps.course_modes.models import CourseMode
from lms.djangoapps.verify_student.models import VerificationDeadline
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
log = logging.getLogger(__name__)
UNDEFINED = object()
| 40.328859 | 112 | 0.669496 |
2f082e2906c7c51226d4204e5140aa52273e420e | 984 | py | Python | model_code/grid_search/DecisionTreeClassifier.py | lacava/sklearn-benchmarks | bec1d5468f40b1fea08b605a11d5f7795fe5bb1b | [
"MIT"
] | 213 | 2016-02-03T02:56:40.000Z | 2022-02-26T06:44:27.000Z | model_code/grid_search/DecisionTreeClassifier.py | lacava/sklearn-benchmarks | bec1d5468f40b1fea08b605a11d5f7795fe5bb1b | [
"MIT"
] | 30 | 2016-02-03T14:32:27.000Z | 2020-05-12T17:32:40.000Z | model_code/grid_search/DecisionTreeClassifier.py | arunsinghyadav/sklearn-benchmarks | a917336f6fd3ffb89efd94b1c7f60b3a05ba780f | [
"MIT"
] | 59 | 2016-02-03T14:32:58.000Z | 2021-01-12T23:48:46.000Z | import sys
import pandas as pd
import numpy as np
import itertools
from sklearn.preprocessing import RobustScaler
from sklearn.tree import DecisionTreeClassifier
from evaluate_model import evaluate_model
dataset = sys.argv[1]
pipeline_components = [RobustScaler, DecisionTreeClassifier]
pipeline_parameters = {}
min_impurity_decrease_values = np.arange(0., 0.005, 0.00025)
max_features_values = [0.1, 0.25, 0.5, 0.75, 'sqrt', 'log2', None]
criterion_values = ['gini', 'entropy']
random_state = [324089]
all_param_combinations = itertools.product(min_impurity_decrease_values, max_features_values, criterion_values, random_state)
pipeline_parameters[DecisionTreeClassifier] = \
[{'min_impurity_decrease': min_impurity_decrease, 'max_features': max_features, 'criterion': criterion, 'random_state': random_state}
for (min_impurity_decrease, max_features, criterion, random_state) in all_param_combinations]
evaluate_model(dataset, pipeline_components, pipeline_parameters)
| 39.36 | 136 | 0.813008 |
2f0914ec0565214e9bbc4b09ca688ebda76940dd | 3,428 | py | Python | training_v1_backup/training/PPO/run_ppo.py | prasoonpatidar/multiagentRL-resource-sharing | e63ba7fc3c7ab019e9fd109cd45b739e3322152f | [
"MIT"
] | null | null | null | training_v1_backup/training/PPO/run_ppo.py | prasoonpatidar/multiagentRL-resource-sharing | e63ba7fc3c7ab019e9fd109cd45b739e3322152f | [
"MIT"
] | null | null | null | training_v1_backup/training/PPO/run_ppo.py | prasoonpatidar/multiagentRL-resource-sharing | e63ba7fc3c7ab019e9fd109cd45b739e3322152f | [
"MIT"
] | null | null | null | '''
Wrapper function to run PPO algorithm for training
'''
import numpy as np
import matplotlib.pyplot as plt
import time
import math
import logging
from scipy.optimize import minimize, LinearConstraint
# custom libraries
from training.PPO.run_helper import buyerPenaltiesCalculator, buyerUtilitiesCalculator, evaluation
from training.PPO.run_helper import logger_handle, initialize_agent, get_ys, choose_prob, cumlativeBuyerExp, getPurchases
| 38.516854 | 130 | 0.698658 |
2f093dab61a4920e6658955efc331ab3c70a322c | 850 | py | Python | tests/custom/test_clean_dateTime.py | arkhn/cleaning-scripts | ffe88598b476b2e6b53fd06e8ce6092ef0351b19 | [
"Apache-2.0"
] | 9 | 2019-03-31T03:46:51.000Z | 2020-05-20T13:05:06.000Z | tests/custom/test_clean_dateTime.py | arkhn/cleaning-scripts | ffe88598b476b2e6b53fd06e8ce6092ef0351b19 | [
"Apache-2.0"
] | 18 | 2019-09-11T09:19:45.000Z | 2021-07-13T09:16:23.000Z | tests/custom/test_clean_dateTime.py | arkhn/cleaning-scripts | ffe88598b476b2e6b53fd06e8ce6092ef0351b19 | [
"Apache-2.0"
] | 2 | 2019-09-18T15:20:10.000Z | 2021-07-25T06:46:57.000Z | import pytest
from scripts.custom import clean_dateTime
| 31.481481 | 71 | 0.555294 |
2f0957f3db94b5ef71452361a51b110a5a627030 | 14,927 | py | Python | mlprogram/entrypoint/train.py | HiroakiMikami/mlprogram | 573e94c567064705fa65267dd83946bf183197de | [
"MIT"
] | 9 | 2020-05-24T11:25:01.000Z | 2022-03-28T15:32:10.000Z | mlprogram/entrypoint/train.py | HiroakiMikami/mlprogram | 573e94c567064705fa65267dd83946bf183197de | [
"MIT"
] | 87 | 2020-05-09T08:56:55.000Z | 2022-03-31T14:46:45.000Z | mlprogram/entrypoint/train.py | HiroakiMikami/NL2Prog | 573e94c567064705fa65267dd83946bf183197de | [
"MIT"
] | 3 | 2021-02-22T20:38:29.000Z | 2021-11-11T18:48:44.000Z | import os
import traceback
from dataclasses import dataclass
from typing import Any, Callable, List, Optional, Union
import pytorch_pfn_extras as ppe
import torch
from pytorch_pfn_extras.training import extension, extensions
from torch import nn
from torch.utils.data import DataLoader
from mlprogram import distributed, logging
from mlprogram.builtins import Environment
from mlprogram.pytorch_pfn_extras import SaveTopKModel, StopByThreshold
from mlprogram.synthesizers import Synthesizer
logger = logging.Logger(__name__)
Length = Union[Epoch, Iteration]
def create_extensions_manager(n_iter: int, evaluation_interval_iter: int,
snapshot_interval_iter: int,
iter_per_epoch: int,
model: nn.Module,
optimizer: torch.optim.Optimizer,
evaluate: Optional[Callable[[], None]],
metric: str, maximize: bool,
threshold: Optional[float],
output_dir: str,
report_metrics: Optional[List[str]] = None):
model_dir = os.path.join(output_dir, "model")
logger.info("Prepare pytorch-pfn-extras")
manager = ppe.training.ExtensionsManager(
model, optimizer, n_iter / iter_per_epoch,
out_dir=os.path.join(output_dir),
extensions=[],
iters_per_epoch=iter_per_epoch,
)
manager.extend(
extensions.FailOnNonNumber(),
trigger=Trigger(evaluation_interval_iter, n_iter)
)
if evaluate is not None:
manager.extend(
Call(evaluate),
trigger=Trigger(evaluation_interval_iter, n_iter),
)
if distributed.is_main_process():
manager.extend(
extensions.LogReport(
trigger=Trigger(100, n_iter),
filename="log.json",
)
)
manager.extend(extensions.ProgressBar())
manager.extend(
SaveTopKModel(model_dir, 1, metric, model, maximize=maximize),
trigger=Trigger(evaluation_interval_iter, n_iter),
)
metrics = report_metrics or []
manager.extend(
extensions.PrintReport(entries=[
"loss", *metrics,
"iteration", "epoch",
"time.iteration", "gpu.time.iteration", "elapsed_time"
]),
trigger=Trigger(100, n_iter),
)
if threshold is not None:
manager.extend(
StopByThreshold(metric, threshold, maximize=maximize),
trigger=Trigger(evaluation_interval_iter, n_iter),
)
if distributed.is_initialized():
snapshot = extensions.snapshot(autoload=True, n_retains=1,
saver_rank=0)
snapshot._rank = distributed.rank()
snapshot._size = distributed.size()
snapshot._local_rank = distributed.rank()
else:
snapshot = extensions.snapshot(autoload=True, n_retains=1)
manager.extend(snapshot, trigger=Trigger(snapshot_interval_iter, n_iter))
return manager
def create_dataloader(dataset: torch.utils.data.Dataset,
batch_size: int, n_worker: int, collate_fn: Callable) \
-> torch.utils.data.DataLoader:
if hasattr(dataset, "__len__"):
is_iterable = False
else:
is_iterable = True
if is_iterable:
return DataLoader(dataset, batch_size=batch_size,
shuffle=False, num_workers=n_worker,
collate_fn=collate_fn)
else:
return DataLoader(dataset, batch_size=batch_size,
shuffle=True, num_workers=n_worker,
collate_fn=collate_fn)
def get_world_process_group(device: torch.device) \
-> Optional[torch.distributed.group]:
if not distributed.is_initialized():
return None
else:
if device.type == "cuda":
return distributed.groups["world_nccl"]
else:
return distributed.groups["world_gloo"]
def setup_distributed_training(
model: nn.Module,
loss: nn.Module,
group: torch.distributed.group
):
model = TrainModule(model, loss)
if group is None:
return model
else:
return ppe.nn.parallel.distributed.DistributedDataParallel(
module=model,
process_group=group,
)
def save_results(output_dir: str,
model: nn.Module, optimizer: torch.optim.Optimizer) -> None:
if distributed.is_main_process():
logger.info("Dump the last model")
torch.save(model.state_dict(), os.path.join(output_dir, "model.pt"))
torch.save(optimizer.state_dict(), os.path.join(output_dir, "optimizer.pt"))
| 37.599496 | 88 | 0.554231 |
2f09b816cae5d16accf1cca62376da23fd995e52 | 3,381 | py | Python | visualization.py | aditya-srikanth/Data-Mining-Assignment-3 | 7dc44d7ca8884680130db9b52a75e3036cf2f8a7 | [
"MIT"
] | null | null | null | visualization.py | aditya-srikanth/Data-Mining-Assignment-3 | 7dc44d7ca8884680130db9b52a75e3036cf2f8a7 | [
"MIT"
] | null | null | null | visualization.py | aditya-srikanth/Data-Mining-Assignment-3 | 7dc44d7ca8884680130db9b52a75e3036cf2f8a7 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import math
import numpy as np
| 34.85567 | 121 | 0.55102 |
2f0b0a77f9fa1f45efa368882434f52b3044f388 | 322 | py | Python | 20211001_PythonIntro/ex2/ex2.py | alessandro-massarenti/Cybersec2021 | 3d6dcc4b255dd425b1be66d440df1d94d5ea5ac0 | [
"BSD-3-Clause"
] | 15 | 2021-10-01T16:10:48.000Z | 2022-02-19T20:45:35.000Z | 20211001_PythonIntro/ex2/ex2.py | alessandro-massarenti/Cybersec2021 | 3d6dcc4b255dd425b1be66d440df1d94d5ea5ac0 | [
"BSD-3-Clause"
] | null | null | null | 20211001_PythonIntro/ex2/ex2.py | alessandro-massarenti/Cybersec2021 | 3d6dcc4b255dd425b1be66d440df1d94d5ea5ac0 | [
"BSD-3-Clause"
] | 2 | 2021-11-06T08:32:41.000Z | 2021-12-11T16:18:54.000Z | from operator import add, itruediv, mul, sub
ops = [add, sub, mul, itruediv]
a = float(input("Inserisci un numero: "))
b = float(input("Inserisci un altro numero: "))
op = int(
input("Inserisci un operatore (0 per addizione, 1 per sottrazione, 2 per moltiplicazione oppure 3 per divisione: ")
)
print(ops[op](a, b))
| 29.272727 | 119 | 0.695652 |
2f0df6e28987fcaa913b236b22575fcae954bfe4 | 3,639 | py | Python | robotidy/transformers/ext_ExtraIndentForKeywordArguments.py | josflorap/robotframework-tidy | 9d4e1ccc6a50c415187468305235830f80f3373b | [
"Apache-2.0"
] | null | null | null | robotidy/transformers/ext_ExtraIndentForKeywordArguments.py | josflorap/robotframework-tidy | 9d4e1ccc6a50c415187468305235830f80f3373b | [
"Apache-2.0"
] | null | null | null | robotidy/transformers/ext_ExtraIndentForKeywordArguments.py | josflorap/robotframework-tidy | 9d4e1ccc6a50c415187468305235830f80f3373b | [
"Apache-2.0"
] | null | null | null | from robot.api.parsing import ModelTransformer, get_model, ModelVisitor, Token
import os, sys
keywordlist = []
other_keywords = []
used_keywords = []
| 50.541667 | 140 | 0.569387 |
2f0e2ccc0b7fb78f69f72c37d56b7289930132ef | 6,581 | py | Python | Common/Strategies/TechIndicators/MacdStrategy.py | enriqueescobar-askida/Kinito.Finance | 5308748b64829ac798a858161f9b4a9e5829db44 | [
"MIT"
] | 2 | 2020-03-04T11:18:38.000Z | 2020-05-10T15:36:42.000Z | Common/Strategies/TechIndicators/MacdStrategy.py | enriqueescobar-askida/Kinito.Finance | 5308748b64829ac798a858161f9b4a9e5829db44 | [
"MIT"
] | 6 | 2020-03-30T16:42:47.000Z | 2021-12-13T20:37:21.000Z | Common/Strategies/TechIndicators/MacdStrategy.py | enriqueescobar-askida/Kinito.Finance | 5308748b64829ac798a858161f9b4a9e5829db44 | [
"MIT"
] | 1 | 2020-04-14T11:26:16.000Z | 2020-04-14T11:26:16.000Z | from typing import Tuple
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from Common.Strategies.TechIndicators.AbstractTechStrategy import AbstractTechStrategy
from Common.TechIndicators.MacdIndicator import MacdIndicator
| 46.34507 | 124 | 0.621942 |
2f1305b235214a028b433be662b9539aa5ea50e7 | 7,572 | py | Python | dayu_widgets/wizard.py | xiaonuoAndy/dayu_widgets | 0a87e40b5b3b10e9f1f3f98c17a252c107118257 | [
"MIT"
] | null | null | null | dayu_widgets/wizard.py | xiaonuoAndy/dayu_widgets | 0a87e40b5b3b10e9f1f3f98c17a252c107118257 | [
"MIT"
] | null | null | null | dayu_widgets/wizard.py | xiaonuoAndy/dayu_widgets | 0a87e40b5b3b10e9f1f3f98c17a252c107118257 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
###################################################################
# Author: Mu yanru
# Date : 2018.5
# Email : muyanru345@163.com
###################################################################
from collections import defaultdict
import utils
from qt import *
from separator import DayuHSeparator
from field_mixin import MFieldMixin
if __name__ == '__main__':
import sys
app = QApplication(sys.argv)
test = MWizard()
test.register_field('formats', [])
test.register_field('type_group', 'element')
test.register_field('current_step', 'prep')
test.set_title('Publish Element')
page0 = MWizardPage('Select Publish Type')
page1 = MWizardPage('Write Comment')
page2 = MWizardPage('Upload Thumbnail')
page3 = MWizardPage('Quality Check')
test.add_page(page0)
test.add_page(page3)
test.add_page(page1)
test.add_page(page2)
test.go_to(0)
test.show()
sys.exit(app.exec_())
| 34.108108 | 100 | 0.633386 |
2f14ec3187ef5944e2d523b10e6eabf13148caae | 897 | py | Python | examples/TechChangeModel.py | timkittel/PyViability | 63b628df47ab506e9317a908a63a49a556232137 | [
"BSD-2-Clause"
] | null | null | null | examples/TechChangeModel.py | timkittel/PyViability | 63b628df47ab506e9317a908a63a49a556232137 | [
"BSD-2-Clause"
] | null | null | null | examples/TechChangeModel.py | timkittel/PyViability | 63b628df47ab506e9317a908a63a49a556232137 | [
"BSD-2-Clause"
] | null | null | null |
from __future__ import division, print_function, generators
import numpy as np
pi = np.pi
def techChange_sunny(p):
"""sunny constraint for techChangeModel"""
return p[:, 0] > 0.325
| 20.860465 | 68 | 0.528428 |
2f1545a93541c971b7ff89f3c71a62f913a542c9 | 2,502 | py | Python | tests/test_heif.py | Cykooz/cykooz.heif | cfd60687406763503a57fe949bdf01fb9997cae8 | [
"MIT"
] | 5 | 2020-03-05T20:31:23.000Z | 2021-11-24T00:22:18.000Z | tests/test_heif.py | Cykooz/cykooz.heif | cfd60687406763503a57fe949bdf01fb9997cae8 | [
"MIT"
] | 3 | 2021-01-14T15:23:04.000Z | 2021-11-24T00:30:37.000Z | tests/test_heif.py | Cykooz/cykooz.heif | cfd60687406763503a57fe949bdf01fb9997cae8 | [
"MIT"
] | 1 | 2020-06-12T01:29:10.000Z | 2020-06-12T01:29:10.000Z | # -*- coding: utf-8 -*-
"""
:Authors: cykooz
:Date: 23.06.2019
"""
from pathlib import Path
import piexif
import pytest
from PIL import Image
from cykooz.heif.errors import HeifError
from cykooz.heif.image import RawHeifImage
from cykooz.heif.pil import register_heif_opener
def test_raw_heif_image_form_path(data_path):
img = RawHeifImage.from_path(data_path / 'test.heic')
assert img.width == 3024
assert img.height == 4032
assert img.mode == 'RGB'
assert len(img.data) == 36578304
assert img.stride == 9072
assert len(img.exif) == 2026
def test_raw_heif_image_form_reader(data_path):
img_path = data_path / 'test.heic'
with img_path.open('rb') as f:
img = RawHeifImage.from_stream(f)
assert img.width == 3024
assert img.height == 4032
assert img.mode == 'RGB'
assert len(img.data) == 36578304
assert img.stride == 9072
assert len(img.exif) == 2026
def test_raw_heif_image_form_reader_errors(data_path):
img_path = data_path / 'test.heic'
with img_path.open('rb') as f:
img = RawHeifImage.from_stream(f)
assert img.width == 3024
assert img.height == 4032
# File is closed
with pytest.raises(HeifError):
_ = img.data
| 24.529412 | 79 | 0.63709 |
2f15770186ad88ae65932854e1cbbe4f54f58e9d | 3,960 | py | Python | ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py | risdenk/ambari | 3809bdc6d5fe367c2c3207812ee42856214db8de | [
"Apache-2.0"
] | null | null | null | ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py | risdenk/ambari | 3809bdc6d5fe367c2c3207812ee42856214db8de | [
"Apache-2.0"
] | 1 | 2018-10-22T17:50:00.000Z | 2018-10-22T17:50:00.000Z | ambari-agent/src/main/python/ambari_agent/StatusCommandsExecutor.py | risdenk/ambari | 3809bdc6d5fe367c2c3207812ee42856214db8de | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
'''
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import os
import signal
import threading
import logging
import multiprocessing
from ambari_agent.PythonReflectiveExecutor import PythonReflectiveExecutor
from ambari_agent.RemoteDebugUtils import bind_debug_signal_handlers
from ambari_agent.ExitHelper import ExitHelper
logger = logging.getLogger(__name__)
| 41.684211 | 195 | 0.769192 |
2f16819a3d5eb873ef8eef277cfd895042d5e5d1 | 5,630 | py | Python | blender/addons/2.8/mifth_tools/mifth_tools_ui.py | feynmanliang/mifthtools | cf99bc5811215a8747c43d84895ba4fa806812b7 | [
"BSD-3-Clause"
] | null | null | null | blender/addons/2.8/mifth_tools/mifth_tools_ui.py | feynmanliang/mifthtools | cf99bc5811215a8747c43d84895ba4fa806812b7 | [
"BSD-3-Clause"
] | null | null | null | blender/addons/2.8/mifth_tools/mifth_tools_ui.py | feynmanliang/mifthtools | cf99bc5811215a8747c43d84895ba4fa806812b7 | [
"BSD-3-Clause"
] | null | null | null | import bpy
from bpy.props import *
from bpy.types import Operator, AddonPreferences
| 37.533333 | 89 | 0.675311 |
2f1729df6cf48161f37c48656ac64fd0cceb2a63 | 11,830 | py | Python | fabfile.py | bbayles/link | 48cf656fac6c31c0aa82152ce68767e469ed5f06 | [
"Apache-2.0"
] | 9 | 2015-03-18T18:23:41.000Z | 2016-11-18T09:16:02.000Z | fabfile.py | bbayles/link | 48cf656fac6c31c0aa82152ce68767e469ed5f06 | [
"Apache-2.0"
] | 3 | 2015-11-07T16:56:51.000Z | 2016-11-22T19:32:09.000Z | fabfile.py | bbayles/link | 48cf656fac6c31c0aa82152ce68767e469ed5f06 | [
"Apache-2.0"
] | 7 | 2015-05-15T18:12:40.000Z | 2017-03-16T18:42:25.000Z | """
Fabfile for deploying and setting up code that looks like the production
environment. it also makes it easy to start up the servers
If you want to run on the localhost you may need to first do::
rm -rf ~/.ssh/known_hosts
"""
from __future__ import with_statement
import os
import re
from fabric.api import local, settings, abort, run , cd, env, lcd, sudo, prompt
from fabric.contrib.console import confirm
from fabric.contrib import files
env.roledefs = {'local':['localhost']}
env.use_ssh_config=True
TAG_REGEX = re.compile('^[0-9]+\.[0-9]+\.[0-9]+')
STABLE_MSG = '**stable**'
LINK_CODE_DIR = os.path.split(os.path.abspath(__file__))[0]
def dir_code_base():
"""
If you are using any localhost then it will use the current directory.
Otherwise you will use the code_dir
"""
if 'localhost' in env.host_string:
return os.getcwd()
return code_dir
def dir_scripts():
"""
The directory where you house all the scripts
"""
return '%s/scripts' % (dir_code_base())
config_dir = '~/.link'
def configure():
"""
Create the base configuration so that you can change it. Might want to
include the configuration in a different repo
"""
if not files.exists(config_dir):
run('mkdir %s' % config_dir)
lnk_config = '%s/link.config' % config_dir
if not files.exists(lnk_config):
run('touch %s' % lnk_config)
def script(script_name, command = 'python', **args):
"""
Will run the script that is in the scripts folder. you can pass in a
dictionory of args and it will pass it through to the script as command line
args in this format
fab -R local script:example.py,arg1=value1,arg2=value2
that will result in running this command
<command> <scripts_directory>/<scriptname> --arg1=value1 --arg2=value2
"""
with cd(dir_scripts()):
parameters = ''
if args:
parameters = ' '.join(['--%s=%s' % (key, value) for key,value in
args.iteritems()])
run("%s %s %s" % (command , script_name, parameters))
def commit(msg=None):
"""
Commit your changes to git
:msg: @todo
:returns: @todo
"""
print '---Commiting---'
print
msg = msg or prompt('Commit message: ')
commit = False
commit = prompt('Confirm commit? [y/n]') == 'y'
if commit:
with settings(warn_only=True):
_commit = not local('git commit -a -m "%s"' % msg).failed
if not _commit:
#nothing was committed
commit = False
print "Nothing to commit"
else:
abort('commit aborted')
print
print '---Done---'
return commit
def check_tag_format(tag):
"""
Checks the tag format and returns the component parts
"""
parsed = tag.split('.')
try:
#allow for at most 2 minor decimals...i mean comeon
major = int(parsed[0])
minor = int(parsed[1])
build = int(parsed[2][0:2])
return (major, minor, build)
except Exception as e:
print e
abort("""Must be of the form <major_version>.<minor>.<maintence>, like
0.0.1. Only integers allowed""")
def write_version(version):
"""
Write out the version python file to the link directory before installing
version needs to be a list or tuple of the form (<major>, <minor>, <build>)
or a string in the format <major>.<minor>.<build> all ints
"""
file_name ='link/__init__.py'
init = open(file_name)
init_read = init.readlines()
init.close()
version_line = [idx for idx, x in enumerate(init_read) if '__version__ = ' in x]
if len(version_line)>1:
raise Exception('version is in there more than once')
if isinstance(version, str):
try:
version_split = map(int, version.split('.'))
except:
raise Exception("Version string must be in the format <major>.<minor>.<build>")
if not isinstance(version_split, (list, tuple)) or len(version_split)!=3:
raise Exception('invalid version %s' % version)
init_read[version_line[0]] = "__version__ = '%s'\n" % version
init = open(file_name, 'w')
try:
init.write(''.join(init_read))
finally:
init.close()
def prompt_for_tag(default_offset=1, stable_only = False):
"""
Prompt for the tag you want to use, offset for the default by input
"""
tags = tag_names(10, stable_only)
print "Showing latest tags for reference"
default = '0.0.1'
if tags:
default = tags[0]
(major, minor, build) = check_tag_format(default)
build = build+default_offset
new_default = '%s.%s.%s' % (major, minor, build)
tag = prompt('Tag name [in format x.xx] (default: %s) ? ' % new_default)
tag = tag or new_default
return tag
def push_to_pypi():
"""
Will push the code to pypi
"""
if prompt('would you like to tag a new version first [y/n]') == 'y':
tag()
local('python setup.py sdist upload')
def prompt_commit():
"""
prompts if you would like to commit
"""
local('git status')
print
print
_commit = prompt('Do you want to commit? [y/n]') == 'y'
if _commit:
msg = prompt('Commit message: ')
return commit(msg)
def tag(mark_stable=False):
"""
Tag a release, will prompt you for the tag version. You can mark it as
stable here as well
"""
tag = prompt_for_tag()
print "writing this tag version to version.py before commiting"
write_version(tag)
print
_commit = prompt_commit()
print
if not _commit and not tag:
print
print "Nothing commited, using default tag %s" % default
print
tag = default
else:
msg = ''
if mark_stable:
msg = STABLE_MSG + ' '
msg += prompt("enter msg for tag: ")
local('git tag %(ref)s -m "%(msg)s"' % { 'ref': tag, 'msg':msg})
local('git push --tags')
return tag
def merge(branch=None, merge_to = 'master'):
"""
Merge your changes and delete the old branch
"""
if not branch:
print "no branch specified, using current"
branch = current_branch()
if prompt('confirm merge with of branch %s to %s [y/N]' % (branch, merge_to)) == 'y':
prompt_commit()
local('git checkout %s ' % merge_to)
local('git merge %s ' % branch)
if prompt('delete the old branch locally and remotely? [y/N]') == 'y':
local('git branch -d %s' % branch)
local('git push origin :%s' % branch)
else:
print "leaving branch where it is"
if prompt('push results [y/N]' ) == 'y':
local('git push')
def tag_deploy(mark_stable=False):
"""
Asks you to tag this release and Figures out what branch you are on.
It then calls the deploy function
"""
local('git fetch --tags')
branch = local('git branch | grep "^*" | cut -d" " -f2', capture=True)
_tag = tag(mark_stable=mark_stable)
deploy(_tag, branch)
def retag(tag, msg):
"""
Retag a tag with a new message
"""
local('git tag %s %s -f -m "%s"' % (tag, tag, msg))
local('git push --tags')
def mark_stable(tag, msg = None):
"""
Mark a previous tag as stable
"""
retag(tag, '%s %s' % (STABLE_MSG, msg) )
def deploy(tag=None, branch=None, stable_only=False):
"""
This is only for deployment on a dev box where everything can be owned by
this user. This is NOT for production deployment. Put's the code in
code_dir
"""
if not tag:
tag = prompt_for_tag(0, stable_only = stable_only)
configure()
setup_environment()
#check out all the code in the right place
with cd(code_dir):
# i **THINK** you have to have the branch checked out before you can
# checkout the tag
if branch:
#then you haven't even checkout this branch
branches = run('git branch')
if branch not in branches:
run('git checkout -b %s' % branch)
_current_branch = current_branch()
if "* %s" % branch != _current_branch:
run('git checkout %s' % branch)
#pull the latest
run('git pull origin %s' % branch)
else:
run("git pull origin master")
#check out a specific tag
if tag:
run("git fetch --tags")
run("git checkout %s" % tag)
#hacky
if env.user == 'root':
#make sure everything is still owned by the deployer
run('chown -R %s %s' % (deploy_user, code_dir))
###
# How to setup a fresh box. You probably have to run this as root for it to
# work
###
def install_easy_install():
"""
Installs setup tool, this should also go into an RPM
"""
run('wget http://pypi.python.org/packages/2.7/s/setuptools/setuptools-0.6c11-py2.7.egg#md5=fe1f997bc722265116870bc7919059ea')
run('sh setuptools-0.6c11-py2.7.egg')
def install_python():
"""
Installs python, I should be able to create an RPM eventually
"""
run('wget http://python.org/ftp/python/2.7.2/Python-2.7.2.tgz')
run('tar -xvf Python-2.7.2.tgz')
with cd('Python-2.7.2'):
run('./configure')
run('make')
run('make install')
###
# This isn't reall necessary but i'll keep it for now
###
def install_python_dependancies():
"""
Easy install all the packages we need
"""
run('easy_install requests')
run('easy_install numpy')
run('easy_install pandas')
run('easy_install happybase')
run('easy_install flask')
run('easy_install ipython')
run('easy_install gunicorn')
run('easy_install link')
run('easy_install pymongo')
run('easy_install mysql-python')
run('easy_install docutils')
def install_box_libraries():
"""
Installs the libs you need like readlines and libsqlite. This will only
run on a ubuntu machine with apt-get
"""
with settings(warn_only=True):
has_apt = run('which apt-get')
if has_apt:
run('apt-get install make')
run('apt-get install libsqlite3-dev')
run('apt-get install libreadline6 libreadline6-dev')
run('apt-get install libmysqlclient-dev')
else:
print "this is not an ubuntu system...skipping"
def setup_box():
"""
Will install python and all libs needed to set up this box to run the
examjam code. Eventually this needs to be more RPM based
"""
#place_pub_key()
install_box_libraries()
install_python()
install_easy_install()
install_python_dependancies()
| 28.995098 | 129 | 0.608876 |
2f17c5de8625cd4bead31cfebf12c8291e262c52 | 183 | py | Python | jails/routing.py | himrock922/jaisting | a1a53371043c05f0bb82fb7e2e3e16aecb1eba42 | [
"Apache-2.0"
] | 9 | 2019-03-23T08:38:58.000Z | 2021-01-27T05:54:32.000Z | jails/routing.py | himrock922/jaisting | a1a53371043c05f0bb82fb7e2e3e16aecb1eba42 | [
"Apache-2.0"
] | 16 | 2019-03-23T07:35:01.000Z | 2022-01-22T04:23:46.000Z | jails/routing.py | himrock922/jaisting | a1a53371043c05f0bb82fb7e2e3e16aecb1eba42 | [
"Apache-2.0"
] | 1 | 2019-03-24T13:17:18.000Z | 2019-03-24T13:17:18.000Z | from channels.routing import ProtocolTypeRouter
from django.urls import re_path
from . import consumers
# Channels URL routes for WebSocket connections: a single endpoint that
# hands the connection to the VNC consumer.
# NOTE(review): the consumer is passed as a bare class (channels 2.x
# style); channels 3+ would require VNCConsumer.as_asgi() -- confirm the
# installed channels version.
websocket_urlpatterns = [
    re_path(r'/websocket', consumers.VNCConsumer)
]
| 22.875 | 49 | 0.803279 |
2f190acf1519186091c3bd6551e361c43ae96fd6 | 515 | py | Python | layers/poky/meta/lib/oeqa/runtime/case.py | dtischler/px30-test | 55dce0b7aff1c4a7dea3ac94f94cc9c67fba7c9f | [
"Apache-2.0"
] | 53 | 2018-02-28T08:51:32.000Z | 2022-02-28T06:49:23.000Z | layers/poky/meta/lib/oeqa/runtime/case.py | dtischler/px30-test | 55dce0b7aff1c4a7dea3ac94f94cc9c67fba7c9f | [
"Apache-2.0"
] | 27 | 2018-01-25T00:26:53.000Z | 2020-08-09T05:20:04.000Z | layers/poky/meta/lib/oeqa/runtime/case.py | dtischler/px30-test | 55dce0b7aff1c4a7dea3ac94f94cc9c67fba7c9f | [
"Apache-2.0"
] | 51 | 2018-02-21T04:46:08.000Z | 2022-03-02T04:20:41.000Z | # Copyright (C) 2016 Intel Corporation
# Released under the MIT license (see COPYING.MIT)
from oeqa.core.case import OETestCase
from oeqa.utils.package_manager import install_package, uninstall_package
| 28.611111 | 73 | 0.735922 |
2f194f4c6d0e43f1d9af761e30aabf62de1d5d85 | 393 | py | Python | tests/analysis/test_general.py | trumanw/ScaffoldGraph | a594e5c5effe6c5e45c0061a235ccbeb64e416f9 | [
"MIT"
] | 121 | 2019-12-12T15:30:16.000Z | 2022-02-28T02:00:54.000Z | tests/analysis/test_general.py | trumanw/ScaffoldGraph | a594e5c5effe6c5e45c0061a235ccbeb64e416f9 | [
"MIT"
] | 8 | 2020-04-04T15:37:26.000Z | 2021-11-17T07:30:31.000Z | tests/analysis/test_general.py | trumanw/ScaffoldGraph | a594e5c5effe6c5e45c0061a235ccbeb64e416f9 | [
"MIT"
] | 28 | 2019-12-16T11:58:53.000Z | 2021-11-19T09:57:46.000Z | """
scaffoldgraph tests.analysis.test_general
"""
from scaffoldgraph.analysis import get_singleton_scaffolds, get_virtual_scaffolds
from ..test_network import long_test_network
| 23.117647 | 81 | 0.78626 |
2f1989e325bb85e0738bbeae4175fa2a163031d0 | 1,750 | py | Python | Problem 001-150 Python/pb035.py | Adamssss/projectEuler | 25881b1bd82876e81197756f62ab5b0d73e3e6c8 | [
"MIT"
] | 2 | 2015-02-11T05:47:42.000Z | 2015-02-11T05:47:51.000Z | Problem 001-150 Python/pb035.py | Adamssss/projectEuler | 25881b1bd82876e81197756f62ab5b0d73e3e6c8 | [
"MIT"
] | 1 | 2015-04-13T06:36:21.000Z | 2015-04-13T06:36:21.000Z | Problem 001-150 Python/pb035.py | Adamssss/projectEuler | 25881b1bd82876e81197756f62ab5b0d73e3e6c8 | [
"MIT"
] | null | null | null | import math
import time
t1 = time.time()
N = 1000000
n = (N+1)//2
p = [True]*(n)
i = 1
prime = [2]
while i < n:
if p[i]:
t = 2*i+1
prime.append(t)
j = i
while j < n:
p[j] = False
j += t
i += 1
# define a binary search
target = prime[:]
count = 0
while len(target) > 0:
#print(target)
#print (count)
test = target[0]
dig = math.floor(math.log10(test))+1
target.pop(0)
if dig == 1:
count += 1
continue
if dig > 1:
i = 1
counted = 0
tl = True
while i < dig:
test = test//10 + (test%10)*math.pow(10,dig-1)
if isPrime(test):
i += 1
ind = isInList(test,target)
if ind >= 0:
target.pop(ind)
else:
counted += 1
else:
tl = False
break
if tl:
count += dig - counted
print (count)
print("time:",time.time()-t1)
| 18.617021 | 58 | 0.430857 |
2f19e1c9987607e703c57f23deb45035eb248b71 | 87 | py | Python | izone/apps/secret/apps.py | shenjl/vmatrix | 8f510d04005aa707cb6b296825f459f852cb59f6 | [
"MIT"
] | null | null | null | izone/apps/secret/apps.py | shenjl/vmatrix | 8f510d04005aa707cb6b296825f459f852cb59f6 | [
"MIT"
] | 2 | 2020-02-11T23:34:28.000Z | 2020-06-05T17:33:09.000Z | izone/apps/secret/apps.py | selonsy/vmatrix | 8f510d04005aa707cb6b296825f459f852cb59f6 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
| 14.5 | 33 | 0.747126 |
2f1a5c2760e9a1b86d6eb2f562c21e3dbc87be05 | 2,190 | py | Python | BAP/adapters.py | EleutherAGI/summarisation | d432873e1ba171f47371b8b0df7235478b52ca99 | [
"CC-BY-4.0"
] | 11 | 2021-05-12T14:11:58.000Z | 2022-01-25T04:23:38.000Z | BAP/adapters.py | EleutherAGI/summarisation | d432873e1ba171f47371b8b0df7235478b52ca99 | [
"CC-BY-4.0"
] | 3 | 2021-05-13T11:37:35.000Z | 2021-05-13T11:50:15.000Z | BAP/adapters.py | EleutherAGI/summarisation | d432873e1ba171f47371b8b0df7235478b52ca99 | [
"CC-BY-4.0"
] | null | null | null | import torch
import torch.nn as nn
from collections import OrderedDict
### GPT NEO VERSION ######
'''
# couldn't get it to work with class inheritance
def add_adapters(model, reduction_factor):
n_layers = len(model.h)
hidden_size = model.config.hidden_size
for n in range(n_layers):
model.h[n].mlp = nn.Sequential(OrderedDict([('MLP', model.h[n].mlp),
('Adapter', AdapterLayer(hidden_size, reduction_factor))]))
return model
'''
# couldn't get it to work with class inheritance
| 39.818182 | 111 | 0.594977 |
2f1b669092b8b167d53d53cce79bec39a591e1c1 | 3,934 | py | Python | tests/test_PrependError.py | hutoTUM/macke-opt-llvm | 95830cb4e1416a6d1fb538f2b91d1c4720d4bde7 | [
"Apache-2.0"
] | 4 | 2018-05-11T08:33:46.000Z | 2019-12-16T01:49:37.000Z | tests/test_PrependError.py | aheroine/use-llvm-opt | 407102740f563f57a7abb952e198f6a65800deaa | [
"Apache-2.0"
] | null | null | null | tests/test_PrependError.py | aheroine/use-llvm-opt | 407102740f563f57a7abb952e198f6a65800deaa | [
"Apache-2.0"
] | null | null | null | import unittest
import os
import re
import subprocess
| 36.766355 | 73 | 0.576004 |
2f1da8ae305ab06e7ec0677f650d3ae476d39207 | 1,851 | py | Python | water_modelling/hydrus/desktop/hydrus_desktop_deployer.py | Water-Modelling-Agh/Hydrus-Modflow-Syngery-Engine | 4b28f75fb74647d6453385a893149a48f797eeed | [
"MIT"
] | null | null | null | water_modelling/hydrus/desktop/hydrus_desktop_deployer.py | Water-Modelling-Agh/Hydrus-Modflow-Syngery-Engine | 4b28f75fb74647d6453385a893149a48f797eeed | [
"MIT"
] | null | null | null | water_modelling/hydrus/desktop/hydrus_desktop_deployer.py | Water-Modelling-Agh/Hydrus-Modflow-Syngery-Engine | 4b28f75fb74647d6453385a893149a48f797eeed | [
"MIT"
] | null | null | null | import os
import subprocess
from typing import Optional
from hydrus import hydrus_log_analyzer
from hydrus.hydrus_deployer_interface import IHydrusDeployer
from simulation.simulation_error import SimulationError
from utils import path_formatter
| 41.133333 | 98 | 0.703944 |
2f1dfd7483d1c7356a889232b88033380a6fbee8 | 3,600 | py | Python | src/openprocurement/framework/electroniccatalogue/views/submission.py | ProzorroUKR/openprocurement.api | 2855a99aa8738fb832ee0dbad4e9590bd3643511 | [
"Apache-2.0"
] | 10 | 2020-02-18T01:56:21.000Z | 2022-03-28T00:32:57.000Z | src/openprocurement/framework/electroniccatalogue/views/submission.py | quintagroup/openprocurement.api | 2855a99aa8738fb832ee0dbad4e9590bd3643511 | [
"Apache-2.0"
] | 26 | 2018-07-16T09:30:44.000Z | 2021-02-02T17:51:30.000Z | src/openprocurement/framework/electroniccatalogue/views/submission.py | ProzorroUKR/openprocurement.api | 2855a99aa8738fb832ee0dbad4e9590bd3643511 | [
"Apache-2.0"
] | 15 | 2019-08-08T10:50:47.000Z | 2022-02-05T14:13:36.000Z | from openprocurement.api.utils import APIResource, json_view, context_unpack, get_now, generate_id
from openprocurement.framework.core.utils import (
submissionsresource,
apply_patch,
save_qualification,
)
from openprocurement.framework.core.validation import (
validate_patch_submission_data,
validate_operation_submission_in_not_allowed_period,
validate_submission_status,
validate_update_submission_in_not_allowed_status,
validate_activate_submission,
validate_action_in_not_allowed_framework_status,
)
from openprocurement.framework.electroniccatalogue.models import Qualification
| 36.734694 | 102 | 0.656111 |
2f222448d0c305c6158a8a8cb410ef32dcbf5429 | 7,090 | py | Python | util.py | gmshashank/pytorch_yolo | 9736006639acba9743b4e3ff56285668357097f9 | [
"MIT"
] | null | null | null | util.py | gmshashank/pytorch_yolo | 9736006639acba9743b4e3ff56285668357097f9 | [
"MIT"
] | null | null | null | util.py | gmshashank/pytorch_yolo | 9736006639acba9743b4e3ff56285668357097f9 | [
"MIT"
] | null | null | null | from __future__ import division
from torch.autograd import Variable
import cv2
import numpy as np
import torch
| 34.754902 | 87 | 0.572779 |
2f2228d6057ad9c4100fbf0aed98528ab280f726 | 743 | py | Python | 922.py | BLUECARVIN/LeetCode | 0d085ed2dbee47c57d22ac368872161076369ff9 | [
"MIT"
] | null | null | null | 922.py | BLUECARVIN/LeetCode | 0d085ed2dbee47c57d22ac368872161076369ff9 | [
"MIT"
] | null | null | null | 922.py | BLUECARVIN/LeetCode | 0d085ed2dbee47c57d22ac368872161076369ff9 | [
"MIT"
] | null | null | null | # ---------- 320ms, 15.9MB ---------- #
# ---------- 320ms, 16.1MB ---------- # | 28.576923 | 61 | 0.414536 |
2f224c8f917dc2d903a60f297bdfff121e03b7dc | 1,190 | py | Python | mainConsumer.py | cmoshe390/pythonProj | 7123255abbb53e4330c9548be16dd9e237f8a51d | [
"Unlicense",
"MIT"
] | null | null | null | mainConsumer.py | cmoshe390/pythonProj | 7123255abbb53e4330c9548be16dd9e237f8a51d | [
"Unlicense",
"MIT"
] | null | null | null | mainConsumer.py | cmoshe390/pythonProj | 7123255abbb53e4330c9548be16dd9e237f8a51d | [
"Unlicense",
"MIT"
] | null | null | null | from rabbitConsumer import *
from socketConsumer import SocketConsumer
from dlx import *
import threading
import sys
if __name__ == '__main__':
    # Validate the CLI argument up front: the original crashed with a
    # NameError inside the loop ('consumer' never assigned) when the
    # argument was missing or unrecognised.
    if len(sys.argv) < 2 or sys.argv[1] not in ('rabbit', 'socket'):
        print("the parameter in args must be 'rabbit' or 'socket'!")
        sys.exit(1)
    work_with = sys.argv[1]
    # One routing key per consumer; consumer j binds r_k[j - 1].
    r_k = ['*.jpg', '*.jpeg', '#']
    threads = []
    # Dead-letter-exchange handler runs on its own thread alongside the
    # three consumers.
    dlx = ReconnectingDlx()
    threads.append(threading.Thread(target=dlx.run))
    for j in range(1, 4):
        if work_with == 'rabbit':
            consumer = RabbitReconnectingConsumer(_id_consumer=j, _exchange='exchange1',
                                                  _queue=f'queue{j}', _routing_key=r_k[j - 1], _exchange_type='topic',
                                                  _producer_to_dlx=dlx)
        else:
            # work_with == 'socket' (validated above).
            consumer = SocketConsumer(_id_consumer=j)
        threads.append(threading.Thread(target=consumer.run))
    for thread in threads:
        thread.start()
| 34 | 118 | 0.561345 |
2f237c48f402b5312560d0ad14f693b93cf182f6 | 1,797 | py | Python | backend/flask-api/migrations/versions/6fdbb9233bd6_.py | lucasbibianot/inova-cnj-time16 | e621d7027bd462d348e233ffd6ed88648c53704b | [
"Apache-2.0"
] | null | null | null | backend/flask-api/migrations/versions/6fdbb9233bd6_.py | lucasbibianot/inova-cnj-time16 | e621d7027bd462d348e233ffd6ed88648c53704b | [
"Apache-2.0"
] | null | null | null | backend/flask-api/migrations/versions/6fdbb9233bd6_.py | lucasbibianot/inova-cnj-time16 | e621d7027bd462d348e233ffd6ed88648c53704b | [
"Apache-2.0"
] | 2 | 2020-10-19T22:03:31.000Z | 2020-11-29T21:22:33.000Z | """Mapeamento das tabelas para persistir os processos datajud
Revision ID: 6fdbb9233bd6
Revises: 8d2eb6149b1d
Create Date: 2020-10-18 09:22:06.650559
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '6fdbb9233bd6'
down_revision = '8d2eb6149b1d'
branch_labels = None
depends_on = None
| 36.673469 | 76 | 0.709516 |
2f239d716de5c5b3e73637e42e5427fd0197839a | 1,991 | py | Python | analyses/quantifications/scripts/2019_11_12_CC414022_quantifications.py | brendano257/Zugspitze-Schneefernerhaus | 64bb86ece2eec147f2a7fb412f87ff2313388753 | [
"MIT"
] | null | null | null | analyses/quantifications/scripts/2019_11_12_CC414022_quantifications.py | brendano257/Zugspitze-Schneefernerhaus | 64bb86ece2eec147f2a7fb412f87ff2313388753 | [
"MIT"
] | null | null | null | analyses/quantifications/scripts/2019_11_12_CC414022_quantifications.py | brendano257/Zugspitze-Schneefernerhaus | 64bb86ece2eec147f2a7fb412f87ff2313388753 | [
"MIT"
] | null | null | null | """
A set of CC412022, CC416168 were run back to back without blanks on 2019-11-12.
Rough quantification is done by the below.
"""
__package__ = 'Z'
from datetime import datetime
from settings import CORE_DIR, DB_NAME
from IO.db import connect_to_db, GcRun, Integration, Standard, SampleQuant
from processing import blank_subtract
from reporting import compile_quant_report
# Rough quantification of the 2019-11-12 back-to-back CC412022/CC416168
# runs: pair each sample run with its standard run, blank-subtract both,
# and quantify against the cc416168 working standard.
engine, session = connect_to_db(DB_NAME, CORE_DIR)
standard_to_quantify_with = session.query(Standard).filter(Standard.name == 'cc416168').one_or_none()
# get standard cert values for the quantifier
certified_values_of_sample = (session.query(Standard)
                              .filter(Standard.name == 'cc412022_noaa_provided')
                              .one().quantifications)
# get standard cert values for the sample being quantified
vocs = session.query(Standard).filter(Standard.name == 'vocs').one_or_none()
vocs = [q.name for q in vocs.quantifications]
# All CC412022 sample runs from 2019-11-12, in chronological order; the
# '___' in the LIKE pattern matches the three-digit run suffix.
samples = (session.query(GcRun).join(Integration, Integration.run_id == GcRun.id)
           .filter(GcRun.date > datetime(2019, 11, 12), GcRun.date < datetime(2019, 11, 13))
           .filter(Integration.filename.like('%CC412022___.D'))
           .order_by(GcRun.date)
           .all())
# Matching CC416168 standard runs from the same day, same ordering.
standards = (session.query(GcRun).join(Integration, Integration.run_id == GcRun.id)
             .filter(GcRun.date > datetime(2019, 11, 12), GcRun.date < datetime(2019, 11, 13))
             .filter(Integration.filename.like('%CC416168___.D'))
             .order_by(GcRun.date)
             .all())
quants = []
# Pair runs positionally; no blanks were run, hence force_no_blank=True.
# NOTE(review): zip silently truncates if the sample/standard counts
# differ -- confirm both queries return the same number of runs.
for sample, standard in zip(samples, standards):
    blank_subtract(sample, vocs, session, blank=None, force_no_blank=True)
    blank_subtract(standard, vocs, session, blank=None, force_no_blank=True)
    quant = SampleQuant(sample, standard, None, standard_to_quantify_with)
    quant.quantify()
    quants.append(quant)
compile_quant_report(quants, 'CC412022', 'CC416168', certified_values_of_sample, date=datetime(2019, 11, 12))
| 40.632653 | 109 | 0.70668 |
2f25439acb972903c75d41093b0f43be910845ab | 310 | py | Python | main.py | mesmacosta/datacatalog-fileset-enricher | 0792632fc181b13696f89ef3335da4e2ce1dca4a | [
"MIT"
] | 3 | 2020-04-01T15:28:25.000Z | 2020-06-06T18:30:34.000Z | main.py | mesmacosta/datacatalog-fileset-enricher | 0792632fc181b13696f89ef3335da4e2ce1dca4a | [
"MIT"
] | null | null | null | main.py | mesmacosta/datacatalog-fileset-enricher | 0792632fc181b13696f89ef3335da4e2ce1dca4a | [
"MIT"
] | 1 | 2020-07-09T06:05:24.000Z | 2020-07-09T06:05:24.000Z | import logging
import sys
from datacatalog_fileset_enricher import datacatalog_fileset_enricher_cli
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    # sys.argv[0] is the program name; only the real arguments are passed
    # on.  The original guarded on len(argv) > 0, but argv[1:] is already
    # [] when argv is empty, so the guard was redundant.
    datacatalog_fileset_enricher_cli.\
        DatacatalogFilesetEnricherCLI.run(sys.argv[1:])
| 31 | 78 | 0.780645 |
2f2590662675a6fa11503eafa56e671b78fe7a23 | 10,473 | py | Python | srcds/events/csgo.py | w4rum/pysrcds | a9dbc198c6f087757e40d9af14ca8de9a39cef74 | [
"MIT"
] | 17 | 2015-06-26T08:49:07.000Z | 2021-09-11T09:02:40.000Z | srcds/events/csgo.py | w4rum/pysrcds | a9dbc198c6f087757e40d9af14ca8de9a39cef74 | [
"MIT"
] | 5 | 2015-04-27T13:44:58.000Z | 2022-02-07T19:00:42.000Z | srcds/events/csgo.py | w4rum/pysrcds | a9dbc198c6f087757e40d9af14ca8de9a39cef74 | [
"MIT"
] | 12 | 2015-02-13T15:34:47.000Z | 2021-09-11T09:02:30.000Z | # Copyright (C) 2013 Peter Rowlands
"""csgo events module
Contains event classes for CS:S and CS:GO events
"""
from __future__ import absolute_import, unicode_literals
from future.utils import python_2_unicode_compatible
from .generic import (BaseEvent, PlayerEvent, PlayerTargetEvent, KillEvent,
AttackEvent)
CSGO_EVENTS = [
SwitchTeamEvent,
BuyEvent,
ThrowEvent,
CsgoAssistEvent,
CsgoKillEvent,
CsgoAttackEvent,
]
| 36.491289 | 83 | 0.531175 |
2f2781811c4aeb325fd30cc295a58030636b2c7d | 695 | py | Python | formacao-python/brasilidades/Telefone.py | hollowrm08/python-alura | eb43be24c7160b38f1598d8da25582bfe04ade29 | [
"MIT"
] | null | null | null | formacao-python/brasilidades/Telefone.py | hollowrm08/python-alura | eb43be24c7160b38f1598d8da25582bfe04ade29 | [
"MIT"
] | null | null | null | formacao-python/brasilidades/Telefone.py | hollowrm08/python-alura | eb43be24c7160b38f1598d8da25582bfe04ade29 | [
"MIT"
] | null | null | null | import re
| 24.821429 | 100 | 0.579856 |
2f27bd70a0bac448a69a312f5b0f06826fe66bdd | 670 | py | Python | Listing_19-1.py | PrinceChou/Play-Python-with-Alisa | 808ab2744a99c548de4633b5707af27112bcdccf | [
"Apache-2.0"
] | null | null | null | Listing_19-1.py | PrinceChou/Play-Python-with-Alisa | 808ab2744a99c548de4633b5707af27112bcdccf | [
"Apache-2.0"
] | null | null | null | Listing_19-1.py | PrinceChou/Play-Python-with-Alisa | 808ab2744a99c548de4633b5707af27112bcdccf | [
"Apache-2.0"
] | null | null | null | # Listing_19-1.py
# Copyright Warren & Carter Sande, 2013
# Released under MIT license http://www.opensource.org/licenses/mit-license.php
# Version $version ----------------------------
# Trying out sounds in Pygame
import pygame
pygame.init()
# The mixer must be initialized before any Sound object is created.
pygame.mixer.init()
screen = pygame.display.set_mode([640,480])
pygame.time.delay(1000) # Wait a second for the mixer to finish initializing
splat = pygame.mixer.Sound("splat.wav") # Create the Sound object
splat.play() # Play the sound
running = True
# Minimal event loop: keep the window (and the program) alive until the
# user closes the window, then shut pygame down cleanly.
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            running = False
pygame.quit()
| 29.130435 | 81 | 0.649254 |
2f2f6a510aa43446af03b23b36744744444b6c67 | 1,532 | py | Python | docker_emperor/commands/context/set.py | workon-io/docker-emperor | d827bb2806494dcba97920dd83c5934d0a300089 | [
"Apache-2.0"
] | null | null | null | docker_emperor/commands/context/set.py | workon-io/docker-emperor | d827bb2806494dcba97920dd83c5934d0a300089 | [
"Apache-2.0"
] | null | null | null | docker_emperor/commands/context/set.py | workon-io/docker-emperor | d827bb2806494dcba97920dd83c5934d0a300089 | [
"Apache-2.0"
] | null | null | null | import six
import docker_emperor.logger as logger
from docker_emperor.nodes.context import Context
| 39.282051 | 103 | 0.539817 |
2f2f9ccd72b1ada4944e0fb6d3cba3a6b6b3d3fc | 759 | py | Python | bnc/scripts/instance_lock_test.py | dotzhou/geodesy-ausgeoid | 7d4fbcc1d88738de6ab84ccdba362407cbaeb117 | [
"Apache-2.0"
] | null | null | null | bnc/scripts/instance_lock_test.py | dotzhou/geodesy-ausgeoid | 7d4fbcc1d88738de6ab84ccdba362407cbaeb117 | [
"Apache-2.0"
] | null | null | null | bnc/scripts/instance_lock_test.py | dotzhou/geodesy-ausgeoid | 7d4fbcc1d88738de6ab84ccdba362407cbaeb117 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
import time
from instance_lock import InstanceLock
################################################################################
################################################################################
if __name__ == '__main__':
main()
| 19.973684 | 88 | 0.524374 |
2f30a5cc06c93cc21cd8f006b81cb7e3a4339ab4 | 1,194 | py | Python | examples/Sans_Sphere/guiFitSphere.py | DomiDre/modelexp | 1ec25f71e739dac27716f9a8637fa6ab067499b9 | [
"MIT"
] | null | null | null | examples/Sans_Sphere/guiFitSphere.py | DomiDre/modelexp | 1ec25f71e739dac27716f9a8637fa6ab067499b9 | [
"MIT"
] | null | null | null | examples/Sans_Sphere/guiFitSphere.py | DomiDre/modelexp | 1ec25f71e739dac27716f9a8637fa6ab067499b9 | [
"MIT"
] | null | null | null | import modelexp
from modelexp.experiments.sas import Sans
from modelexp.models.sas import Sphere
from modelexp.data import XyeData
from modelexp.fit import LevenbergMarquardt
from modelexp.models.sas import InstrumentalResolution
app = modelexp.App()
# Small-angle neutron scattering experiment fit with a sphere form factor
# plus instrumental resolution smearing.
app.setExperiment(Sans)
dataRef = app.setData(XyeData)
# Two data sets tagged 'sa' and 'la'.
# NOTE(review): presumably short-angle/large-angle detector settings --
# confirm the tag semantics against the modelexp documentation.
dataRef.loadFromFile('./sansSphereData_sa.xye', 'sa')
dataRef.loadFromFile('./sansSphereData_la.xye', 'la')
dataRef.plotData()
modelRef = app.setModel(Sphere, InstrumentalResolution)
# Model parameters: initial value, bounds, and whether the fit varies them
# (the scattering length densities are held fixed).
modelRef.setParam("r", 50.115979438653525, minVal = 0, maxVal = 100, vary = True)
modelRef.setParam("sldSphere", 4.5e-05, minVal = 0, maxVal = 0.00045000000000000004, vary = False)
modelRef.setParam("sldSolvent", 1e-05, minVal = 0, maxVal = 0.0001, vary = False)
modelRef.setParam("sigR", 0.0446, minVal = 0, maxVal = 0.2, vary = True)
modelRef.setParam("i0", 1.0082741570299425, minVal = 0, maxVal = 10, vary = True)
modelRef.setParam("bg", 0.0, minVal = 0, maxVal = 1, vary = False)
modelRef.setParam("dTheta_sa", 0.000174, minVal = 0, maxVal = 0.001, vary = True)
modelRef.setParam("dTheta_la", 0.000765, minVal = 0, maxVal = 0.001, vary = True)
app.setFit(LevenbergMarquardt)
app.show() | 39.8 | 99 | 0.742881 |
2f34112711a7f4d8c6fd98347f5ba592ca3f8d4f | 345 | py | Python | chapter03/demo_3_2_1_7_1.py | NetworkRanger/python-spider-project | f501e331a59608d9a321a0d7254fcbcf81b50ec2 | [
"MIT"
] | 1 | 2019-02-08T03:14:17.000Z | 2019-02-08T03:14:17.000Z | chapter03/demo_3_2_1_7_1.py | NetworkRanger/python-spider-project | f501e331a59608d9a321a0d7254fcbcf81b50ec2 | [
"MIT"
] | null | null | null | chapter03/demo_3_2_1_7_1.py | NetworkRanger/python-spider-project | f501e331a59608d9a321a0d7254fcbcf81b50ec2 | [
"MIT"
] | null | null | null | #!/usr/bin/python2.7
# -*- coding:utf-8 -*-
# Author: NetworkRanger
# Date: 2019/1/9 12:35
"""
ProxyHandler
"""
import urllib2

# Route HTTP traffic through a local proxy before opening the URL.
proxy = urllib2.ProxyHandler({'http': '127.0.0.1:8087'})
# build_opener() takes handler instances as separate positional arguments;
# the original passed a list ([proxy]), which raises TypeError inside
# add_handler() because a list is not a BaseHandler.
opener = urllib2.build_opener(proxy)
urllib2.install_opener(opener)
response = urllib2.urlopen('http://www.zhichu.com/')
print response.read() | 21.5625 | 56 | 0.713043 |
2f34c3e2255c1aaf56cddd4bf264efb8253bf37a | 1,254 | py | Python | scripts/run_metasv_bed2vcf.py | willrockout/metasv | b46f15cbe8a28941661855da6587451c971dc2e3 | [
"BSD-2-Clause"
] | 43 | 2015-01-12T20:58:24.000Z | 2021-11-24T07:30:06.000Z | scripts/run_metasv_bed2vcf.py | willrockout/metasv | b46f15cbe8a28941661855da6587451c971dc2e3 | [
"BSD-2-Clause"
] | 80 | 2015-01-08T00:34:55.000Z | 2022-02-16T08:30:34.000Z | scripts/run_metasv_bed2vcf.py | willrockout/metasv | b46f15cbe8a28941661855da6587451c971dc2e3 | [
"BSD-2-Clause"
] | 25 | 2015-04-30T06:30:28.000Z | 2022-02-22T02:48:20.000Z | #!/usr/bin/env python
import argparse
import logging
from metasv.generate_final_vcf import convert_metasv_bed_to_vcf
if __name__ == "__main__":
FORMAT = '%(levelname)s %(asctime)-15s %(name)-20s %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser(
description="Convert MetaSV final BED to VCF",
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--sample", help="Sample name", required=True)
parser.add_argument("--bed", help="MetaSV final BED", required=True)
parser.add_argument("--vcf", help="Final VCF to output", required=True)
parser.add_argument("--reference", help="Reference FASTA")
parser.add_argument("--work", help="Work directory", default="work")
parser.add_argument("--pass_only", action="store_true",
help="Output only PASS calls")
args = parser.parse_args()
convert_metasv_bed_to_vcf(bedfile=args.bed, vcf_out=args.vcf,
workdir=args.work,
sample=args.sample,
reference=args.reference,
pass_calls=args.pass_only) | 41.8 | 75 | 0.651515 |
2f373ae8b308ab8313e26c9ce9ba782726162914 | 2,273 | py | Python | almanac/pages/abstract_page.py | welchbj/almanac | 91db5921a27f7d089b4ad8463ffb6e1453c5126a | [
"MIT"
] | 4 | 2020-08-04T10:59:10.000Z | 2021-08-23T13:42:03.000Z | almanac/pages/abstract_page.py | welchbj/almanac | 91db5921a27f7d089b4ad8463ffb6e1453c5126a | [
"MIT"
] | null | null | null | almanac/pages/abstract_page.py | welchbj/almanac | 91db5921a27f7d089b4ad8463ffb6e1453c5126a | [
"MIT"
] | 2 | 2021-07-20T04:49:22.000Z | 2021-08-23T13:42:23.000Z | from __future__ import annotations
from abc import ABC, abstractmethod, abstractproperty
from typing import Any, Optional, Set
from .page_path import PagePath, PagePathLike
| 21.647619 | 83 | 0.57985 |
2f3740dbe908121e76457672fb1354e03d0a203a | 3,022 | py | Python | examples/VTK/PerfTests/scene-export-time.py | ajpmaclean/trame | 48ab4e80c6050a2bea8b04ef32fd7d8b2cc7f787 | [
"BSD-3-Clause"
] | null | null | null | examples/VTK/PerfTests/scene-export-time.py | ajpmaclean/trame | 48ab4e80c6050a2bea8b04ef32fd7d8b2cc7f787 | [
"BSD-3-Clause"
] | null | null | null | examples/VTK/PerfTests/scene-export-time.py | ajpmaclean/trame | 48ab4e80c6050a2bea8b04ef32fd7d8b2cc7f787 | [
"BSD-3-Clause"
] | null | null | null | from trame import state
from trame.html import vuetify, vtk
from trame.layouts import SinglePage
from vtkmodules.vtkImagingCore import vtkRTAnalyticSource
from vtkmodules.vtkFiltersGeometry import vtkGeometryFilter
from vtkmodules.vtkRenderingCore import (
vtkRenderer,
vtkRenderWindow,
vtkRenderWindowInteractor,
vtkDataSetMapper,
vtkActor,
)
# VTK factory initialization
from vtkmodules.vtkInteractionStyle import vtkInteractorStyleSwitch # noqa
import vtkmodules.vtkRenderingOpenGL2 # noqa
# -----------------------------------------------------------------------------
# VTK pipeline
# -----------------------------------------------------------------------------
# Default value for the 'resolution' slider exposed in the toolbar.
DEFAULT_RESOLUTION = 10
renderer = vtkRenderer()
renderWindow = vtkRenderWindow()
renderWindow.AddRenderer(renderer)
renderWindowInteractor = vtkRenderWindowInteractor()
renderWindowInteractor.SetRenderWindow(renderWindow)
renderWindowInteractor.GetInteractorStyle().SetCurrentStyleToTrackballCamera()
# Pipeline: synthetic image source -> geometry filter -> mapper -> actor.
# NOTE(review): 'source' and 'filter' shadow Python built-ins; renaming
# would touch several lines, so they are left as-is here.
source = vtkRTAnalyticSource()
filter = vtkGeometryFilter()
filter.SetInputConnection(source.GetOutputPort())
mapper = vtkDataSetMapper()
actor = vtkActor()
mapper.SetInputConnection(filter.GetOutputPort())
actor.SetMapper(mapper)
renderer.AddActor(actor)
renderer.ResetCamera()
renderWindow.Render()
# Execute the pipeline once so the scalar range can be read off the output
# and applied to the mapper's color mapping.
filter.Update()
_min, _max = filter.GetOutput().GetPointData().GetScalars().GetRange()
mapper.SetScalarRange(_min, _max)
# Show white cell edges on top of the surface.
actor.GetProperty().SetEdgeVisibility(1)
actor.GetProperty().SetEdgeColor(1, 1, 1)
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# GUI
# -----------------------------------------------------------------------------
# html_view = vtk.VtkLocalView(renderWindow)
# html_view = vtk.VtkRemoteView(renderWindow)
html_view = vtk.VtkRemoteLocalView(renderWindow, mode="local")
layout = SinglePage("Geometry export", on_ready=html_view.update)
layout.logo.click = html_view.reset_camera
layout.title.set_text("Geometry export")
with layout.toolbar as tb:
    vuetify.VSpacer()
    # Shows the live value of the shared 'resolution' state variable.
    tb.add_child("{{ resolution }}")
    # NOTE(review): the slider writes the shared 'resolution' state, but
    # nothing visible in this file reads it back into the VTK pipeline --
    # the slider appears to have no effect on the rendered scene; confirm.
    vuetify.VSlider(
        v_model=("resolution", DEFAULT_RESOLUTION),
        min=10,
        max=100,
        step=1,
        hide_details=True,
        dense=True,
        style="max-width: 300px",
    )
    vuetify.VBtn("Update", click=html_view.update)
with layout.content:
    # The render view fills the whole content area.
    vuetify.VContainer(
        fluid=True,
        classes="pa-0 fill-height",
        children=[html_view],
    )
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    layout.start()
| 29.627451 | 81 | 0.617141 |
2f37d9b321c1b357a652919715d0a963e96430ee | 601 | py | Python | server/toolz_swap_app/migrations/0021_auto_20211217_2310.py | minerva-university/cs162-toolz-swap-service | d514d9b04118f26479cba71497c12dfa824c7c42 | [
"MIT"
] | null | null | null | server/toolz_swap_app/migrations/0021_auto_20211217_2310.py | minerva-university/cs162-toolz-swap-service | d514d9b04118f26479cba71497c12dfa824c7c42 | [
"MIT"
] | null | null | null | server/toolz_swap_app/migrations/0021_auto_20211217_2310.py | minerva-university/cs162-toolz-swap-service | d514d9b04118f26479cba71497c12dfa824c7c42 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.9 on 2021-12-17 22:10
from django.db import migrations, models
| 25.041667 | 87 | 0.599002 |
2f382211712726ce3bebece3524ea17b01c0cd4f | 2,540 | py | Python | saleor/dashboard/store/special_page/views.py | Chaoslecion123/Diver | 8c5c493701422eada49cbf95b0b0add08f1ea561 | [
"BSD-3-Clause"
] | null | null | null | saleor/dashboard/store/special_page/views.py | Chaoslecion123/Diver | 8c5c493701422eada49cbf95b0b0add08f1ea561 | [
"BSD-3-Clause"
] | null | null | null | saleor/dashboard/store/special_page/views.py | Chaoslecion123/Diver | 8c5c493701422eada49cbf95b0b0add08f1ea561 | [
"BSD-3-Clause"
] | null | null | null | from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.shortcuts import get_object_or_404, redirect
from django.template.response import TemplateResponse
from django.utils.translation import pgettext_lazy
from ....store.models import SpecialPage
from ...views import staff_member_required
from .forms import SpecialPageForm
| 40.31746 | 77 | 0.715748 |
2f38dea668d3c57cb5f9fffdb2e8a23821880993 | 96 | py | Python | pacote-download/Ex24.py | nkonai/Curso-em-video-Python | c05a60b3daa7d448e1e7f0d4d23f62df5d2c8df2 | [
"MIT"
] | null | null | null | pacote-download/Ex24.py | nkonai/Curso-em-video-Python | c05a60b3daa7d448e1e7f0d4d23f62df5d2c8df2 | [
"MIT"
] | null | null | null | pacote-download/Ex24.py | nkonai/Curso-em-video-Python | c05a60b3daa7d448e1e7f0d4d23f62df5d2c8df2 | [
"MIT"
] | null | null | null | cidade = str(input('Qual cidade voce mora?'))
print(cidade.strip().lower().startswith('santo'))
| 32 | 49 | 0.708333 |
2f3a828848ad3ed2bdecff21215f6a9e0ea54453 | 8,417 | py | Python | src/salt_finder_charts/standard_finder_charts.py | saltastroops/salt_finder_charts | f5b0f7a779f7f1c2b8a228ba6ed65a17bd17b4de | [
"MIT"
] | null | null | null | src/salt_finder_charts/standard_finder_charts.py | saltastroops/salt_finder_charts | f5b0f7a779f7f1c2b8a228ba6ed65a17bd17b4de | [
"MIT"
] | null | null | null | src/salt_finder_charts/standard_finder_charts.py | saltastroops/salt_finder_charts | f5b0f7a779f7f1c2b8a228ba6ed65a17bd17b4de | [
"MIT"
] | null | null | null | from datetime import datetime, timedelta
from typing import BinaryIO, Generator, Optional, Tuple
import astropy.units as u
import pytz
from astropy.units import Quantity
from salt_finder_charts.image import Survey, SurveyImageService
from salt_finder_charts.mode import (
Mode,
ModeDetails,
ImagingModeDetails,
LongslitModeDetails,
SlotModeDetails,
MOSModeDetails,
)
from salt_finder_charts.output import output_pdf, output_png, output_svg, OutputFormat
from salt_finder_charts.util import (
MagnitudeRange,
MOSMask,
julian_day_start,
julian_day_end,
)
from salt_finder_charts import finder_charts
from salt_finder_charts.ephemerides import (
HorizonsEphemerisService,
ConstantEphemerisService,
EphemerisService,
)
TimeInterval = Tuple[datetime, datetime]
def standard_finder_charts(
# arguments which are always required
mode: Mode,
output_format: OutputFormat,
# time interval
start_time: Optional[datetime] = None,
end_time: Optional[datetime] = None,
# ephemerides
ra: Optional[Quantity] = None,
dec: Optional[Quantity] = None,
min_magnitude: Optional[float] = None,
max_magnitude: Optional[float] = None,
bandpass: Optional[str] = None,
horizons_id: Optional[str] = None,
horizons_stepsize: Optional[Quantity] = None,
# image
survey: Survey = Survey.POSS2UKSTU_RED,
# instrument mode details
position_angle: Optional[Quantity] = None,
slitwidth: Optional[Quantity] = None,
mos_mask_rsmt: Optional[BinaryIO] = None,
# miscellaneous
basic_annotations: bool = False,
title: Optional[str] = None,
) -> Generator[BinaryIO, None, None]:
"""
Create standard SALT finder charts.
Some of the parameters are mutually exclusive. For example, it does mot make sense
to specify a slit width if you generate finding charts for imaging mode. In some
cases such combinations will raise an error, but in others some of the parameters
may just be ignored.
If no start time is given, the beginning of the current Julian day is assumed. If no
end time is given, the end of the current Julian day is assumed.
Parameters
----------
mode : Mode
Observation mode (such as imaging or MOS).
basic_annotations : bool
Whether only basic annotations should be added to the finder chart.
output_format : OutputFormat
Output format (such as PDF) to use for the generated finder charts.
start_time : datetime
Start time from which to generate finder charts.
end_time : datetime
End time until which to generate finder charts.
ra : Quantity
Right ascension of the finder chart center.
dec : Quantity
Declination of the finder chart center.
min_magnitude : float
Minimum magnitude of the target.
max_magnitude L: float
Maximum magnitude of the target.
bandpass : str
Bandpass (such as V) for the magnitudes,
horizons_id : str
Identifier for a target in the Horizons database.
horizons_stepsize : Quantity
Time between ephemerides queried from the Horizons service. The default is 5
minutes.
survey : Survey
The image survey from which the finder chart image shall be taken.
position_angle : Quantity
The position angle.
slitwidth : Quantity
The width of the longslit, as an angle.
mos_mask_rsmt : BinaryIO
Input stream containing an RSMT file for a MOS setup.
title : str
Title for the finder chart.
Returns
-------
Generator of BinaryIO
The finder charts as input streams.
"""
# time interval
# get default start and end time if need be
now = datetime.now(pytz.utc)
if not start_time:
start_time = julian_day_start(now)
if not end_time:
end_time = julian_day_end(now)
# ensure there are timezones
if start_time.tzinfo is None:
raise ValueError("The start time must be timezone-aware.")
if end_time.tzinfo is None:
raise ValueError("The end time must be timezone aware.")
# ephemerides
mos_mask: Optional[MOSMask] = None
if mode == Mode.MOS:
if mos_mask_rsmt is None:
raise ValueError(
"A RSMT file must be supplied if a finding chart is generated for MOS mode."
)
if ra or dec or position_angle:
raise ValueError(
"You must not supply a right ascension, declination or position angle in MOS mode, as they are taken from the MOS mask definition."
)
mos_mask = MOSMask(mos_mask_rsmt)
ra = mos_mask.right_ascension
dec = mos_mask.declination
position_angle = mos_mask.position_angle
if horizons_id:
# get ephemerides from Horizons
if ra is not None or dec is not None:
raise ValueError(
"No right ascension or declination must be supplied if a Horizons identifier is supplied."
)
if horizons_stepsize is None:
horizons_stepsize = 5 * u.minute
ephemeris_service: EphemerisService = HorizonsEphemerisService(
object_id=horizons_id,
start_time=start_time - timedelta(days=2),
end_time=end_time + timedelta(days=2),
stepsize=horizons_stepsize,
)
else:
# use ephemerides for a non-sidereal target
if ra is None:
raise ValueError("The right ascension is missing.")
if dec is None:
raise ValueError("The declination is missing.")
if min_magnitude is not None and (max_magnitude is None or bandpass is None):
raise ValueError(
"You must supply a maximum magnitude and bandpass if you supply a minimum magnitude."
)
if max_magnitude is not None and (min_magnitude is None or bandpass is None):
raise ValueError(
"You must supply a minimum magnitude and bandpass if you supply a maximum magnitude."
)
if bandpass is not None and (min_magnitude is None or max_magnitude is None):
raise ValueError(
"You must supply a minimum and maximum magnitude if you supply a bandpass."
)
magnitude_range: Optional[MagnitudeRange] = None
if (
min_magnitude is not None
and max_magnitude is not None
and bandpass is not None
):
magnitude_range = MagnitudeRange(
min_magnitude=min_magnitude,
max_magnitude=max_magnitude,
bandpass=bandpass,
)
ephemeris_service = ConstantEphemerisService(
ra=ra, dec=dec, magnitude_range=magnitude_range
)
# image
image_service = SurveyImageService(survey=survey)
# mode details
if mode is None:
raise ValueError("You must specify an instrument mode.")
if mode == Mode.IMAGING or mode == Mode.HRS:
mode_details: ModeDetails = ImagingModeDetails(position_angle)
elif mode == Mode.SLOT:
mode_details = SlotModeDetails(pa=position_angle)
elif mode == Mode.LONGSLIT:
if slitwidth is None:
raise ValueError(
"A slit width is required if a finding chart is generated for longslit mode."
)
mode_details = LongslitModeDetails(
slitwidth=slitwidth, pa=position_angle, center_ra=ra, center_dec=dec
)
elif mode == Mode.MOS:
if not mos_mask:
raise ValueError("No MOS mask has been supplied.")
mode_details = MOSModeDetails(mos_mask)
else:
raise ValueError(f"Mode unsupported: {mode.value}")
# output
if output_format == OutputFormat.PDF:
output = output_pdf
elif output_format == OutputFormat.PNG:
output = output_png
elif output_format == OutputFormat.SVG:
output = output_svg
else:
raise ValueError(f"Output format unsupported: {output_format.value}")
# generate the finder charts
return finder_charts(
mode_details=mode_details,
start_time=start_time,
end_time=end_time,
ephemeris_service=ephemeris_service,
image_service=image_service,
title=title,
basic_annotations=basic_annotations,
output=output,
)
| 34.63786 | 147 | 0.661756 |
2f3aae6740fa544f6fcbafd5b09e5b47c616d5d2 | 2,449 | py | Python | satstac/landsat/cli.py | developmentseed/sat-stac-landsat | f2263485043a827b4153aecc12f45a3d1363e9e2 | [
"MIT"
] | null | null | null | satstac/landsat/cli.py | developmentseed/sat-stac-landsat | f2263485043a827b4153aecc12f45a3d1363e9e2 | [
"MIT"
] | null | null | null | satstac/landsat/cli.py | developmentseed/sat-stac-landsat | f2263485043a827b4153aecc12f45a3d1363e9e2 | [
"MIT"
] | null | null | null | import argparse
import logging
import sys
from datetime import datetime
import satstac
from satstac import Catalog
import satstac.landsat as landsat
from .version import __version__
# quiet loggers
logging.getLogger('urllib3').propagate = False
logging.getLogger('requests').propagate = False
logger = logging.getLogger(__name__)
if __name__ == "__main__":
cli() | 37.106061 | 120 | 0.694978 |
2f3ae02cd059cdf4b269302e970b02d87301e8cf | 3,005 | py | Python | database.py | pratik-choudhari/squ.ez-url-shortener | ebd13da15501806d0ef30353fe77a9d3d6d1081a | [
"MIT"
] | 5 | 2020-12-20T14:50:31.000Z | 2021-09-20T06:39:18.000Z | database.py | pratik-choudhari/squ.ez-url-shortener | ebd13da15501806d0ef30353fe77a9d3d6d1081a | [
"MIT"
] | null | null | null | database.py | pratik-choudhari/squ.ez-url-shortener | ebd13da15501806d0ef30353fe77a9d3d6d1081a | [
"MIT"
] | 3 | 2020-12-20T18:18:09.000Z | 2021-11-14T09:42:07.000Z | import sqlite3
import random
import string
import re
import sys
# domain name
args = sys.argv
if len(args)==2:
if args[1] == 'localhost':
domain = "localhost:5000/"
else:
domain = "https://squez-url-shortener.herokuapp.com/"
else:
domain = "https://squez-url-shortener.herokuapp.com/"
# URL verification regex
regex = r"""(?i)\b((?:https?://|www\d{0,3}[.]{1}|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?]))"""
# check_same_thread=False to disable thread sync
conn = sqlite3.connect("url.db", check_same_thread=False)
def check_if_exists(id: str, flag: bool):
"""
returns true if record exists
params:
id: data to check in db
flag: True if shortened URL, else False
returns:
True if record exists else False
"""
if flag:
query = f'''SELECT COUNT(*) FROM URLS WHERE ID="{id}";'''
else:
query = f'''SELECT COUNT(*) FROM URLS WHERE ORIGINAL="{id}";'''
db_res = conn.execute(query)
if [i[0] for i in db_res] == [0]:
return False
return True
def insert_data(id: str, og: str, value: int):
"""
Insert data in db
Params:
id: short url(primary key)
og: original url
value: number of visit
returns:
True if successful else False
"""
query = f'''INSERT INTO URLS (ID, ORIGINAL, VISITS) VALUES ("{str(id)}", "{str(og)}", {int(value)});'''
db_res = conn.execute(query)
conn.commit()
if not db_res:
return False
return True
def get_original_url(id: str, flag: bool):
"""
returns record data if exists
params:
id: shortened or original url
flag: True for shortened id else False
returns:
False if data doesn't exist else return data
"""
if flag:
query = f'''SELECT ORIGINAL FROM URLS WHERE ID="{str(id)}";'''
else:
query = f'''SELECT ID FROM URLS WHERE ORIGINAL="{str(id)}";'''
db_res = conn.execute(query)
url = [i[0] for i in db_res]
if url:
return url[0]
return False
def get_valid_combination(url: str)-> str:
"""
finds and returns shortened URL
params:
url: original url
returns:
False if operation failed else return whole shortened link
"""
res = re.findall(regex, url)
url = re.sub(r"^(http://|https://){0,1}(www.|ww.|w.){0,1}", "", url)
data = False
if res:
if not check_if_exists(url, False):
while 1:
shrt = ''.join(random.choice(string.ascii_letters) for _ in range(8))
if not check_if_exists(shrt, True):
if not insert_data(shrt, url, 0):
return False
data = "".join([domain, shrt])
break
else:
shrt = get_original_url(url, False)
data = "".join([domain, shrt])
return data
| 28.084112 | 200 | 0.547088 |
2f3e4585789dca549a8fbdd15c298b8c2bf0a041 | 1,954 | py | Python | ball.py | b3mery/Python-Pong-Game | d0051942412c331a752cbade11815002be8d4d1e | [
"MIT"
] | null | null | null | ball.py | b3mery/Python-Pong-Game | d0051942412c331a752cbade11815002be8d4d1e | [
"MIT"
] | null | null | null | ball.py | b3mery/Python-Pong-Game | d0051942412c331a752cbade11815002be8d4d1e | [
"MIT"
] | null | null | null | from turtle import Turtle
from scoreboard import Scoreboard
WIDTH = 800
HEIGHT = 600
START_SPEED = 0.1 | 34.892857 | 84 | 0.590583 |
2f3f7fbb2e9c92a49ae40445269e03dc87f8856d | 185 | py | Python | tsai/data/basics.py | radi-cho/tsai | 32f24d55ee58df1a14d1e68618f230097a266c77 | [
"Apache-2.0"
] | 1 | 2022-01-02T18:21:27.000Z | 2022-01-02T18:21:27.000Z | tsai/data/basics.py | radi-cho/tsai | 32f24d55ee58df1a14d1e68618f230097a266c77 | [
"Apache-2.0"
] | 31 | 2021-12-01T23:08:51.000Z | 2021-12-29T02:59:49.000Z | tsai/data/basics.py | radi-cho/tsai | 32f24d55ee58df1a14d1e68618f230097a266c77 | [
"Apache-2.0"
] | 1 | 2022-03-13T16:47:04.000Z | 2022-03-13T16:47:04.000Z | from .validation import *
from .preparation import *
from .external import *
from .core import *
from .preprocessing import *
from .transforms import *
from .mixed_augmentation import * | 26.428571 | 33 | 0.778378 |
2f3f9137757f79baedb08f68f1da6c337e1ee99a | 703 | py | Python | push_notifications/migrations/0002_auto_20180408_1513.py | walison17/pulso-api | b9edfc3f6042676dbdb50d7efcdb461a19ea90ed | [
"MIT"
] | null | null | null | push_notifications/migrations/0002_auto_20180408_1513.py | walison17/pulso-api | b9edfc3f6042676dbdb50d7efcdb461a19ea90ed | [
"MIT"
] | null | null | null | push_notifications/migrations/0002_auto_20180408_1513.py | walison17/pulso-api | b9edfc3f6042676dbdb50d7efcdb461a19ea90ed | [
"MIT"
] | null | null | null | # Generated by Django 2.0 on 2018-04-08 15:13
from django.db import migrations, models
| 25.107143 | 99 | 0.5633 |
2f436e86cdf8ffd5b6c159aa475cc3ce92d884bf | 50 | py | Python | app/api/config.py | stdevelopr/Jtray | 287a4be1e26b2dab372323cc0bd8df1f8689fd97 | [
"MIT"
] | null | null | null | app/api/config.py | stdevelopr/Jtray | 287a4be1e26b2dab372323cc0bd8df1f8689fd97 | [
"MIT"
] | 1 | 2020-05-01T20:37:34.000Z | 2020-05-01T20:37:34.000Z | app/api/config.py | stdevelopr/JTray | 287a4be1e26b2dab372323cc0bd8df1f8689fd97 | [
"MIT"
] | null | null | null | jira_user_url = ""
jira_email = ""
jira_token = "" | 16.666667 | 18 | 0.66 |
2f43d99fa4ec9d66bba52027500997441d643a8e | 1,216 | py | Python | baseq/bed/__init__.py | basedata10/baseq | 0f1786c3392a51a6ec7cb0f32355cd28eaa5df29 | [
"MIT"
] | 1 | 2018-08-30T20:29:17.000Z | 2018-08-30T20:29:17.000Z | baseq/bed/__init__.py | basedata10/baseq | 0f1786c3392a51a6ec7cb0f32355cd28eaa5df29 | [
"MIT"
] | null | null | null | baseq/bed/__init__.py | basedata10/baseq | 0f1786c3392a51a6ec7cb0f32355cd28eaa5df29 | [
"MIT"
] | null | null | null | import subprocess, re, os
from baseq.utils.runcommand import run_it, run_generator
import pandas as pd
import random
"""
baseq dev bed ./bed
"""
import click, os, sys
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help']) | 31.179487 | 124 | 0.612664 |
2f44190ef14e633a5b67ab12f51b43692438c0da | 855 | py | Python | tests/unit/test_iostatic.py | Rogdham/python-xz | f53266dae8d4f7fcc74cd53222f22105e40d5112 | [
"MIT"
] | 3 | 2021-07-13T16:06:38.000Z | 2022-03-04T22:52:58.000Z | tests/unit/test_iostatic.py | Rogdham/python-xz | f53266dae8d4f7fcc74cd53222f22105e40d5112 | [
"MIT"
] | 3 | 2021-09-19T09:48:35.000Z | 2022-01-09T15:38:48.000Z | tests/unit/test_iostatic.py | Rogdham/python-xz | f53266dae8d4f7fcc74cd53222f22105e40d5112 | [
"MIT"
] | null | null | null | from io import UnsupportedOperation
import pytest
from xz.io import IOStatic
| 22.5 | 49 | 0.625731 |
2f4660a8cf58761bb602bec1315943879f761718 | 4,264 | py | Python | swtstore/application.py | janastu/swtstore | 7326138bf2fbf2a4ed8c7300c68092f91709dfc2 | [
"BSD-2-Clause"
] | 2 | 2015-04-28T00:35:21.000Z | 2016-02-11T19:31:15.000Z | swtstore/application.py | janastu/swtstore | 7326138bf2fbf2a4ed8c7300c68092f91709dfc2 | [
"BSD-2-Clause"
] | 9 | 2015-02-02T11:24:23.000Z | 2017-12-29T07:49:07.000Z | swtstore/application.py | janastu/swtstore | 7326138bf2fbf2a4ed8c7300c68092f91709dfc2 | [
"BSD-2-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""
__init__.py
"""
import os
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask, request, jsonify, render_template, make_response
from classes.database import db
from config import DefaultConfig
from classes import views
#from classes import models
from classes import oauth
__all__ = ['create_app', 'getDBInstance']
DEFAULT_APP_NAME = __name__
DEFAULT_MODULES = (
(views.frontend, ''),
(views.api, '/api'),
(views.user, '/users'),
(views.context, '/contexts'),
(views.sweet, '/sweets'),
(views.app, '/apps'),
(views.Oauth, '/oauth')
)
# return the current db instance
# TODO: is this needed so much?
| 24.090395 | 78 | 0.633912 |
2f46d633a48c16504cc0737a6f08d56b6c8d1caf | 2,313 | py | Python | 2018/12a.py | apie/advent-of-code | c49abec01b044166a688ade40ebb1e642f0e5ce0 | [
"MIT"
] | 4 | 2018-12-04T23:33:46.000Z | 2021-12-07T17:33:27.000Z | 2018/12a.py | apie/advent-of-code | c49abec01b044166a688ade40ebb1e642f0e5ce0 | [
"MIT"
] | 17 | 2018-12-12T23:32:09.000Z | 2020-01-04T15:50:31.000Z | 2018/12a.py | apie/advent-of-code | c49abec01b044166a688ade40ebb1e642f0e5ce0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import pytest
import fileinput
import sys
DAY=12
def test_answer(example_input, example_result):
plants = Plants(example_input)
print('Rules: ',plants.rules)
for i in range(0, 20+1):
if i > 0:
plants.gen()
print('Pots after {:2} generations: {}'.format(plants.generation, plants.print_pots()))
assert '{:2}: {}'.format(i, plants.print_pots()) == example_result[2+i]
assert plants.sum_pots() == 325
if __name__ == '__main__':
in_lines = [l.strip() for l in fileinput.input(sys.argv[1:] or '{:02}.input'.format(DAY))]
plants = Plants(in_lines)
for i in range(0, 20+1):
if i > 0:
plants.gen()
print('Pots after {:2} generations: {}'.format(plants.generation, plants.print_pots()))
print('Answer: {}'.format(plants.sum_pots()))
| 31.684932 | 92 | 0.587981 |
2f47e0e4afa3b0ef06fd5508f958beec6b26eb72 | 826 | py | Python | 03-Spark DFs/24-Solution (Group By).py | PacktPublishing/PySpark-and-AWS-Master-Big-Data-with-PySpark-and-AWS | 28726ada2a8f03557180b472eecf3efc72cab5a2 | [
"MIT"
] | 3 | 2021-09-29T04:11:44.000Z | 2021-12-21T06:28:48.000Z | Part 3/Code/03-Spark DFs/24-Solution (Group By).py | PacktPublishing/50-Hours-of-Big-Data-PySpark-AWS-Scala-and-Scraping | 8993a8ee10534a29aeee18fa91bdc48e3093bec5 | [
"MIT"
] | null | null | null | Part 3/Code/03-Spark DFs/24-Solution (Group By).py | PacktPublishing/50-Hours-of-Big-Data-PySpark-AWS-Scala-and-Scraping | 8993a8ee10534a29aeee18fa91bdc48e3093bec5 | [
"MIT"
] | 5 | 2021-11-17T15:47:36.000Z | 2022-03-09T05:13:09.000Z | # Databricks notebook source
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, lit
from pyspark.sql.functions import sum,avg,max,min,mean,count
spark = SparkSession.builder.appName("Spark DataFrames").getOrCreate()
# COMMAND ----------
df = spark.read.options(header='True', inferSchema='True').csv('/FileStore/tables/StudentData.csv')
df.show()
# COMMAND ----------
# 1
df.groupBy("course").count().show()
df.groupBy("course").agg(count("*").alias("total_enrollment")).show()
# COMMAND ----------
# 2
df.groupBy("course", "gender").agg(count("*").alias("total_enrollment")).show()
# COMMAND ----------
# 3
df.groupBy("course", "gender").agg(sum("marks").alias("total_marks")).show()
# COMMAND ----------
# 4
df.groupBy("course", "age").agg(min("marks"), max("marks"), avg("marks")).show()
| 25.8125 | 99 | 0.659806 |
2f494c01c823bdfd4b8fa27dc3e019de599fda15 | 897 | py | Python | queues/list_queue/queue.py | joeb15/202Problems | a8ab3dc49cb899b640cc836863e28e52fb978466 | [
"MIT"
] | null | null | null | queues/list_queue/queue.py | joeb15/202Problems | a8ab3dc49cb899b640cc836863e28e52fb978466 | [
"MIT"
] | null | null | null | queues/list_queue/queue.py | joeb15/202Problems | a8ab3dc49cb899b640cc836863e28e52fb978466 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
"""
A queue is a first-in first-out type of data structure
For this to work, you must be able to enqueue (add) items to the queue, dequeue (remove) items from the queue
"""
| 19.933333 | 109 | 0.591973 |
2f4adf626e0639100f39276c7a36ef5fa92541f9 | 1,185 | py | Python | parse_xlsx.py | UoA-eResearch/OPIMD | 63d2279eea8de7db53b01c50e8e35b483ab572c4 | [
"MIT"
] | null | null | null | parse_xlsx.py | UoA-eResearch/OPIMD | 63d2279eea8de7db53b01c50e8e35b483ab572c4 | [
"MIT"
] | 2 | 2021-03-03T06:11:30.000Z | 2021-03-05T02:57:02.000Z | parse_xlsx.py | UoA-eResearch/OPIMD | 63d2279eea8de7db53b01c50e8e35b483ab572c4 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import pandas as pd
import json
df = pd.read_excel("OPIMD Calc_checked_03Feb21AL.xlsx", sheet_name=None)
obj = {}
dz = df["OPIMD15ACCESSDATAZONERANK"]
dz = dz.dropna(subset=["datazone"])
dz.datazone = dz.datazone.astype(int)
dz.index = dz.datazone
obj["dz"] = dz.OPIMDAccPopRank_AL.to_dict()
hlth = df["HEALTHCALC"]
hlth.index = hlth.HlthPattern
obj["hlth"] = hlth.HlthRank.to_dict()
inc = df["INCOMECALC"]
inc.index = inc.IncPattern
obj["inc"] = inc.IncRank.to_dict()
house = df["HOUSECALC"]
house.index = house.HouPattern
obj["house"] = house.HouRank.to_dict()
con = df["CONNECTCALC"]
con = con.dropna(subset=["ConPattern"])
con.ConPattern = con.ConPattern.astype(int)
con.index = con.ConPattern
obj["con"] = con.ConRank.to_dict()
assets = df["ASSETSCALC"]
assets = assets.dropna(subset=["AsPattern"])
assets.AsPattern = assets.AsPattern.astype(int)
assets.index = assets.AsPattern
obj["assets"] = assets.AsRank.to_dict()
breaks = df["OPIMDRankDecile"]
breaks = breaks.iloc[3:13,0:3]
breaks.columns = ["min", "max", "decile"]
obj["breaks"] = breaks.to_dict(orient='records')
with open("data.json", "w") as f:
json.dump(obj, f)
print("Saved") | 25.212766 | 72 | 0.709705 |
2f4c73bdc5e9b2d9296a2574b70a67727f97ee93 | 1,806 | py | Python | forvo.py | edoput/ForvoDownloader | d2d034ee5d9f22cd4faad76444172490a0ff23e2 | [
"MIT"
] | 9 | 2018-09-03T22:36:53.000Z | 2021-11-09T11:59:14.000Z | forvo.py | EdoPut/ForvoDownloader | d2d034ee5d9f22cd4faad76444172490a0ff23e2 | [
"MIT"
] | null | null | null | forvo.py | EdoPut/ForvoDownloader | d2d034ee5d9f22cd4faad76444172490a0ff23e2 | [
"MIT"
] | 2 | 2015-01-27T15:09:26.000Z | 2017-01-29T04:39:22.000Z | import requests
import urllib
| 31.137931 | 121 | 0.516058 |
2f4cad023005927c7b37c2c98bbb63ef5319fadc | 1,336 | py | Python | python/src/main/python/pyalink/alink/common/sql/sql_query_utils.py | wenwei8268/Alink | c00702538c95a32403985ebd344eb6aeb81749a7 | [
"Apache-2.0"
] | null | null | null | python/src/main/python/pyalink/alink/common/sql/sql_query_utils.py | wenwei8268/Alink | c00702538c95a32403985ebd344eb6aeb81749a7 | [
"Apache-2.0"
] | null | null | null | python/src/main/python/pyalink/alink/common/sql/sql_query_utils.py | wenwei8268/Alink | c00702538c95a32403985ebd344eb6aeb81749a7 | [
"Apache-2.0"
] | null | null | null | import re
__all__ = ['register_table_name', 'sql_query']
batch_table_name_map = dict()
stream_table_name_map = dict()
| 28.425532 | 65 | 0.654192 |
2f4d2891267d928eb5b2260208cbd4b134295605 | 3,790 | py | Python | salt/utils/win_chcp.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 9,425 | 2015-01-01T05:59:24.000Z | 2022-03-31T20:44:05.000Z | salt/utils/win_chcp.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 33,507 | 2015-01-01T00:19:56.000Z | 2022-03-31T23:48:20.000Z | salt/utils/win_chcp.py | Noah-Huppert/salt | 998c382f5f2c3b4cbf7d96aa6913ada6993909b3 | [
"Apache-2.0"
] | 5,810 | 2015-01-01T19:11:45.000Z | 2022-03-31T02:37:20.000Z | """
Functions for working with the codepage on Windows systems
"""
import logging
from contextlib import contextmanager
from salt.exceptions import CodePageError
log = logging.getLogger(__name__)
try:
import pywintypes
import win32console
HAS_WIN32 = True
except ImportError:
HAS_WIN32 = False
# Although utils are often directly imported, it is also possible to use the loader.
def __virtual__():
"""
Only load if Win32 Libraries are installed
"""
if not HAS_WIN32:
return False, "This utility requires pywin32"
return "win_chcp"
def get_codepage_id(raise_error=False):
"""
Get the currently set code page on windows
Args:
raise_error (bool):
``True`` will raise an error if the codepage fails to change.
``False`` will suppress the error
Returns:
int: A number representing the codepage
Raises:
CodePageError: On unsuccessful codepage change
"""
try:
return win32console.GetConsoleCP()
except pywintypes.error as exc:
_, _, msg = exc.args
error = "Failed to get the windows code page: {}".format(msg)
if raise_error:
raise CodePageError(error)
else:
log.error(error)
return -1
def set_codepage_id(page_id, raise_error=False):
"""
Set the code page on windows
Args:
page_id (str, int):
A number representing the codepage.
raise_error (bool):
``True`` will raise an error if the codepage fails to change.
``False`` will suppress the error
Returns:
int: A number representing the codepage
Raises:
CodePageError: On unsuccessful codepage change
"""
if not isinstance(page_id, int):
try:
page_id = int(page_id)
except ValueError:
error = "The `page_id` needs to be an integer, not {}".format(type(page_id))
if raise_error:
raise CodePageError(error)
log.error(error)
return -1
try:
win32console.SetConsoleCP(page_id)
return get_codepage_id(raise_error=raise_error)
except pywintypes.error as exc:
_, _, msg = exc.args
error = "Failed to set the windows code page: {}".format(msg)
if raise_error:
raise CodePageError(error)
else:
log.error(error)
return -1
| 25.608108 | 88 | 0.61715 |
2f4d57d728b00fc588f9af5da19650e009e95339 | 827 | py | Python | application/server.py | comov/fucked-up_schedule | 3e6a2972f46686829b655798cd641cd82559db24 | [
"MIT"
] | null | null | null | application/server.py | comov/fucked-up_schedule | 3e6a2972f46686829b655798cd641cd82559db24 | [
"MIT"
] | null | null | null | application/server.py | comov/fucked-up_schedule | 3e6a2972f46686829b655798cd641cd82559db24 | [
"MIT"
] | null | null | null | from flask import Flask, render_template
from application.settings import STATIC
from application.storage import storage
app = Flask(__name__, static_url_path=STATIC)
| 25.84375 | 50 | 0.562273 |
2f4e64d9de5293438f0fe185689a4d11efc8c4c9 | 1,857 | py | Python | cli_fun/commands/fun.py | e4r7hbug/cli-fun | 43f9a1bf788745783a24f315d80ceb969ff853e4 | [
"MIT"
] | null | null | null | cli_fun/commands/fun.py | e4r7hbug/cli-fun | 43f9a1bf788745783a24f315d80ceb969ff853e4 | [
"MIT"
] | null | null | null | cli_fun/commands/fun.py | e4r7hbug/cli-fun | 43f9a1bf788745783a24f315d80ceb969ff853e4 | [
"MIT"
] | null | null | null | """Fun section of CLI command."""
import json
import logging
import time
from pprint import pformat, pprint
import click
from fabric.colors import red
| 22.925926 | 70 | 0.611739 |
2f4ee2585931ea1270d6eb83cfe79d8eaf1f4d33 | 1,851 | py | Python | tests/algorithms/descriptor_generator/test_colordescriptor.py | joshanderson-kw/SMQTK | 594e7c733fe7f4e514a1a08a7343293a883a41fc | [
"BSD-3-Clause"
] | 82 | 2015-01-07T15:33:29.000Z | 2021-08-11T18:34:05.000Z | tests/algorithms/descriptor_generator/test_colordescriptor.py | joshanderson-kw/SMQTK | 594e7c733fe7f4e514a1a08a7343293a883a41fc | [
"BSD-3-Clause"
] | 230 | 2015-04-08T14:36:51.000Z | 2022-03-14T17:55:30.000Z | tests/algorithms/descriptor_generator/test_colordescriptor.py | joshanderson-kw/SMQTK | 594e7c733fe7f4e514a1a08a7343293a883a41fc | [
"BSD-3-Clause"
] | 65 | 2015-01-04T15:00:16.000Z | 2021-11-19T18:09:11.000Z | import unittest
import unittest.mock as mock
import pytest
from smqtk.algorithms.descriptor_generator import DescriptorGenerator
from smqtk.algorithms.descriptor_generator.colordescriptor.colordescriptor \
import ColorDescriptor_Image_csift # arbitrary leaf class
from smqtk.utils.configuration import configuration_test_helper
| 42.068182 | 79 | 0.690438 |
2f54a4c20f5d809def78444ea740f895640d9cbe | 557 | py | Python | conservation/migrations/0020_auto_20190418_1715.py | ropable/wastd | 295c60760548d177859de9c0bebdae93342767d0 | [
"MIT"
] | 3 | 2020-07-23T06:37:43.000Z | 2022-01-27T09:40:40.000Z | conservation/migrations/0020_auto_20190418_1715.py | ropable/wastd | 295c60760548d177859de9c0bebdae93342767d0 | [
"MIT"
] | 337 | 2018-07-12T05:56:29.000Z | 2022-03-30T02:40:41.000Z | conservation/migrations/0020_auto_20190418_1715.py | ropable/wastd | 295c60760548d177859de9c0bebdae93342767d0 | [
"MIT"
] | 2 | 2020-02-24T00:05:46.000Z | 2020-07-15T07:02:29.000Z | # Generated by Django 2.1.7 on 2019-04-18 09:15
from django.db import migrations, models
import django.db.models.deletion
import django_fsm
| 23.208333 | 52 | 0.642729 |
2f57b78b84caa4984e3516eb70876b6001368c78 | 1,141 | py | Python | src/news/migrations/0005_news_base_fields.py | Little-Pogchamp-Team/kinopoisk_on_django | 06e1b5ee14c7e77dd5b69140732461a02bf44566 | [
"MIT"
] | 10 | 2021-01-10T09:39:16.000Z | 2022-02-05T06:40:47.000Z | src/news/migrations/0005_news_base_fields.py | Little-Pogchamp-Team/kinopoisk_on_django | 06e1b5ee14c7e77dd5b69140732461a02bf44566 | [
"MIT"
] | null | null | null | src/news/migrations/0005_news_base_fields.py | Little-Pogchamp-Team/kinopoisk_on_django | 06e1b5ee14c7e77dd5b69140732461a02bf44566 | [
"MIT"
] | 1 | 2021-01-11T17:04:06.000Z | 2021-01-11T17:04:06.000Z | # Generated by Django 3.1.5 on 2021-04-28 16:22
import ckeditor.fields
from django.db import migrations, models
import django_minio_backend.models
| 28.525 | 179 | 0.595092 |
2f57fea89f96d0ae41ee6422418756f6c9d832f5 | 1,433 | py | Python | src/utils/common.py | Conni2461/admission_handler | 6ea2a696100c046fd5d5ede468febd9072f3763f | [
"MIT"
] | 1 | 2022-02-11T04:29:18.000Z | 2022-02-11T04:29:18.000Z | src/utils/common.py | Conni2461/admission_handler | 6ea2a696100c046fd5d5ede468febd9072f3763f | [
"MIT"
] | null | null | null | src/utils/common.py | Conni2461/admission_handler | 6ea2a696100c046fd5d5ede468febd9072f3763f | [
"MIT"
] | null | null | null | import socket
from threading import Thread, Timer
def get_real_ip():
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.connect(('1.1.1.1', 1))
return sock.getsockname()[0]
def get_hostname():
return socket.gethostname()
| 22.390625 | 59 | 0.5806 |
2f585f0414875528a6779f295b93677e10c21cf1 | 5,206 | py | Python | app/models.py | lilianwaweru/Oa_Online | 58644f5dd4ae1f396b43a2da980a9c464a9bfdd4 | [
"MIT"
] | null | null | null | app/models.py | lilianwaweru/Oa_Online | 58644f5dd4ae1f396b43a2da980a9c464a9bfdd4 | [
"MIT"
] | null | null | null | app/models.py | lilianwaweru/Oa_Online | 58644f5dd4ae1f396b43a2da980a9c464a9bfdd4 | [
"MIT"
] | 4 | 2019-04-30T09:07:22.000Z | 2019-07-02T08:51:22.000Z | from . import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from . import login_manager
from datetime import datetime
def __repr__(self):
return f'User {self.husband_name} and {self.wife_name}'
class Notice(db.Model):
__tablename__ = 'notices'
id = db.Column(db.Integer, primary_key = True)
district = db.Column(db.String(255))
spouse = db.Column(db.String(255))
g_name = db.Column(db.String(255))
g_condition = db.Column(db.String(255))
g_occupation = db.Column(db.String(255))
g_age = db.Column(db.Integer)
g_residence = db.Column(db.String(255))
g_consent = db.Column(db.String(255))
b_name = db.Column(db.String(255))
b_condition = db.Column(db.String(255))
b_occupation = db.Column(db.String(255))
b_age = db.Column(db.Integer)
b_residence = db.Column(db.String(255))
b_consent = db.Column(db.String(255))
dd = db.Column(db.Integer)
mm = db.Column(db.String(255))
yy = db.Column(db.Integer)
signature = db.Column(db.String(255))
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
class Certificate(db.Model):
__tablename__ = 'certificates'
id = db.Column(db.Integer, primary_key = True)
g_date = db.Column(db.Date())
g_name = db.Column(db.String(255))
g_condition = db.Column(db.String(255))
g_occupation = db.Column(db.String(255))
g_age = db.Column(db.Integer)
g_residence = db.Column(db.String(255))
g_fname = db.Column(db.String(255))
g_foccupation = db.Column(db.String(255))
b_date = db.Column(db.Date())
b_name = db.Column(db.String(255))
b_condition = db.Column(db.String(255))
b_occupation = db.Column(db.String(255))
b_age = db.Column(db.Integer)
b_residence = db.Column(db.String(255))
g_fname = db.Column(db.String(255))
g_foccupation = db.Column(db.String(255))
groom = db.Column(db.String(255))
bride = db.Column(db.String(255))
witness1 = db.Column(db.String(255))
witness2 = db.Column(db.String(255))
date = db.Column(db.Date())
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
class Impediment(db.Model):
__tablename__ = 'impediments'
id = db.Column(db.Integer, primary_key = True)
spouse = db.Column(db.String(255))
at = db.Column(db.String(255))
in_input = db.Column(db.String(255))
surname = db.Column(db.String(255))
forename = db.Column(db.String(255))
country = db.Column(db.String(255))
date = db.Column(db.Date())
father = db.Column(db.String(255))
sex = db.Column(db.String(255))
race = db.Column(db.String(255))
religion = db.Column(db.String(255))
residence = db.Column(db.String(255))
condition = db.Column(db.String(255))
occupation = db.Column(db.String(255))
dd = db.Column(db.Integer)
mm = db.Column(db.String(255))
yy = db.Column(db.Integer)
signature = db.Column(db.String(255))
user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
class Agreement(db.Model):
    """Free-text agreement terms recorded for a marriage (vows, dowry, other)."""
    __tablename__ = 'agreements'
    id = db.Column(db.Integer, primary_key = True)
    husband_vows = db.Column(db.String(255))
    wife_vows = db.Column(db.String(255))
    dowry_agreement = db.Column(db.String(255))
    other_agreements = db.Column(db.String(255))
    # Owning user account.
    user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
class Witness(db.Model):
    """The two witnesses attached to a user's marriage record."""
    __tablename__ = 'witnesses'
    id = db.Column(db.Integer, primary_key = True)
    witness1_name = db.Column(db.String(255))
    witness2_name = db.Column(db.String(255))
    # witnessN_id: stored as a string -- likely an identity-document number
    # rather than a numeric key; confirm against the data-entry forms.
    witness1_id = db.Column(db.String(255))
    witness2_id = db.Column(db.String(255))
    witness1_dob = db.Column(db.Date())
    witness2_dob = db.Column(db.Date())
    # Owning user account.
    user_id = db.Column(db.Integer,db.ForeignKey("users.id"))
| 37.185714 | 82 | 0.680177 |
2f59a50ee0f4047fe095b3e0f94aa7691fc20820 | 2,139 | py | Python | tests/server/datasets/test_dao.py | davidkartchner/rubrix | 33faa006d7498a806a9fd594036d4a42c7d70da2 | [
"Apache-2.0"
] | 1 | 2022-01-06T09:05:06.000Z | 2022-01-06T09:05:06.000Z | tests/server/datasets/test_dao.py | davidkartchner/rubrix | 33faa006d7498a806a9fd594036d4a42c7d70da2 | [
"Apache-2.0"
] | null | null | null | tests/server/datasets/test_dao.py | davidkartchner/rubrix | 33faa006d7498a806a9fd594036d4a42c7d70da2 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2021-present, the Recognai S.L. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pytest
from rubrix.server.commons.errors import ClosedDatasetError
from rubrix.server.commons.es_wrapper import create_es_wrapper
from rubrix.server.datasets.dao import DatasetsDAO
from rubrix.server.datasets.model import DatasetDB
from rubrix.server.tasks.commons import TaskType
from rubrix.server.tasks.commons.dao.dao import dataset_records_dao
from rubrix.server.tasks.text_classification.dao.es_config import (
text_classification_mappings,
)
# Module-level test fixtures, built once at import time and shared by every
# test in this module: an Elasticsearch wrapper, the records DAO, and the
# datasets DAO under test.
es_wrapper = create_es_wrapper()
records = dataset_records_dao(es_wrapper)
# Text-classification mappings must be registered before records of that
# task type can be indexed.
records.register_task_mappings(
    TaskType.text_classification, text_classification_mappings()
)
dao = DatasetsDAO.get_instance(es_wrapper, records)
| 36.254237 | 81 | 0.777466 |
2f59b443158d106a76a2bebe88570da44bbc0fe9 | 5,838 | py | Python | tests/tests_rotated_array_search.py | quervernetzt/find-value-in-rotated-sorted-array | b391b1502fd326a57973621500e984bf6f7df44a | [
"MIT"
] | null | null | null | tests/tests_rotated_array_search.py | quervernetzt/find-value-in-rotated-sorted-array | b391b1502fd326a57973621500e984bf6f7df44a | [
"MIT"
] | null | null | null | tests/tests_rotated_array_search.py | quervernetzt/find-value-in-rotated-sorted-array | b391b1502fd326a57973621500e984bf6f7df44a | [
"MIT"
] | null | null | null | import unittest
from solution.rotated_array_search import RotatedArraySearch
| 33.94186 | 107 | 0.601918 |
2f5b87677f26662c1ce0c7a5ee5aaf173034c184 | 3,637 | py | Python | tests/1_local/test_keygroup.py | aporlowski/cloudmesh-cloud | 247479361300f97bbb8b7b1f4c99308358e9e2b2 | [
"Apache-2.0"
] | 5 | 2019-05-06T01:27:55.000Z | 2020-03-12T09:50:08.000Z | tests/1_local/test_keygroup.py | aporlowski/cloudmesh-cloud | 247479361300f97bbb8b7b1f4c99308358e9e2b2 | [
"Apache-2.0"
] | 137 | 2019-04-06T12:35:29.000Z | 2020-05-05T10:02:36.000Z | tests/1_local/test_keygroup.py | aporlowski/cloudmesh-cloud | 247479361300f97bbb8b7b1f4c99308358e9e2b2 | [
"Apache-2.0"
] | 27 | 2019-04-05T22:03:41.000Z | 2021-03-05T00:05:00.000Z | ###############################################################
# pytest -v --capture=no tests/test_keygroup.py
# pytest -v tests/test_keygroup.py
###############################################################
#import pytest
import os
from cloudmesh.common.variables import Variables
from cloudmesh.common.Benchmark import Benchmark
from cloudmesh.compute.vm.Provider import Provider
from cloudmesh.configuration.Config import Config
from cloudmesh.mongo.CmDatabase import CmDatabase
# import pytest
import os
from cloudmesh.common.Benchmark import Benchmark
from cloudmesh.common.variables import Variables
from cloudmesh.compute.vm.Provider import Provider
from cloudmesh.configuration.Config import Config
from cloudmesh.mongo.CmDatabase import CmDatabase
# Module-level test setup: resolve the cloud under test from the cloudmesh
# variable store and fail fast if it has not been configured.
Benchmark.debug()
user = Config()["cloudmesh.profile.user"]
variables = Variables()
KEY = "test-keygroup"  # name of the key exercised by these tests
cloud = variables.parameter('cloud')
print(f"Test run for {cloud} on key {KEY}")
if cloud is None:
    # BUGFIX: original message read "cloud is not not set" (double "not").
    raise ValueError("cloud is not set")
cm = CmDatabase()
provider = Provider(name=cloud)
#@pytest.mark.incremental
| 25.978571 | 63 | 0.56915 |
2f5cb793e2e748f1c572ea256bcf2c1a860ee543 | 2,344 | py | Python | blinpy/tests/test_models.py | solbes/blinpy | 89b4f26066c383fc07ca6b1cbfdc8a61397f3f08 | [
"MIT"
] | 3 | 2021-02-11T14:00:08.000Z | 2021-10-13T20:41:21.000Z | blinpy/tests/test_models.py | solbes/blinpy | 89b4f26066c383fc07ca6b1cbfdc8a61397f3f08 | [
"MIT"
] | null | null | null | blinpy/tests/test_models.py | solbes/blinpy | 89b4f26066c383fc07ca6b1cbfdc8a61397f3f08 | [
"MIT"
] | null | null | null | import pytest
import pandas as pd
import numpy as np
from blinpy import models
# Shared (x, y) observation sample used by the model tests below.
_x_obs = [0.0, 1.0, 1.0, 2.0, 1.8, 3.0, 4.0, 5.2, 6.5, 8.0, 10.0]
_y_obs = [5.0, 5.0, 5.1, 5.3, 5.5, 5.7, 6.0, 6.3, 6.7, 7.1, 7.5]
data = pd.DataFrame({'x': np.array(_x_obs), 'y': np.array(_y_obs)})
| 22.980392 | 80 | 0.50128 |
2f5daa6050352bd82f556af83e14e2830de366ac | 3,226 | py | Python | packages/gtmapi/lmsrvlabbook/tests/test_jobstatus_queries.py | gigabackup/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
] | 60 | 2018-09-26T15:46:00.000Z | 2021-10-10T02:37:14.000Z | packages/gtmapi/lmsrvlabbook/tests/test_jobstatus_queries.py | gigabackup/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
] | 1,706 | 2018-09-26T16:11:22.000Z | 2021-08-20T13:37:59.000Z | packages/gtmapi/lmsrvlabbook/tests/test_jobstatus_queries.py | griffinmilsap/gigantum-client | 70fe6b39b87b1c56351f2b4c551b6f1693813e4f | [
"MIT"
] | 11 | 2019-03-14T13:23:51.000Z | 2022-01-25T01:29:16.000Z | import pprint
import time
import json
from gtmcore.dispatcher import Dispatcher, jobs
from lmsrvlabbook.tests.fixtures import fixture_working_dir
| 31.320388 | 103 | 0.532548 |
2f5e89412b184aa3f2abac3805b9bf927e055845 | 204 | py | Python | valid.py | whitereaper25/test_2 | 47212fc977bcd36e8879ada22f319691073accb1 | [
"Apache-2.0"
] | null | null | null | valid.py | whitereaper25/test_2 | 47212fc977bcd36e8879ada22f319691073accb1 | [
"Apache-2.0"
] | null | null | null | valid.py | whitereaper25/test_2 | 47212fc977bcd36e8879ada22f319691073accb1 | [
"Apache-2.0"
] | null | null | null | import re
n = int(input())
for i in range(n):
print(verify(input())) | 18.545455 | 31 | 0.553922 |
2f6096fad4d8b4fcb9ab49eab731fdc3465207c6 | 1,632 | py | Python | MAPLEAF/Rocket/sampleStatefulRocketComponent.py | henrystoldt/MAPLEAF | af970d3e8200832f5e70d537b15ad38dd74fa551 | [
"MIT"
] | 15 | 2020-09-11T19:25:07.000Z | 2022-03-12T16:34:53.000Z | MAPLEAF/Rocket/sampleStatefulRocketComponent.py | henrystoldt/MAPLEAF | af970d3e8200832f5e70d537b15ad38dd74fa551 | [
"MIT"
] | null | null | null | MAPLEAF/Rocket/sampleStatefulRocketComponent.py | henrystoldt/MAPLEAF | af970d3e8200832f5e70d537b15ad38dd74fa551 | [
"MIT"
] | 3 | 2021-12-24T19:39:53.000Z | 2022-03-29T01:06:28.000Z | from MAPLEAF.Motion import ForceMomentSystem, Inertia, Vector
from MAPLEAF.Rocket import RocketComponent
__all__ = [ "SampleStatefulComponent" ] | 42.947368 | 128 | 0.694853 |
2f610ddfbb4015ca897145b09e2fa1a4b5263289 | 866 | py | Python | Array/Final450/Sort_Array_Of_0s_1s_2s.py | prash-kr-meena/GoogleR | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | [
"Apache-2.0"
] | null | null | null | Array/Final450/Sort_Array_Of_0s_1s_2s.py | prash-kr-meena/GoogleR | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | [
"Apache-2.0"
] | null | null | null | Array/Final450/Sort_Array_Of_0s_1s_2s.py | prash-kr-meena/GoogleR | 27aca71e51cc2442e604e07ab00406a98d8d63a4 | [
"Apache-2.0"
] | null | null | null | from Utils.Array import input_array
# Named constants for the three values the input array may contain.
ZERO, ONE, TWO = 0, 1, 2
# Time -> O(n)
# Space -> O(1) inplace
if __name__ == "__main__":
    # Read the array from stdin, sort it, and print the result.
    A = input_array()
    # sort_by_counting is expected to sort A in place (A is printed after
    # the call and the return value is ignored).
    sort_by_counting(A)
    print(A)
# Sample inputs (one array per line):
"""
2 1 0 1 2 0 0 0 1 2 2 2 1 1
1 1 1 1
2 1 0 2 1 0
2 1 0
"""
| 16.339623 | 52 | 0.469977 |
2f6154e27302aac990c76151fe6022aab6340e63 | 8,601 | py | Python | public/yum-3.2.28/callback.py | chillaxor/blogbin | 211202d513fa80a3d22fb3963f36a01a8dec5b68 | [
"MIT"
] | 8 | 2021-11-26T06:19:06.000Z | 2022-01-11T01:30:11.000Z | initrd/usr/share/yum-cli/callback.py | OpenCloudOS/OpenCloudOS-tools | 06b12aab3182f4207d78a5d8733be03f0d7b69a4 | [
"MulanPSL-1.0"
] | 5 | 2021-02-02T08:17:10.000Z | 2022-02-27T06:53:42.000Z | public/yum-3.2.28/callback.py | chillaxor/blogbin | 211202d513fa80a3d22fb3963f36a01a8dec5b68 | [
"MIT"
] | 2 | 2021-12-21T08:36:02.000Z | 2021-12-21T08:55:38.000Z | #!/usr/bin/python -t
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# Copyright 2005 Duke University
"""
Progress display callback classes for the yum command line.
"""
import rpm
import os
import sys
import logging
from yum import _
from yum.constants import *
| 36.914163 | 87 | 0.482153 |
2f61d9c0592b835198eb2ed4703fc9cefded5f37 | 1,911 | py | Python | miradar_node/scripts/ppi_visualizer.py | QibiTechInc/miradar_ros1_pkgs | 65b339147c2a1a990696d77e75b58f5fba84dc22 | [
"Apache-2.0"
] | null | null | null | miradar_node/scripts/ppi_visualizer.py | QibiTechInc/miradar_ros1_pkgs | 65b339147c2a1a990696d77e75b58f5fba84dc22 | [
"Apache-2.0"
] | null | null | null | miradar_node/scripts/ppi_visualizer.py | QibiTechInc/miradar_ros1_pkgs | 65b339147c2a1a990696d77e75b58f5fba84dc22 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import rospy
from miradar_node.msg import PPI, PPIData
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import Point
import dynamic_reconfigure.client
if __name__ == "__main__":
    # Register the ROS node, create the visualizer (the instance is kept
    # alive by this reference), and spin until the node is shut down.
    rospy.init_node("ppi_visualizer")
    ppiVisualizer = PPIVisualizer()
    rospy.spin()  # blocks, dispatching callbacks, until shutdown
| 32.948276 | 83 | 0.591837 |
2f62350af98cfe5e5bc543e35cb2ce81345228a2 | 3,561 | py | Python | app/dashboard.py | nidheesh6/earlyearthquake | d0ab976629f126206afcd3dc15a76c66992f8a9e | [
"Apache-2.0"
] | null | null | null | app/dashboard.py | nidheesh6/earlyearthquake | d0ab976629f126206afcd3dc15a76c66992f8a9e | [
"Apache-2.0"
] | null | null | null | app/dashboard.py | nidheesh6/earlyearthquake | d0ab976629f126206afcd3dc15a76c66992f8a9e | [
"Apache-2.0"
] | null | null | null | import dash
from dash.dependencies import Input, Output
import dash_core_components as dcc
import dash_html_components as html
import psycopg2
import json
import pandas as pd
import time
# Dash application serving the earthquake dashboard.
app = dash.Dash(__name__)
#app.css.config.serve_locally=False
#app.css.append_css(
#    {'external_url': 'https://codepen.io/amyoshino/pen/jzXypZ.css'})
# Module-level Postgres connection/cursor.
# NOTE(review): connection parameters are hard-coded here (password
# redacted) -- consider loading them from the environment instead.
conn = psycopg2.connect(host='ec2-18-232-24-132.compute-1.amazonaws.com',database='earthquake', user='postgres', password='********')
cur = conn.cursor()
# CSV loaded once at startup and cast to strings.
# TODO(review): confirm the schema/purpose of data_file.csv.
location = pd.read_csv("data_file.csv")
location=location.astype(str)
# Page layout: two graphs plus an interval timer; callbacks (not shown in
# this excerpt) can use `interval-component` to refresh the figures.
app.layout = html.Div([
    html.Div([
        html.Div([
            dcc.Graph(id='graph', style={'margin-top': '20'})], className="six columns"),
        html.Div([
            dcc.Graph(
                id='bar-graph'
            )
        ], className='twelve columns'
        ),
        dcc.Interval(
            id='interval-component',
            interval=5*1000, # in milliseconds
            n_intervals=0)
    ], className="row")
], className="ten columns offset-by-one")
if __name__ == '__main__':
    app.run_server(debug=False)
| 30.965217 | 193 | 0.48975 |
2f63b5c96ba7f532ebe24f05547c39f51d21dc62 | 1,291 | py | Python | ddlc/database.py | UltiRequiem/ddlc_api | ab542b6b9b1979421531dcce636cfe30d18d3a9d | [
"MIT"
] | 5 | 2021-11-19T18:57:27.000Z | 2022-03-19T23:53:45.000Z | ddlc/database.py | UltiRequiem/ddlc_api | ab542b6b9b1979421531dcce636cfe30d18d3a9d | [
"MIT"
] | 5 | 2021-11-07T02:43:26.000Z | 2022-03-06T03:16:28.000Z | ddlc/database.py | UltiRequiem/ddlc_api | ab542b6b9b1979421531dcce636cfe30d18d3a9d | [
"MIT"
] | 3 | 2021-11-19T18:57:28.000Z | 2021-11-19T19:02:39.000Z | import pymongo
from .config import DB_PASSWORD, DB_USER, CLUSTER_NAME, SUBDOMAIN, DB_NAME
from .exceptions import CharacterNotFound, PoemAuthorNotFound
# Module-level singleton service instance (DatabaseService is defined
# earlier in this file, outside this excerpt) shared by the API handlers.
DBService = DatabaseService()
| 28.065217 | 101 | 0.652982 |
2f6638f61b3058472b08244c7bbaf61f509b9975 | 4,525 | py | Python | scripts/main_experiment.py | wsavran/relm_pycsep_reproducibility | 29294dc37627e74b4fcc4d05add1efc5950ded82 | [
"BSD-3-Clause"
] | null | null | null | scripts/main_experiment.py | wsavran/relm_pycsep_reproducibility | 29294dc37627e74b4fcc4d05add1efc5950ded82 | [
"BSD-3-Clause"
] | null | null | null | scripts/main_experiment.py | wsavran/relm_pycsep_reproducibility | 29294dc37627e74b4fcc4d05add1efc5950ded82 | [
"BSD-3-Clause"
] | null | null | null | # imports
from collections import defaultdict
import numpy as np
import matplotlib.pyplot as pyplot
# pycsep imports
import csep
from csep.utils import stats, plots
# experiment imports
from experiment_utilities import (
load_zechar_catalog,
plot_consistency_test_comparison,
read_zechar_csv_to_dict
)
from experiment_config import config
# runtime flags
show_target_event_rates = True   # also print the forecast rate at every observed event
plot = False                     # render the individual consistency-test figures at the end
compute_evaluations = True       # run the N/M/S/L consistency tests for each forecast
# catalog from manuscript
catalog = csep.load_catalog('./data/evaluation_catalog_zechar2013_merge.txt', loader=load_zechar_catalog)
# test name -> list of results (one entry per forecast), filled in the loop below
evaluation_results = defaultdict(list)
# load results from zechar
zechar_dict = read_zechar_csv_to_dict('./data/consistency_quantile_scores_from_zechar.csv')
# main evaluation loop
# NOTE(review): `path` is never used; the file name is re-read via
# config['forecasts'][name] inside the loop.
for name, path in config['forecasts'].items():
    # load forecast
    fore = csep.load_gridded_forecast(
        config['forecasts'][name],
        start_date=config['start_date'],
        end_date=config['end_date'],
        name=name
    )
    # assign region of forecast to catalog
    catalog.region = fore.region
    cat_filt = catalog.filter_spatial(in_place=False)
    # assign region to new catalog
    cat_filt.region = fore.region
    # compute likelihood and expected number of events
    spatial_magnitude_counts = cat_filt.spatial_magnitude_counts()
    ll = stats.poisson_log_likelihood(spatial_magnitude_counts, fore.data).sum()
    # print summary statistics
    print(f"{name}\n==========================")
    print(f"Nfore: {fore.sum()}\nNobs: {cat_filt.event_count}\nLL/Nobs: {ll / cat_filt.event_count}")
    print("")
    if show_target_event_rates:
        print("Target event rates")
        for lon, lat, mag in zip(cat_filt.get_longitudes(), cat_filt.get_latitudes(), cat_filt.get_magnitudes()):
            try:
                rate = fore.get_rates([lon], [lat], [mag])
                print(lon, lat, mag, rate[0])
            except ValueError:
                # presumably raised when the event falls outside the
                # forecast's bins; report and continue
                print(lon, lat, mag, "ERROR")
        print("")
    # n-test
    if compute_evaluations:
        n_test_result = csep.poisson_evaluations.number_test(
            fore,
            cat_filt
        )
        evaluation_results['n-test'].append(n_test_result)
        print(f"N-test result: {n_test_result.quantile}")
        # m-test
        m_test_result = csep.poisson_evaluations.magnitude_test(
            fore,
            cat_filt,
            num_simulations=config['nsims'],
            seed=config['seed']
        )
        evaluation_results['m-test'].append(m_test_result)
        print(f"M-test result: {m_test_result.quantile}")
        # s-test
        s_test_result = csep.poisson_evaluations.spatial_test(
            fore,
            cat_filt,
            num_simulations=config['nsims'],
            seed=config['seed'],
        )
        evaluation_results['s-test'].append(s_test_result)
        print(f"S-test result: {s_test_result.quantile}")
        # l-test
        l_test_result = csep.poisson_evaluations.likelihood_test(
            fore,
            cat_filt,
            num_simulations=config['nsims'],
            seed=config['seed'],
        )
        evaluation_results['l-test'].append(l_test_result)
        print(f"L-test result: {l_test_result.quantile}")
        print("")
# plot and save results
ax = plot_consistency_test_comparison(evaluation_results, zechar_dict)
ax.get_figure().savefig('./output/pycsep_zechar_comparison.pdf')
# visualizations (only rendered when the `plot` flag above is True)
if plot:
    ax = plots.plot_poisson_consistency_test(
        evaluation_results['n-test'],
        plot_args={'xlabel': 'Observed earthquakes'}
    )
    ax.set_xlim([0,100])
    ax.get_figure().savefig('./output/number_test_pycsep.pdf')
    ax = plots.plot_poisson_consistency_test(
        evaluation_results['l-test'],
        plot_args={'xlabel': 'log-likelihood'},
        one_sided_lower=True
    )
    ax.set_xlim([-600,0])
    ax.get_figure().savefig('./output/likelihood_test_pycsep.pdf')
    ax = plots.plot_poisson_consistency_test(
        evaluation_results['s-test'],
        plot_args={'xlabel': 'log-likelihood'},
        one_sided_lower=True
    )
    ax.set_xlim([-220, -100])
    ax.get_figure().savefig('./output/spatial_test_pycsep.pdf')
    ax = plots.plot_poisson_consistency_test(
        evaluation_results['m-test'],
        plot_args={'xlabel': 'log-likelihood'},
        one_sided_lower=True
    )
    ax.set_xlim([-35, -10])
    ax.get_figure().savefig('./output/magnitude_test_pycsep.pdf')
| 31.206897 | 113 | 0.651271 |
2f677c0ab09b208476f962949003f247df030535 | 10,284 | py | Python | kakuro.py | PanPapag/Kakuro | c2de75fff059fdb479c6c435205cf864bd057510 | [
"MIT"
] | 5 | 2020-01-01T19:12:34.000Z | 2020-05-16T08:57:08.000Z | kakuro.py | PanPapag/Kakuro | c2de75fff059fdb479c6c435205cf864bd057510 | [
"MIT"
] | 1 | 2020-04-26T09:51:55.000Z | 2020-04-26T10:41:25.000Z | kakuro.py | PanPapag/Kakuro | c2de75fff059fdb479c6c435205cf864bd057510 | [
"MIT"
] | null | null | null | import os
import re
import sys
import time
import puzzles
from csp import *
from search import *
from utils import *
if __name__ == "__main__":
    # Collect every puzzle exported by puzzles.py, skipping dunder attributes.
    puzzle_list = [
        (attr_name, attr_value)
        for attr_name, attr_value in vars(puzzles).items()
        if not attr_name.startswith("__")
    ]
    for puzzle_name, puzzle in puzzle_list:
        print("\n----------------------------- {} Kakuro puzzle -----------------------------".format(puzzle_name))
        game = Kakuro(puzzle)
        game.display_grid(game.puzzle)
        # Solve the same puzzle with each search strategy in turn, showing
        # the solution and the number of assignments performed.
        strategies = (
            ("\n> Solution using BT algorithm", game.BT),
            ("\n> Solution using BT and MRV algorithm", game.BT_MRV),
            ("\n> Solution using FC algorithm", game.FC),
            ("\n> Solution using FC and MRV algorithm", game.FC_MRV),
            ("\n> Solution using MAC algorithm", game.MAC),
        )
        for banner, solve in strategies:
            print(banner)
            game.display_solution(game.puzzle, *solve(), game.nassigns)
        # print an empty line for better output
        print()
| 40.809524 | 115 | 0.50564 |
2f6b470e4d68764c82c5e960377985365be6d841 | 49 | py | Python | AD-HOC/2416.py | jeconiassantos/uriissues | f6c32f8632b9940a4886240ea5d22300922dc79a | [
"MIT"
] | null | null | null | AD-HOC/2416.py | jeconiassantos/uriissues | f6c32f8632b9940a4886240ea5d22300922dc79a | [
"MIT"
] | null | null | null | AD-HOC/2416.py | jeconiassantos/uriissues | f6c32f8632b9940a4886240ea5d22300922dc79a | [
"MIT"
] | null | null | null | C, N = map(int, input().split(' '))
print(C % N)
| 16.333333 | 35 | 0.510204 |
2f6b78883c06f33b085d614b2c51070339de771d | 2,682 | py | Python | plot.py | nsi88/ctg-plot | c623fb76d72429f0411d42ff1e380a7a46d8c04f | [
"MIT"
] | null | null | null | plot.py | nsi88/ctg-plot | c623fb76d72429f0411d42ff1e380a7a46d8c04f | [
"MIT"
] | null | null | null | plot.py | nsi88/ctg-plot | c623fb76d72429f0411d42ff1e380a7a46d8c04f | [
"MIT"
] | null | null | null | import csv
import os
import sys
import matplotlib.pyplot as plt
from metrics.index import metrics
from plot_package.helpers import ascendence_label, descendence_label, with_sign
# CTG plot script: reads (second, heartbeat) rows from the CSV named on the
# command line and renders the trace plus derived metrics (average, basic
# channel, ascendences/descendences).  CTG presumably stands for
# cardiotocography -- confirm with the project README.
if len(sys.argv) != 2:
    print(f"{__file__} sample_name.csv")
    sys.exit(1)
sample_name = sys.argv[1]
# set figure size
plt.figure(figsize=(20.48, 10.24))
# read and draw the raw heartbeat series
seconds = []
heartbeats = []
with open(sample_name) as f:
    read_csv = csv.reader(f, delimiter=",")
    for row in read_csv:
        seconds.append(int(row[0]))    # column 0: time in seconds
        heartbeats.append(int(row[1])) # column 1: heartbeat value
plt.plot(seconds, heartbeats, color="blue", marker="D")
# calculate ctg metrics
ctg_metrics = metrics(heartbeats)
x_min = 0
x_max = seconds[-1]
basic_channel_bottom = ctg_metrics.avg * 0.875 # - 12.5%
basic_channel_top = ctg_metrics.avg * 1.125 # + 12.5%
# draw ctg metrics
# average: horizontal line across the full x range
# TODO: use a better way to draw line (plot(y)). See _axes.py plot method docs
plt.plot(
    [x_min, x_max],
    [ctg_metrics.avg, ctg_metrics.avg],
    label=f"Average: {round(ctg_metrics.avg, 2)}",
    color="red",
)
# basic channel: +/- 12.5% band around the average
plt.plot([x_min, x_max], [basic_channel_bottom, basic_channel_bottom], color="black")
plt.plot(
    [x_min, x_max],
    [basic_channel_top, basic_channel_top],
    color="black",
    label=f"Basic channel from {round(basic_channel_bottom, 2)} to {round(basic_channel_top, 2)}. "
    f"Variance: {with_sign(ctg_metrics.variance)}",
)
# ascendences: highlight each rising segment in green
for index, ascendence in enumerate(ctg_metrics.ascendences):
    plt.plot(
        # NOTE: We add + 1 to ascendence.end cause slice(, end) is exclusive
        seconds[slice(ascendence.start, ascendence.end + 1)],
        heartbeats[slice(ascendence.start, ascendence.end + 1)],
        color="green",
        marker="D",
        label=ascendence_label(
            heartbeats=heartbeats, ascendence=ascendence, index=index
        ),
    )
# descendences: highlight each falling segment in purple
for index, descendence in enumerate(ctg_metrics.descendences):
    plt.plot(
        # slice end is exclusive, hence the + 1 (same as for ascendences)
        seconds[slice(descendence.start, descendence.end + 1)],
        heartbeats[slice(descendence.start, descendence.end + 1)],
        color="purple",
        marker="D",
        label=descendence_label(
            heartbeats=heartbeats, descendence=descendence, index=index
        ),
    )
# draw common plot elements
# x axis label
plt.xlabel("Second")
# y axis label
plt.ylabel("Heartbeat")
# plot title
plt.title("CTG")
# show a legend on the plot
plt.legend()
# display the figure (blocks until the window is closed)
plt.show()
# save the plot (disabled; uncomment to also write a PNG next to the input CSV)
# sample_basename, _sample_extname = os.path.splitext(sample_name)
# figure_name = f"{sample_basename}.png"
# plt.savefig(figure_name)
# print(f"Saved to {figure_name}")
| 28.531915 | 99 | 0.688292 |
2f6b87929186c7b4d57d3ad6750b0986257cf867 | 662 | py | Python | list_prime.py | zm6/Python-Practice | c2080e1104cd7cee4af8ebc3e3f4941fc7466586 | [
"MIT"
] | null | null | null | list_prime.py | zm6/Python-Practice | c2080e1104cd7cee4af8ebc3e3f4941fc7466586 | [
"MIT"
] | null | null | null | list_prime.py | zm6/Python-Practice | c2080e1104cd7cee4af8ebc3e3f4941fc7466586 | [
"MIT"
] | null | null | null | #!/user/bin/env python
# -*- coding:utf-8 -*-
# zm6
# 2021-03-19
# 2021-03-19
# N
import time #
if __name__ == "__main__":
    # Read the bound n, time list_prime(n), and print the result followed
    # by the elapsed wall-clock time in seconds.
    n = int(input("please enter the number")) # input bound for the prime search
    start = time.time() # start timestamp
    # presumably the primes up to n -- confirm against list_prime's definition
    num = list_prime(n)
    print(n, "", num)
    end = time.time() # end timestamp
    print(str(end - start))
| 16.55 | 54 | 0.493958 |
2f6cee267527184d028d64eb983074f84ea9f058 | 2,246 | py | Python | foyer/tests/test_forcefield.py | rmatsum836/foyer | c150d6f4c34e9ca7c5e4012e4406fb4ebab588cb | [
"MIT"
] | 1 | 2020-11-08T23:51:29.000Z | 2020-11-08T23:51:29.000Z | foyer/tests/test_forcefield.py | rmatsum836/foyer | c150d6f4c34e9ca7c5e4012e4406fb4ebab588cb | [
"MIT"
] | null | null | null | foyer/tests/test_forcefield.py | rmatsum836/foyer | c150d6f4c34e9ca7c5e4012e4406fb4ebab588cb | [
"MIT"
] | null | null | null | import glob
import os
from pkg_resources import resource_filename
import mbuild as mb
import parmed as pmd
import pytest
from foyer import Forcefield
from foyer.tests.utils import get_fn
FF_DIR = resource_filename('foyer', 'forcefields')
FORCEFIELDS = glob.glob(os.path.join(FF_DIR, '*.xml'))
| 31.194444 | 72 | 0.684328 |
2f6d26b42b45ea8ec8df168ee13e107fabe9add1 | 407 | py | Python | Back/ns_portal/database/main_db/troles_model.py | anthonyHenryNS/NsPortal | dcb4b4e0a70c1c3431d5438d97e80f5d05c8e10e | [
"MIT"
] | 1 | 2019-01-22T15:16:43.000Z | 2019-01-22T15:16:43.000Z | Back/ns_portal/database/main_db/troles_model.py | NaturalSolutions/NsPortal | bcd07fdf015948a82f4d0c3c9a02f513b2d99f5d | [
"MIT"
] | 16 | 2015-09-28T14:46:13.000Z | 2020-04-20T10:34:25.000Z | Back/ns_portal/database/main_db/troles_model.py | anthonyHenryNS/NsPortal | dcb4b4e0a70c1c3431d5438d97e80f5d05c8e10e | [
"MIT"
] | 10 | 2015-05-06T08:05:09.000Z | 2020-01-27T13:39:47.000Z | from ns_portal.database.meta import (
Main_Db_Base
)
from sqlalchemy import (
Column,
Integer,
String
)
| 15.653846 | 37 | 0.594595 |
2f6fd35b1512af4a2c444f8b5323ece8c562f37c | 2,869 | py | Python | src/authorization/validators.py | dmitrijbozhkov/cloudcourseproject | 3e62a5fafef418c1c058587abc5615b03fc2325a | [
"Apache-2.0"
] | null | null | null | src/authorization/validators.py | dmitrijbozhkov/cloudcourseproject | 3e62a5fafef418c1c058587abc5615b03fc2325a | [
"Apache-2.0"
] | 7 | 2021-02-08T20:41:23.000Z | 2022-03-12T00:21:37.000Z | src/authorization/validators.py | dmitrijbozhkov/cloudcourseproject | 3e62a5fafef418c1c058587abc5615b03fc2325a | [
"Apache-2.0"
] | null | null | null | """ Form validators for authorization """
import re
from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField, SubmitField
from wtforms.validators import DataRequired, Length, ValidationError, Email, EqualTo
from cloudcourseproject.src.model import User
def password_validator(password):
    """Validate that *password* contains a digit, an uppercase and a
    lowercase latin letter; raises ValidationError otherwise."""
    required = (
        ("[0-9]", "Password should have numbers"),
        ("[A-Z]", "Password should contain capital latin letters"),
        ("[a-z]", "Password should contain lower latin letters"),
    )
    for pattern, message in required:
        if re.search(pattern, password) is None:
            raise ValidationError(message)
| 42.191176 | 117 | 0.684559 |
2f705c1639774ae7481bbdfb1680d2106c872e2a | 1,418 | py | Python | app/pipelines/load_data/load_marketing_data/__init__.py | mediaimprove/mara-example-project-1 | d1cab4cf079e78a4c0f73edac73200fac4112f34 | [
"MIT"
] | 22 | 2020-10-07T21:32:07.000Z | 2022-03-21T19:21:36.000Z | app/pipelines/load_data/load_marketing_data/__init__.py | mediaimprove/mara-example-project-1 | d1cab4cf079e78a4c0f73edac73200fac4112f34 | [
"MIT"
] | 4 | 2020-07-16T15:22:46.000Z | 2020-10-28T15:18:32.000Z | app/pipelines/load_data/load_marketing_data/__init__.py | mediaimprove/mara-example-project-1 | d1cab4cf079e78a4c0f73edac73200fac4112f34 | [
"MIT"
] | 4 | 2020-10-08T10:30:04.000Z | 2022-03-19T09:21:51.000Z | import pathlib
from mara_pipelines.commands.sql import ExecuteSQL, Copy
from mara_pipelines.pipelines import Pipeline, Task
from mara_pipelines import config
# Mara pipeline that loads the marketing leads data into the m_data schema.
pipeline = Pipeline(
    id="load_marketing_data",
    description="Jobs related with loading marketing leads data from the backend database",
    max_number_of_parallel_tasks=5,
    base_path=pathlib.Path(__file__).parent,
    labels={"Schema": "m_data"})
# Run first: (re)create the target schema before any table loads.
pipeline.add_initial(
    Task(id="initialize_schemas", description="Recreates the marketing data schema",
         commands=[
             ExecuteSQL(sql_file_name='../recreate_marketing_data_schema.sql',
                        file_dependencies=[
                            pathlib.Path(__file__).parent.parent / 'recreate_marketing_data_schema.sql'])]))
# Source tables copied from the backend ('olist') into the DWH.
tables = [
    'closed_deal',
    'marketing_qualified_lead'
]
# One load task per table: create the target table, then copy all rows.
for table in tables:
    pipeline.add(
        Task(id=f"load_{table}",
             description=f'Loads the {table}s from the backend database',
             commands=[
                 ExecuteSQL(sql_file_name=f'{table}/create_{table}_table.sql'),
                 Copy(sql_statement=f"""
                 SELECT *
                 FROM marketing.{table}s;
                 """,
                      source_db_alias='olist',
                      target_db_alias='dwh',
                      target_table=f'm_data.{table}',
                      delimiter_char=';')]
        )
    )
| 32.227273 | 108 | 0.61213 |
2f710ce3e46ffdc56061d382495ca8df6e25a15b | 320 | py | Python | apps/urls.py | cijianciqing/myWX_l_ningmo | df4c80554b0f3c58060352fc0d5fc6c649f805c8 | [
"Apache-2.0"
] | null | null | null | apps/urls.py | cijianciqing/myWX_l_ningmo | df4c80554b0f3c58060352fc0d5fc6c649f805c8 | [
"Apache-2.0"
] | null | null | null | apps/urls.py | cijianciqing/myWX_l_ningmo | df4c80554b0f3c58060352fc0d5fc6c649f805c8 | [
"Apache-2.0"
] | null | null | null |
from django.urls import path,include
from .views import menu, image, weixinFile
urlpatterns = [
path('menu/list', menu.get_menu),
path('menu/user', menu.UserMenu.as_view()),
path('image', image.ImageView.as_view()),
path('saveWX', weixinFile.saveWX),
path('getRecentWX', weixinFile.getRecentWX),
] | 26.666667 | 48 | 0.696875 |
2f7149167478c04bd0604548dfe0f8ebb31e11a2 | 1,178 | py | Python | mayan/apps/django_gpg/links.py | garrans/mayan-edms | e95e90cc47447a1ae72629271652824aa9868572 | [
"Apache-2.0"
] | null | null | null | mayan/apps/django_gpg/links.py | garrans/mayan-edms | e95e90cc47447a1ae72629271652824aa9868572 | [
"Apache-2.0"
] | null | null | null | mayan/apps/django_gpg/links.py | garrans/mayan-edms | e95e90cc47447a1ae72629271652824aa9868572 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from django.utils.translation import ugettext_lazy as _
from navigation import Link
from .permissions import (
permission_key_delete, permission_key_receive, permission_key_view,
permission_keyserver_query
)
# Navigation links for the GPG key-management views.  Each link names the
# view it points to (resolved through the django_gpg URL namespace) and the
# permission(s) gating it.
link_private_keys = Link(
    icon='fa fa-key', permissions=(permission_key_view,),
    text=_('Private keys'), view='django_gpg:key_private_list'
)
link_public_keys = Link(
    icon='fa fa-key', permissions=(permission_key_view,),
    text=_('Public keys'), view='django_gpg:key_public_list'
)
# Destructive action: tagged 'dangerous' so the UI can style it accordingly.
# NOTE(review): the args strings ('object.fingerprint', 'object.type') look
# like expressions resolved against the linked object by the navigation
# app -- confirm in navigation.Link.
link_key_delete = Link(
    permissions=(permission_key_delete,), tags='dangerous', text=_('Delete'),
    view='django_gpg:key_delete', args=('object.fingerprint', 'object.type',)
)
link_key_query = Link(
    permissions=(permission_keyserver_query,), text=_('Query keyservers'),
    view='django_gpg:key_query'
)
# keep_query=True: presumably preserves the current query string when following.
link_key_receive = Link(
    keep_query=True, permissions=(permission_key_receive,), text=_('Import'),
    view='django_gpg:key_receive', args='object.key_id'
)
link_key_setup = Link(
    icon='fa fa-key', permissions=(permission_key_view,),
    text=_('Key management'), view='django_gpg:key_public_list'
)
| 31.837838 | 77 | 0.749576 |
2f73b6858df8269c4f5f2480de3342f864156c6c | 1,489 | py | Python | rosalind/splc/splc.py | TecKnow/learning | 71d1ddf9d580027ecc62a067581da378a9e85f6d | [
"BSD-3-Clause"
] | null | null | null | rosalind/splc/splc.py | TecKnow/learning | 71d1ddf9d580027ecc62a067581da378a9e85f6d | [
"BSD-3-Clause"
] | null | null | null | rosalind/splc/splc.py | TecKnow/learning | 71d1ddf9d580027ecc62a067581da378a9e85f6d | [
"BSD-3-Clause"
] | null | null | null | """
Problem : RNA Splicing
URL : http://rosalind.info/problems/splc/
Author : David P. Perkins
"""
import fasta
if __name__ == "__main__":
    # Read FASTA input from stdin, splice the introns out of the gene, and
    # print the translated protein string.
    # NOTE(review): readline() consumes only the first line of stdin; if
    # the FASTA input spans multiple lines this may truncate it -- confirm
    # what fasta.FASTA.fromList expects.
    import sys
    FASTAs = fasta.FASTA.fromList(sys.stdin.readline())
    # First record is the gene; the remaining records are the introns.
    cr = getCodingRegion(FASTAs[0].value, [x.value for x in FASTAs[1:]])
    rna = cr.replace('T','U')  # transcribe DNA -> RNA
    prot = RNAtoProt(rna)
    print(prot)
| 29.78 | 85 | 0.601746 |
2f74e4a6873c5b5dffd070a88fe2c8e9d487f559 | 191 | py | Python | common/helpers.py | denisov-vlad/mtuci-masters | 6fa68adb156ba4b43b4995d365f450654df70fa3 | [
"MIT"
] | null | null | null | common/helpers.py | denisov-vlad/mtuci-masters | 6fa68adb156ba4b43b4995d365f450654df70fa3 | [
"MIT"
] | null | null | null | common/helpers.py | denisov-vlad/mtuci-masters | 6fa68adb156ba4b43b4995d365f450654df70fa3 | [
"MIT"
] | null | null | null | import os
# Module-level alias so callers can import path_joiner instead of os.path.join.
path_joiner = os.path.join
path_basename = os.path.basename | 17.363636 | 42 | 0.633508 |
2f75db3882d66a7931843a46ed92b1bea9dfaf2f | 6,448 | py | Python | tests/integration/test_interrupt_fields.py | jvanstraten/vhdmmio | f166b07074a9159311a01af88497df91c19e09d1 | [
"Apache-2.0"
] | 4 | 2019-07-01T14:41:38.000Z | 2021-11-28T12:54:49.000Z | tests/integration/test_interrupt_fields.py | jvanstraten/vhdmmio | f166b07074a9159311a01af88497df91c19e09d1 | [
"Apache-2.0"
] | 4 | 2019-08-23T15:05:24.000Z | 2020-12-16T10:02:20.000Z | tests/integration/test_interrupt_fields.py | jvanstraten/vhdmmio | f166b07074a9159311a01af88497df91c19e09d1 | [
"Apache-2.0"
] | 1 | 2021-07-16T13:41:21.000Z | 2021-07-16T13:41:21.000Z | """Interrupt field tests."""
from copy import deepcopy
from unittest import TestCase
from ..testbench import RegisterFileTestbench
| 38.380952 | 89 | 0.498759 |
2f762c138bc0fd2f04d2c1539f4eca93c9446723 | 1,745 | py | Python | app.py | corsmith/openshift-webhook-webex-teams-translator | fc20d4cdf2ca0959d2875048e6c5e5a1477ccec5 | [
"BSD-3-Clause"
] | null | null | null | app.py | corsmith/openshift-webhook-webex-teams-translator | fc20d4cdf2ca0959d2875048e6c5e5a1477ccec5 | [
"BSD-3-Clause"
] | null | null | null | app.py | corsmith/openshift-webhook-webex-teams-translator | fc20d4cdf2ca0959d2875048e6c5e5a1477ccec5 | [
"BSD-3-Clause"
] | null | null | null | import tornado.ioloop
import tornado.web
import tornado.options
from tornado.log import gen_log
'''
Alert Manager Documentation: https://prometheus.io/docs/alerting/configuration/
Sample alertmanager message:
{
"version": "4",
"groupKey": <string>, // key identifying the group of alerts (e.g. to deduplicate)
"status": "<resolved|firing>",
"receiver": <string>,
"groupLabels": <object>,
"commonLabels": <object>,
"commonAnnotations": <object>,
"externalURL": <string>, // backlink to the Alertmanager.
"alerts": [
{
"status": "<resolved|firing>",
"labels": <object>,
"annotations": <object>,
"startsAt": "<rfc3339>",
"endsAt": "<rfc3339>",
"generatorURL": <string> // identifies the entity that caused the alert
},
...
]
}
'''
def make_app():
return tornado.web.Application([
(r"/v1/webhooks/incoming/([^/]+)", MainHandler),
(r"/", HealthHandler),
])
if __name__ == "__main__":
tornado.options.parse_command_line()
app = make_app()
app.listen(8080)
tornado.ioloop.IOLoop.current().start()
| 27.698413 | 177 | 0.638395 |
2f76d7407f4890886daf9577fd641a2214d9abc6 | 1,848 | py | Python | backend/serializers.py | richardswei/werk | b08b1ffc2d3aaf61f2cfb07ecc259a35eea77e91 | [
"MIT"
] | null | null | null | backend/serializers.py | richardswei/werk | b08b1ffc2d3aaf61f2cfb07ecc259a35eea77e91 | [
"MIT"
] | 8 | 2020-03-21T01:57:31.000Z | 2021-09-22T18:46:07.000Z | backend/serializers.py | richardswei/werkit | b08b1ffc2d3aaf61f2cfb07ecc259a35eea77e91 | [
"MIT"
] | null | null | null | from rest_framework import serializers
from backend.models import TodoItem, Note
from django.contrib.auth import authenticate, get_user_model
| 33.6 | 113 | 0.679113 |
2f7707e7a77d86241b0db0b4c74b1a925d1c197b | 695 | py | Python | projects/demos/location2.py | readysetstem/readysetstem-api | 01e1360f4a28a6783ee1e0fa1bc239dd999de6be | [
"Apache-2.0"
] | 1 | 2018-02-23T20:20:45.000Z | 2018-02-23T20:20:45.000Z | projects/demos/location2.py | readysetstem/readysetstem-api | 01e1360f4a28a6783ee1e0fa1bc239dd999de6be | [
"Apache-2.0"
] | 1 | 2016-10-25T18:00:15.000Z | 2016-10-25T18:00:15.000Z | projects/demos/location2.py | readysetstem/readysetstem-api | 01e1360f4a28a6783ee1e0fa1bc239dd999de6be | [
"Apache-2.0"
] | null | null | null | from rstem.led_matrix import FrameBuffer
from rstem.mcpi import minecraft, control
import time
control.show()
mc = minecraft.Minecraft.create()
SCALE = 25
fb = FrameBuffer()
count = 0
FLASH_COUNT = 3
flash_lit = True
while True:
pos = mc.player.getTilePos()
x = round(pos.x/SCALE + (fb.width-1)/2)
x_out_of_bounds = not 0 <= x < fb.width
x = min(fb.width-1, max(0, x))
z = round(pos.z/SCALE + (fb.height-1)/2)
z_out_of_bounds = not 0 <= z < fb.height
z = min(fb.height-1, max(0, z))
fb.erase()
count += 1
if count > FLASH_COUNT:
flash_lit = not flash_lit
count = 0
if not x_out_of_bounds and not z_out_of_bounds or flash_lit:
fb.point(z, x)
fb.show()
time.sleep(0.01)
| 19.305556 | 61 | 0.683453 |
2f793c352ccb3c8ed1a615cf95be1f974da7e115 | 10,833 | py | Python | distributed_train.py | ShivamShrirao/contrastive-unpaired-translation | e81611a5bd8b7aee6aedab10aadf9e22a0804a63 | [
"BSD-3-Clause"
] | null | null | null | distributed_train.py | ShivamShrirao/contrastive-unpaired-translation | e81611a5bd8b7aee6aedab10aadf9e22a0804a63 | [
"BSD-3-Clause"
] | null | null | null | distributed_train.py | ShivamShrirao/contrastive-unpaired-translation | e81611a5bd8b7aee6aedab10aadf9e22a0804a63 | [
"BSD-3-Clause"
] | null | null | null | import os
from os.path import join as osp
import numpy as np
from tqdm import tqdm
import wandb
import torch
import torch.nn as nn
import torch.optim as optim
from torch.cuda import amp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from torch import autograd
from torch.optim import lr_scheduler
# from torchinfo import summary
from options.train_options import TrainOptions
from utils import AverageMeter, reduce_loss, synchronize, cleanup, seed_everything, set_grads, log_imgs_wandb
from data import CreateDataLoader
from data.unaligned_dataset import UnAlignedDataset
from models.custom_unet import Unet, NLayerDiscriminator, PatchSampleF, GANLoss, PatchNCELoss, get_norm_layer, init_weights
if __name__ == '__main__':
main()
| 48.361607 | 141 | 0.63925 |
2f79b110e6695d3ccede296e521acb7c60c79294 | 889 | py | Python | src/backend/common/models/notifications/tests/verification_test.py | bovlb/the-blue-alliance | 29389649d96fe060688f218d463e642dcebfd6cc | [
"MIT"
] | null | null | null | src/backend/common/models/notifications/tests/verification_test.py | bovlb/the-blue-alliance | 29389649d96fe060688f218d463e642dcebfd6cc | [
"MIT"
] | null | null | null | src/backend/common/models/notifications/tests/verification_test.py | bovlb/the-blue-alliance | 29389649d96fe060688f218d463e642dcebfd6cc | [
"MIT"
] | null | null | null | from backend.common.consts.notification_type import NotificationType
from backend.common.models.notifications.verification import (
VerificationNotification,
)
| 34.192308 | 87 | 0.786277 |