hexsha
stringlengths 40
40
| size
int64 5
2.06M
| ext
stringclasses 11
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 3
251
| max_stars_repo_name
stringlengths 4
130
| max_stars_repo_head_hexsha
stringlengths 40
78
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 3
251
| max_issues_repo_name
stringlengths 4
130
| max_issues_repo_head_hexsha
stringlengths 40
78
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
116k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 3
251
| max_forks_repo_name
stringlengths 4
130
| max_forks_repo_head_hexsha
stringlengths 40
78
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 1
1.05M
| avg_line_length
float64 1
1.02M
| max_line_length
int64 3
1.04M
| alphanum_fraction
float64 0
1
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e3496bdad8c230d6caf15ce743cc65f029480031
| 5,287
|
py
|
Python
|
RecoEgamma/Configuration/python/RecoEgamma_cff.py
|
sebwieland/cmssw
|
431e2fdfedec052e73c16e9f06de98ade41ebc56
|
[
"Apache-2.0"
] | null | null | null |
RecoEgamma/Configuration/python/RecoEgamma_cff.py
|
sebwieland/cmssw
|
431e2fdfedec052e73c16e9f06de98ade41ebc56
|
[
"Apache-2.0"
] | null | null | null |
RecoEgamma/Configuration/python/RecoEgamma_cff.py
|
sebwieland/cmssw
|
431e2fdfedec052e73c16e9f06de98ade41ebc56
|
[
"Apache-2.0"
] | null | null | null |
import FWCore.ParameterSet.Config as cms
from RecoEgamma.EgammaElectronProducers.gsfElectronSequence_cff import *
from RecoEgamma.EgammaElectronProducers.uncleanedOnlyElectronSequence_cff import *
from RecoEgamma.EgammaPhotonProducers.photonSequence_cff import *
from RecoEgamma.EgammaPhotonProducers.conversionSequence_cff import *
from RecoEgamma.EgammaPhotonProducers.conversionTrackSequence_cff import *
from RecoEgamma.EgammaPhotonProducers.allConversionSequence_cff import *
from RecoEgamma.EgammaPhotonProducers.gedPhotonSequence_cff import *
from RecoEgamma.EgammaIsolationAlgos.egammaIsolationSequence_cff import *
from RecoEgamma.EgammaIsolationAlgos.interestingEgammaIsoDetIdsSequence_cff import *
from RecoEgamma.PhotonIdentification.photonId_cff import *
from RecoEgamma.ElectronIdentification.electronIdSequence_cff import *
from RecoEgamma.EgammaHFProducers.hfEMClusteringSequence_cff import *
from TrackingTools.Configuration.TrackingTools_cff import *
from RecoEgamma.EgammaIsolationAlgos.egmIsolationDefinitions_cff import *
#importing new gedGsfElectronSequence :
#from RecoEgamma.EgammaElectronProducers.gedGsfElectronSequence_cff import *
from RecoEgamma.EgammaElectronProducers.pfBasedElectronIso_cff import *
# ---------------------------------------------------------------------------
# Global reconstruction: GSF electron tracking plus conversion finding.
# ---------------------------------------------------------------------------
egammaGlobalRecoTask = cms.Task(electronGsfTrackingTask,conversionTrackTask,allConversionTask)
egammaGlobalReco = cms.Sequence(egammaGlobalRecoTask)
# this might be historical: not sure why we do this
from Configuration.Eras.Modifier_fastSim_cff import fastSim
# FastSim swaps in the conversion-track task without ECAL seeding.
_fastSim_egammaGlobalRecoTask = egammaGlobalRecoTask.copy()
_fastSim_egammaGlobalRecoTask.replace(conversionTrackTask,conversionTrackTaskNoEcalSeeded)
fastSim.toReplaceWith(egammaGlobalRecoTask, _fastSim_egammaGlobalRecoTask)
# Baseline e/gamma reconstruction: electrons, conversions and photons.
egammarecoTask = cms.Task(gsfElectronTask,conversionTask,photonTask)
egammareco = cms.Sequence(egammarecoTask)
egammaHighLevelRecoPrePFTask = cms.Task(gsfEcalDrivenElectronTask,uncleanedOnlyElectronTask,conversionTask,photonTask)
egammaHighLevelRecoPrePF = cms.Sequence(egammaHighLevelRecoPrePFTask)
# not commissioned and not relevant in FastSim (?):
fastSim.toReplaceWith(egammarecoTask, egammarecoTask.copyAndExclude([conversionTask]))
fastSim.toReplaceWith(egammaHighLevelRecoPrePFTask,egammaHighLevelRecoPrePFTask.copyAndExclude([uncleanedOnlyElectronTask,conversionTask]))
#egammaHighLevelRecoPostPFTask = cms.Task(gsfElectronMergingTask,interestingEgammaIsoDetIdsTask,photonIDTask,eIdTask,hfEMClusteringTask)
#adding new gedGsfElectronTask and gedPhotonTask :
#egammaHighLevelRecoPostPFTask = cms.Task(gsfElectronMergingTask,gedGsfElectronTask,interestingEgammaIsoDetIdsTask,gedPhotonTask,photonIDTask,eIdTask,hfEMClusteringTask)
# Post-particle-flow step: interesting det-ids, isolation, photon/electron
# IDs and HF electron clustering.
egammaHighLevelRecoPostPFTask = cms.Task(interestingEgammaIsoDetIdsTask,egmIsolationTask,photonIDTask,photonIDTaskGED,eIdTask,hfEMClusteringTask)
egammaHighLevelRecoPostPF = cms.Sequence(egammaHighLevelRecoPostPFTask)
# Convenience bundles combining baseline reco with various ID / isolation
# add-ons; each Task is wrapped in a Sequence of the matching name.
egammarecoFullTask = cms.Task(egammarecoTask,interestingEgammaIsoDetIdsTask,egmIsolationTask,photonIDTask,eIdTask,hfEMClusteringTask)
egammarecoFull = cms.Sequence(egammarecoFullTask)
egammarecoWithIDTask = cms.Task(egammarecoTask,photonIDTask,eIdTask)
egammarecoWithID = cms.Sequence(egammarecoWithIDTask)
egammareco_woConvPhotonsTask = cms.Task(gsfElectronTask,photonTask)
egammareco_woConvPhotons = cms.Sequence(egammareco_woConvPhotonsTask)
egammareco_withIsolationTask = cms.Task(egammarecoTask,egammaIsolationTask)
egammareco_withIsolation = cms.Sequence(egammareco_withIsolationTask)
egammareco_withIsolation_woConvPhotonsTask = cms.Task(egammareco_woConvPhotonsTask,egammaIsolationTask)
egammareco_withIsolation_woConvPhotons = cms.Sequence(egammareco_withIsolation_woConvPhotonsTask)
egammareco_withPhotonIDTask = cms.Task(egammarecoTask,photonIDTask)
egammareco_withPhotonID = cms.Sequence(egammareco_withPhotonIDTask)
egammareco_withElectronIDTask = cms.Task(egammarecoTask,eIdTask)
egammareco_withElectronID = cms.Sequence(egammareco_withElectronIDTask)
egammarecoFull_woHFElectronsTask = cms.Task(egammarecoTask,interestingEgammaIsoDetIdsTask,photonIDTask,eIdTask)
egammarecoFull_woHFElectrons = cms.Sequence(egammarecoFull_woHFElectronsTask)
from Configuration.Eras.Modifier_pA_2016_cff import pA_2016
from Configuration.Eras.Modifier_peripheralPbPb_cff import peripheralPbPb
from Configuration.Eras.Modifier_pp_on_AA_2018_cff import pp_on_AA_2018
from Configuration.Eras.Modifier_pp_on_XeXe_2017_cff import pp_on_XeXe_2017
from Configuration.Eras.Modifier_ppRef_2017_cff import ppRef_2017
#HI-specific algorithms needed in pp scenario special configurations
from RecoHI.HiEgammaAlgos.photonIsolationHIProducer_cfi import photonIsolationHIProducerpp
from RecoHI.HiEgammaAlgos.photonIsolationHIProducer_cfi import photonIsolationHIProducerppGED
from RecoHI.HiEgammaAlgos.photonIsolationHIProducer_cfi import photonIsolationHIProducerppIsland
# Heavy-ion-like eras get the HI photon-isolation producers appended to the
# post-PF task.
_egammaHighLevelRecoPostPF_HITask = egammaHighLevelRecoPostPFTask.copy()
_egammaHighLevelRecoPostPF_HITask.add(photonIsolationHIProducerpp)
_egammaHighLevelRecoPostPF_HITask.add(photonIsolationHIProducerppGED)
_egammaHighLevelRecoPostPF_HITask.add(photonIsolationHIProducerppIsland)
for e in [pA_2016, peripheralPbPb, pp_on_AA_2018, pp_on_XeXe_2017, ppRef_2017]:
    e.toReplaceWith(egammaHighLevelRecoPostPFTask, _egammaHighLevelRecoPostPF_HITask)
| 66.924051
| 169
| 0.897863
|
e349722dbbb7eaf1a0dc75722c25f01806dbcca5
| 3,632
|
py
|
Python
|
language/serene/boolq_tfds.py
|
Xtuden-com/language
|
70c0328968d5ffa1201c6fdecde45bbc4fec19fc
|
[
"Apache-2.0"
] | 1,199
|
2018-10-16T01:30:18.000Z
|
2022-03-31T21:05:24.000Z
|
language/serene/boolq_tfds.py
|
Xtuden-com/language
|
70c0328968d5ffa1201c6fdecde45bbc4fec19fc
|
[
"Apache-2.0"
] | 116
|
2018-10-18T03:31:46.000Z
|
2022-03-24T13:40:50.000Z
|
language/serene/boolq_tfds.py
|
Xtuden-com/language
|
70c0328968d5ffa1201c6fdecde45bbc4fec19fc
|
[
"Apache-2.0"
] | 303
|
2018-10-22T12:35:12.000Z
|
2022-03-27T17:38:17.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""TF Dataset for BoolQ in same format as Fever TFDS."""
import json
from language.serene import constants
from language.serene import util
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
| 32.141593
| 75
| 0.593337
|
e34a90751c66f311b5912bb8c6a8d1a8ad0deae9
| 449
|
py
|
Python
|
23/03/0.py
|
pylangstudy/201709
|
53d868786d7327a83bfa7f4149549c6f9855a6c6
|
[
"CC0-1.0"
] | null | null | null |
23/03/0.py
|
pylangstudy/201709
|
53d868786d7327a83bfa7f4149549c6f9855a6c6
|
[
"CC0-1.0"
] | 32
|
2017-09-01T00:52:17.000Z
|
2017-10-01T00:30:02.000Z
|
23/03/0.py
|
pylangstudy/201709
|
53d868786d7327a83bfa7f4149549c6f9855a6c6
|
[
"CC0-1.0"
] | null | null | null |
import json
import pprint
from urllib.request import urlopen

# Fetch the package metadata for Twisted from PyPI's JSON API and show
# pprint's depth/width controls on the nested result.
# NOTE: the old http://pypi.python.org host has been retired; the JSON
# API now lives at https://pypi.org/pypi/<project>/json.
with urlopen('https://pypi.org/pypi/Twisted/json') as url:
    # Decode using the charset advertised in the response headers.
    http_info = url.info()
    raw_data = url.read().decode(http_info.get_content_charset())
project_info = json.loads(raw_data)
# Full dump, then progressively truncated views of the same structure.
pprint.pprint(project_info)
print('------------------------------')
pprint.pprint(project_info, depth=2)
print('------------------------------')
pprint.pprint(project_info, depth=2, width=50)
| 29.933333
| 65
| 0.657016
|
e34b1404822d471e120b2d87a3f5be2a57d14434
| 1,323
|
py
|
Python
|
molotov_ext/__init__.py
|
2gis-test-labs/molotov-ext
|
2cf2cc5b74f6676ed1680511030d4dddb8be8380
|
[
"Apache-2.0"
] | null | null | null |
molotov_ext/__init__.py
|
2gis-test-labs/molotov-ext
|
2cf2cc5b74f6676ed1680511030d4dddb8be8380
|
[
"Apache-2.0"
] | null | null | null |
molotov_ext/__init__.py
|
2gis-test-labs/molotov-ext
|
2cf2cc5b74f6676ed1680511030d4dddb8be8380
|
[
"Apache-2.0"
] | null | null | null |
from argparse import Namespace
from functools import partial
from typing import Any
import molotov
from .formatters import DefaultFormatter
from .record_table import RecordTable
from .recorder import Recorder
from .reporter import Reporter
from .scenario import Scenario
# Public API of the package.
# NOTE(review): "register_reporter" is exported here but is not defined in
# this chunk — confirm it is declared elsewhere in the file.
__all__ = ("Reporter", "register_reporter", "scenario", "recorder")
# Module-wide recorder instance backed by an in-memory record table.
recorder = Recorder(RecordTable())
# Scenario factory pre-wired to notify the shared recorder when a
# scenario starts.
scenario = partial(Scenario, recorder.on_starting_scenario)
| 32.268293
| 99
| 0.73167
|
e34cc6ddd23022672aee1685f571b987ab87c815
| 936
|
py
|
Python
|
services/viewcounts/utils.py
|
RyanFleck/AuxilliaryWebsiteServices
|
bcaa6689e567fdf9f20f7f4ea84043aa2b6f1378
|
[
"MIT"
] | 1
|
2020-11-11T20:20:42.000Z
|
2020-11-11T20:20:42.000Z
|
services/viewcounts/utils.py
|
RyanFleck/AuxilliaryWebsiteServices
|
bcaa6689e567fdf9f20f7f4ea84043aa2b6f1378
|
[
"MIT"
] | 17
|
2020-11-09T19:04:04.000Z
|
2022-03-01T18:08:42.000Z
|
services/viewcounts/utils.py
|
RyanFleck/AuxilliaryWebsiteServices
|
bcaa6689e567fdf9f20f7f4ea84043aa2b6f1378
|
[
"MIT"
] | null | null | null |
from slugify import slugify
from services.viewcounts.models import PageViewsModel
def get_page_views(url: str) -> int:
    """Return the view count for the page at *url*, bumping it by one.

    Expects a site-relative path such as "/blog/post"; raises for
    anything that looks like a full or partial absolute URL.
    NOTE(review): a newly created page is returned without incrementing
    or saving, so the first visit relies on the model's default view
    count — confirm PageViewsModel's default accounts for it.
    """
    # Pre-processing checks: Client should not pass full or partial URL.
    if not url.startswith("/"):
        raise Exception("Partial URL detected, only POST the page path.")
    if ("http" in url) or ("localhost" in url):
        raise Exception("Full URL detected, only POST the page path.")
    # Boil down url to slug/path:
    path = url_to_path(url)
    print(f"User is at {path}")
    # Creates a new object if none exists.
    page, created = PageViewsModel.objects.get_or_create(path=path)
    # Add a view to the model (existing pages only; see NOTE above).
    if not created:
        page.views = page.views + 1
        page.save()
    return page.views
def url_to_path(url: str) -> str:
    """Turn an incoming URL into a path-slug, capped at 199 characters."""
    max_slug_length = 199
    return slugify(url, max_length=max_slug_length)
| 28.363636
| 73
| 0.672009
|
e34da2a39a4311b17cd41e029318a815155da9e9
| 9,875
|
py
|
Python
|
bin/gaussian_process_samples.py
|
ltiao/videos
|
ba371078d107da5a4c726a957b31a29bb157664d
|
[
"MIT"
] | null | null | null |
bin/gaussian_process_samples.py
|
ltiao/videos
|
ba371078d107da5a4c726a957b31a29bb157664d
|
[
"MIT"
] | null | null | null |
bin/gaussian_process_samples.py
|
ltiao/videos
|
ba371078d107da5a4c726a957b31a29bb157664d
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
import tensorflow_probability as tfp
from scipy.stats import expon
from videos.linalg import safe_cholesky
from manim import *
# Shorthand aliases for the TensorFlow Probability namespaces used below.
tfd = tfp.distributions  # probability distributions
kernels = tfp.math.psd_kernels  # positive semi-definite (GP) kernels
| 35.142349
| 127
| 0.582278
|
e34e43e9e1aa6f169f4e3ce01d35a03a886c9108
| 932
|
py
|
Python
|
app/api/v2/models/base_models.py
|
erick-maina/Questioner_API
|
0ffad203fd525e22b52e861ce574803a844cc3b3
|
[
"MIT"
] | null | null | null |
app/api/v2/models/base_models.py
|
erick-maina/Questioner_API
|
0ffad203fd525e22b52e861ce574803a844cc3b3
|
[
"MIT"
] | 7
|
2019-01-15T12:23:59.000Z
|
2019-01-20T17:32:45.000Z
|
app/api/v2/models/base_models.py
|
erick-maina/Questioner_API
|
0ffad203fd525e22b52e861ce574803a844cc3b3
|
[
"MIT"
] | null | null | null |
"""
This module defines the base model and associated functions
"""
from flask import Flask, jsonify
from psycopg2.extras import RealDictCursor
from ....database import db_con
| 30.064516
| 69
| 0.648069
|
e34fb05ebe987705aa9522c54e606db43ebf8086
| 9,288
|
py
|
Python
|
cvr/core/task.py
|
john-james-ai/cvr
|
37e12dff4d46acac64b09ad8ddb8d238d43a5513
|
[
"BSD-3-Clause"
] | null | null | null |
cvr/core/task.py
|
john-james-ai/cvr
|
37e12dff4d46acac64b09ad8ddb8d238d43a5513
|
[
"BSD-3-Clause"
] | null | null | null |
cvr/core/task.py
|
john-james-ai/cvr
|
37e12dff4d46acac64b09ad8ddb8d238d43a5513
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# ================================================================================================ #
# Project : Deep Learning for Conversion Rate Prediction (CVR) #
# Version : 0.1.0 #
# File : \task.py #
# Language : Python 3.7.12 #
# ------------------------------------------------------------------------------------------------ #
# Author : John James #
# Email : john.james.ai.studio@gmail.com #
# URL : https://github.com/john-james-ai/cvr #
# ------------------------------------------------------------------------------------------------ #
# Created : Wednesday, January 19th 2022, 5:34:06 pm #
# Modified : Thursday, February 10th 2022, 9:28:37 pm #
# Modifier : John James (john.james.ai.studio@gmail.com) #
# ------------------------------------------------------------------------------------------------ #
# License : BSD 3-clause "New" or "Revised" License #
# Copyright: (c) 2022 Bryant St. Labs #
# ================================================================================================ #
from abc import ABC, abstractmethod
import pandas as pd
import inspect
from dataclasses import dataclass, field
from datetime import datetime, timedelta
from cvr.utils.printing import Printer
from cvr.core.asset import AssetPassport
from cvr.core.dataset import Dataset
# ---------------------------------------------------------------------------- #
# TASK RESULT #
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
# TASK RESPONSE #
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
# TASK SUMMARY #
# ---------------------------------------------------------------------------- #
# ---------------------------------------------------------------------------- #
# TASK #
# ---------------------------------------------------------------------------- #
class Task(ABC):
    """Defines interface for task classes.

    Subclasses override the private ``_setup``/``_teardown`` hooks; the
    public ``setup``/``teardown`` wrappers add the shared behaviour
    (logger wiring, result-flag normalisation, summary construction).
    """

    def setup(self, **kwargs) -> None:
        """Wire up logging from the injected config, then run the hook."""
        # Logging facility
        self._logger = self._config.logger
        # Subclass specific setup
        self._setup()

    def _setup(self) -> None:
        """Subclass setup hook; no-op by default."""
        pass

    def teardown(self, **kwargs) -> None:
        """Run the subclass hook, normalise flags, and build the summary."""
        # Subclass specific teardown.
        self._teardown()
        # Base class gets last word: fold the status flags into the
        # "Yes"/"No" strings the summary printer expects.
        # NOTE: only an exact ``False`` maps to "No"; anything else
        # (including None) maps to "Yes", matching the original logic.
        for flag_name in ("executed", "passed", "complete"):
            flag_value = getattr(self._result, flag_name)
            setattr(self._result, flag_name, "No" if flag_value is False else "Yes")
        self._summary = TaskSummary(
            passport=self.passport,
            response=self.response,
            result=self.result,
        )

    def _teardown(self, **kwargs) -> None:
        """Subclass teardown hook; no-op by default."""
        pass

    def summary(self) -> TaskSummary:
        """Return the TaskSummary built during teardown."""
        return self._summary

    def summarize(self) -> None:
        """Print the summary via its printer."""
        self._summary.print()
# ============================================================================ #
# DATASET FACTORY #
# ============================================================================ #
| 33.053381
| 100
| 0.463932
|
e34fb4cf8e7da5cda9892155346f681567a85054
| 304
|
py
|
Python
|
matroids/matroid.py
|
Aasfga/matroids-library
|
468f6fdc4b0c0e93346dba7365fae0fc6993f9cf
|
[
"MIT"
] | null | null | null |
matroids/matroid.py
|
Aasfga/matroids-library
|
468f6fdc4b0c0e93346dba7365fae0fc6993f9cf
|
[
"MIT"
] | null | null | null |
matroids/matroid.py
|
Aasfga/matroids-library
|
468f6fdc4b0c0e93346dba7365fae0fc6993f9cf
|
[
"MIT"
] | null | null | null |
from typing import Set
from numpy import ndarray
| 19
| 45
| 0.671053
|
e350ce9086d7c563b5e1154ba5f38a8024e85d87
| 779
|
py
|
Python
|
inconnu/traits/traitcommon.py
|
tiltowait/inconnu
|
6cca5fed520899d159537701b695c94222d8dc45
|
[
"MIT"
] | 4
|
2021-09-06T20:18:13.000Z
|
2022-02-05T17:08:44.000Z
|
inconnu/traits/traitcommon.py
|
tiltowait/inconnu
|
6cca5fed520899d159537701b695c94222d8dc45
|
[
"MIT"
] | 7
|
2021-09-13T00:46:57.000Z
|
2022-01-11T06:38:50.000Z
|
inconnu/traits/traitcommon.py
|
tiltowait/inconnu
|
6cca5fed520899d159537701b695c94222d8dc45
|
[
"MIT"
] | 2
|
2021-11-27T22:24:53.000Z
|
2022-03-16T21:05:00.000Z
|
"""traits/traitcommon.py - Common functionality across trait operations."""
import re
from ..constants import UNIVERSAL_TRAITS
# Traits may contain only ASCII letters and underscores.
# FIX: the previous pattern used [A-z_], which also matched the ASCII
# punctuation between 'Z' and 'a' ("[", "\", "]", "^", "`").
VALID_TRAIT_PATTERN = re.compile(r"^[A-Za-z_]+$")


def validate_trait_names(*traits):
    """Validate one or more trait names.

    Raises a ValueError if a trait doesn't exist and a SyntaxError
    if the syntax is bad:
      * ValueError  — trait longer than 20 characters
      * SyntaxError — trait is a reserved universal trait, or contains
        characters other than letters and underscores
    """
    for trait in traits:
        # Length check comes first, preserving the original error order.
        if (trait_len := len(trait)) > 20:
            raise ValueError(f"`{trait}` is too long by {trait_len - 20} characters.")
        if trait.lower() in UNIVERSAL_TRAITS:
            raise SyntaxError(f"`{trait}` is a reserved trait and cannot be added/updated/deleted.")
        if VALID_TRAIT_PATTERN.match(trait) is None:
            raise SyntaxError(f"Traits can only have letters and underscores. Received `{trait}`.")
| 33.869565
| 100
| 0.671374
|
e355aa3c3d4c58a325cb59719ca07b7c1a10df4b
| 2,293
|
py
|
Python
|
docker_ml_templates/simple_batch_model/container/src/tests/test_model.py
|
MadMedian/ubik
|
d8dabf0a26db1e35c653b23facb5045f2ae7bf0d
|
[
"Apache-2.0"
] | null | null | null |
docker_ml_templates/simple_batch_model/container/src/tests/test_model.py
|
MadMedian/ubik
|
d8dabf0a26db1e35c653b23facb5045f2ae7bf0d
|
[
"Apache-2.0"
] | null | null | null |
docker_ml_templates/simple_batch_model/container/src/tests/test_model.py
|
MadMedian/ubik
|
d8dabf0a26db1e35c653b23facb5045f2ae7bf0d
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from ..model import RandomForestWithFeatureSelection
from sklearn.model_selection import train_test_split
import os
import numpy as np
# Run the test suite when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
| 39.534483
| 113
| 0.69167
|
e357ec80e01c5cb1d929b33f8d9bbb4379d90eae
| 43,051
|
py
|
Python
|
DataAnalysis.py
|
ben-dent/Contract-Cheating-Analysis
|
28999b5ac73dbb6f4a65ef3d8f8dd4db677c42df
|
[
"MIT"
] | null | null | null |
DataAnalysis.py
|
ben-dent/Contract-Cheating-Analysis
|
28999b5ac73dbb6f4a65ef3d8f8dd4db677c42df
|
[
"MIT"
] | null | null | null |
DataAnalysis.py
|
ben-dent/Contract-Cheating-Analysis
|
28999b5ac73dbb6f4a65ef3d8f8dd4db677c42df
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt;
plt.rcdefaults()
import csv
import sqlite3 as lite
from calendar import monthrange
from datetime import datetime, date, timedelta
from datetimerange import DateTimeRange
import numpy as np
import pycountry_convert as pc
from dateutil.relativedelta import relativedelta
from forex_python.converter import CurrencyRates, RatesNotAvailableError
import random
import pandas as pd
# SQLite database holding the scraped job/bid/profile data; the module
# opens a single shared connection and cursor at import time.
DATABASE_NAME = 'JobDetails.db'
con = lite.connect(DATABASE_NAME)
cur = con.cursor()
# Column headers used when exporting each table to CSV; order must match
# the corresponding table's column order.
bidNames = ["Bid ID", "Job ID", "Country", "User", "Price", "Currency"]
jobNames = ["Job ID", "URL", "Title", "Description", "Tags", "Number Of Bidders", "Average Bid Cost", "Final Cost",
            "Currency", "Time", "Converted Final Cost", "Country Of Poster", "Country Of Winner", "Year", "Week",
            "Date Range", "Category", "Score", "Positive Matches", "Negative Matches", "Attachment", "Category Type Two", "Possible Months"]
reviewJobNames = ["Job ID", "URL", "Title", "Description", "Tags", "Number Of Bidders", "Average Bid Cost", "Final Cost",
                  "Currency", "Time", "Converted Final Cost", "Country Of Poster", "Country Of Winner", "Date Scraped",
                  "Time Ago", "Date Range", "Category", "Score", "Positive Matches", "Negative Matches", "Attachment",
                  "Possible Years", "Category Type Two", "Possible Months"]
profileNames = ["Profile ID", "Username", "Number Of Reviews", "Average Review", "Hourly Rate",
                "Earnings Percentage",
                "Country"]
qualificationNames = ["Qualification ID", "Qualification Type", "User", "Qualification Name", "Extra Information"]
reviewNames = ["Review ID", "Project URL", "Profile", "Score", "Amount Paid", "Currency", "Converted Currency",
               "Date Scraped", "Date", "Country", "Notes", "Date Range", "Possible Months", "Possible Years"]
winnerNames = ["Job ID", "Job URL", "Username", "Profile URL"]
# Table name -> header list, for generic export helpers.
names = {"Bids": bidNames, "Jobs": jobNames, "JobsHourly": jobNames, "ReviewJobs": reviewJobNames, "Profiles": profileNames,
         "Qualifications": qualificationNames, "Reviews": reviewNames, "Winners": winnerNames}
# Converts the currency to USD at the historic rate
# Retrieves saved details to plot
# Generates multiple windows of bar charts to display the countries of bidders - grouped by continent
# Saving values from the database to CSV files
# def doExtras():
# # doAverages()
# # jobConversions()
# # reviewJobConversions()
# # conversions()
# # getDateRanges()
# # possibleYears()
# # plotYears('Projects')
# doExtras()
# avConversions()
| 31.016571
| 184
| 0.551207
|
e359de552a30d24e6371b5e1ad922405353576ab
| 1,733
|
py
|
Python
|
deploy_flask_plotly/app.py
|
mohamedsaadmoustafa/Arabic_Dialect_Classification
|
a13e92ddaa8fda5afcc40d1ce97946174f9a4674
|
[
"BSD-3-Clause"
] | null | null | null |
deploy_flask_plotly/app.py
|
mohamedsaadmoustafa/Arabic_Dialect_Classification
|
a13e92ddaa8fda5afcc40d1ce97946174f9a4674
|
[
"BSD-3-Clause"
] | null | null | null |
deploy_flask_plotly/app.py
|
mohamedsaadmoustafa/Arabic_Dialect_Classification
|
a13e92ddaa8fda5afcc40d1ce97946174f9a4674
|
[
"BSD-3-Clause"
] | 1
|
2022-03-14T19:41:57.000Z
|
2022-03-14T19:41:57.000Z
|
from flask import Flask, render_template, request, jsonify
import numpy as np
import pickle
import sys
import json
import re
app = Flask(__name__)
# Allow non-ASCII (Arabic) characters in JSON responses.
app.config['JSON_AS_ASCII'] = False
# ISO country codes for the 18 Arabic dialect classes, in the order the
# classifier emits them.
target_names = [
    'AE', 'BH', 'DZ',
    'EG', 'IQ', 'JO',
    'KW', 'LB', 'LY',
    'MA', 'OM','PL',
    'QA', 'SA', 'SD',
    'SY', 'TN', 'YE'
]
# Country code -> human-readable dialect name.
# NOTE(review): the values are blank here — the Arabic names appear to
# have been lost in text extraction; restore them from the repository.
arabic_dialects = {
    'AE': ' ', 'BH': ' ', 'DZ': ' ', 'EG': ' ', 'IQ': ' ',
    'JO': ' ', 'KW': ' ', 'LB': ' ', 'LY': ' ', 'MA': ' ',
    'OM': ' ', 'PL': ' ', 'QA': ' ', 'SA': ' ', 'SD': ' ',
    'SY': ' ', 'TN': ' ', 'YE': ' '
}
# Development entry point.
if __name__ == '__main__':
    app.run(debug=True)
| 27.507937
| 112
| 0.562031
|
e359e595d02499d12ce9088ccf34ac138ffada36
| 384
|
py
|
Python
|
2018/2/hash.py
|
octonion/adventofcode
|
132e8bf0c9bc0ad64a0e12e22170177df4947e37
|
[
"MIT"
] | 1
|
2019-01-10T09:43:34.000Z
|
2019-01-10T09:43:34.000Z
|
2018/2/hash.py
|
octonion/adventofcode
|
132e8bf0c9bc0ad64a0e12e22170177df4947e37
|
[
"MIT"
] | null | null | null |
2018/2/hash.py
|
octonion/adventofcode
|
132e8bf0c9bc0ad64a0e12e22170177df4947e37
|
[
"MIT"
] | null | null | null |
from collections import Counter


def checksum(codes):
    """Return the Advent of Code 2018 day 2 checksum for *codes*.

    The checksum is (number of ids containing some letter exactly twice)
    multiplied by (number of ids containing some letter exactly three
    times); an id can count toward both factors.
    """
    two = 0
    three = 0
    for code in codes:
        counts = Counter(code).values()
        if 2 in counts:
            two += 1
        if 3 in counts:
            three += 1
    return two * three


if __name__ == "__main__":
    # Guarded so importing this module no longer reads input.txt.
    data = [line.strip() for line in open("input.txt")]
    print(checksum(data))
| 21.333333
| 57
| 0.492188
|
e359f824dde6ff522819969499136201763f90fa
| 322
|
py
|
Python
|
conf_site/speakers/tests/factories.py
|
jasongrout/conf_site
|
6b3beb21de8d847cba65dcb6da84464b40739d48
|
[
"MIT"
] | 13
|
2015-05-22T17:10:22.000Z
|
2021-07-15T16:45:19.000Z
|
conf_site/speakers/tests/factories.py
|
jasongrout/conf_site
|
6b3beb21de8d847cba65dcb6da84464b40739d48
|
[
"MIT"
] | 758
|
2015-03-18T13:39:25.000Z
|
2022-03-31T13:14:09.000Z
|
conf_site/speakers/tests/factories.py
|
jasongrout/conf_site
|
6b3beb21de8d847cba65dcb6da84464b40739d48
|
[
"MIT"
] | 16
|
2015-03-24T18:53:17.000Z
|
2020-10-22T21:30:02.000Z
|
# -*- coding: utf-8 -*-
import factory
from symposion.speakers.models import Speaker
from conf_site.accounts.tests.factories import UserFactory
| 21.466667
| 58
| 0.73913
|
e35a2abebd28f1f938e6001756592d76df4ec548
| 179
|
py
|
Python
|
pokediadb/dbuilder/__init__.py
|
Kynarth/pokediadb
|
97d981909803335f878b9e07ed31d86fc93e7941
|
[
"MIT"
] | null | null | null |
pokediadb/dbuilder/__init__.py
|
Kynarth/pokediadb
|
97d981909803335f878b9e07ed31d86fc93e7941
|
[
"MIT"
] | null | null | null |
pokediadb/dbuilder/__init__.py
|
Kynarth/pokediadb
|
97d981909803335f878b9e07ed31d86fc93e7941
|
[
"MIT"
] | null | null | null |
# flake8: noqa
import pokediadb.dbuilder.version
import pokediadb.dbuilder.type
import pokediadb.dbuilder.ability
import pokediadb.dbuilder.move
import pokediadb.dbuilder.pokemon
| 25.571429
| 33
| 0.860335
|
e35b469d7625c1fa8f422ae121b4eaab1ed606da
| 10,171
|
py
|
Python
|
origin/app_bb_modifier.py
|
nukeguys/myutil
|
65d0aff36ec45bffbd2e52fea0fabfbabd5609b1
|
[
"Apache-2.0"
] | null | null | null |
origin/app_bb_modifier.py
|
nukeguys/myutil
|
65d0aff36ec45bffbd2e52fea0fabfbabd5609b1
|
[
"Apache-2.0"
] | null | null | null |
origin/app_bb_modifier.py
|
nukeguys/myutil
|
65d0aff36ec45bffbd2e52fea0fabfbabd5609b1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import sys
import os
import io
from orderedset import OrderedSet
from shell import Shell
import logpath as LogPath
VERSION = '1.1'
# ANSI terminal escape codes for coloured/styled console output.
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'  # reset all attributes
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
# Determine which meta-layer the script was launched from; the current
# working directory's name selects the target. Abort early otherwise.
# (Dropped the un-idiomatic "== True" comparisons: str.endswith already
# returns a bool.)
current_path = os.getcwd()
current_meta = ''
if current_path.endswith('meta-signage'):
    current_meta = 'signage'
elif current_path.endswith('meta-commercial'):
    current_meta = 'commercial'
elif current_path.endswith('meta-id'):
    current_meta = 'id'
else:
    print('You should execute this file in [%smeta-id, meta-commercial, meta-signage%s] path' % (WARNING, ENDC))
    exit()
# Script entry point: parse the logs and hand them to the interactive
# helper. NOTE(review): GeneratorLog and InputHelper are not defined in
# this chunk — presumably declared earlier in the file; confirm.
if __name__ == '__main__':
    generater = GeneratorLog()
    generater.parseLog()
    generater.makeLog()
    InputHelper().process(generater.listLogs)
| 39.730469
| 125
| 0.534756
|
e35b94ca7170c796ccad0fbd61ea4ee542cd52e0
| 3,514
|
py
|
Python
|
gamebike/controlmapbits.py
|
johnlpage/gamebike
|
429736d0238dca2961763f2a33d8e4e72ed97364
|
[
"Apache-2.0"
] | null | null | null |
gamebike/controlmapbits.py
|
johnlpage/gamebike
|
429736d0238dca2961763f2a33d8e4e72ed97364
|
[
"Apache-2.0"
] | null | null | null |
gamebike/controlmapbits.py
|
johnlpage/gamebike
|
429736d0238dca2961763f2a33d8e4e72ed97364
|
[
"Apache-2.0"
] | null | null | null |
# These were used when I was trying to map between controllers
# To map to a wheel - but was defeated in that by using a driver
# 2021 comment (What did I mean there?)
GAMEPAD_TRIANGLE = (0, 0x08)
GAMEPAD_CIRCLE = (0, 0x04)
GAMEPAD_CROSS = (0, 0x02)
GAMEPAD_SQUARE = (0, 0x01)
GAMEPAD_DPAD_MASK = 0x0F
GAMEPAD_DPAD_NONE = (2, 0x0F)
GAMEPAD_DPAD_U = (2, 0x00)
GAMEPAD_DPAD_R = (2, 0x02)
GAMEPAD_DPAD_D = (2, 0x04)
GAMEPAD_DPAD_L = (2, 0x06)
GAMEPAD_PSMENU = (1, 0x10)
GAMEPAD_SELECT = (1, 0x01)
GAMEPAD_START = (1, 0x02)
GAMEPAD_LJOY_BUTTON = (1, 0x04)
GAMEPAD_RJOY_BUTTON = (1, 0x08)
GAMEPAD_L1 = (0, 0x10)
GAMEPAD_R1 = (0, 0x20)
GAMEPAD_L2 = (0, 0x40)
GAMEPAD_R2 = (0, 0x80)
GAMEPAD_RTRIGGER = 18
GAMEPAD_LTRIGGER = 17
# These are Bytes not Bits
GAMEPAD_LJOY_X = 3
GAMEPAD_LJOY_Y = 4
GAMEPAD_RJOY_X = 5
GAMEPAD_RJOY_Y = 6
CLICKER_BUTTONS = 2
CLICKER_LEFT = [0x4B]
CLICKER_RIGHT = [0x4E]
CLICKER_UP = [0x05]
CLICKER_DOWN = [0x3E, 0x29] # Toggles
STEER_MIN = 0x0000
STEER_MAX = 0x3FFF
STEER_MID = 0x1FFF
WHEEL_NEUTRAL = [0x08, 0x00, 0x00, 0x5E, 0x00, 0x20, 0x7F, 0xFF]
WHEEL_TRIANGLE = (0, 0x80)
WHEEL_CIRCLE = (0, 0x40)
WHEEL_CROSS = (0, 0x10)
WHEEL_SQUARE = (0, 0x20)
WHEEL_DPAD_MASK = 0x0F
WHEEL_DPAD_NONE = (0, 0x08)
WHEEL_DPAD_U = (0, 0x00)
WHEEL_DPAD_R = (0, 0x02)
WHEEL_DPAD_D = (0, 0x04)
WHEEL_DPAD_L = (0, 0x06)
WHEEL_RPADDLE = (1, 0x01)
WHEEL_LPADDLE = (1, 0x02)
WHEEL_L1 = (1, 0x80)
WHEEL_L2 = (1, 0x08)
WHEEL_R1 = (1, 0x40)
WHEEL_R2 = (1, 0x04)
WHEEL_SELECT = (1, 0x10)
WHEEL_START = (1, 0x20)
WHEEL_PSMENU = (2, 0x08)
WHEEL_GEARUP = (2, 0x01)
WHEEL_GEARDOWN = (2, 0x02)
WHEEL_BACK = (2, 0x04)
WHEEL_ADJUST_CLOCKWISE = (2, 0x10)
WHEEL_ADJUST_ANTICLOCKWISE = (2, 0x20)
WHEEL_PLUS = (2, 0x80)
WHEEL_MINUS = (2, 0x40)
# Bytes
WHEEL_WHEEL_HIGHBYTE = 5
WHEEL_WHEEL_LOWBYTE = 4 # 0000-EFF3 But 0000 is extreme
WHEEL_ACCELERATEBYTE = 6 # 0-FF 0 IS DOWN
WHEEL_BRAKEBYTE = 7 # 0-FF 0 IS DOWN
# Each mapping pairs a gamepad (byte_index, bit_mask) with the wheel
# (byte_index, bit_mask) it should drive: (FromByte,FromBit) -> (ToByte,ToBit)
# Wheel has dedicated gear buttons and shifter that aren't on the controller;
# stick click is not used in TDU2 at all, so the joystick buttons are
# repurposed as gear up/down.
BUTTON_MAPPINGS = [
    (GAMEPAD_TRIANGLE, WHEEL_TRIANGLE),
    (GAMEPAD_CIRCLE, WHEEL_CIRCLE),
    (GAMEPAD_SQUARE, WHEEL_SQUARE),
    (GAMEPAD_CROSS, WHEEL_CROSS),
    (GAMEPAD_R1, WHEEL_R2),
    (GAMEPAD_L1, WHEEL_L2),
    (GAMEPAD_PSMENU, WHEEL_PSMENU),
    (GAMEPAD_START, WHEEL_START),
    (GAMEPAD_SELECT, WHEEL_SELECT),
    (GAMEPAD_LJOY_BUTTON, WHEEL_GEARDOWN),
    (GAMEPAD_RJOY_BUTTON, WHEEL_GEARUP),
]
# These made it work in the PS3 menu (XMB) screen — note cross/square are
# swapped relative to BUTTON_MAPPINGS.
XMB_BUTTON_MAPPINGS = [
    (GAMEPAD_TRIANGLE, WHEEL_TRIANGLE),
    (GAMEPAD_CIRCLE, WHEEL_CIRCLE),
    (GAMEPAD_CROSS, WHEEL_SQUARE),
    (GAMEPAD_SQUARE, WHEEL_CROSS),
    (GAMEPAD_R1, WHEEL_R2),
    (GAMEPAD_L1, WHEEL_L2),
    (GAMEPAD_PSMENU, WHEEL_PSMENU),
    (GAMEPAD_START, WHEEL_START),
    (GAMEPAD_SELECT, WHEEL_SELECT),
    (GAMEPAD_LJOY_BUTTON, WHEEL_GEARDOWN),
    (GAMEPAD_RJOY_BUTTON, WHEEL_GEARUP),
]
# D-pad direction codes translated gamepad -> wheel.
DPAD_MAPPINGS = [
    (GAMEPAD_DPAD_NONE, WHEEL_DPAD_NONE),
    (GAMEPAD_DPAD_U, WHEEL_DPAD_U),
    (GAMEPAD_DPAD_D, WHEEL_DPAD_D),
    (GAMEPAD_DPAD_L, WHEEL_DPAD_L),
    (GAMEPAD_DPAD_R, WHEEL_DPAD_R),
]
# Ordered wheel targets for Steam controller buttons / secondary buttons /
# d-pad; position in the list is the Steam-side button index.
STEAM_BUTTON_MAPPINGS = [
    WHEEL_CROSS,WHEEL_CIRCLE,WHEEL_TRIANGLE,WHEEL_SQUARE,
    WHEEL_START,WHEEL_PSMENU,WHEEL_SELECT,
    WHEEL_GEARUP,WHEEL_GEARDOWN,WHEEL_L1,WHEEL_R1
]
STEAM_BUTTONS2_MAPPINGS = [WHEEL_LPADDLE,WHEEL_RPADDLE,WHEEL_PLUS,WHEEL_MINUS]
STEAM_DPAD_MAPPINGS = [ WHEEL_DPAD_U,WHEEL_DPAD_L,WHEEL_DPAD_D,WHEEL_DPAD_R]
| 24.746479
| 79
| 0.726807
|
e35c3059ee54c88a4dcfc9000cdd58922b31c667
| 4,185
|
py
|
Python
|
conveyordashboard/volumes/tables.py
|
Hybrid-Cloud/birdie-dashboard
|
a3dc370c50ef9f33498a8ed4180ff5009532a79f
|
[
"Apache-2.0"
] | null | null | null |
conveyordashboard/volumes/tables.py
|
Hybrid-Cloud/birdie-dashboard
|
a3dc370c50ef9f33498a8ed4180ff5009532a79f
|
[
"Apache-2.0"
] | null | null | null |
conveyordashboard/volumes/tables.py
|
Hybrid-Cloud/birdie-dashboard
|
a3dc370c50ef9f33498a8ed4180ff5009532a79f
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2017 Huawei, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.template import defaultfilters as filters
from django.utils.translation import pgettext_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import tables
from conveyordashboard.common import actions as common_actions
from conveyordashboard.common import constants as consts
from conveyordashboard.common import resource_state
def get_size(volume):
return _("%sGiB") % volume.size
def get_volume_type(volume):
return volume.volume_type if volume.volume_type != "None" else None
def get_encrypted_value(volume):
if not hasattr(volume, 'encrypted') or volume.encrypted is None:
return _("-")
elif volume.encrypted is False:
return _("No")
else:
return _("Yes")
| 35.769231
| 78
| 0.61601
|
e35f9878c2b4e671e6f25e427a7cdba3e0466f0b
| 9,046
|
py
|
Python
|
tools/tver/tver.py
|
jackyzy823/restrictionbreaker
|
1aabce0c98c50782a592fa6c91abd72e82e59a6a
|
[
"Unlicense"
] | 5
|
2019-05-29T21:34:34.000Z
|
2021-07-25T10:58:57.000Z
|
tools/tver/tver.py
|
jackyzy823/restrictionbreaker
|
1aabce0c98c50782a592fa6c91abd72e82e59a6a
|
[
"Unlicense"
] | null | null | null |
tools/tver/tver.py
|
jackyzy823/restrictionbreaker
|
1aabce0c98c50782a592fa6c91abd72e82e59a6a
|
[
"Unlicense"
] | 1
|
2020-06-25T14:14:41.000Z
|
2020-06-25T14:14:41.000Z
|
import requests
import re
import sqlite3
db =sqlite3.connect("db_tver.db",check_same_thread =False)
cur = db.cursor()
cur.execute(
'''CREATE TABLE if not exists `tver` (
`reference_id` TEXT NOT NULL,
`service` TEXT NOT NULL,
`player_id` TEXT NOT NULL,
`name` TEXT NOT NULL,
`title` TEXT,
`subtitle` TEXT,
`catchup_id` TEXT,
`url` TEXT,
`service_name` TEXT,
`id` TEXT NOT NULL,
`json` TEXT,
`updated_at` TIMESTAMP,
`done` BOOL,
UNIQUE (reference_id,player_id,id)
);''')
'''
/corner/
/episode/
/feature/
'''
pagepattern = re.compile(r'''addPlayer\(\s*?'(?P<player_id>.*?)',\s*?'(?P<player_key>.*?)',\s*?'(?P<catchup_id>.*?)',\s*?'(?P<publisher_id>.*?)',\s*?'(?P<reference_id>.*?)',\s*?'(?P<title>.*?)',\s*?'(?P<subtitle>.*?)',\s*?'(?P<service>.*?)',\s*?'(?P<servicename>.*?)',''')
policykeypattern = re.compile(r'''catalog\(\{accountId:\"?(?P<accountId>.*?)\"?,policyKey:\"(?P<policyKey>.*?)\"''')
BCOV_POLICY = {
#YTV
"5330942432001":"BCpkADawqM0kGrWxZoXJvJj5Uv6Lypjp4Nrwzz1ktDAuEbD1r_pj0oR1900CRG04FFkxo0ikc1_KmAlB4uvq_GnFwF4IsG_v9jhYOMajC9MkdVQ-QrpboS7vFV8RvK20V5v-St5WGPfXotPx",
#TX
"3971130137001":"BCpkADawqM1F2YPxbuFJzWtohXjxdgDgIJcsnWacQKaAuaf0gyu8yxCQUlca9Dh7V0Uu_8Rt5JUWZTpgcqzD_IT5hRVde8JIR7r1UYR73ne8S9iLSroqTOA2P-jtl2EUw_OrSMAtenvuaXRF",
#TBS
"4031511847001":"BCpkADawqM1n_azNkrwm-kl2UhijTLt4W7KZ6KS9HluAoLPvyRFu2X4Xu2dUuW-lLOmc6X7WjsiBwh83m8ecNmxl-pVy9w3M9iI6-en-_wIDvNJixpoMf4BhdOPtwO_7XIol9P3wVrq2BIzw",
"4394098881001":"BCpkADawqM3m-3484dphPL5raj3jQJVlFecOYAvpxhtJaK99BVRKtxd9SC6q0kOsknI1FD3kplVUaJzneAQb55EkCcDHrD9m_yoesmjsIfJpKQXJKfmQ5LfAFJnmf2Sv48heP_R1PGznwbAn",
#NTV
"4394098882001":"BCpkADawqM1s6XkqRoC2a0eEORY7FFF780eHkHQZ93Fw752A9swymrSMZEVF1d7G3mSby3Etzj8MGJp_ZwXpbSTH1ApfZxZ1FSPQ4LXDQhpaMRADtCbxKFTpAxGYwN61DYKKksmg4uwcdhLD",
#MBS
"5102072605001":"BCpkADawqM1VhDl0FtgrrM8jB-hVNkcrdrx4x9C_60OSeN4jIHynGkIKw0PY1cOsRqQYJOnJRscPAbdPTcpzZ_4g89Gcte_yQFW-yeWxzrPECulIh9ZlaZsJ_3rH7Gjs_RnuWHx_lTzilaxh",
#KTV
"5718741494001":"BCpkADawqM1llDtMelQ9nQyE91bAc-E5T1B0135MCCRZ_o4FlDkGWQY8t8Nrt1fJKAgui-kLefX-JGaRItrDXh_C1GlIgCSv-rhNPQYKJsY8nZp_IoJ38Mf3B5BSJLFulW0QhgQduipc9j4D",
#EX no publisherid
"4031511847001":"BCpkADawqM2N0e6IdrmQn-kEZJ0jRi-Dlm0aUZ9mVF2lcadunJzMVYD6j_51UZzQ3mXuIeV8Zx_UUvbGeeJn73SSrpm0xD7qtiKULPP2NEsp_rgKoVxVWTNZAHN-JAHcuIpFJT7PvUj6gpZv",
#ABC
"5102072603001":"BCpkADawqM2NfzEA47jZiJNK0SYahFenNwAtoehfrIAaCqxmHjBidnt_YfvFnp5j-Zi58FPj-zXAHATYU1nnOOuEf9XXV8JRGYSuZ5dgyNc2RjGv2Ej5zGfhxWs3_p4F7huxtbAD9fzQlg7b",
#World cup
"5764318572001":"BCpkADawqM3KJLCLszoqY9KsoXN2Mz52LwKx4UXYRuEaUGr-o3JBSHmz_0WRicxowBj8vmbGRK_R7Us96DdBYuYEoVX9nHJ3DjkVW5-8L6bRmm6gck8IaeLLw21sM6mOHtNs9pIJPF6a4qSZlO6t_RlkpMY6sasaIaSYlarJ_8PFMPdxxfY6cGtJDnc"
}
linkPattern = re.compile(r'''(\/episode\/.*?)\/?\"|(\/corner\/.*?)\/?\"|(\/feature\/.*?)\/?\"''')
findAll()
findAllByBrand()
# updateJson()
| 41.118182
| 272
| 0.624254
|
e360941b07ce2d49e4d682a79c218a27dc642b96
| 1,696
|
py
|
Python
|
tests/test_init.py
|
nuvolos-cloud/resolos
|
0918066cab7b11ef04ae005f3e052b14a65ded68
|
[
"MIT"
] | 1
|
2021-11-30T06:47:24.000Z
|
2021-11-30T06:47:24.000Z
|
tests/test_init.py
|
nuvolos-cloud/resolos
|
0918066cab7b11ef04ae005f3e052b14a65ded68
|
[
"MIT"
] | 1
|
2021-04-08T12:56:39.000Z
|
2021-04-08T12:56:39.000Z
|
tests/test_init.py
|
nuvolos-cloud/resolos
|
0918066cab7b11ef04ae005f3e052b14a65ded68
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from click.testing import CliRunner
from resolos.interface import res, res_run
from resolos.shell import run_shell_cmd
from tests.common import verify_result
import logging
logger = logging.getLogger(__name__)
| 34.612245
| 108
| 0.595519
|
e3619a35a7b1327801ed3520deafd02b23723cdc
| 946
|
py
|
Python
|
performence/get_flops.py
|
idealboy/mmsr
|
f8284e9fb977a74db1904e6034b768805845e138
|
[
"Apache-2.0"
] | 2
|
2020-08-05T05:13:14.000Z
|
2020-11-10T03:37:48.000Z
|
performence/get_flops.py
|
idealboy/mmsr
|
f8284e9fb977a74db1904e6034b768805845e138
|
[
"Apache-2.0"
] | null | null | null |
performence/get_flops.py
|
idealboy/mmsr
|
f8284e9fb977a74db1904e6034b768805845e138
|
[
"Apache-2.0"
] | null | null | null |
import os
import sys
op_name = []
with open("name.txt") as lines:
for line in lines:
line = line.strip()
op_name.append(line)
with open("shape.txt") as lines:
index = 0
for line in lines:
name = op_name[index]
line = line.strip()
items = line.split("\t")
if "conv" in name:
input_shape = [int(s) for s in items[0].split("#")[0].split("[")[1].split("]")[0].split(",")]
weight_shape = [int(s) for s in items[0].split("#")[1].split("[")[1].split("]")[0].split(",")]
output_shape = [int(s) for s in items[1].split("[")[1].split("]")[0].split(",")]
flops = output_shape[0] * output_shape[1] * output_shape[2] * output_shape[3] * weight_shape[0] * weight_shape[1] * weight_shape[2] * 2
elif "add" in name:
output_shape = [int(s) for s in items[1].split("[")[1].split("]")[0].split(",")]
flops = output_shape[0] * output_shape[1] * output_shape[2] * output_shape[3]
else:
flops = 0
print flops
index+=1
| 32.62069
| 138
| 0.605708
|
e3621a99c91f66bf8b690df838e64f5433dfeefe
| 6,086
|
py
|
Python
|
pan/mesh_bee_wrapper.py
|
KillingJacky/CloudPan
|
128f438b8d84c734aad94ad3e03f6c3aa12b66af
|
[
"MIT"
] | 1
|
2016-08-29T14:28:45.000Z
|
2016-08-29T14:28:45.000Z
|
pan/mesh_bee_wrapper.py
|
KillingJacky/CloudPan
|
128f438b8d84c734aad94ad3e03f6c3aa12b66af
|
[
"MIT"
] | null | null | null |
pan/mesh_bee_wrapper.py
|
KillingJacky/CloudPan
|
128f438b8d84c734aad94ad3e03f6c3aa12b66af
|
[
"MIT"
] | null | null | null |
# Wrapper for Mesh Bee library
# helping to easier communicate with Mesh Bee module
#
# Copyright (C) 2014 at seeedstudio
# Author: Jack Shao (jacky.shaoxg@gmail.com)
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import re
import glob
import binascii
import logging
from pan.mesh_bee import *
from factory import Factory
Factory.register(MeshBeeWrapper)
| 34.977011
| 141
| 0.565067
|
e36654005301b9cf41913be091e578a74c259424
| 1,669
|
py
|
Python
|
pythonapm/collector/test_reqhandler.py
|
nextapm/pythonapm
|
ddd8ad374e4f268516fc81f0bf710206565b737e
|
[
"FTL"
] | null | null | null |
pythonapm/collector/test_reqhandler.py
|
nextapm/pythonapm
|
ddd8ad374e4f268516fc81f0bf710206565b737e
|
[
"FTL"
] | null | null | null |
pythonapm/collector/test_reqhandler.py
|
nextapm/pythonapm
|
ddd8ad374e4f268516fc81f0bf710206565b737e
|
[
"FTL"
] | null | null | null |
import unittest
import json
import requests
from unittest import mock
from .reqhandler import send_req
from pythonapm.agent import Agent
from pythonapm import constants
| 37.931818
| 233
| 0.711803
|
e366e2fcb39a47a49999de24a158d9a70e017103
| 277
|
py
|
Python
|
app/admin.py
|
hbuiOnline/AMS
|
d9118aee7b5ddd90d54bf4cf7f5cdd11c8e4a511
|
[
"MIT"
] | null | null | null |
app/admin.py
|
hbuiOnline/AMS
|
d9118aee7b5ddd90d54bf4cf7f5cdd11c8e4a511
|
[
"MIT"
] | null | null | null |
app/admin.py
|
hbuiOnline/AMS
|
d9118aee7b5ddd90d54bf4cf7f5cdd11c8e4a511
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from .models import * # To import all the model from .models, then specify those in register
admin.site.register(Customer)
admin.site.register(Staff)
admin.site.register(Service)
admin.site.register(Appointment)
| 27.7
| 93
| 0.794224
|
e36726ae66238ed268d95c0acb72decccf95ea5d
| 17,263
|
py
|
Python
|
static/brythonlib/cs1robots/worlds_data.py
|
pythonpad/vue-pythonpad-runner
|
52decba9607b3b7b050ee0bf6dd4ef07ae644587
|
[
"MIT"
] | 3
|
2021-01-26T16:18:45.000Z
|
2021-09-15T00:57:12.000Z
|
static/brythonlib/cs1robots/worlds_data.py
|
pythonpad/vue-pythonpad-runner
|
52decba9607b3b7b050ee0bf6dd4ef07ae644587
|
[
"MIT"
] | null | null | null |
static/brythonlib/cs1robots/worlds_data.py
|
pythonpad/vue-pythonpad-runner
|
52decba9607b3b7b050ee0bf6dd4ef07ae644587
|
[
"MIT"
] | 2
|
2021-01-26T16:18:47.000Z
|
2021-10-21T20:45:20.000Z
|
worlds_data = {
'around': {'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(1, 9): 1, (2, 10): 1, (8, 10): 1, (10, 10): 1, (9, 10): 1, (5, 10): 1, (10, 8): 1, (10, 4): 1, (10, 1): 1, (8, 1): 1, (7, 1): 1, (6, 1): 1, (5, 1): 1, (3, 1): 1, (1, 6): 1, (1, 5): 1, (1, 3): 1}},
'around2': {'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(2, 1): 2, (3, 1): 3, (5, 1): 2, (7, 1): 1, (10, 1): 1, (10, 4): 3, (10, 3): 1, (10, 7): 2, (10, 6): 1, (10, 10): 4, (10, 9): 3, (9, 10): 1, (7, 10): 2, (5, 10): 1, (4, 10): 1, (3, 10): 1, (2, 10): 1, (1, 10): 2, (1, 8): 1, (1, 6): 4, (1, 5): 1, (1, 3): 3, (1, 2): 1}},
'around3': {'avenues': 6, 'streets': 6, 'walls': [], 'beepers': {(2, 1): 2, (3, 1): 1, (6, 1): 1, (6, 2): 3, (6, 3): 1, (6, 6): 2, (4, 6): 3, (1, 6): 1, (1, 4): 2, (1, 3): 1, (1, 2): 1}},
'cave': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (4, 1), (5, 4), (2, 5), (3, 6), (5, 6), (6, 3), (6, 1), (8, 1), (8, 3), (9, 4), (10, 3), (11, 2), (1, 8), (3, 8), (5, 8), (7, 8), (8, 7), (14, 1), (14, 3), (13, 4), (11, 6), (12, 7), (13, 8), (14, 7), (14, 5), (9, 8)], 'beepers': {(6, 5): 1}},
'cave2': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (4, 1), (4, 3), (4, 5), (3, 6), (1, 8), (3, 8), (5, 8), (6, 7), (7, 8), (9, 8), (10, 7), (9, 6), (8, 5), (8, 1), (10, 1), (10, 3), (7, 4), (6, 3)], 'beepers': {(6, 3): 1}},
'cave3': {'avenues': 10, 'streets': 10, 'walls': [(2, 1), (1, 4), (5, 2), (6, 1), (3, 4), (5, 6), (3, 6), (2, 5), (6, 3), (7, 6), (8, 5), (8, 1), (9, 2), (12, 1), (12, 3), (12, 5), (9, 4), (12, 7), (11, 8), (11, 6), (9, 8), (7, 8), (5, 8), (3, 8)], 'beepers': {(1, 5): 4, (2, 2): 2, (3, 3): 3, (4, 2): 1, (6, 2): 1, (5, 4): 1, (1, 4): 3}},
'cave4': {'avenues': 10, 'streets': 10, 'walls': [(2, 1), (1, 4), (3, 2), (5, 2), (3, 4), (5, 6), (6, 5), (7, 4), (8, 3), (8, 1), (2, 5), (1, 8), (3, 8), (5, 8), (7, 8), (9, 8), (9, 6), (10, 5), (11, 8), (12, 7), (12, 5), (11, 4), (12, 1), (10, 3)], 'beepers': {(3, 2): 1, (2, 4): 3, (4, 4): 3, (7, 2): 4}},
'chimney': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (2, 5), (2, 7), (2, 9), (2, 11), (4, 11), (4, 9), (4, 7), (4, 5), (4, 3), (3, 12), (5, 2), (6, 3), (6, 5), (7, 6), (8, 5), (8, 3), (9, 2), (11, 2), (12, 3), (12, 5), (12, 7), (12, 9), (13, 10), (14, 9), (14, 7), (14, 5), (14, 3), (15, 2), (16, 3), (16, 5), (16, 7), (16, 9), (16, 11), (16, 13), (16, 15), (17, 16), (18, 15), (18, 13), (18, 11), (18, 9), (18, 7), (18, 5), (18, 3), (19, 2)], 'beepers': {(2, 6): 1, (2, 5): 1, (2, 4): 2, (2, 2): 1, (9, 7): 1, (9, 5): 2, (9, 4): 3, (4, 3): 5, (7, 2): 1, (7, 4): 3, (7, 3): 1, (7, 5): 1}},
'chimney2': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (2, 5), (2, 7), (3, 8), (4, 7), (4, 5), (4, 3), (4, 9), (4, 11), (4, 13), (4, 15), (5, 16), (6, 15), (6, 13), (6, 11), (6, 9), (6, 7), (6, 5), (6, 3), (7, 2), (8, 3), (10, 3), (11, 2), (13, 2), (14, 3), (16, 3), (18, 3), (17, 2), (18, 5), (18, 7), (18, 9), (18, 11), (18, 13), (18, 15), (19, 16), (15, 4), (8, 5), (10, 5), (10, 11), (9, 12), (8, 11), (8, 9), (10, 9), (10, 7), (8, 7)], 'beepers': {(3, 8): 2, (8, 2): 3, (2, 3): 2, (2, 4): 1, (3, 3): 3, (3, 2): 2, (3, 5): 3, (3, 6): 1, (5, 2): 2, (5, 6): 1, (10, 7): 2}},
'chimney3': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (2, 5), (2, 7), (3, 8), (4, 7), (4, 5), (4, 3), (4, 9), (4, 11), (5, 12), (6, 11), (6, 9), (6, 7), (6, 5), (6, 3), (7, 2), (9, 2), (10, 3), (10, 5), (10, 7), (11, 8), (12, 9), (12, 11), (13, 12), (14, 11), (14, 9), (15, 8), (16, 9), (16, 11), (16, 15), (16, 13), (16, 17), (18, 17), (18, 15), (18, 13), (18, 11), (18, 9), (19, 8), (13, 2), (15, 2), (17, 2), (19, 2), (13, 4), (15, 4), (17, 4), (19, 4), (13, 6), (15, 6), (17, 6), (19, 6), (17, 18)], 'beepers': {(3, 2): 1, (2, 3): 3, (2, 4): 2, (3, 4): 6, (3, 5): 1, (7, 6): 5, (7, 5): 1, (9, 5): 3, (9, 7): 2}},
'mine': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2)], 'beepers': {(2, 1): 1, (3, 1): 1, (5, 1): 1, (8, 1): 1, (10, 1): 1}},
'mine2':{'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2)], 'beepers': {(2, 1): 2, (3, 1): 2, (6, 1): 3, (5, 1): 1, (8, 1): 1, (10, 1): 4}},
'mine3': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2)], 'beepers': {(10, 1): 5, (9, 1): 1, (8, 1): 3, (6, 1): 2, (1, 1): 2, (2, 1): 1, (3, 1): 3}},
'mine4': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 2), (6, 3), (7, 4), (8, 1), (9, 2), (11, 2), (12, 1), (9, 4), (11, 4), (13, 4), (14, 3), (15, 2), (17, 2), (19, 2)], 'beepers': {(10, 1): 2, (8, 1): 3, (7, 2): 1, (7, 1): 1, (4, 2): 6, (5, 2): 1, (4, 1): 1, (3, 1): 2}},
'mine5': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 2), (6, 3), (9, 2), (8, 1), (10, 1), (7, 4), (9, 4), (11, 4), (12, 3), (13, 2), (14, 3), (14, 5), (14, 7), (15, 8), (17, 8), (19, 8), (17, 6), (16, 5), (18, 5), (19, 4), (16, 3), (16, 1)], 'beepers': {(10, 3): 1, (2, 1): 2, (4, 1): 3, (5, 2): 2, (7, 1): 3, (8, 2): 4, (8, 3): 1, (8, 4): 2}},
'stairs': {'avenues': 10, 'streets': 10, 'walls': [(2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7), (9, 8), (10, 9), (11, 10), (12, 11), (13, 12), (14, 13), (15, 14), (16, 15), (17, 16), (18, 17), (19, 18)], 'beepers': {(10, 10): 1}},
'stairs2': {'avenues': 10, 'streets': 10, 'walls': [(2, 1), (3, 2), (5, 2), (6, 3), (7, 4), (8, 5), (9, 6), (11, 6), (12, 7), (13, 8), (14, 9), (15, 10), (17, 10), (18, 11), (19, 12)], 'beepers': {(10, 7): 1}},
'stairs3': {'avenues': 10, 'streets': 10, 'walls': [(4, 1), (5, 2), (6, 3), (7, 4), (9, 4), (11, 4), (12, 5), (13, 6), (14, 7), (15, 8), (17, 8), (18, 9), (19, 10)], 'beepers': {(10, 6): 1}},
'stairs4': {'avenues': 10, 'streets': 10, 'walls': [(2, 1), (3, 2), (4, 3), (5, 4), (7, 4), (9, 4), (11, 4), (12, 5), (13, 6), (15, 6), (16, 7), (17, 8), (18, 9), (19, 10)], 'beepers': {(4, 3): 1}},
'coins': {'avenues': 10, 'streets': 10, 'walls': [(3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2), (2, 3), (2, 5), (2, 7), (2, 9), (2, 11), (2, 13), (2, 15), (2, 17), (2, 19)], 'beepers': {(2, 1): 1, (4, 1): 3, (5, 1): 2, (8, 1): 3, (7, 1): 6, (1, 2): 3, (1, 10): 1, (1, 8): 3, (1, 9): 1, (1, 4): 1}},
'coins2': {'avenues': 10, 'streets': 10, 'walls': [(2, 19), (2, 17), (2, 15), (2, 13), (2, 11), (2, 9), (2, 7), (2, 5), (2, 3), (3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2)], 'beepers': {(6, 1): 1, (7, 1): 1, (5, 1): 2, (10, 1): 3, (2, 1): 1, (1, 2): 3, (1, 3): 2, (1, 6): 4, (1, 10): 7}},
'news': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (4, 3), (5, 4), (6, 3), (7, 2), (8, 3), (9, 4), (10, 3), (11, 2), (13, 2), (14, 3), (15, 4), (16, 3), (17, 2), (19, 2)], 'beepers': {}},
'news2': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (2, 3), (3, 4), (4, 3), (5, 2), (6, 3), (7, 4), (8, 3), (9, 2), (10, 3), (11, 4), (12, 3), (15, 2), (17, 2), (13, 2), (18, 3), (19, 4)], 'beepers': {}},
'news3': {'avenues': 10, 'streets': 10, 'walls': [(1, 2), (3, 2), (5, 4), (4, 3), (6, 3), (7, 4), (8, 3), (9, 4), (10, 3), (11, 4), (12, 3), (13, 2), (14, 3), (15, 4), (16, 3), (17, 4), (18, 3), (19, 2)], 'beepers': {}},
'read': {'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(10, 1): 7}},
'read2': {'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(9, 1): 2, (10, 1): 4, (8, 1): 3}},
'read3': {'avenues': 10, 'streets': 10, 'walls': [], 'beepers': {(6, 1): 2, (8, 1): 3, (9, 1): 1, (10, 1): 7}},
'hurdles1': {
'avenues': 10,
'streets': 10,
'walls': [(4, 1), (8, 1), (12, 1), (16, 1)],
'beepers': {(10, 1): 1},
},
'hurdles2': {
'avenues': 10,
'streets': 10,
'walls': [(4, 1), (8, 1), (12, 1), (16, 1)],
'beepers': {(7, 1): 1},
},
'hurdles3': {
'avenues': 10,
'streets': 10,
'walls': [(4, 1), (8, 1), (16, 1), (2, 1), (10, 1), (18, 1), (12, 1)],
'beepers': {(10, 1): 1},
},
'beepers1': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(3, 1): 1},
},
'corner3_4': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {},
},
'rain1': {
'avenues': 10,
'streets': 10,
'walls': [(5, 6), (4, 7), (4, 9), (4, 13), (4, 15), (5, 16), (9, 16), (13, 16), (15, 16), (16, 15), (16, 11), (16, 9), (16, 7), (15, 6), (11, 6), (7, 6)],
'beepers': {},
},
'newspaper': {
'avenues': 10,
'streets': 10,
'walls': [(4, 1), (5, 2), (7, 2), (8, 3), (9, 4), (11, 4), (12, 5), (13, 6), (15, 6), (16, 7), (17, 8), (19, 8)],
'beepers': {},
},
'hurdles4': {
'avenues': 10,
'streets': 10,
'walls': [(4, 1), (8, 1), (16, 1), (2, 1), (10, 1), (18, 1), (12, 1), (4, 3), (10, 3), (10, 5)],
'beepers': {(10, 1): 1},
},
'frank18': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(7, 4): 1, (3, 7): 2, (7, 1): 19, (6, 6): 2, (3, 4): 2},
},
'rain2': {
'avenues': 12,
'streets': 9,
'walls': [(5, 6), (7, 6), (11, 6), (13, 6), (15, 6), (16, 5), (17, 4), (21, 4), (22, 5), (22, 9), (22, 11), (22, 15), (21, 16), (19, 16), (15, 16), (13, 16), (9, 16), (5, 16), (4, 15), (4, 13), (4, 9), (4, 7)],
'beepers': {},
},
'wrong': {
'avenues': 10,
'streets': 10,
'walls': [10, (10, 3), (10, 5), (1, 10), (3, 10), (5, 10), (2, 1), (2, 3), (1, 6), (3, 6), (4, 5), (4, 3), (5, 2), (6, 3), (7, 8), (5, 8), (2, 7), (7, 10), (8, 7), (9, 6), (8, 3), (9, 4), (9, 10), (10, 9)],
'beepers': {(6, 4): 1},
},
'hanoi3': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(2, 1): 3, (2, 2): 2, (2, 3): 1},
},
'fairy_tale': {
'avenues': 14,
'streets': 8,
'walls': [(1, 10), (3, 10), (4, 9), (5, 8), (6, 7), (9, 8), (11, 8), (12, 7), (12, 5), (12, 3), (12, 1)],
'beepers': {},
},
'hanoi4': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(2, 4): 1, (2, 1): 4, (2, 2): 3, (2, 3): 2},
},
'empty': {
'avenues': 8,
'streets': 8,
'walls': [],
'beepers': {},
},
'trash1': {
'avenues': 10,
'streets': 10,
'walls': [(3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2), (1, 4), (2, 3)],
'beepers': {(6, 1): 1, (3, 1): 3, (5, 1): 1, (10, 1): 2, (7, 1): 2},
},
'trash2': {
'avenues': 10,
'streets': 10,
'walls': [(3, 2), (5, 2), (7, 2), (9, 2), (11, 2), (13, 2), (15, 2), (17, 2), (19, 2), (1, 4), (2, 3)],
'beepers': {(9, 1): 1, (5, 1): 13, (2, 1): 2, (7, 1): 2},
},
'trash3': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(1, 2): 18, (7, 3): 4, (4, 8): 1, (5, 6): 7, (7, 1): 4, (9, 2): 11, (8, 8): 1, (1, 10): 3, (2, 5): 3, (5, 8): 2, (7, 9): 2},
},
'trash4': {
'avenues': 11,
'streets': 10,
'walls': [],
'beepers': {(6, 9): 3, (1, 3): 2, (9, 8): 2, (10, 6): 1, (5, 1): 2, (1, 11): 2, (10, 3): 1, (5, 5): 2, (2, 9): 1, (6, 10): 2, (1, 5): 1, (2, 2): 1, (8, 6): 2, (4, 10): 1, (8, 2): 1, (8, 11): 2, (9, 10): 3, (4, 11): 1, (2, 7): 1, (4, 6): 1, (9, 2): 1, (3, 4): 3, (5, 7): 1, (3, 8): 3, (7, 8): 5},
},
'amazing3a': {
'avenues': 7,
'streets': 7,
'walls': [(2, 1), (3, 2), (5, 2), (6, 3), (6, 5), (6, 7), (6, 9), (6, 11), (6, 13)],
'beepers': {(1, 2): 1, (2, 7): 1, (3, 2): 1, (1, 3): 1, (3, 3): 1, (1, 7): 1, (1, 4): 1, (2, 4): 1, (1, 5): 1, (2, 6): 1, (1, 6): 1, (3, 6): 1, (2, 2): 1, (2, 3): 1, (3, 7): 1, (2, 5): 1, (3, 4): 1, (1, 1): 1, (3, 5): 1},
},
'yardwork': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(1, 2): 18, (7, 3): 4, (4, 8): 1, (5, 6): 7, (7, 1): 4, (9, 2): 11, (8, 8): 1, (1, 10): 3, (2, 5): 3, (5, 8): 2, (7, 9): 2},
},
'sort1': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(1, 2): 1, (1, 3): 1, (2, 2): 1, (1, 4): 1, (2, 4): 1, (1, 5): 1, (1, 6): 1, (2, 1): 1, (1, 7): 1, (2, 3): 1, (2, 5): 1, (1, 1): 1},
},
'harvest4': {
'avenues': 7,
'streets': 7,
'walls': [],
'beepers': {(7, 3): 1, (6, 6): 1, (5, 6): 1, (3, 2): 1, (2, 1): 1, (6, 2): 1, (5, 1): 2, (2, 5): 1, (7, 2): 1, (5, 5): 1, (7, 6): 1, (4, 4): 1, (3, 6): 1, (2, 2): 2, (3, 5): 1, (4, 1): 1, (6, 4): 1, (5, 4): 1, (7, 1): 1, (4, 5): 1, (2, 3): 1, (4, 2): 1, (6, 5): 2, (5, 3): 2, (4, 6): 1, (6, 1): 1, (7, 4): 1, (4, 3): 1, (3, 4): 2, (2, 4): 1},
},
'amazing5': {
'avenues': 7,
'streets': 7,
'walls': [(3, 2), (6, 5), (6, 7), (6, 9), (6, 11), (6, 13), (4, 1), (2, 3), (3, 4), (5, 4)],
'beepers': {},
},
'maze1': {
'avenues': 10,
'streets': 10,
'walls': [(10, 1), (10, 3), (10, 5), (1, 10), (3, 10), (5, 10), (2, 1), (2, 3), (1, 6), (3, 6), (4, 5), (4, 3), (5, 2), (6, 3), (7, 8), (5, 8), (2, 7), (7, 10), (8, 7), (9, 6), (8, 3), (9, 4), (9, 10), (10, 9)],
'beepers': {(6, 4): 1},
},
'harvest1': {
'avenues': 7,
'streets': 7,
'walls': [],
'beepers': {(3, 3): 1, (3, 2): 1, (3, 1): 1, (5, 6): 1, (5, 1): 1, (3, 6): 1, (5, 3): 1, (5, 2): 1, (7, 6): 1, (7, 5): 1, (7, 4): 1, (7, 3): 1, (7, 2): 1, (7, 1): 1, (3, 5): 1, (3, 4): 1, (2, 4): 1, (2, 5): 1, (2, 6): 1, (2, 1): 1, (2, 2): 1, (2, 3): 1, (4, 6): 1, (4, 4): 1, (4, 5): 1, (4, 2): 1, (4, 3): 1, (4, 1): 1, (6, 1): 1, (6, 2): 1, (6, 3): 1, (6, 4): 1, (6, 5): 1, (6, 6): 1, (5, 5): 1, (5, 4): 1},
},
'amazing1': {
'avenues': 5,
'streets': 5,
'walls': [],
'beepers': {},
},
'harvest2': {
'avenues': 12,
'streets': 12,
'walls': [],
'beepers': {(7, 3): 1, (6, 10): 1, (6, 6): 1, (2, 8): 1, (10, 6): 1, (7, 7): 1, (4, 6): 1, (6, 2): 1, (7, 11): 1, (3, 7): 1, (10, 8): 1, (5, 5): 1, (4, 4): 1, (8, 10): 1, (4, 8): 1, (8, 6): 1, (5, 3): 1, (9, 7): 1, (4, 10): 1, (2, 6): 1, (5, 11): 1, (5, 9): 1, (7, 5): 1, (6, 12): 1, (6, 4): 1, (3, 5): 1, (11, 7): 1, (6, 8): 1, (5, 7): 1, (9, 9): 1, (8, 8): 1, (7, 9): 1, (1, 7): 1, (9, 5): 1, (3, 9): 1, (8, 4): 1},
},
'amazing3': {
'avenues': 7,
'streets': 7,
'walls': [(2, 1), (3, 2), (5, 2), (6, 3), (6, 5), (6, 7), (6, 9), (6, 11), (6, 13)],
'beepers': {},
},
'amazing2': {
'avenues': 7,
'streets': 7,
'walls': [(6, 13), (6, 11), (6, 9), (13, 6), (11, 6), (9, 6), (7, 6), (6, 7)],
'beepers': {},
},
'harvest3': {
'avenues': 7,
'streets': 7,
'walls': [],
'beepers': {(7, 3): 1, (6, 6): 1, (5, 6): 1, (3, 2): 1, (2, 1): 1, (6, 2): 1, (5, 1): 1, (2, 5): 1, (7, 2): 1, (7, 6): 1, (4, 4): 1, (3, 6): 1, (2, 2): 1, (3, 5): 1, (4, 1): 1, (6, 4): 1, (5, 4): 1, (7, 1): 1, (4, 5): 1, (5, 5): 1, (2, 3): 1, (4, 2): 1, (6, 5): 1, (5, 3): 1, (4, 6): 1, (3, 4): 1, (6, 1): 1, (7, 4): 1, (4, 3): 1, (2, 4): 1},
},
'add1': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(10, 1): 3, (10, 2): 2}
},
'add2': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(9, 2): 1, (9, 1): 2, (10, 1): 2, (10, 2): 3}
},
'add34': {
'avenues': 10,
'streets': 10,
'walls': [],
'beepers': {(8, 2): 9, (7, 1): 1, (8, 1): 3, (9, 2): 8, (10, 1): 4, (10, 2): 7}
},
}
| 60.36014
| 635
| 0.333835
|
e36ae367550f66dd2b4a1cdb03a10bf47b3c6b9c
| 5,694
|
py
|
Python
|
hdp_api/routes/__init__.py
|
CedricCazinHC/HyperAPI
|
789419b95679faf550a57773b9cc57107b2b8504
|
[
"BSD-3-Clause"
] | null | null | null |
hdp_api/routes/__init__.py
|
CedricCazinHC/HyperAPI
|
789419b95679faf550a57773b9cc57107b2b8504
|
[
"BSD-3-Clause"
] | null | null | null |
hdp_api/routes/__init__.py
|
CedricCazinHC/HyperAPI
|
789419b95679faf550a57773b9cc57107b2b8504
|
[
"BSD-3-Clause"
] | null | null | null |
from abc import ABCMeta, abstractproperty, abstractmethod
import inspect
import random
import re
import time
from requests.exceptions import HTTPError
def __call__(self,**kwargs):
formatter = dict.fromkeys(self._path_keys)
for _path_key, _validator in self._path_keys.items():
_value = kwargs.pop(_path_key,None)
if not _validator(_value) :
raise RoutePathInvalidException(_path_key, _value, self.path, _validator)
formatter[_path_key] = _value
_path = self.path if self.path[0] != '/' else self.path[1:]
_path = _path.format(**formatter)
if self._watcher:
self._watcher(str(self),kwargs.pop('info','call'))
try:
_result = self.session.request(self.httpMethod, _path, **kwargs)
self._watcher(str(self),'200')
return _result
except HTTPError as HE:
self._watcher(str(self), str(HE.response))
raise
return self.session.request(self.httpMethod, _path, **kwargs)
def call_when(self, condition=lambda x:True, call=lambda x: None, step=1, timeout=500, **kwargs):
_remaining = timeout
if self._watcher:
kwargs['info'] = 'call'
while _remaining > 0:
_remaining = _remaining - step
time.sleep(step)
_res = self.__call__(**kwargs)
if condition(_res) :
return call(_res)
elif kwargs.get('info', None) == 'call':
kwargs['info'] = 'retry'
if self._watcher:
self._watcher(str(self),'timeout')
return None
def wait_until(self, condition=lambda x:True, step=1, timeout=60, **kwargs):
_remaining = timeout
if self._watcher:
kwargs['info'] = 'call'
while _remaining > 0:
_remaining = _remaining - step
time.sleep(step)
_res = self.__call__(**kwargs)
if condition(_res) :
return _res
elif kwargs.get('info', None) == 'call':
kwargs['info'] = 'retry'
if self._watcher:
self._watcher(str(self),'timeout')
return None
class Resource(object):
__metaclass__ = ABCMeta
def __repr__(self):
return '{} <{}>'.format(self.__class__.__name__, id(self))
| 31.114754
| 131
| 0.592378
|
e36b3c194ac71da00a1987d9f541d4a940300816
| 332
|
py
|
Python
|
compsocsite/polls/migrations/0108_merge_20180105_1930.py
|
ReedyChen/opra
|
86ce88c7219d92e321cd9aa3d0bc2bf631e4b90f
|
[
"MIT"
] | 8
|
2017-03-07T19:46:51.000Z
|
2021-06-01T01:41:37.000Z
|
compsocsite/polls/migrations/0108_merge_20180105_1930.py
|
ReedyChen/opra
|
86ce88c7219d92e321cd9aa3d0bc2bf631e4b90f
|
[
"MIT"
] | null | null | null |
compsocsite/polls/migrations/0108_merge_20180105_1930.py
|
ReedyChen/opra
|
86ce88c7219d92e321cd9aa3d0bc2bf631e4b90f
|
[
"MIT"
] | 9
|
2016-06-09T03:36:20.000Z
|
2019-09-11T20:56:23.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2018-01-06 00:30
from __future__ import unicode_literals
from django.db import migrations
| 19.529412
| 46
| 0.659639
|
e36c689a2b24e54549cda9f00830211a35aefafa
| 5,141
|
py
|
Python
|
source.py
|
Sakshisingh05/ClockChian
|
21ce1005c83b003a9fc62203d03c50b3e8f70793
|
[
"MIT"
] | null | null | null |
source.py
|
Sakshisingh05/ClockChian
|
21ce1005c83b003a9fc62203d03c50b3e8f70793
|
[
"MIT"
] | null | null | null |
source.py
|
Sakshisingh05/ClockChian
|
21ce1005c83b003a9fc62203d03c50b3e8f70793
|
[
"MIT"
] | null | null | null |
from flask import Flask
from flask import render_template, redirect, url_for
from flask import request
import blockChain
app = Flask(__name__)
if __name__ == '__main__':
app.run(debug=True)
import hashlib
import json
import os
from time import time
BLOCKCHAIN_DIR = os.curdir + '/blocks/'
if __name__ == '__main__':
# for i in range(10):
# write_block(str(i),True)
for i in range(2,10):
print(check_block(str(i)))
print(check_blocks_integrity())
| 29.545977
| 108
| 0.606497
|
e36dc2963b3e15b6183197cc7bce8f0677915722
| 27
|
py
|
Python
|
rtmp/__init__.py
|
notnola/pinybot
|
8ad579fe5652b42a8fb9486c8d11962f5972f817
|
[
"MIT"
] | null | null | null |
rtmp/__init__.py
|
notnola/pinybot
|
8ad579fe5652b42a8fb9486c8d11962f5972f817
|
[
"MIT"
] | null | null | null |
rtmp/__init__.py
|
notnola/pinybot
|
8ad579fe5652b42a8fb9486c8d11962f5972f817
|
[
"MIT"
] | 1
|
2019-01-31T01:07:56.000Z
|
2019-01-31T01:07:56.000Z
|
__author__ = 'TechWhizZ199'
| 27
| 27
| 0.814815
|
e36e0bc7f72121825603a719d4feff88206f860b
| 5,668
|
py
|
Python
|
download_pinterest_images.py
|
BrunoKrinski/pinterest_download_tools
|
c804f83bc97c418ea44f1d179ad9864e90631fe5
|
[
"MIT"
] | 1
|
2022-03-07T04:38:26.000Z
|
2022-03-07T04:38:26.000Z
|
download_pinterest_images.py
|
BrunoKrinski/pinterest_download_tools
|
c804f83bc97c418ea44f1d179ad9864e90631fe5
|
[
"MIT"
] | null | null | null |
download_pinterest_images.py
|
BrunoKrinski/pinterest_download_tools
|
c804f83bc97c418ea44f1d179ad9864e90631fe5
|
[
"MIT"
] | null | null | null |
import os
import wget
import time
import argparse
import subprocess
import geckodriver_autoinstaller
import chromedriver_autoinstaller
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver import FirefoxOptions
from selenium.webdriver import DesiredCapabilities
from selenium.webdriver import DesiredCapabilities
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium_stealth import stealth
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
#def execute_with_retry(method, max_attempts):
# e = None
# for i in range(0, max_attempts):
# try:
# return method()
# except Exception as e:
# print(e)
# time.sleep(1)
# if e is not None:
# raise e
# Entry point: scrape image URLs from one or more Pinterest board pages by
# scrolling the page and harvesting <img> srcset attributes, then download them.
if __name__ == '__main__':
    args = get_args()
    user = args.user
    link = args.link
    url_list = args.url_list
    # Either a single --link or a file with one URL per line is required.
    if link == None:
        if url_list == None:
            print('Please enter an url or an url file!')
            exit()
        links = open(url_list, 'r').read().splitlines()
    else:
        links = [link]
    log_file = open('log.txt','w')
    images_err = open('images_err.txt', 'w')
    #geckodriver_autoinstaller.install()
    chromedriver_autoinstaller.install()
    options = webdriver.ChromeOptions()
    options.add_argument("--start-maximized")
    # Reuse the local Chrome profile so any existing Pinterest login applies.
    options.add_argument("--user-data-dir=C:\\Users\\{}\\AppData\\Local\\Google\\Chrome\\User Data".format(user))
    driver = webdriver.Chrome(options=options)
    images_folder = 'images'
    print('Creating folder ' + images_folder + '...!')
    log_file.write('Creating folder ' + images_folder + '...!\n')
    os.makedirs(images_folder, exist_ok=True)
    num_links = len(links)
    cont = 0
    for link in links:
        # One zero-padded sub-folder per source link, e.g. images/0003.
        dpath = 'images/' + str(cont).zfill(4)
        os.mkdir(dpath)
        print('\nDownloading ' + str(cont) + '/' + str(num_links) + '...')
        log_file.write('Downloading ' + str(cont) + '/' + \
            str(num_links) + '...\n')
        cont += 1
        print('Accessing pinterest link: ' + link)
        log_file.write('Accessing pinterest link: ' + link + '\n')
        try:
            driver.get(link)
            print('Link successfully accessed!')
            log_file.write('Link successfully accessed!\n')
        except TimeoutException as e:
            print('Could not access the link:' + link)
            log_file.write('Could not access the link:' + link + '\n')
            #exit()
        print('Waitning page load...')
        log_file.write('Waiting page load...\n')
        # Fixed wait for the page's initial content to render.
        time.sleep(10)
        last_height = driver.execute_script("return document.body.scrollHeight")
        urls = []
        len_urls = 0
        change_times = 0
        scroll_times = 0
        print('Searching images... It can take a long time!')
        log_file.write('Searching images... It can take a long time!\n')
        cont_images = 0
        while True:
            # Collect candidate image URLs currently present in the DOM.
            link_tags = driver.find_elements_by_tag_name('img')
            for tag in link_tags:
                try:
                    url = tag.get_attribute('srcset')
                    url = url.split(' ')
                    # An 8-token srcset keeps the wanted resolution URL at
                    # index 6 — NOTE(review): inferred from the index choice,
                    # confirm against actual Pinterest markup.
                    if len(url) == 8:
                        url = url[6]
                        urls.append(url)
                except:
                    continue
            driver.execute_script("window.scrollBy(0, 50);")
            scroll_times += 1
            # Every 50 scroll steps: flush the collected URLs to disk and
            # check whether the page has stopped growing.
            if scroll_times == 50:
                cont_images += len(urls)
                download_images(urls, dpath)
                urls = []
                new_height = driver.execute_script("return document.body.scrollHeight")
                # Stop when page height is unchanged or a hard image cap is hit.
                if new_height == last_height or cont_images > 20000:
                    break
                else:
                    last_height = new_height
                    scroll_times = 0
    log_file.close()
    images_err.close()
| 35.873418
| 114
| 0.569689
|
e36ead0127bc40a1f4670d0eba027d0736c82d0a
| 781
|
py
|
Python
|
kafka_scripts/kafka-producer-stream-algorithm.py
|
walterjgsp/meaning
|
71fd69eab430d364baefb31096c866999de9b4dd
|
[
"MIT"
] | null | null | null |
kafka_scripts/kafka-producer-stream-algorithm.py
|
walterjgsp/meaning
|
71fd69eab430d364baefb31096c866999de9b4dd
|
[
"MIT"
] | null | null | null |
kafka_scripts/kafka-producer-stream-algorithm.py
|
walterjgsp/meaning
|
71fd69eab430d364baefb31096c866999de9b4dd
|
[
"MIT"
] | null | null | null |
from kafka import KafkaProducer
import json
import random
from time import sleep
from datetime import datetime

# Create an instance of the Kafka producer.
# Values are JSON-serialised and UTF-8 encoded before being sent.
producer = KafkaProducer(bootstrap_servers='kafka-server:9092',
                         value_serializer=lambda m: json.dumps(
                             m).encode('utf-8'),
                         api_version=(0, 11, 5))

# Payload describing one scikit-learn algorithm run (import statement,
# class name, parameters and dataset paths) for a downstream consumer.
stream_algorithm_str = {"id":"1","import_str": "from sklearn.tree import DecisionTreeClassifier",
                        "alg_str": "DecisionTreeClassifier", "parameters_str": None,
                        "db_training_path": "test_training.csv","db_test_path":"test_test.csv"}

producer.send('sk-individual-topic', stream_algorithm_str)

# block until all async messages are sent
producer.flush()
| 37.190476
| 97
| 0.658131
|
e3727e0484521064be92f2b66e6c5b9dd289ef54
| 897
|
py
|
Python
|
kinsumer/helpers.py
|
ungikim/kinsumer
|
01bd9626d985bc3c239b979f0d98094f78cc102f
|
[
"MIT"
] | 5
|
2018-03-09T05:16:38.000Z
|
2021-11-12T11:56:18.000Z
|
kinsumer/helpers.py
|
ungikim/kinsumer
|
01bd9626d985bc3c239b979f0d98094f78cc102f
|
[
"MIT"
] | 2
|
2017-10-16T06:38:28.000Z
|
2017-10-18T08:05:37.000Z
|
kinsumer/helpers.py
|
balancehero/kinsumer
|
01bd9626d985bc3c239b979f0d98094f78cc102f
|
[
"MIT"
] | 1
|
2017-10-18T08:15:28.000Z
|
2017-10-18T08:15:28.000Z
|
""":mod:`kinsumer.helpers` --- Implements various helpers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from threading import RLock
_missing = object()
| 27.181818
| 61
| 0.570792
|
e372ca50f1bbb91d278a5aa868f3b3246267b836
| 4,886
|
py
|
Python
|
python/tests/pyspark/feature/string_map_test.py
|
voganrc/mleap
|
68cbf375968d9f55acb1d673a4c2390602c0274a
|
[
"Apache-2.0"
] | 1,401
|
2017-01-07T03:34:44.000Z
|
2022-03-31T22:17:58.000Z
|
python/tests/pyspark/feature/string_map_test.py
|
liang0/mleap
|
41dbde99e389873fc609083cce5d610cea9e9170
|
[
"Apache-2.0"
] | 546
|
2016-12-30T19:10:55.000Z
|
2022-03-31T16:56:52.000Z
|
python/tests/pyspark/feature/string_map_test.py
|
liang0/mleap
|
41dbde99e389873fc609083cce5d610cea9e9170
|
[
"Apache-2.0"
] | 326
|
2017-01-24T10:35:41.000Z
|
2022-03-15T15:53:17.000Z
|
import os
import tempfile
import unittest
from py4j.protocol import Py4JJavaError
from pyspark.ml import Pipeline
from pyspark.sql import types as t
from mleap.pyspark.feature.string_map import StringMap
from mleap.pyspark.spark_support import SimpleSparkSerializer
from tests.pyspark.lib.assertions import assert_df
from tests.pyspark.lib.spark_session import spark_session
INPUT_SCHEMA = t.StructType([t.StructField('key_col', t.StringType(), False),
t.StructField('extra_col', t.StringType(), False)])
OUTPUT_SCHEMA = t.StructType([t.StructField('key_col', t.StringType(), False),
t.StructField('extra_col', t.StringType(), False),
t.StructField('value_col', t.DoubleType(), False)])
def _serialize_to_file(model, df_for_serializing):
    """Serialize *model* to a fresh temporary MLeap bundle and return its jar: path."""
    bundle_zip = os.path.join(tempfile.mkdtemp(),
                              'test_serialize_to_bundle-pipeline.zip')
    jar_file_path = _to_jar_file_path(bundle_zip)
    serializer = SimpleSparkSerializer()
    serializer.serializeToBundle(model, jar_file_path, df_for_serializing)
    return jar_file_path
def _to_jar_file_path(path):
return "jar:file:" + path
def _deserialize_from_file(path):
    """Load a Spark pipeline model back from the MLeap bundle at *path*."""
    serializer = SimpleSparkSerializer()
    return serializer.deserializeFromBundle(path)
| 36.192593
| 107
| 0.657388
|
e374829b389cef040daa81ebe91954032d3a7a55
| 72
|
py
|
Python
|
__init__.py
|
hoel-bagard/yolact
|
028fd121e94c18531243a73eb4c0d443fc38a079
|
[
"MIT"
] | null | null | null |
__init__.py
|
hoel-bagard/yolact
|
028fd121e94c18531243a73eb4c0d443fc38a079
|
[
"MIT"
] | null | null | null |
__init__.py
|
hoel-bagard/yolact
|
028fd121e94c18531243a73eb4c0d443fc38a079
|
[
"MIT"
] | null | null | null |
from .predict import YolactK
from .data import *
__version__ = "0.1.0"
| 14.4
| 28
| 0.722222
|
e374d42c0a5b986cfc32f92436749b7345991388
| 4,210
|
py
|
Python
|
stocks.py
|
nicojapas/algorithmic_trading
|
46b2b59253638f15858e44b4ebae39eb222a4619
|
[
"MIT"
] | 1
|
2021-03-16T12:11:47.000Z
|
2021-03-16T12:11:47.000Z
|
stocks.py
|
nicojapas/algorithmic_trading
|
46b2b59253638f15858e44b4ebae39eb222a4619
|
[
"MIT"
] | null | null | null |
stocks.py
|
nicojapas/algorithmic_trading
|
46b2b59253638f15858e44b4ebae39eb222a4619
|
[
"MIT"
] | 1
|
2022-01-14T21:48:08.000Z
|
2022-01-14T21:48:08.000Z
|
#!/usr/bin/env python
# coding: utf-8
# In[6]:
import pandas as pd
import io
import requests
import time
import random
# In[3]:
# gets the hidden API keys
api_key = pd.read_csv('secrets.csv').api_key.to_string().split()[1]
# In[124]:
# gets data using user's parameters
def get_data(symbol, interval):
    """
    Signature: get_data(symbol, interval) -> 'DataFrame'

    Docstring:
    Retrieves intraday market data for the selected symbol and interval,
    covering two years of history (24 monthly slices, one API call each).

    Parameters
    ----------
    symbol : str
        The name of the equity of your choice. For example: symbol=GOOGL.
    interval : str
        Time interval between two consecutive data points in the time series.
        The following values are supported: 1min, 5min, 15min, 30min, 60min.

    Returns
    -------
    DataFrame

    Examples
    --------
    >>> get_data('GOOGL', '60min')
    """
    # main url of alphavantage and selection of features from user
    BASE_URL = 'https://www.alphavantage.co/query?'
    q = {
        'function': 'TIME_SERIES_INTRADAY_EXTENDED',
        'symbol': symbol,
        'interval': interval,
        'slice': 'year1month1',
        # Fix: use the key loaded from secrets.csv instead of a
        # hard-coded API key that bypassed the configuration.
        'apikey': api_key
    }
    df = pd.DataFrame()
    for y in range(1, 3):
        for m in range(1, 13):
            # create 'slices' of 1 month each. has to do with how the api functions
            q['slice'] = f'year{y}month{m}'
            # build the query string as '&'-joined key=value pairs
            q_str = "&".join(f"{k}={v}" for k, v in q.items())
            url = BASE_URL + q_str
            print(url)
            # GET url
            response = requests.get(url)
            # read data into a pandas dataframe and accumulate the slices
            df = pd.concat([df, pd.read_csv(io.StringIO(response.content.decode('utf-8')))], axis=0)
            # because the free api has a limit of 5 calls per minute, we need to wait
            time.sleep(60 / 5)
    # returns a dataframe
    return df
# In[125]:
# auto complete function for stocks
def auto_complete_stocks(x):
    """
    Signature: auto_complete_stocks(str) -> 'json'

    Docstring:
    Makes use of the auto-completion function of Alpha Vantage API.
    It takes the user's input and returns a json with the coincidences.

    Parameters
    ----------
    symbol : str
        A string containing part of the symbol or description of the equity.
        For example 'amaz' would return the symbol and description for AMZN stocks, etc.

    Returns
    -------
    json
    """
    endpoint = f'https://www.alphavantage.co/query?function=SYMBOL_SEARCH&keywords={x}&datatype=json&apikey={api_key}'
    return requests.get(endpoint).json()
# In[ ]:
# to fetch all updated stocks and ETFs supported
def get_supported_stocks():
    """
    Signature: get_supported_stocks() -> 'DataFrame'

    Docstring:
    Retrieves the supported list of stocks and ETFs from Alpha Vantage, using their API.
    See https://www.alphavantage.co/

    Returns
    -------
    DataFrame

    Examples
    --------
    >>> get_supported_stocks()
    """
    endpoint = f'https://www.alphavantage.co/query?function=LISTING_STATUS&apikey={api_key}'
    raw_csv = requests.get(endpoint).content.decode('utf-8')
    return pd.read_csv(io.StringIO(raw_csv))
# In[ ]:
# to fetch all updated stocks and ETFs supported
# static version loading from .csv previously downloaded
def get_supported_stocks_static():
    """
    Signature: get_supported_stocks_static() -> (list, list, list)

    Docstring:
    Retrieves the supported list of stocks and ETFs from Alpha Vantage.
    This 'static' version loads the list from a .csv file previously
    downloaded, returning symbols, names and combined "SYMBOL - Name" labels.

    Returns
    -------
    (list, list, list)
    """
    table = pd.read_csv('data/stocks_etfs_list.csv')
    symbols = table['symbol'].to_list()
    names = table['name'].to_list()
    labels = [f"{s} - {n}" for s, n in zip(symbols, names)]
    return (symbols, names, labels)
| 24.195402
| 113
| 0.611639
|
e3773931c3c2274119d47a9e56c7b5427c5ed618
| 241
|
py
|
Python
|
python/sock-merchant.py
|
gajubadge11/hackerrank-3
|
132a5019b7ed21507bb95b5063fa66c446b0eff7
|
[
"MIT"
] | 21
|
2015-02-09T18:08:38.000Z
|
2021-11-08T15:00:48.000Z
|
python/sock-merchant.py
|
gajubadge11/hackerrank-3
|
132a5019b7ed21507bb95b5063fa66c446b0eff7
|
[
"MIT"
] | 7
|
2020-04-12T23:00:19.000Z
|
2021-01-30T23:44:24.000Z
|
python/sock-merchant.py
|
gajubadge11/hackerrank-3
|
132a5019b7ed21507bb95b5063fa66c446b0eff7
|
[
"MIT"
] | 27
|
2015-07-22T18:08:12.000Z
|
2022-02-28T19:50:26.000Z
|
#!/bin/python3
"""HackerRank 'Sock Merchant': count pairs of matching sock colours."""
from collections import Counter


def pairs(socks):
    """Return the number of colour-matched pairs in *socks*.

    Each colour contributes count // 2 pairs.
    """
    return sum(count // 2 for count in Counter(socks).values())


# Fix: the original called an undefined `pairs` (NameError). The stdin
# handling is also guarded so importing this module has no side effects.
if __name__ == '__main__':
    _ = int(input().strip())
    socks = list(map(int, input().strip().split(' ')))
    print(pairs(socks))
| 18.538462
| 74
| 0.659751
|
e37917c4d561fd8d9c4ecc0de859e1c2d60e6398
| 1,834
|
py
|
Python
|
statdpwrapper/experiments/exp_without_pp.py
|
barryZZJ/dp-sniper
|
71a3fc06f3fc319b023bde9aad8f05b8c5a47a80
|
[
"MIT"
] | 13
|
2021-03-30T15:39:35.000Z
|
2022-02-21T08:30:45.000Z
|
statdpwrapper/experiments/exp_without_pp.py
|
barryZZJ/dp-sniper
|
71a3fc06f3fc319b023bde9aad8f05b8c5a47a80
|
[
"MIT"
] | null | null | null |
statdpwrapper/experiments/exp_without_pp.py
|
barryZZJ/dp-sniper
|
71a3fc06f3fc319b023bde9aad8f05b8c5a47a80
|
[
"MIT"
] | 4
|
2021-06-30T08:37:45.000Z
|
2022-03-05T03:21:14.000Z
|
import os
from dpsniper.utils.my_multiprocessing import initialize_parallel_executor
from dpsniper.utils.paths import get_output_directory, set_output_directory
from statdpwrapper.algorithms_ext import *
from statdpwrapper.experiments.base import run_statdp
from statdpwrapper.experiments.mechanism_config import statdp_mechanism_map, statdp_arguments_map,\
statdp_postprocessing_map, statdp_sensitivity_map, statdp_num_inputs_map
| 41.681818
| 99
| 0.768811
|
e37a1fef8ba0d57e0296169fc7cb4fee0cc149e2
| 1,134
|
py
|
Python
|
ants/registration/symimg.py
|
ncullen93/ANTsPy
|
a4c990dcd5b7445a45ce7b366ee018c7350e7d9f
|
[
"Apache-2.0"
] | 3
|
2018-06-07T19:11:47.000Z
|
2019-06-10T05:24:06.000Z
|
ants/registration/symimg.py
|
ncullen93/ANTsPy
|
a4c990dcd5b7445a45ce7b366ee018c7350e7d9f
|
[
"Apache-2.0"
] | null | null | null |
ants/registration/symimg.py
|
ncullen93/ANTsPy
|
a4c990dcd5b7445a45ce7b366ee018c7350e7d9f
|
[
"Apache-2.0"
] | 1
|
2019-04-04T06:18:44.000Z
|
2019-04-04T06:18:44.000Z
|
__all__ = ['symimg']
from tempfile import mktemp
from .reflect_image import reflect_image
from .interface import registration
from .apply_transforms import apply_transforms
from ..core import image_io as iio
def symimg(img, gs=0.25):
"""
Symmetrize an image
Example
-------
>>> import ants
>>> img = ants.image_read( ants.get_ants_data('r16') , 'float')
>>> simg = ants.symimg(img)
"""
imgr = reflect_image(img, axis=0)
imgavg = imgr * 0.5 + img
for i in range(5):
w1 = registration(imgavg, img, type_of_transform='SyN')
w2 = registration(imgavg, imgr, type_of_transform='SyN')
xavg = w1['warpedmovout']*0.5 + w2['warpedmovout']*0.5
nada1 = apply_transforms(img, img, w1['fwdtransforms'], compose=w1['fwdtransforms'][0])
nada2 = apply_transforms(img, img, w2['fwdtransforms'], compose=w2['fwdtransforms'][0])
wavg = (iio.image_read(nada1) + iio.image_read(nada2)) * (-0.5)
wavgfn = mktemp(suffix='.nii.gz')
iio.image_write(wavg, wavgfn)
xavg = apply_transforms(img, imgavg, wavgfn)
return xavg
| 27.658537
| 95
| 0.636684
|
e37b4614ad6f2375762685c14b508a5358b41194
| 184
|
py
|
Python
|
CRONtest/hello.py
|
liu2z2/TutorCal
|
41cd0272d59cd1cca439cfef178485d0d8096820
|
[
"MIT"
] | null | null | null |
CRONtest/hello.py
|
liu2z2/TutorCal
|
41cd0272d59cd1cca439cfef178485d0d8096820
|
[
"MIT"
] | null | null | null |
CRONtest/hello.py
|
liu2z2/TutorCal
|
41cd0272d59cd1cca439cfef178485d0d8096820
|
[
"MIT"
] | null | null | null |
# CRON smoke test: each run writes a marker file named after the current
# time, so successful cron executions leave distinct artifacts.
import datetime
# Filename-safe timestamp, e.g. "14-05-09". Note this local name shadows
# the stdlib `time` module if it were imported later.
time=datetime.datetime.today().strftime("%H-%M-%S")
text_file = open("/home/pi/TutorCal/CRONtest/"+time+".txt", "w")
text_file.write("Hello world!")
text_file.close()
| 26.285714
| 64
| 0.711957
|
e37b5fbe24287ac8297fbe7f44ed3c806e40c97b
| 3,775
|
py
|
Python
|
src/facrecog_core.py
|
GaussQR/cs305-g01
|
06b1ad9ba2d05e7c76ee10eb053e9d091b070d6d
|
[
"MIT"
] | null | null | null |
src/facrecog_core.py
|
GaussQR/cs305-g01
|
06b1ad9ba2d05e7c76ee10eb053e9d091b070d6d
|
[
"MIT"
] | null | null | null |
src/facrecog_core.py
|
GaussQR/cs305-g01
|
06b1ad9ba2d05e7c76ee10eb053e9d091b070d6d
|
[
"MIT"
] | null | null | null |
import dlib
import face_recognition
import glob
import pickle
import cv2
import numpy as np
import os
from PIL import Image,ImageFont, ImageDraw, ImageEnhance
# from google.colab.patches import cv2_imshow
# Build/refresh the gallery of known faces from the 'known' directory
# (add_target_faces is defined elsewhere in this project).
add_target_faces('known')
# Load the pre-computed face encodings produced by the step above.
faces = load_encoded_faces('encoded_faces.pkl')
# Run identification over the video file — NOTE(review): the meaning of
# the third argument (frame step?) is not visible here; confirm.
identify_faces_video('al.mp4', faces, 1)
| 41.944444
| 115
| 0.614834
|
e37b884d8c5f1d75d2683c37c9063a16300e4321
| 117
|
py
|
Python
|
src/aioprometheus/formats/__init__.py
|
jbunce12/aioprometheus
|
d6dec47b05cab04901ffb8d2016d659927e02311
|
[
"MIT"
] | null | null | null |
src/aioprometheus/formats/__init__.py
|
jbunce12/aioprometheus
|
d6dec47b05cab04901ffb8d2016d659927e02311
|
[
"MIT"
] | null | null | null |
src/aioprometheus/formats/__init__.py
|
jbunce12/aioprometheus
|
d6dec47b05cab04901ffb8d2016d659927e02311
|
[
"MIT"
] | null | null | null |
from . import text
from .base import IFormatter
try:
from . import binary
except ImportError:
binary = None
| 14.625
| 28
| 0.717949
|
e37d41115e68a3191b6a2c67d0ba9d33fd342473
| 378
|
py
|
Python
|
pulpo_forms_example/urls.py
|
pulpocoders/pulpo-forms-examples
|
8b9121b8e323b9432d17f7fc0812405668df3b04
|
[
"Apache-2.0"
] | 3
|
2015-11-05T00:23:32.000Z
|
2017-05-02T15:24:11.000Z
|
pulpo_forms_example/urls.py
|
pulpocoders/pulpo-forms-examples
|
8b9121b8e323b9432d17f7fc0812405668df3b04
|
[
"Apache-2.0"
] | null | null | null |
pulpo_forms_example/urls.py
|
pulpocoders/pulpo-forms-examples
|
8b9121b8e323b9432d17f7fc0812405668df3b04
|
[
"Apache-2.0"
] | 1
|
2015-08-01T02:03:23.000Z
|
2015-08-01T02:03:23.000Z
|
from django.conf.urls import include, url
from django.contrib import admin

# URL routes for the pulpo-forms example project.
urlpatterns = [
    url(r'^example/', include('pulpo_example.urls')),
    url(r'^pulpo/', include('pulpo_forms.urls'), name='base'),
    url(r'^admin/', include(admin.site.urls)),
    # NOTE(review): string view references were removed in Django 1.10;
    # this pattern only works on older Django — confirm project version.
    url(r'^model_field_form/$',
        'pulpo_forms.views.render_form',
        {'instance': 'model-field-example'}),
]
| 29.076923
| 62
| 0.656085
|
e37dccf1196dd3e409502f652bd89e454eb6a2b8
| 2,903
|
py
|
Python
|
backup_tool/utils.py
|
tnoff/backup-tool
|
114d066b0aeaa9dab9e2594f42a520839587df20
|
[
"BSD-2-Clause"
] | null | null | null |
backup_tool/utils.py
|
tnoff/backup-tool
|
114d066b0aeaa9dab9e2594f42a520839587df20
|
[
"BSD-2-Clause"
] | null | null | null |
backup_tool/utils.py
|
tnoff/backup-tool
|
114d066b0aeaa9dab9e2594f42a520839587df20
|
[
"BSD-2-Clause"
] | null | null | null |
import base64
import codecs
from contextlib import contextmanager
import hashlib
import logging
from logging.handlers import RotatingFileHandler
import random
import string
from pathlib import Path
def random_string(length=32, prefix='', suffix=''):
    '''
    Generate random string

    length : Length of string
    prefix : Prefix to place before random characters
    suffix : Suffix to place after random characters
    '''
    alphabet = string.ascii_lowercase + string.digits
    middle_len = length - len(prefix) - len(suffix)
    middle = ''.join(random.choice(alphabet) for _ in range(middle_len))
    return prefix + middle + suffix
def md5(input_file, chunksize=64*1024):
    '''
    Get md5 base64 hash of input file

    input_file : path of the file to hash
    chunksize  : bytes read per iteration, keeps memory bounded for big files
    '''
    hash_value = hashlib.md5()
    # `with` guarantees the file is closed (the original leaked the handle).
    with open(input_file, 'rb') as read:
        while True:
            chunk = read.read(chunksize)
            if not chunk:
                break
            # The file is opened in binary mode, so chunks are always bytes;
            # the old try/except around chunk.encode() was dead py2-era code.
            hash_value.update(chunk)
    # base64.b64encode yields the same value the previous
    # codecs/str-repr/rstrip dance produced, without the string mangling.
    return base64.b64encode(hash_value.digest()).decode('ascii')
def setup_logger(name, log_file_level, logging_file=None,
                 console_logging=True, console_logging_level=logging.INFO):
    '''
    Configure and return a named logger.

    name                  : name of the logger to configure
    log_file_level        : level for the logger itself and the file handler
    logging_file          : when given, attach a rotating file handler
    console_logging       : when True, also emit records to the console
    console_logging_level : level for the console handler
    '''
    log = logging.getLogger(name)
    log.setLevel(log_file_level)
    fmt = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s',
                            datefmt='%Y-%m-%d %H:%M:%S')
    if logging_file is not None:
        # Rotate at 10 MiB, keeping 4 old files.
        file_handler = RotatingFileHandler(logging_file,
                                           backupCount=4,
                                           maxBytes=((2 ** 20) * 10))
        file_handler.setLevel(log_file_level)
        file_handler.setFormatter(fmt)
        log.addHandler(file_handler)
    if console_logging:
        console_handler = logging.StreamHandler()
        console_handler.setLevel(console_logging_level)
        console_handler.setFormatter(fmt)
        log.addHandler(console_handler)
    return log
| 31.901099
| 96
| 0.613503
|
e37ecee0bdcbaf77c2d3a8c1147419b338c8e1b7
| 1,573
|
py
|
Python
|
invmonInfra/models/inventoryModels.py
|
jtom38/invmon-api
|
28f163bef47ee5c95bac0f40198e25e44090758f
|
[
"MIT"
] | null | null | null |
invmonInfra/models/inventoryModels.py
|
jtom38/invmon-api
|
28f163bef47ee5c95bac0f40198e25e44090758f
|
[
"MIT"
] | 16
|
2021-12-09T06:22:29.000Z
|
2022-03-25T06:26:01.000Z
|
invmonInfra/models/inventoryModels.py
|
jtom38/invmon-api
|
28f163bef47ee5c95bac0f40198e25e44090758f
|
[
"MIT"
] | null | null | null |
from logging import lastResort
from pydantic import BaseModel
from invmonApi.database import Base
from invmonInfra.enum import InventoryLastStatusEnum
from sqlalchemy import Column, String, Boolean
from uuid import uuid4
| 25.370968
| 121
| 0.628099
|
e37f1a70ff938fa436f4a4d3d93cb8fdc066ba63
| 2,201
|
py
|
Python
|
chatette/parsing/lexing/rule_slot_val.py
|
ziligy/Chatette
|
014c0b0a991bf66cb69fc6a69e0f6c298974eec9
|
[
"MIT"
] | 263
|
2018-09-06T14:46:29.000Z
|
2022-03-31T08:40:19.000Z
|
chatette/parsing/lexing/rule_slot_val.py
|
ziligy/Chatette
|
014c0b0a991bf66cb69fc6a69e0f6c298974eec9
|
[
"MIT"
] | 50
|
2018-09-06T14:50:18.000Z
|
2021-11-16T03:54:27.000Z
|
chatette/parsing/lexing/rule_slot_val.py
|
ziligy/Chatette
|
014c0b0a991bf66cb69fc6a69e0f6c298974eec9
|
[
"MIT"
] | 49
|
2018-09-18T23:15:09.000Z
|
2022-03-02T11:23:08.000Z
|
# coding: utf-8
"""
Module `chatette.parsing.lexing.rule_slot_val`
Contains the definition of the class that represents the lexing rule
to tokenize a slot value being set within a unit rule (only for a slot).
"""
from chatette.parsing.lexing.lexing_rule import LexingRule
from chatette.parsing.lexing import LexicalToken, TerminalType
from chatette.parsing.utils import find_next_comment, SLOT_VAL_SYM
| 37.948276
| 78
| 0.598364
|
e37fa4448f670de81c2e240c869389511aaf6b49
| 441
|
py
|
Python
|
python算法/6.5如何根据已知随机数生成函数计算新的随机数.py
|
RobinYaoWenbin/Python-CommonCode
|
1ee714541f2fd9c8b96d018d3d4eb94f4edc812a
|
[
"MIT"
] | 12
|
2020-09-28T03:25:03.000Z
|
2022-03-20T07:44:09.000Z
|
python算法/6.5如何根据已知随机数生成函数计算新的随机数.py
|
RobinYaoWenbin/Python-CommonCode
|
1ee714541f2fd9c8b96d018d3d4eb94f4edc812a
|
[
"MIT"
] | null | null | null |
python算法/6.5如何根据已知随机数生成函数计算新的随机数.py
|
RobinYaoWenbin/Python-CommonCode
|
1ee714541f2fd9c8b96d018d3d4eb94f4edc812a
|
[
"MIT"
] | 21
|
2020-03-19T00:44:35.000Z
|
2022-01-30T03:46:18.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 23 20:28:51 2020

@author: Administrator
"""
"""
rand7()1~7,rand10(),1-10.
"""
import random


def rand7():
    """Return a uniformly random integer in [1, 7]."""
    return random.randint(1, 7)


def rand10():
    """Return a uniformly random integer in [1, 10] using only rand7().

    Rejection sampling: (rand7()-1)*7 + rand7() is uniform on [1, 49];
    draws above 40 are rejected so the remaining 40 map evenly onto 1..10.
    (Fix: the original called rand10() without ever defining it.)
    """
    while True:
        num = (rand7() - 1) * 7 + rand7()  # uniform on 1..49
        if num <= 40:
            return 1 + (num - 1) % 10


if __name__ == "__main__":
    print(rand10())
| 16.961538
| 67
| 0.535147
|
e380169c4e3938481f564cc3fc99e33f8bdaa725
| 26,432
|
py
|
Python
|
pytropos/internals/values/python_values/python_values.py
|
helq/pytropos
|
497ed5902e6e4912249ca0a46b477f9bfa6ae80a
|
[
"MIT"
] | 4
|
2019-10-06T18:01:24.000Z
|
2020-07-03T05:27:35.000Z
|
pytropos/internals/values/python_values/python_values.py
|
helq/pytropos
|
497ed5902e6e4912249ca0a46b477f9bfa6ae80a
|
[
"MIT"
] | 5
|
2021-06-07T15:50:04.000Z
|
2021-06-07T15:50:06.000Z
|
pytropos/internals/values/python_values/python_values.py
|
helq/pytropos
|
497ed5902e6e4912249ca0a46b477f9bfa6ae80a
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from enum import Enum
from functools import partial
# from math import isinf
from typing import Union, Optional, Any
from typing import Callable, Tuple, Dict, List, Set, Type # noqa: F401
from ..builtin_values import Bool, ops_symbols
from ..abstract_value import AbstractValue
from ...abstract_domain import AbstractDomain
from ...errors import TypeCheckLogger
from .objects_ids import new_id
from ...miscelaneous import Pos
__all__ = ['PythonValue', 'PT', 'AbstractMutVal', 'Args']
| 37.019608
| 99
| 0.589967
|
e3819ef8cd2690861dd5dfa539b9d90716dabcd3
| 2,249
|
py
|
Python
|
datasets/utils_cifar10.py
|
jbinas/fortified-networks
|
7db626075a019a6a7d8e2cb7d3a97404a1124c69
|
[
"MIT"
] | 5
|
2018-10-29T20:21:58.000Z
|
2021-11-19T08:58:18.000Z
|
datasets/utils_cifar10.py
|
yaya20160101/fortified-networks
|
7db626075a019a6a7d8e2cb7d3a97404a1124c69
|
[
"MIT"
] | null | null | null |
datasets/utils_cifar10.py
|
yaya20160101/fortified-networks
|
7db626075a019a6a7d8e2cb7d3a97404a1124c69
|
[
"MIT"
] | 5
|
2018-06-29T00:37:56.000Z
|
2021-05-28T04:00:55.000Z
|
import keras
import tensorflow as tf
import numpy.random as rng
from keras.datasets import cifar10
from keras.utils import np_utils
def data_cifar10(**kwargs):
    """
    Preprocess CIFAR10 dataset

    Loads CIFAR10, reshapes it for the active backend's channel ordering,
    shuffles both splits, scales pixels to [0, 1] and one-hot encodes labels.
    :return: (X_train, Y_train, X_test, Y_test)
    """
    # These values are specific to CIFAR10
    img_rows, img_cols, nb_classes = 32, 32, 10

    # the data, shuffled and split between train and test sets
    (X_train, y_train), (X_test, y_test) = cifar10.load_data()

    # Channels-first ('th') vs channels-last layouts.
    if keras.backend.image_dim_ordering() == 'th':
        train_shape = (X_train.shape[0], 3, img_rows, img_cols)
        test_shape = (X_test.shape[0], 3, img_rows, img_cols)
    else:
        train_shape = (X_train.shape[0], img_rows, img_cols, 3)
        test_shape = (X_test.shape[0], img_rows, img_cols, 3)
    X_train = X_train.reshape(train_shape).astype('float32')
    X_test = X_test.reshape(test_shape).astype('float32')

    # Shuffle the test split first, then the train split (same RNG draw
    # order as before).
    tpermutation = rng.permutation(X_test.shape[0])
    X_test, y_test = X_test[tpermutation], y_test[tpermutation]
    permutation = rng.permutation(X_train.shape[0])
    X_train, y_train = X_train[permutation], y_train[permutation]

    # Scale pixel values from [0, 255] to [0, 1].
    X_train /= 255
    X_test /= 255

    print('X_train shape:', X_train.shape)
    print(X_train.shape[0], 'train samples')
    print(X_test.shape[0], 'test samples')

    # convert class vectors to binary class matrices
    Y_train = np_utils.to_categorical(y_train, nb_classes)
    Y_test = np_utils.to_categorical(y_test, nb_classes)
    return X_train, Y_train, X_test, Y_test
| 29.207792
| 74
| 0.678524
|
e382e90eebc5900ceec8c6969e8c5a01efb198a6
| 93
|
py
|
Python
|
gorden_crawler/utils/country.py
|
Enmming/gorden_cralwer
|
3c279e4f80eaf90f3f03acd31b75cf991952adee
|
[
"Apache-2.0"
] | 2
|
2019-02-22T13:51:08.000Z
|
2020-08-03T14:01:30.000Z
|
gorden_crawler/utils/country.py
|
Enmming/gorden_cralwer
|
3c279e4f80eaf90f3f03acd31b75cf991952adee
|
[
"Apache-2.0"
] | null | null | null |
gorden_crawler/utils/country.py
|
Enmming/gorden_cralwer
|
3c279e4f80eaf90f3f03acd31b75cf991952adee
|
[
"Apache-2.0"
] | 1
|
2020-08-03T14:01:32.000Z
|
2020-08-03T14:01:32.000Z
|
#!/usr/bin/python
#-*- coding:utf-8 -*-
# Register country-code constants on the project's shared `const` module.
from . import const

const.UK = 'UK'
const.US = 'US'
| 13.285714
| 21
| 0.591398
|
e3850a968708849ad26d08ad8495038926eabd09
| 248
|
py
|
Python
|
9bus.py
|
sayonsom/Canvass
|
e59cd68f26722144abc5caf2d7ae1e7389c39ad1
|
[
"MIT"
] | 9
|
2018-01-29T10:53:25.000Z
|
2021-02-21T19:35:23.000Z
|
9bus.py
|
cyberange-dev0ps/Canvass
|
e59cd68f26722144abc5caf2d7ae1e7389c39ad1
|
[
"MIT"
] | 1
|
2019-06-04T14:43:34.000Z
|
2021-07-09T08:35:13.000Z
|
9bus.py
|
cyberange-dev0ps/Canvass
|
e59cd68f26722144abc5caf2d7ae1e7389c39ad1
|
[
"MIT"
] | 12
|
2017-05-04T23:39:10.000Z
|
2021-09-25T17:05:00.000Z
|
# There are copyright holders.
# Run a power flow on the IEEE 9-bus test case and print per-bus results.
import pandapower as pp
import pandapower.networks as pn

net = pn.case9()  # built-in IEEE 9-bus network
pp.runpp(net)  # solve the power flow
print ("Canvass NR Power Flow Results At The Buses")
print ("------------------------------------------")
print (net.res_bus)
| 22.545455
| 52
| 0.612903
|
e388be8ed758ecb9a717bada7d2953a7819ac2aa
| 315
|
py
|
Python
|
archeutils/urls.py
|
acdh-oeaw/acdh-django-archeutils
|
d1d560ce739d3e2eeddd080c4d96e7482fefbbc5
|
[
"MIT"
] | null | null | null |
archeutils/urls.py
|
acdh-oeaw/acdh-django-archeutils
|
d1d560ce739d3e2eeddd080c4d96e7482fefbbc5
|
[
"MIT"
] | null | null | null |
archeutils/urls.py
|
acdh-oeaw/acdh-django-archeutils
|
d1d560ce739d3e2eeddd080c4d96e7482fefbbc5
|
[
"MIT"
] | null | null | null |
from django.urls import include, path
from . import arche_rdf_views

# Namespace for URL reversing, e.g. 'archeutils:res_as_arche_graph'.
app_name = "archeutils"
urlpatterns = [
    # One object rendered as an ARCHE RDF graph.
    path('<app_name>/<model_name>/<pk>', arche_rdf_views.res_as_arche_graph, name='res_as_arche_graph'),
    # A whole model queryset rendered as an ARCHE RDF graph.
    path('<app_name>/<model_name>', arche_rdf_views.qs_as_arche_graph, name='qs_as_arche_graph'),
]
| 31.5
| 104
| 0.75873
|
8b4c4b1f780806fe28ac378ba5bc5176a6a833d9
| 763
|
py
|
Python
|
applications/createEVENT/SimCenterEvent.py
|
fmckenna/EE-UQ
|
a1fe96fd000aec933430bda5829c82b5743338c3
|
[
"BSD-2-Clause"
] | 1
|
2019-04-30T19:38:17.000Z
|
2019-04-30T19:38:17.000Z
|
applications/createEVENT/SimCenterEvent.py
|
s-m-amin-ghasemi/EE-UQ
|
7eb42d09b59b42fd1256c6d8693cfe46e0b8034b
|
[
"BSD-2-Clause"
] | 2
|
2018-09-11T01:32:27.000Z
|
2018-09-11T23:08:06.000Z
|
applications/createEVENT/SimCenterEvent.py
|
s-m-amin-ghasemi/EE-UQ
|
7eb42d09b59b42fd1256c6d8693cfe46e0b8034b
|
[
"BSD-2-Clause"
] | 6
|
2018-05-14T21:45:24.000Z
|
2018-10-04T18:13:42.000Z
|
import sys
from shutil import copyfile
if __name__== "__main__":
main()
| 29.346154
| 69
| 0.626474
|
8b4cf66930d071ee4505d81a0c0281d51346de46
| 384
|
py
|
Python
|
zad1_12.py
|
kamilhabrych/python-semestr5-lista1
|
65faeffe83bcc4706b2818e2e7802d986b19244b
|
[
"MIT"
] | null | null | null |
zad1_12.py
|
kamilhabrych/python-semestr5-lista1
|
65faeffe83bcc4706b2818e2e7802d986b19244b
|
[
"MIT"
] | null | null | null |
zad1_12.py
|
kamilhabrych/python-semestr5-lista1
|
65faeffe83bcc4706b2818e2e7802d986b19244b
|
[
"MIT"
] | null | null | null |
# Compare the square root of 2, cube root of 3 and fifth root of 5.
x = 2 ** (1/2)
y = 3 ** (1/3)
z = 5 ** (1/5)

# Print each root on its own line, then a blank separator.
for root in (x, y, z):
    print(root)
print()

# The three values are pairwise distinct, so max()/min() reproduce the
# original strict-inequality comparison chains exactly.
print(max(x, y, z), 'jest najwiksza')
print()
print(min(x, y, z), 'jest najmniejsza')
| 16
| 31
| 0.585938
|
8b4d9675e98a4abeceff47ef0ef4214b548c119b
| 259
|
py
|
Python
|
2-mouth02/day03/exe03.py
|
gary-gggggg/gary
|
d8ba30ea4bc2b662a2d6a87d247f813e5680d63e
|
[
"Apache-2.0"
] | 4
|
2021-02-01T10:28:11.000Z
|
2021-02-01T10:34:40.000Z
|
2-mouth02/day03/exe03.py
|
gary-gggggg/gary
|
d8ba30ea4bc2b662a2d6a87d247f813e5680d63e
|
[
"Apache-2.0"
] | null | null | null |
2-mouth02/day03/exe03.py
|
gary-gggggg/gary
|
d8ba30ea4bc2b662a2d6a87d247f813e5680d63e
|
[
"Apache-2.0"
] | null | null | null |
# Truncate file.txt to a single newline, then append 4 lines typed by the user.
title=open("file.txt","w")
title.write("\n" )
title.close()
sum=0  # line counter; note it shadows the builtin sum()
while 1:
    # Re-open in append mode on every iteration.
    sentence=open("file.txt","a")
    sum+=1
    if sum>4:
        sentence.close()
        break
    k =input("")
    sentence.write(f"{k}\n")
    sentence.close()
| 17.266667
| 33
| 0.555985
|
8b4d9f6ab5c3257761c9eb3fa1e62a13d1f8d05b
| 1,635
|
py
|
Python
|
scanplans/grid_scan.py
|
st3107/bluesky_scanplans
|
2ab126c0b7f4427a10d42cf59ea004770c433383
|
[
"BSD-3-Clause"
] | null | null | null |
scanplans/grid_scan.py
|
st3107/bluesky_scanplans
|
2ab126c0b7f4427a10d42cf59ea004770c433383
|
[
"BSD-3-Clause"
] | null | null | null |
scanplans/grid_scan.py
|
st3107/bluesky_scanplans
|
2ab126c0b7f4427a10d42cf59ea004770c433383
|
[
"BSD-3-Clause"
] | null | null | null |
import bluesky.plan_stubs as bps
import bluesky.plans as bp
from xpdacq.beamtime import _configure_area_det
from xpdacq.glbl import glbl
from xpdacq.xpdacq import open_shutter_stub, close_shutter_stub
from xpdacq.xpdacq_conf import xpd_configuration
# below is the code to run at the beamtime
# register the scanplan
# ScanPlan(bt, acq_rel_grid_scan, 60, 30, -5, 5, 10, -5, 5, 10)
# use bt.list() to see the index of the scanplan and use it in xrun
| 34.0625
| 76
| 0.688685
|
8b4e6b2b167aebf419baed2ece989c7a96978324
| 5,172
|
py
|
Python
|
kolibri/logger/migrations/0001_initial_redone.py
|
aronasorman/kolibri
|
940672bc849cd0b26d7d84ee08a34f072c4f6cd6
|
[
"MIT"
] | 1
|
2021-11-09T11:30:12.000Z
|
2021-11-09T11:30:12.000Z
|
kolibri/logger/migrations/0001_initial_redone.py
|
aronasorman/kolibri
|
940672bc849cd0b26d7d84ee08a34f072c4f6cd6
|
[
"MIT"
] | 2
|
2017-02-08T00:22:04.000Z
|
2017-06-12T20:27:44.000Z
|
kolibri/logger/migrations/0001_initial_redone.py
|
aronasorman/kolibri
|
940672bc849cd0b26d7d84ee08a34f072c4f6cd6
|
[
"MIT"
] | 1
|
2020-05-21T18:17:55.000Z
|
2020-05-21T18:17:55.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-09 17:25
from __future__ import unicode_literals
import django.core.validators
import django.db.models.deletion
import kolibri.content.models
from django.db import migrations, models
| 56.835165
| 176
| 0.616589
|
8b4e779744d51e5ebec4b797f93c9f1ab0c716a1
| 555
|
py
|
Python
|
setup.py
|
kennydo/pick-my-stick
|
17bb4fbb35cc9637a838f5bdd91caeb7458b43bd
|
[
"MIT"
] | null | null | null |
setup.py
|
kennydo/pick-my-stick
|
17bb4fbb35cc9637a838f5bdd91caeb7458b43bd
|
[
"MIT"
] | null | null | null |
setup.py
|
kennydo/pick-my-stick
|
17bb4fbb35cc9637a838f5bdd91caeb7458b43bd
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages

# Packaging metadata for the pick-my-stick Slack sticker app.
setup(
    name='picker-my-sticker',
    version='0.0.1',
    description='Stickers for Slack',
    long_description='S t i c k e r s',
    url='https://github.com/kennydo/pick-my-stick',
    author='Kenny Do',
    author_email='chinesedewey@gmail.com',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Topic :: Internet',
    ],
    packages=find_packages(),
    # No console scripts or plugins registered yet.
    entry_points={
    },
)
| 25.227273
| 51
| 0.610811
|
8b528bad86c27520698632ef706d6564180389c3
| 10,562
|
py
|
Python
|
helpers.py
|
TimHeiszwolf/NBPGravity
|
b054b189f5493ad8ec094786f16f5525c117a127
|
[
"MIT"
] | 1
|
2022-03-08T07:16:53.000Z
|
2022-03-08T07:16:53.000Z
|
helpers.py
|
TimHeiszwolf/NBPGravity
|
b054b189f5493ad8ec094786f16f5525c117a127
|
[
"MIT"
] | null | null | null |
helpers.py
|
TimHeiszwolf/NBPGravity
|
b054b189f5493ad8ec094786f16f5525c117a127
|
[
"MIT"
] | null | null | null |
import numpy as np
import time
import matplotlib.pyplot as plt
import imageio
from scipy.optimize import fsolve
from body import Body
def get_position_from_Kepler(semimajor_axis, eccentricity, inclination, ascending_node, argument_of_periapsis, mean_anomaly, mass_orbit, G=6.67430 * 10**(-11)):
    """
    Get the position vector from the Keplerian orbital elements.

    Solves Kepler's equation M = E - e*sin(E) numerically for the eccentric
    anomaly, derives the true anomaly and radius, then rotates the in-plane
    position into Cartesian coordinates.

    First part from https://downloads.rene-schwarz.com/download/M001-Keplerian_Orbit_Elements_to_Cartesian_State_Vectors.pdf
    Second part from https://space.stackexchange.com/questions/19322/converting-orbital-elements-to-cartesian-state-vectors

    Note: `mass_orbit` and `G` are kept for interface compatibility; they
    only fed a velocity computation whose result was discarded (dead code
    with copy-pasted terms), which has been removed.

    >>> position = get_position_from_Kepler(1.5*10**8, 0.0167, (5*10**(-5))*np.pi/180, 1, 1, 190*np.pi/180, 1.988435 * (10**30))
    >>> position
    array([ 8.58449271e+07, -1.26004733e+08, -1.22449388e+02])
    >>> np.linalg.norm(position)
    152468174.39880842
    """
    # Kepler's equation: find E with M = E - e*sin(E).
    kepler_eq = lambda EA: mean_anomaly - (EA - eccentricity * np.sin(EA))
    eccentric_anomaly = fsolve(kepler_eq, np.pi)[0]
    true_anomaly = 2 * np.arctan2(np.sqrt(1 + eccentricity) * np.sin(eccentric_anomaly / 2), np.sqrt(1 - eccentricity) * np.cos(eccentric_anomaly / 2))
    radius = semimajor_axis * (1 - eccentricity * np.cos(eccentric_anomaly))

    # Short aliases for the rotation from the orbital plane.
    Om = ascending_node
    w = argument_of_periapsis
    nu = true_anomaly
    r = radius
    i = inclination

    # Rotate by argument of periapsis, inclination and ascending node.
    x = r*(np.cos(Om)*np.cos(w+nu) - np.sin(Om)*np.sin(w+nu)*np.cos(i))
    y = r*(np.sin(Om)*np.cos(w+nu) + np.cos(Om)*np.sin(w+nu)*np.cos(i))
    z = r*(np.sin(i)*np.sin(w+nu))
    return np.array([x, y, z])
def get_coordinates_from_Kepler(semimajor_axis, eccentricity, inclination, ascending_node, argument_of_periapsis, mean_anomaly, current_velocity, mass_orbit, G=6.67430 * 10**(-11), delta=0.001):
"""
Lol wtf pls kil me.
>>> position, velocity = get_coordinates_from_Kepler(1.5*10**8, 0.0167, (5*10**(-5))*np.pi/180, 1, 1, 190*np.pi/180, 29300, 1.988435 * (10**30))
>>> position
array([ 8.58449271e+07, -1.26004733e+08, -1.22449388e+02])
>>> velocity
array([ 2.41591639e+04, 1.65778407e+04, -9.92410781e-03])
>>> np.linalg.norm(position)
152468174.39880842
>>> np.linalg.norm(velocity)
29299.999999999993
"""
position = get_position_from_Kepler(semimajor_axis, eccentricity, inclination, ascending_node, argument_of_periapsis, mean_anomaly, mass_orbit, G)
position_plus_delta = get_position_from_Kepler(semimajor_axis, eccentricity, inclination, ascending_node, argument_of_periapsis, mean_anomaly + delta, mass_orbit, G)
delta_position = position_plus_delta - position
direction_unit_vector = delta_position / np.linalg.norm(delta_position)
return position, current_velocity * direction_unit_vector
def ld_to_m(ld):
"""
Converts the input distance (or velocity) of the input from Lunar distances to meters.
"""
return ld * 384402 * 10**3
def au_to_m(au):
"""
Converts the input distance (or velocity) of the input from atronomical units to meters.
"""
return au * 1.495978707 * 10**11
def ly_to_m(ly):
"""
Converts the input distance (or velocity) of the input from light years to meters.
"""
return ly * 9.4607 * 10**15
def pc_to_m(pc):
"""
Converts the input distance (or velocity) of the input from parsec to meters.
"""
return pc * 3.085677581 * 10**18
def get_test_Space_simple_solar():
"""
Generates a simple test Space object. It is filled with the 8 plannets of the solar system (and the moon). They are position in a way that doesn't 100% correspond to reality.
"""
bodies = []
mass_orbit = 1.988435 * (10**30)
# The most important bodies.
bodies.append(Body(np.array([0.0, 0.0, 0.0]), np.array([0.0, 0.0, 0.0]), 1.988435 * (10**30), 695700000, 'Sun', True, 'tab:orange'))
position_earth, velocity_earth = get_coordinates_from_Kepler(1.0*1.496*10**11, 0.01671, (5*10**(-5))*np.pi/180, 0, 0, 190*np.pi/180, 29300, mass_orbit)
bodies.append(Body(position_earth, velocity_earth, 5.97 * (10**24), 6371009, 'Earth', True, 'tab:blue'))
position, velocity = get_coordinates_from_Kepler(384400*1000, 0.0554, 5.16*np.pi/180, 125*np.pi/180, 318.15*np.pi/180, 213*np.pi/180, 1020, bodies[1].mass)
position = position + position_earth
velocity = velocity + velocity_earth
bodies.append(Body(position,velocity, 7.349 * (10**22), 1737400, 'Moon', True, 'darkgrey'))
# Other inner plannets.
position, velocity = get_coordinates_from_Kepler(0.38709893*1.496*10**11, 0.20563069, 7.00487*np.pi/180, 48.33*np.pi/180, 29.12*np.pi/180, 269*np.pi/180, 45810, mass_orbit)
bodies.append(Body(position, velocity, 3.301 * (10**23), 2440000, 'Mercury', True, 'lightsteelblue'))
position, velocity = get_coordinates_from_Kepler(0.72333199*1.496*10**11, 0.00677, 3.39471*np.pi/180, 76.68069*np.pi/180, 54.85*np.pi/180, 187*np.pi/180, 34790, mass_orbit)
bodies.append(Body(position, velocity, 4.867 * (10**24), 6050000, 'Venus', True, 'goldenrod'))
position, velocity = get_coordinates_from_Kepler(1.52366*1.496*10**11, 0.09341, 1.85061*np.pi/180, 49.57*np.pi/180, 286*np.pi/180, 349*np.pi/180, 26450, mass_orbit)
bodies.append(Body(position, velocity, 6.417 * (10**23), 3390000, 'Mars', True, 'sandybrown'))
# Outer planets.
position_jupiter, velocity_jupiter = get_coordinates_from_Kepler(5.2033*1.496*10**11, 0.04839, 1.3053*np.pi/180, 100.556*np.pi/180, -85.80*np.pi/180, 283*np.pi/180, 13170, mass_orbit)
bodies.append(Body(position_jupiter, velocity_jupiter, 1.898 * (10**27), 69950000, 'Jupiter', True, 'darkorange'))
position_saturn, velocity_saturn = get_coordinates_from_Kepler(9.537*1.496*10**11, 0.0541, 2.48446*np.pi/180, 113.715*np.pi/180, -21.2831*np.pi/180, 207*np.pi/180, 91590, mass_orbit)
bodies.append(Body(position_saturn, velocity_saturn, 5.683 * (10**26), 58300000, 'Saturn', True, 'navajowhite'))
position_uranus, velocity_uranus = get_coordinates_from_Kepler(19.1912*1.496*10**11, 0.0471771, 0.76986*np.pi/180, 74.22988*np.pi/180, 96.73436*np.pi/180, 229*np.pi/180, 6578, mass_orbit)
bodies.append(Body(position_uranus, velocity_uranus, 8.681 * (10**25), 25360000, 'Uranus', True, 'powderblue'))
position_neptune, velocity_neptune = get_coordinates_from_Kepler(30.06896*1.496*10**11, 0.00858587, 1.76917*np.pi/180, 131.72169*np.pi/180, -86.75*np.pi/180, 301*np.pi/180, 5449, mass_orbit)
bodies.append(Body(position_neptune, velocity_neptune, 1.024 * (10**26), 24600000, 'Neptune', True, 'dodgerblue'))
return bodies
if __name__ == "__main__":
import doctest
doctest.testmod()
| 44.944681
| 194
| 0.639841
|
8b569282b5d41a4fb5d9ee37ff203ff019b8b666
| 10,897
|
py
|
Python
|
opttrack/lib/ui/edit_handlers.py
|
aisthesis/opttrack
|
17e0c7740ea43e0f07166e30d689b106d0319d0b
|
[
"MIT"
] | null | null | null |
opttrack/lib/ui/edit_handlers.py
|
aisthesis/opttrack
|
17e0c7740ea43e0f07166e30d689b106d0319d0b
|
[
"MIT"
] | 2
|
2016-03-30T02:50:31.000Z
|
2016-03-30T16:18:23.000Z
|
opttrack/lib/ui/edit_handlers.py
|
aisthesis/opttrack
|
17e0c7740ea43e0f07166e30d689b106d0319d0b
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) 2015 Marshall Farrier
license http://opensource.org/licenses/MIT
lib/ui/handlers.py
Handlers for edit menu
"""
from bson.codec_options import CodecOptions
import datetime as dt
from functools import partial
import json
from pymongo.errors import BulkWriteError
from ..dbschema import SPREADS
from ..dbtools import delete_many, find_job, getcoll, insert_many
from ..dbwrapper import job
from ..spreads.optspread import SPREAD_TYPES
from ..spreads.optspread_factory import OptSpreadFactory
from .spread_ui import SpreadUi
from .utils import confirm
| 37.968641
| 112
| 0.623383
|
8b58c384ea2a5cec4051907804ed34709b049103
| 4,126
|
py
|
Python
|
server/music/api.py
|
tricelex/zc_plugin_youtube_music_video
|
f3389cafc9e1a6b0fd2d94e0af77e9beec678282
|
[
"MIT"
] | null | null | null |
server/music/api.py
|
tricelex/zc_plugin_youtube_music_video
|
f3389cafc9e1a6b0fd2d94e0af77e9beec678282
|
[
"MIT"
] | null | null | null |
server/music/api.py
|
tricelex/zc_plugin_youtube_music_video
|
f3389cafc9e1a6b0fd2d94e0af77e9beec678282
|
[
"MIT"
] | null | null | null |
# class SidebarView(GenericAPIView):
# permission_classes = [AllowAny]
# def get(self, request, *args, **kwargs):
# org_id = request.GET.get("org", None)
# user_id = request.GET.get("user", None)
# room = settings.ROOM_COLLECTION
# plugin_id = settings.PLUGIN_ID
# roomid = settings.ROOM_ID
# token = verify_token
# pub_room = get_room_info()
# # subscription_channel: org_id_memberid_sidebar
# if request.GET.get("org") and request.GET.get("user"):
# subscription_channel = "{org_id}_{user_id}_sidebar"
# #sidebar_update = "currentWorkspace_userInfo_sidebar"
# sidebar_update_payload = {
# "event": "sidebar_update",
# "plugin_id": "music.zuri.chat",
# "data": {
# "name": "Music Plugin",
# "description": "This is a virtual lounge where people can add, watch and listen to YouTube videos or music",
# "plugin_id": plugin_id,
# "organisation_id": org_id,
# "room_id": roomid,
# "user_id": user_id,
# "category": "entertainment",
# "group_name": "music",
# "show_group": False,
# "button_url": f"/music/{org_id}/{roomid}",
# "public_rooms": [pub_room],
# # "starred" : [],
# "joined_rooms": [pub_room],
# },
# }
# # centrifugo_post(sidebar_update_payload, subscription_channel)
# # return Response(sidebar_update_payload)
# url = "https://api.zuri.chat/sidebar?org={org_id}&user={user_id}"
# # http://127.0.0.1:8000/sidebar?org=61695d8bb2cc8a9af4833d46&user=61695d8bb2cc8a9af4833d47
# r = requests.get(url)
# # print(r.status_code)
# if r.status_code == 200:
# # public_url = f"https://api.zuri.chat/data/read/{org_id}/{plugin_id}/{room}/{roomid}"
# # r = requests.get(public_url)
# publish_to_sidebar(plugin_id, user_id, {"event": "sidebar_update", "data": pub_room})
# centrifugo_post(sidebar_update_payload, subscription_channel)
# return Response(r)
# else:
# centrifugo_post(sidebar_update_payload, subscription_channel)
# return Response(
# {
# "event": "sidebar_update",
# "name": "Music Plugin",
# "description": "This is a virtual lounge where people can add, watch and listen to YouTube videos or music",
# "plugin_id": plugin_id,
# "organisation_id": org_id,
# "room_id": roomid,
# "user_id": user_id,
# "group_name": [],
# "show_group": False,
# "category": "entertainment",
# "public_rooms": [pub_room],
# "joined_rooms": [pub_room],
# }
# )
# else:
# centrifugo_post(sidebar_update_payload, subscription_channel)
# return JsonResponse(
# {
# "name": "Music Plugin",
# "description": "This is a virtual lounge where people can add, watch and listen to YouTube videos or music",
# "plugin_id": plugin_id,
# "organisation_id": org_id,
# "room_id": roomid,
# "user_id": user_id,
# "group_name": [],
# "show_group": False,
# "category": "entertainment",
# "public_rooms": [pub_room],
# "joined_rooms": [pub_room],
# }
# )
# def is_valid(param):
# return param != "" and param is not None
| 41.26
| 134
| 0.479641
|
8b59bcd3a89ce1967c8f1f93333ca68f2476a3f5
| 6,331
|
py
|
Python
|
BIT_OpenDomain_QA/rerank/utils_rerank.py
|
rwei1218/transformers
|
511e100c650b3f942c432d8f71eee3ea1c0005a8
|
[
"Apache-2.0"
] | null | null | null |
BIT_OpenDomain_QA/rerank/utils_rerank.py
|
rwei1218/transformers
|
511e100c650b3f942c432d8f71eee3ea1c0005a8
|
[
"Apache-2.0"
] | null | null | null |
BIT_OpenDomain_QA/rerank/utils_rerank.py
|
rwei1218/transformers
|
511e100c650b3f942c432d8f71eee3ea1c0005a8
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
""" Load Duqa labeled dataset. """
from __future__ import absolute_import, division, print_function
import collections
import json
import logging
import math
from io import open
from tqdm import tqdm
from transformers.tokenization_bert import BasicTokenizer, whitespace_tokenize
logger = logging.getLogger(__name__)
def convert_examples_to_features(examples, label_list, max_seq_length, tokenizer):
label_map = {label : i for i, label in enumerate(label_list)}
features = []
for (ex_index, example) in tqdm(enumerate(examples), desc='loading_data'):
tokens_a = tokenizer.tokenize(example.text_a)
tokens_b = None
if example.text_b:
tokens_b = tokenizer.tokenize(example.text_b)
_truncate_seq_pair(tokens_a, tokens_b, max_seq_length - 3)
else:
if len(tokens_a) > max_seq_length - 2:
tokens_a = tokens_a[:(max_seq_length - 2)]
tokens = ["[CLS]"] + tokens_a + ["[SEP]"]
segment_ids = [0] * len(tokens)
if tokens_b:
tokens += tokens_b + ["[SEP]"]
segment_ids += [1] * (len(tokens_b) + 1)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
label_id = label_map[example.label]
if ex_index < 5:
logger.info("*** Example ***")
logger.info("guid: %s" % (example.guid))
logger.info("tokens: %s" % " ".join(
[str(x) for x in tokens]))
logger.info("input_ids: %s" % " ".join([str(x) for x in input_ids]))
logger.info("input_mask: %s" % " ".join([str(x) for x in input_mask]))
logger.info(
"segment_ids: %s" % " ".join([str(x) for x in segment_ids]))
logger.info("label: %s (id = %d)" % (example.label, label_id))
features.append(
InputFeatures(input_ids=input_ids,
input_mask=input_mask,
segment_ids=segment_ids,
label_id=label_id))
return features
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
# This is a simple heuristic which will always truncate the longer sequence
# one token at a time. This makes more sense than truncating an equal percent
# of tokens from each, since if one sequence is very short then each token
# that's truncated likely contains more information than a longer sequence.
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
processors = {
'duqa': DuQAProcessor,
}
num_labels_task = {
'duqa': 2,
}
| 31.655
| 113
| 0.59027
|
8b59f06aa5c12c6a5c23df65ae4eee79a9122e69
| 1,973
|
py
|
Python
|
LanguageConstructs/DataModel/MetaProgramming/Reflection/attribute_builtins.py
|
ha-khan/PythonPractice
|
31366d0a3380b168b96cf2e90cef3960efee8a7e
|
[
"MIT"
] | null | null | null |
LanguageConstructs/DataModel/MetaProgramming/Reflection/attribute_builtins.py
|
ha-khan/PythonPractice
|
31366d0a3380b168b96cf2e90cef3960efee8a7e
|
[
"MIT"
] | null | null | null |
LanguageConstructs/DataModel/MetaProgramming/Reflection/attribute_builtins.py
|
ha-khan/PythonPractice
|
31366d0a3380b168b96cf2e90cef3960efee8a7e
|
[
"MIT"
] | null | null | null |
from typing import Any
if __name__ == '__main__':
main()
| 25.294872
| 104
| 0.601115
|
8b5af6372e48aa5e412d730c4fca44191540f238
| 1,360
|
py
|
Python
|
src/gui/components/weeklycolormesh.py
|
larashores/spotify-analyzer
|
98022b178ce3ef1b07a8f005aeba2aeb573125ee
|
[
"MIT"
] | null | null | null |
src/gui/components/weeklycolormesh.py
|
larashores/spotify-analyzer
|
98022b178ce3ef1b07a8f005aeba2aeb573125ee
|
[
"MIT"
] | null | null | null |
src/gui/components/weeklycolormesh.py
|
larashores/spotify-analyzer
|
98022b178ce3ef1b07a8f005aeba2aeb573125ee
|
[
"MIT"
] | null | null | null |
import collections
import colorsys
from typing import Iterable, List, Tuple
import matplotobjlib as plot
from backports import zoneinfo
from matplotlib.colors import ListedColormap
import utils
from gui.components import PlotComponent
from gui.options import ArtistChooser, ColorMap, Spinbox
from track import Track
| 35.789474
| 137
| 0.627206
|
8b5b05fdbf74764959912c9444f946a0e9f8ee11
| 3,524
|
py
|
Python
|
hard-gists/1558831/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 21
|
2019-07-08T08:26:45.000Z
|
2022-01-24T23:53:25.000Z
|
hard-gists/1558831/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 5
|
2019-06-15T14:47:47.000Z
|
2022-02-26T05:02:56.000Z
|
hard-gists/1558831/snippet.py
|
jjhenkel/dockerizeme
|
eaa4fe5366f6b9adf74399eab01c712cacaeb279
|
[
"Apache-2.0"
] | 17
|
2019-05-16T03:50:34.000Z
|
2021-01-14T14:35:12.000Z
|
# [h] interpolated nudge dialog
'''a simple RoboFont dialog for the famous "interpolated nudge" script'''
# Interpolated Nudge for RoboFont -- Travis Kochel
# http://tktype.tumblr.com/post/15254264845/interpolated-nudge-for-robofont
# Interpolated Nudge -- Christian Robertson
# http://betatype.com/node/18
from vanilla import *
from NudgeCore import *
# run
interpolatedNudgeDialog()
| 31.464286
| 75
| 0.521566
|
8b5d964924108495e0cb8ad5afc9e9b8d784d6b3
| 1,547
|
py
|
Python
|
django_query_profiler/django/db/backends/database_wrapper_mixin.py
|
sonej/django-query-profiler
|
4afe3694ded26d7ba0b435f5666e990b668d85b5
|
[
"BSD-3-Clause"
] | 97
|
2020-03-03T01:20:35.000Z
|
2022-03-23T14:06:09.000Z
|
django_query_profiler/django/db/backends/database_wrapper_mixin.py
|
sonej/django-query-profiler
|
4afe3694ded26d7ba0b435f5666e990b668d85b5
|
[
"BSD-3-Clause"
] | 24
|
2020-03-06T17:35:08.000Z
|
2022-02-09T20:06:05.000Z
|
django_query_profiler/django/db/backends/database_wrapper_mixin.py
|
sonej/django-query-profiler
|
4afe3694ded26d7ba0b435f5666e990b668d85b5
|
[
"BSD-3-Clause"
] | 9
|
2020-03-22T18:17:09.000Z
|
2022-01-31T18:59:11.000Z
|
"""
This module defines a mixin, which can be used by all implementations for all databases.
All the databases have a different hierarchy of DatabaseWrapper, but all of them derive from BaseDatabaseWrapper
"""
from abc import ABC
from typing import Optional
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.backends.utils import CursorDebugWrapper, CursorWrapper
from .cursor_wrapper_instrumentation import QueryProfilerCursorDebugWrapper, QueryProfilerCursorWrapper
| 39.666667
| 120
| 0.707822
|
8b5e99254ec155e2d433487c1c07674f3203394e
| 1,736
|
py
|
Python
|
Demo/frontend-server.py
|
hlynch/Penguins_AIforEarth
|
bccedb68640b20c6c6849040ad57823e99dbd0c6
|
[
"MIT"
] | 2
|
2019-06-17T14:09:45.000Z
|
2020-08-17T00:20:44.000Z
|
Demo/frontend-server.py
|
hlynch/Penguins_AIforEarth
|
bccedb68640b20c6c6849040ad57823e99dbd0c6
|
[
"MIT"
] | 6
|
2019-05-21T16:24:43.000Z
|
2019-05-28T18:41:04.000Z
|
Demo/frontend-server.py
|
hlynch/Penguins_AIforEarth
|
bccedb68640b20c6c6849040ad57823e99dbd0c6
|
[
"MIT"
] | null | null | null |
'''
Webserver for the Penguin Guano Classification AI4Earth API
To run:
export FLASK_APP=frontend-server.py
python -m flask run --host=0.0.0.0
To access the website, enter your IP address:5000 into a browser.
e.g., http://127.0.0.1:5000/
'''
from flask import Flask, send_from_directory, request
import requests
print("Running frontend server")
API_ENDPOINT = "http://penguinguano.eastus.azurecontainer.io:80/v1/pytorch_api/classify"
app = Flask(__name__, static_url_path='')
# front-end server stuff
if __name__ == '__main__':
app.run()
| 24.111111
| 88
| 0.711982
|
8b6038fb868f4e95b06475e6967de2992f3ee654
| 2,947
|
py
|
Python
|
src/brouwers/shop/migrations/0021_payment.py
|
modelbrouwers/modelbrouwers
|
e0ba4819bf726d6144c0a648fdd4731cdc098a52
|
[
"MIT"
] | 6
|
2015-03-03T13:23:07.000Z
|
2021-12-19T18:12:41.000Z
|
src/brouwers/shop/migrations/0021_payment.py
|
modelbrouwers/modelbrouwers
|
e0ba4819bf726d6144c0a648fdd4731cdc098a52
|
[
"MIT"
] | 95
|
2015-02-07T00:55:39.000Z
|
2022-02-08T20:22:05.000Z
|
src/brouwers/shop/migrations/0021_payment.py
|
modelbrouwers/modelbrouwers
|
e0ba4819bf726d6144c0a648fdd4731cdc098a52
|
[
"MIT"
] | 2
|
2016-03-22T16:53:26.000Z
|
2019-02-09T22:46:04.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-06-29 13:56
from __future__ import unicode_literals
import django.contrib.postgres.fields.jsonb
import django.db.models.deletion
from django.db import migrations, models
import brouwers.shop.models.utils
| 33.11236
| 84
| 0.405836
|
8b620f703a95ef7c54125b1554d9a9e0de82f47e
| 12,330
|
py
|
Python
|
lib/rapi/auth/pam.py
|
regnauld/ganeti
|
c1d88461a964a5d0d89cd1ba0571429e01f0a1b5
|
[
"BSD-2-Clause"
] | 2
|
2018-09-26T10:09:23.000Z
|
2018-09-27T07:27:06.000Z
|
lib/rapi/auth/pam.py
|
regnauld/ganeti
|
c1d88461a964a5d0d89cd1ba0571429e01f0a1b5
|
[
"BSD-2-Clause"
] | null | null | null |
lib/rapi/auth/pam.py
|
regnauld/ganeti
|
c1d88461a964a5d0d89cd1ba0571429e01f0a1b5
|
[
"BSD-2-Clause"
] | null | null | null |
#
#
# Copyright (C) 2015, 2016 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module interacting with PAM performing authorization and authentication
This module authenticates and authorizes RAPI users based on their credintials.
Both actions are performed by interaction with PAM as a 'ganeti-rapi' service.
"""
import logging
try:
import ctypes as c # pylint: disable=F0401
import ctypes.util as util
except ImportError:
c = None
from ganeti import constants
from ganeti.errors import PamRapiAuthError
import ganeti.http as http
from ganeti.http.auth import HttpServerRequestAuthentication
from ganeti.rapi import auth
__all__ = ['PamAuthenticator']
DEFAULT_SERVICE_NAME = 'ganeti-rapi'
MAX_STR_LENGTH = 100000
MAX_MSG_COUNT = 100
PAM_ENV_URI = 'GANETI_RAPI_URI'
PAM_ENV_BODY = 'GANETI_REQUEST_BODY'
PAM_ENV_METHOD = 'GANETI_REQUEST_METHOD'
PAM_ENV_ACCESS = 'GANETI_RESOURCE_ACCESS'
PAM_ABORT = 26
PAM_BUF_ERR = 5
PAM_CONV_ERR = 19
PAM_SILENT = 32768
PAM_SUCCESS = 0
PAM_PROMPT_ECHO_OFF = 1
PAM_AUTHTOK = 6
PAM_USER = 2
if c:
CONV_FUNC = c.CFUNCTYPE(c.c_int, c.c_int, c.POINTER(c.POINTER(PamMessage)),
c.POINTER(c.POINTER(PamResponse)), c.c_void_p)
def Authenticate(cf, pam_handle, authtok=None):
"""Performs authentication via PAM.
Perfroms two steps:
- if authtok is provided then set it with pam_set_item
- call pam_authenticate
"""
try:
authtok_copy = None
if authtok:
authtok_copy = cf.strndup(authtok, len(authtok))
if not authtok_copy:
raise http.HttpInternalServerError("Not enough memory for PAM")
ret = cf.pam_set_item(c.pointer(pam_handle), PAM_AUTHTOK, authtok_copy)
if ret != PAM_SUCCESS:
raise http.HttpInternalServerError("pam_set_item failed [%d]" % ret)
ret = cf.pam_authenticate(pam_handle, 0)
if ret == PAM_ABORT:
raise http.HttpInternalServerError("pam_authenticate requested abort")
if ret != PAM_SUCCESS:
raise http.HttpUnauthorized("Authentication failed")
except:
cf.pam_end(pam_handle, ret)
raise
finally:
if authtok_copy:
cf.free(authtok_copy)
def PutPamEnvVariable(cf, pam_handle, name, value):
"""Wrapper over pam_setenv.
"""
setenv = "%s=" % name
if value:
setenv += value
ret = cf.pam_putenv(pam_handle, setenv)
if ret != PAM_SUCCESS:
raise http.HttpInternalServerError("pam_putenv call failed [%d]" % ret)
def Authorize(cf, pam_handle, uri_access_rights, uri=None, method=None,
body=None):
"""Performs authorization via PAM.
Performs two steps:
- initialize environmental variables
- call pam_acct_mgmt
"""
try:
PutPamEnvVariable(cf, pam_handle, PAM_ENV_ACCESS, uri_access_rights)
PutPamEnvVariable(cf, pam_handle, PAM_ENV_URI, uri)
PutPamEnvVariable(cf, pam_handle, PAM_ENV_METHOD, method)
PutPamEnvVariable(cf, pam_handle, PAM_ENV_BODY, body)
ret = cf.pam_acct_mgmt(pam_handle, PAM_SILENT)
if ret != PAM_SUCCESS:
raise http.HttpUnauthorized("Authorization failed")
except:
cf.pam_end(pam_handle, ret)
raise
def ValidateParams(username, _uri_access_rights, password, service, authtok,
_uri, _method, _body):
"""Checks whether ValidateRequest has been called with a correct params.
These checks includes:
- username is an obligatory parameter
- either password or authtok is an obligatory parameter
"""
if not username:
raise http.HttpUnauthorized("Username should be provided")
if not service:
raise http.HttpBadRequest("Service should be proivded")
if not password and not authtok:
raise http.HttpUnauthorized("Password or authtok should be provided")
def ValidateRequest(cf, username, uri_access_rights, password=None,
service=DEFAULT_SERVICE_NAME, authtok=None, uri=None,
method=None, body=None):
"""Checks whether it's permitted to execute an rapi request.
Calls pam_authenticate and then pam_acct_mgmt in order to check whether a
request should be executed.
@param cf: An instance of CFunctions class containing necessary imports
@param username: username
@param uri_access_rights: handler access rights
@param password: password
@param service: a service name that will be used for the interaction with PAM
@param authtok: user's authentication token (e.g. some kind of signature)
@param uri: an uri of a target resource obtained from an http header
@param method: http method trying to access the uri
@param body: a body of an RAPI request
@return: On success - authenticated user name. Throws an exception otherwise.
"""
ValidateParams(username, uri_access_rights, password, service, authtok, uri,
method, body)
def ConversationFunction(num_msg, msg, resp, _app_data_ptr):
"""Conversation function that will be provided to PAM modules.
The function replies with a password for each message with
PAM_PROMPT_ECHO_OFF style and just ignores the others.
"""
if num_msg > MAX_MSG_COUNT:
logging.warning("Too many messages passed to conv function: [%d]",
num_msg)
return PAM_BUF_ERR
response = cf.calloc(num_msg, c.sizeof(PamResponse))
if not response:
logging.warning("calloc failed in conv function")
return PAM_BUF_ERR
resp[0] = c.cast(response, c.POINTER(PamResponse))
for i in range(num_msg):
if msg[i].contents.msg_style != PAM_PROMPT_ECHO_OFF:
continue
resp.contents[i].resp = cf.strndup(password, len(password))
if not resp.contents[i].resp:
logging.warning("strndup failed in conv function")
for j in range(i):
cf.free(c.cast(resp.contents[j].resp, c.c_void_p))
cf.free(response)
return PAM_BUF_ERR
resp.contents[i].resp_retcode = 0
return PAM_SUCCESS
pam_handle = PamHandleT()
conv = PamConv(CONV_FUNC(ConversationFunction), 0)
ret = cf.pam_start(service, username, c.pointer(conv), c.pointer(pam_handle))
if ret != PAM_SUCCESS:
cf.pam_end(pam_handle, ret)
raise http.HttpInternalServerError("pam_start call failed [%d]" % ret)
Authenticate(cf, pam_handle, authtok)
Authorize(cf, pam_handle, uri_access_rights, uri, method, body)
# retrieve the authorized user name
puser = c.c_void_p()
ret = cf.pam_get_item(pam_handle, PAM_USER, c.pointer(puser))
if ret != PAM_SUCCESS or not puser:
cf.pam_end(pam_handle, ret)
raise http.HttpInternalServerError("pam_get_item call failed [%d]" % ret)
user_c_string = c.cast(puser, c.c_char_p)
cf.pam_end(pam_handle, PAM_SUCCESS)
return user_c_string.value
def MakeStringC(string):
"""Converts a string to a valid C string.
As a C side treats non-unicode strings, encode unicode string with 'ascii'.
Also ensure that C string will not be longer than MAX_STR_LENGTH in order to
prevent attacs based on too long buffers.
"""
if string is None:
return None
if isinstance(string, unicode):
string = string.encode("ascii")
if not isinstance(string, str):
return None
if len(string) <= MAX_STR_LENGTH:
return string
return string[:MAX_STR_LENGTH]
| 32.447368
| 79
| 0.701703
|
8b6521edee5c7a6f815e52a5b53c02dede9be866
| 918
|
py
|
Python
|
iconparse/image_store.py
|
donk-project/pydonk
|
50417ce9e655cdcab20918b474039426f583d6d3
|
[
"MIT"
] | null | null | null |
iconparse/image_store.py
|
donk-project/pydonk
|
50417ce9e655cdcab20918b474039426f583d6d3
|
[
"MIT"
] | null | null | null |
iconparse/image_store.py
|
donk-project/pydonk
|
50417ce9e655cdcab20918b474039426f583d6d3
|
[
"MIT"
] | null | null | null |
# Donk Project
# Copyright (c) 2021 Warriorstar Orion <orion@snowfrost.garden>
# SPDX-License-Identifier: MIT
import pathlib
from typing import Dict
from iconparse.reader import DmiData, Reader
from iconparse.extractor import Extractor
| 32.785714
| 69
| 0.676471
|
8b655e5cbdbfcf38233bc910318fcb6e68177e29
| 28
|
py
|
Python
|
network/__init__.py
|
sveatlo/inpainting
|
6870ee56beea7401aa97194f76487c391af9dd5d
|
[
"Unlicense"
] | 1
|
2021-08-08T03:17:17.000Z
|
2021-08-08T03:17:17.000Z
|
network/__init__.py
|
sveatlo/inpainting
|
6870ee56beea7401aa97194f76487c391af9dd5d
|
[
"Unlicense"
] | 6
|
2021-08-08T13:12:55.000Z
|
2022-03-13T15:26:02.000Z
|
network/__init__.py
|
sveatlo/unmasked
|
6870ee56beea7401aa97194f76487c391af9dd5d
|
[
"Unlicense"
] | null | null | null |
from .gan import SNPatchGAN
| 14
| 27
| 0.821429
|
8b666913019cd3ac664dfb714c512a8beb73daff
| 10,601
|
py
|
Python
|
mssql_backend/mssql_backend.py
|
Reposoft/trac-mssql
|
da8d8ae29ef81db39ca2d6af439d88f3d6ecfebd
|
[
"BSD-3-Clause"
] | 1
|
2021-01-27T00:21:47.000Z
|
2021-01-27T00:21:47.000Z
|
mssql_backend/mssql_backend.py
|
Reposoft/trac-mssql
|
da8d8ae29ef81db39ca2d6af439d88f3d6ecfebd
|
[
"BSD-3-Clause"
] | 1
|
2015-05-11T18:34:46.000Z
|
2017-02-12T07:07:06.000Z
|
mssql_backend/mssql_backend.py
|
Reposoft/trac-mssql
|
da8d8ae29ef81db39ca2d6af439d88f3d6ecfebd
|
[
"BSD-3-Clause"
] | 1
|
2021-01-27T00:21:50.000Z
|
2021-01-27T00:21:50.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2013 MATOBA Akihiro <matobaa+trac-hacks@gmail.com>
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution.
from trac.core import *
from trac.config import Option
from trac.core import Component, implements
from trac.db.api import ConnectionBase
from trac.db.api import DatabaseManager
from trac.db.api import IDatabaseConnector
from trac.db.api import _parse_db_str, get_column_names
from trac.db.api import ConnectionBase
from trac.db.util import ConnectionWrapper
from trac.env import IEnvironmentSetupParticipant, ISystemInfoProvider
from trac.env import BackupError
from trac.db import Table, Column
import re
try:
import pymssql as pymssql
has_mssql = True
except ImportError:
has_mssql = False
# force enables this plugin in trac-admin initenv
#enabled = BoolOption("components", "mssql_backend.*", "enabled")
# Mapping from "abstract" SQL types to DB-specific types
_type_map = {
'int64': 'bigint',
'text': 'nvarchar(512)',
}
# TODO: You cannot use MS Access because column name 'value' can seems not use via odbc.
_column_map = {
'key': '"key"',
# 'value': '"value"'
}
re_limit = re.compile(" LIMIT (\d+)( OFFSET (\d+))?", re.IGNORECASE)
re_order_by = re.compile("ORDER BY ", re.IGNORECASE)
re_where = re.compile("WHERE ", re.IGNORECASE)
re_equal = re.compile("(\w+)\s*=\s*(['\w]+|\?)", re.IGNORECASE)
re_isnull = re.compile("(\w+) IS NULL", re.IGNORECASE)
re_select = re.compile('SELECT( DISTINCT)?( TOP)?', re.IGNORECASE)
re_coalesce_equal = re.compile("(COALESCE\([^)]+\))=([^,]+)", re.IGNORECASE)
def _to_sql(table):
sql = ["CREATE TABLE %s (" % table.name]
coldefs = []
for column in table.columns:
column.name = _column_map.get(column.name, column.name)
ctype = column.type.lower()
ctype = _type_map.get(ctype, ctype)
# for SQL Server, patch for "enum" table, value is not text, use int instead.
if table.name == 'enum' and column.name == 'value':
ctype = 'int'
if (table.name, column.name) in [
('wiki', 'text'),
('report', 'query'),
('report', 'description'),
('milestone', 'description'),
('version', 'description'),
]:
ctype = 'nvarchar(MAX)'
if (table.name, column.name) in [
('ticket', 'description'),
('ticket_change', 'oldvalue'),
('ticket_change', 'newvalue'),
('ticket_custom', 'value'),
('session_attribute', 'value')
]:
ctype = 'nvarchar(4000)'
# I'm using SQL Userver 2012 Express
if column.auto_increment:
ctype = 'INT IDENTITY NOT NULL' # SQL Server Style
# ctype = 'INT UNSIGNED NOT NULL AUTO_INCREMENT' # MySQL Style
# ctype = 'SERIAL' # PGSQL Style
# ctype = "integer constraint P_%s PRIMARY KEY" % table.name # SQLite Style
else:
# if column.name in table.key or any([column.name in index.columns for index in table.indices]):
# ctype = {'ntext': 'nvarchar(255)'}.get(ctype, ctype) # SQL Server cannot use text as PK
if len(table.key) == 1 and column.name in table.key:
ctype += " constraint P_%s PRIMARY KEY" % table.name
coldefs.append(" %s %s" % (column.name, ctype))
if len(table.key) > 1:
coldefs.append(" UNIQUE (%s)" % ','.join(table.key))
sql.append(',\n'.join(coldefs) + '\n);')
yield '\n'.join(sql)
for index in table.indices:
type_ = ('INDEX', 'UNIQUE INDEX')[index.unique]
yield "CREATE %s %s_%s_idx ON %s (%s);" % (type_, table.name,
'_'.join(index.columns), table.name, ','.join(index.columns))
| 31.550595
| 111
| 0.647486
|
8b668fce877fc1e0332e1fd014c47e5007f994ff
| 6,767
|
py
|
Python
|
CertifiableBayesianInference/BayesKeras/optimizers/adam.py
|
Hongchenglong/colab
|
9cc5c15abde536493cc3f12008e791caa1d00070
|
[
"Apache-2.0"
] | null | null | null |
CertifiableBayesianInference/BayesKeras/optimizers/adam.py
|
Hongchenglong/colab
|
9cc5c15abde536493cc3f12008e791caa1d00070
|
[
"Apache-2.0"
] | null | null | null |
CertifiableBayesianInference/BayesKeras/optimizers/adam.py
|
Hongchenglong/colab
|
9cc5c15abde536493cc3f12008e791caa1d00070
|
[
"Apache-2.0"
] | null | null | null |
#Author: Matthew Wicker
# Impliments the BayesByBackprop optimizer for BayesKeras
import os
import math
import logging
import numpy as np
import tensorflow as tf
import tensorflow_probability as tfp
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tqdm import tqdm
from tqdm import trange
from BayesKeras.optimizers import optimizer
from BayesKeras.optimizers import losses
from BayesKeras import analyzers
from abc import ABC, abstractmethod
# A dumb mistake on my part which needs to be factored out
| 42.031056
| 114
| 0.612827
|
8b66b9f64668e1a15163413263d5b63cdc824a7c
| 1,435
|
py
|
Python
|
Scripts/ExplicitInstantation.py
|
fbudin69500/calatk
|
3cee90488feab7e3ef2ade1f791106aa7f11e404
|
[
"Apache-2.0"
] | 2
|
2019-09-15T12:51:02.000Z
|
2020-04-08T14:03:58.000Z
|
Scripts/ExplicitInstantation.py
|
cpatrick/calatk
|
849c17919ac5084b5b067c7631bc2aa1efd650df
|
[
"Apache-2.0"
] | null | null | null |
Scripts/ExplicitInstantation.py
|
cpatrick/calatk
|
849c17919ac5084b5b067c7631bc2aa1efd650df
|
[
"Apache-2.0"
] | 1
|
2018-10-20T16:38:28.000Z
|
2018-10-20T16:38:28.000Z
|
#!/usr/bin/env python
"""Create a .cxx file that performs explicit instantiation over float/double and
dimensions 1, 2, and 3. Writes the file to the current directory."""
usage = "ExplicitInstantiation.py <class_name>"
import sys
if len(sys.argv) < 2 or sys.argv[1] == '-h' or sys.argv[1] == '--help':
print(usage)
sys.exit(1)
copyright_header = """/*
*
* Copyright 2011 by the CALATK development team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*
*/
"""
explicit_file = open(sys.argv[1] + '.cxx', 'w')
explicit_file.write(copyright_header)
content = """
#include "{0}.txx"
namespace CALATK
{
template class {0}< float, 1 >;
template class {0}< float, 2 >;
template class {0}< float, 3 >;
template class {0}< double, 1 >;
template class {0}< double, 2 >;
template class {0}< double, 3 >;
} // namespace CALATK
""".replace('{0}', sys.argv[1])
explicit_file.write(content)
explicit_file.close()
| 26.090909
| 80
| 0.694774
|
8b67d69e37e542f410bab436a641c536c8c9539f
| 3,231
|
py
|
Python
|
aiopogo/auth_google.py
|
DennyLoko/aiopogo
|
55a9efe13c51261c68ab2abe8efc4ac69e04eb01
|
[
"MIT"
] | 14
|
2017-03-28T16:32:24.000Z
|
2021-03-13T23:03:57.000Z
|
aiopogo/auth_google.py
|
ultrafunkamsterdam/aiopogo
|
43444c994a400bc9bc8fd1ccaa6a1f79ff5df1fe
|
[
"MIT"
] | 8
|
2017-03-01T07:56:09.000Z
|
2017-08-15T07:37:12.000Z
|
aiopogo/auth_google.py
|
ultrafunkamsterdam/aiopogo
|
43444c994a400bc9bc8fd1ccaa6a1f79ff5df1fe
|
[
"MIT"
] | 14
|
2017-04-08T20:01:50.000Z
|
2017-08-19T04:23:57.000Z
|
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from time import time
try:
from gpsoauth import perform_master_login, perform_oauth
except ImportError:
perform_oauth = perform_master_login
from .auth import Auth
from .exceptions import AuthException, InvalidCredentialsException
| 36.715909
| 127
| 0.662953
|
8b68b894928fc1a47949be32739e5721fad32eb5
| 518
|
py
|
Python
|
voluseg/_tools/evenly_parallelize.py
|
jingxlim/voluseg
|
41429a73a481fbffc3a15457be262ec021304b51
|
[
"MIT"
] | 10
|
2019-11-05T18:49:50.000Z
|
2022-03-07T04:15:53.000Z
|
voluseg/_tools/evenly_parallelize.py
|
jingxlim/voluseg
|
41429a73a481fbffc3a15457be262ec021304b51
|
[
"MIT"
] | 5
|
2021-02-09T20:32:38.000Z
|
2021-03-22T16:53:40.000Z
|
voluseg/_tools/evenly_parallelize.py
|
jingxlim/voluseg
|
41429a73a481fbffc3a15457be262ec021304b51
|
[
"MIT"
] | 3
|
2019-12-09T08:30:18.000Z
|
2021-03-22T01:58:44.000Z
|
def evenly_parallelize(input_list):
'''return evenly partitioned spark resilient distributed dataset (RDD)'''
import numpy as np
from pyspark.sql.session import SparkSession
spark = SparkSession.builder.getOrCreate()
sc = spark.sparkContext
n_input = len(input_list)
n_parts = sc.parallelize(input_list).getNumPartitions()
partitions = np.floor(np.linspace(0, n_parts, n_input, endpoint=False)).astype(int)
return sc.parallelize(zip(partitions, input_list)).partitionBy(n_parts)
| 37
| 87
| 0.747104
|
8b6908539193ed05f7b55115e992b2c27664607d
| 3,153
|
py
|
Python
|
deepplats/models/utils.py
|
GuillaumeDMMarion/deep-plats
|
d1f58d9fe07a7e3e7560fd4b425234fd5512da1a
|
[
"MIT"
] | null | null | null |
deepplats/models/utils.py
|
GuillaumeDMMarion/deep-plats
|
d1f58d9fe07a7e3e7560fd4b425234fd5512da1a
|
[
"MIT"
] | null | null | null |
deepplats/models/utils.py
|
GuillaumeDMMarion/deep-plats
|
d1f58d9fe07a7e3e7560fd4b425234fd5512da1a
|
[
"MIT"
] | null | null | null |
"""Model helper module.
"""
from __future__ import annotations
from typing import Union
import numpy as np
import torch
| 29.745283
| 79
| 0.598795
|
8b69509f22f3cb70d7f8b98551364109fc2064fa
| 1,491
|
py
|
Python
|
test/utils/test_utils.py
|
Chick-star/sagemaker-xgboost-container
|
e06e278b3a34515f79fa73ab770b574b9aafe5f0
|
[
"Apache-2.0"
] | 1
|
2021-07-10T15:08:18.000Z
|
2021-07-10T15:08:18.000Z
|
test/utils/test_utils.py
|
Chick-star/sagemaker-xgboost-container
|
e06e278b3a34515f79fa73ab770b574b9aafe5f0
|
[
"Apache-2.0"
] | null | null | null |
test/utils/test_utils.py
|
Chick-star/sagemaker-xgboost-container
|
e06e278b3a34515f79fa73ab770b574b9aafe5f0
|
[
"Apache-2.0"
] | 1
|
2020-02-07T22:41:34.000Z
|
2020-02-07T22:41:34.000Z
|
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the 'License'). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the 'license' file accompanying this file. This file is
# distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
import socket
from contextlib import closing
import test.utils.local_mode as localmode
# From https://stackoverflow.com/a/45690594
| 35.5
| 84
| 0.7277
|
8b6aace89e3d825b331240e13aabc132d611171f
| 2,584
|
py
|
Python
|
setup_extension.py
|
kuwayamamasayuki/FeedValidator-extension-for-GTFS-JP
|
af01375d0cf99c671a8a49f8f3a7aac2083424bc
|
[
"Apache-2.0"
] | 1
|
2020-04-03T09:18:53.000Z
|
2020-04-03T09:18:53.000Z
|
setup_extension.py
|
kuwayamamasayuki/FeedValidator-extension-for-GTFS-JP
|
af01375d0cf99c671a8a49f8f3a7aac2083424bc
|
[
"Apache-2.0"
] | null | null | null |
setup_extension.py
|
kuwayamamasayuki/FeedValidator-extension-for-GTFS-JP
|
af01375d0cf99c671a8a49f8f3a7aac2083424bc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python2.5
# Copyright (C) 2019 KUWAYAMA, Masayuki
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright (C) 2011 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import agency
import agency_jp
import stop
import route
import route_jp
import trip
import office_jp
import fareattribute
import farerule
import shape
import feedinfo
import translation
import gtfsfactory
import schedule
| 28.711111
| 74
| 0.768576
|
8b6bbd5a925b35697b012e2714a2cfeb198264c6
| 651
|
py
|
Python
|
api/team_directory/questions/migrations/0003_auto_20200930_0947.py
|
Hipo/team-directory
|
dfc999a6b464e88c020cfebe3b569b960b5d7e3d
|
[
"MIT"
] | null | null | null |
api/team_directory/questions/migrations/0003_auto_20200930_0947.py
|
Hipo/team-directory
|
dfc999a6b464e88c020cfebe3b569b960b5d7e3d
|
[
"MIT"
] | 2
|
2020-06-05T23:54:21.000Z
|
2020-09-30T12:50:16.000Z
|
api/team_directory/questions/migrations/0003_auto_20200930_0947.py
|
Hipo/team-directory
|
dfc999a6b464e88c020cfebe3b569b960b5d7e3d
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.5 on 2020-09-30 09:47
from django.db import migrations, models
| 28.304348
| 208
| 0.58679
|
8b6d14070d60a3432471d3e5b7787427ad3b6a3d
| 565
|
py
|
Python
|
CursoemVideo/Desafio076.py
|
davihonorato/Curso-python
|
47e6b4b2f5b37ef520b8b31d37dba0b5d259a0b0
|
[
"MIT"
] | null | null | null |
CursoemVideo/Desafio076.py
|
davihonorato/Curso-python
|
47e6b4b2f5b37ef520b8b31d37dba0b5d259a0b0
|
[
"MIT"
] | null | null | null |
CursoemVideo/Desafio076.py
|
davihonorato/Curso-python
|
47e6b4b2f5b37ef520b8b31d37dba0b5d259a0b0
|
[
"MIT"
] | null | null | null |
# Exerccio Python 076: Crie um programa que tenha uma tupla nica com nomes de produtos e seus respectivos preos, na sequncia.
# No final, mostre uma listagem de preos, organizando os dados em forma tabular.
produtos = ('LPIS', 1.75,
'BORRACHA', 2,
'CADERNO', 20,
'CANETAS', 7,
'MOCHILA', 120)
print('-'*40)
print(f'{"PRODUTOS":^40}')
print('-'*40)
for c in range(0, len(produtos)):
if c % 2 == 0:
print(f'{produtos[c]:.<30}', end='R$')
else:
print(f'{produtos[c]:>7.2f}')
print('-'*40)
| 31.388889
| 129
| 0.580531
|
8b6db37566c6d60a2bd9e55330800dc0a7ad705e
| 8,558
|
py
|
Python
|
tests/test_simba.py
|
SIMBAChain/libsimba.py-platform
|
a815105a5ed84564c7eafbe01281473cebfb44e5
|
[
"MIT"
] | null | null | null |
tests/test_simba.py
|
SIMBAChain/libsimba.py-platform
|
a815105a5ed84564c7eafbe01281473cebfb44e5
|
[
"MIT"
] | 2
|
2022-02-25T05:03:13.000Z
|
2022-03-09T13:56:56.000Z
|
tests/test_simba.py
|
SIMBAChain/libsimba.py-platform
|
a815105a5ed84564c7eafbe01281473cebfb44e5
|
[
"MIT"
] | null | null | null |
import unittest
from unittest.mock import patch
from libsimba.simba import Simba
| 38.9
| 113
| 0.592545
|
8b6dc47fa5a53a344b6d3a7e96adce1b89de4411
| 521
|
py
|
Python
|
projects/golem_integration/tests/actions/wait_for_element_enabled.py
|
kangchenwei/keyautotest2
|
f980d46cabfc128b2099af3d33968f236923063f
|
[
"MIT"
] | null | null | null |
projects/golem_integration/tests/actions/wait_for_element_enabled.py
|
kangchenwei/keyautotest2
|
f980d46cabfc128b2099af3d33968f236923063f
|
[
"MIT"
] | null | null | null |
projects/golem_integration/tests/actions/wait_for_element_enabled.py
|
kangchenwei/keyautotest2
|
f980d46cabfc128b2099af3d33968f236923063f
|
[
"MIT"
] | null | null | null |
from golem import actions
description = 'Verify wait_for_element_enabled action'
| 34.733333
| 85
| 0.729367
|
8b6f0fc1892ec8aa8153dba6ca257fd87d9c6c75
| 4,263
|
py
|
Python
|
Sketches/THF/3D/playground/SimpleCube.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 12
|
2015-10-20T10:22:01.000Z
|
2021-07-19T10:09:44.000Z
|
Sketches/THF/3D/playground/SimpleCube.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 2
|
2015-10-20T10:22:55.000Z
|
2017-02-13T11:05:25.000Z
|
Sketches/THF/3D/playground/SimpleCube.py
|
sparkslabs/kamaelia_orig
|
24b5f855a63421a1f7c6c7a35a7f4629ed955316
|
[
"Apache-2.0"
] | 6
|
2015-03-09T12:51:59.000Z
|
2020-03-01T13:06:21.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 British Broadcasting Corporation and Kamaelia Contributors(1)
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://www.kamaelia.org/AUTHORS - please extend this file,
# not this notice.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -------------------------------------------------------------------------
"""\
=====================
Simple Cube component
=====================
TODO
"""
import Axon
import pygame
from pygame.locals import *
from OpenGL.GL import *
from OpenGL.GLU import *
from Display3D import Display3D
from Util3D import *
from Object3D import *
if __name__=='__main__':
from Kamaelia.Util.Graphline import Graphline
CUBEC = SimpleCube(pos=Vector(0, 0,-12), name="Center cube").activate()
CUBER = SimpleCube(pos=Vector(4,0,-22), name="Right cube").activate()
CUBEB = SimpleCube(pos=Vector(0,-4,-18), name="Bottom cube").activate()
ROTATOR = CubeRotator().activate()
ROTATOR.link((ROTATOR, "outbox"), (CUBEC, "rel_rotation"))
Axon.Scheduler.scheduler.run.runThreads()
| 31.577778
| 98
| 0.546798
|
8b71d0b65eecf04d767d50cdc3d7516cf1940fbe
| 236
|
py
|
Python
|
routers.py
|
gabrielangelo/revelo-wallet
|
3e91117b673e5aaf50773aa180af4117235965c9
|
[
"BSD-3-Clause"
] | null | null | null |
routers.py
|
gabrielangelo/revelo-wallet
|
3e91117b673e5aaf50773aa180af4117235965c9
|
[
"BSD-3-Clause"
] | 8
|
2020-02-11T23:50:12.000Z
|
2022-03-14T22:51:54.000Z
|
routers.py
|
gabrielangelo/revelo-wallet
|
3e91117b673e5aaf50773aa180af4117235965c9
|
[
"BSD-3-Clause"
] | null | null | null |
from rest_framework.routers import SimpleRouter
from transactions.api.views import TransactionsViewSet
router_v1 = SimpleRouter(trailing_slash=False)
router_v1.register(r'transactions', TransactionsViewSet, base_name='transactions')
| 33.714286
| 82
| 0.855932
|
8b72e1cc46246e65f5c4487e4423aa24c3c70e6e
| 8,480
|
py
|
Python
|
plugins/modules/waf_domain.py
|
schrej/ansible-collection-cloud
|
1fa1d18aaa06178616af17d8240e8fc5d13a370c
|
[
"Apache-2.0"
] | 16
|
2020-09-22T14:45:52.000Z
|
2022-02-11T07:56:38.000Z
|
plugins/modules/waf_domain.py
|
schrej/ansible-collection-cloud
|
1fa1d18aaa06178616af17d8240e8fc5d13a370c
|
[
"Apache-2.0"
] | 153
|
2020-08-20T14:00:55.000Z
|
2022-03-30T13:48:51.000Z
|
plugins/modules/waf_domain.py
|
schrej/ansible-collection-cloud
|
1fa1d18aaa06178616af17d8240e8fc5d13a370c
|
[
"Apache-2.0"
] | 11
|
2020-09-01T12:21:09.000Z
|
2021-12-23T09:48:34.000Z
|
#!/usr/bin/python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
DOCUMENTATION = '''
---
module: waf_domain
short_description: Add/Modify/Delete WAF domain
extends_documentation_fragment: opentelekomcloud.cloud.otc
version_added: "0.0.3"
author: "Anton Sidelnikov (@anton-sidelnikov)"
description:
- Add/Modify/Delete WAF domain from the OTC.
options:
name:
description: Specifies the domain name.
required: true
type: str
certificate:
description: Specifies the certificate.
type: str
server:
description: Specifies the origin server information.
Each element contains client_protocol (HTTP or HTTPS),
server_protocol (HTTP or HTTPS),
address (IP address or domain name),
port (from 0 to 65535)
type: list
elements: dict
proxy:
description: Specifies whether a proxy is configured.
type: bool
sip_header_name:
description: Specifies the type of the source IP header.
choices: [default, cloudflare, akamai, custom]
type: str
sip_header_list:
description: Specifies the HTTP request header
for identifying the real source IP address.
type: list
elements: str
state:
description:
- Should the resource be present or absent.
choices: [present, absent]
default: present
type: str
requirements: ["openstacksdk", "otcextensions"]
'''
RETURN = '''
waf_domain:
description: List of dictionaries describing domains matching query.
type: complex
returned: On Success.
contains:
id:
description: Specifies the instance ID.
type: str
hostname:
description: Specifies the domain name.
type: str
cname:
description: Specifies the CNAME value.
type: str
sample: "efec1196267b41c399f2980ea4048517.waf.cloud.com."
policy_id:
description: Specifies the policy ID.
type: str
protect_status:
description: Specifies the WAF mode.
type: int
access_status:
description: Specifies whether a domain name is connected to WAF.
type: int
protocol:
description: Specifies the protocol type.
type: str
certificate_id:
description: Specifies the certificate ID.
type: str
server:
description: Specifies the origin server information.
type: dict
proxy:
description: Specifies whether a proxy is configured.
type: bool
timestamp:
description: Specifies the time when a domain name is created.
type: str
'''
EXAMPLES = '''
# Create Domain.
- waf_domain:
name: test.domain.name
server:
- client_protocol: https
server_protocol: https
address: 4.3.2.1
port: 8080
proxy: False
state: present
# Modify Domain.
- waf_domain:
name: "{{ domain_name }}"
certificate: "{{ cert_name }}"
# Delete Domain.
- waf_domain:
name: "{{ domain_id }}"
state: absent
'''
from ansible_collections.opentelekomcloud.cloud.plugins.module_utils.otc import OTCModule
if __name__ == '__main__':
main()
| 33.254902
| 100
| 0.610024
|
8b73af8b167c0c808ac06e682936f0020d7644ea
| 2,104
|
py
|
Python
|
python/raft/NodeState.py
|
chenzhaoplus/vraft
|
73fe880289061cfbb62aa33b8e5c7d012543bb9d
|
[
"Apache-2.0"
] | 23
|
2020-05-17T04:22:17.000Z
|
2022-02-22T02:09:34.000Z
|
python/raft/NodeState.py
|
chenzhaoplus/vraft
|
73fe880289061cfbb62aa33b8e5c7d012543bb9d
|
[
"Apache-2.0"
] | 1
|
2020-10-22T11:47:54.000Z
|
2020-10-22T11:47:54.000Z
|
python/raft/NodeState.py
|
chenzhaoplus/vraft
|
73fe880289061cfbb62aa33b8e5c7d012543bb9d
|
[
"Apache-2.0"
] | 11
|
2020-07-11T07:12:19.000Z
|
2022-03-23T08:24:15.000Z
|
import collections
from cluster import Cluster
import logging
logging.basicConfig(format='%(asctime)s - %(levelname)s: %(message)s', datefmt='%H:%M:%S', level=logging.INFO)
VoteResult = collections.namedtuple('VoteResult', ['term', 'vote_granted', 'id'])
| 42.08
| 113
| 0.66635
|
8b73f4c7986a2f8c2bd94b366f876d38ceb6a037
| 303
|
py
|
Python
|
06source_code/service-center/language-service/bin/language_service.py
|
dumengnan/unicorn
|
165330ff8e01bc18e3eca2d8ecf23b5d955f155b
|
[
"Apache-2.0"
] | null | null | null |
06source_code/service-center/language-service/bin/language_service.py
|
dumengnan/unicorn
|
165330ff8e01bc18e3eca2d8ecf23b5d955f155b
|
[
"Apache-2.0"
] | 8
|
2020-01-28T22:31:03.000Z
|
2022-03-02T03:37:47.000Z
|
06source_code/service-center/language-service/bin/language_service.py
|
dumengnan/unicorn
|
165330ff8e01bc18e3eca2d8ecf23b5d955f155b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# encoding: utf-8
import _load_lib
import sys
import logging
import os
from unicorn.language.app\
import main as languae_main
if __name__ == '__main__':
try:
languae_main()
except Exception as ex:
logging.exception("main except")
os._exit(1)
| 16.833333
| 40
| 0.673267
|
8b75a7eaacdb476c970a5cf2013b558edc778b20
| 10,303
|
py
|
Python
|
incremental_evaluation_run.py
|
comrob/ensgendel
|
4958d588a30a5bc60c6e7af5abb2b830b1265a25
|
[
"BSD-3-Clause"
] | null | null | null |
incremental_evaluation_run.py
|
comrob/ensgendel
|
4958d588a30a5bc60c6e7af5abb2b830b1265a25
|
[
"BSD-3-Clause"
] | null | null | null |
incremental_evaluation_run.py
|
comrob/ensgendel
|
4958d588a30a5bc60c6e7af5abb2b830b1265a25
|
[
"BSD-3-Clause"
] | null | null | null |
import incremental_evaluation.utils as IE
import incremental_evaluation.scenario_sets as SS
import incremental_evaluation.visualisation_helper as VH
import models.basic_predictor_interfaces
import models.ensgendel_interface
import incremental_evaluation.data_file_helper as DFH
import os
import argparse
SS_MNIST012 = "mnist012"
SS_MNIST197 = "mnist197"
SS_MNIST_CN5 = "mnist_cn5"
SS_GAUSS3 = "gauss_3"
RESULTS = os.path.join("results", "incremental_evaluation_run")
parser = argparse.ArgumentParser(description="EnsGenDel algorithm & Incremental evaluation framework.\n"
"The continual learning algorithms are evaluated in predefined scenarios."
"For example: [{0:[9,7]}, {0:[8], 1:[7]}] is a scenario of two tasks."
"In the first task {0: [9, 7]} the predictor gets training instances of "
"nines and sevens images labeled as 0. In the second task {0:[8], 1:[7]} "
"the predictor gets training instances of eights labeled as 0 and "
"sevens labeled as 1. Note that the sevens changed the label. After the "
"second task the predictor should classify nines and eights as 0 and "
"sevens as 1.\n"
"The scenario is encoded into bracket-less notation in filenames, e.g., "
"[{0:[9,7]}, {0:[8], 1:[7]}] -> T0x97T0x8a1x7 (any resemblance with "
"hexadecimals is purely coincidental).")
parser.add_argument('experiment_name', help="Experiment name which will be in file prefix.")
parser.add_argument('scenario_name', help="Select the scenario. One of the following: " + str([
SS_MNIST012, SS_MNIST197, SS_MNIST_CN5, SS_GAUSS3]) + "The scenario name is appended after experiment_name.")
parser.add_argument('modes',
help="Series of numbers activating five modes of this application:"
"1:scenario preview; 2:predictor training; 3:debug evaluation; "
"4:generate csv table with evaluation stats; 5:generate accuracy plots"
";e.g., '24' trains the predictors and then generates csv table with results.")
parser.add_argument('--trials', type=int, default=1, help="Number of independent runs. The trial number is appended "
"in the postfix of the file.")
parser.add_argument('--trials_from', type=int, default=0, help="Index of the first trial.")
parser.add_argument('--scout_number', type=int, default=-1, help="Cropping the training set. Speeding up the training "
"at the cost of less accuracy.")
parser.add_argument("--debug", default=False, type=bool, help="Runs only light weight models. True/False")
if __name__ == '__main__':
args = parser.parse_args()
# Experiment setup
trial_tags = [i for i in range(args.trials_from, args.trials_from + args.trials)]
experiment_name = args.experiment_name
scout_subset = args.scout_number if args.scout_number > 0 else None
scenario_set_name = args.scenario_name
mode = list(map(int, args.modes))
# mode += [1] # show scenario data
# mode += [2] # run predictor learning on scenarios
# mode += [3] # evaluate predictors scenarios
# mode += [4] # write accuracy statistics into table
# mode += [5] # write accuracy statistics into table
# list of predictor classes that implement the incremental_evaluation.interfaces.Predictor
if args.debug:
predictor_builders = [
models.basic_predictor_interfaces.SGD,
models.basic_predictor_interfaces.Perceptron,
]
else:
predictor_builders = [
models.ensgendel_interface.Ensgendel,
models.ensgendel_interface.Ensgen,
models.ensgendel_interface.Ens,
models.basic_predictor_interfaces.Perceptron,
]
# scenario sets implementing the incremental_evaluation.interfaces.ScenarioSet
if scenario_set_name == SS_MNIST012:
scenario_set = SS.MnistMinimalScenarios(digits_tripplet=(0, 1, 2), debug_set=False, scout_subset=scout_subset)
visualiser = VH.mnist_visualiser
elif scenario_set_name == SS_MNIST197:
scenario_set = SS.MnistMinimalScenarios(digits_tripplet=(1, 9, 7), debug_set=False, scout_subset=scout_subset)
visualiser = VH.mnist_visualiser
elif scenario_set_name == SS_MNIST_CN5:
scenario_set = SS.MnistConvergentFiveScenarios(scout_subset=scout_subset)
visualiser = VH.mnist_visualiser
elif scenario_set_name == SS_GAUSS3:
scenario_set = SS.Gauss3DMinimalScenarios(train_size=scout_subset)
visualiser = VH.gauss3d_visualiser
else:
raise NotImplementedError(scenario_set_name)
# setting up basic directories
if not os.path.exists("results"):
os.mkdir("results")
if not os.path.exists(RESULTS):
os.mkdir(RESULTS)
# Pre-flight check of the scenario
if 1 in mode:
scenarios = scenario_set.get_scenarios()
train_sam, train_sub = scenario_set.get_training_set()
test_sam, test_sub = scenario_set.get_test_set()
for scenario in scenarios:
folder_name = "preview_{}".format(VH.scenario_into_filename(str(scenario)))
folder_path = os.path.join(RESULTS, folder_name)
if not os.path.exists(folder_path):
os.mkdir(folder_path)
VH.show_scenario(scenario, test_sam, test_sub, visualiser, save_into=folder_path)
# Cycle of experiment runs
for trial_tag in trial_tags:
experiment_path = datafile_path(experiment_name, scenario_set_name, trial_tag)
if not os.path.exists(experiment_path):
os.mkdir(experiment_path)
if 2 in mode:
DFH.run_and_save(predictor_builders, scenario_set, experiment_path)
if 3 in mode:
evals = DFH.datafile_evaluation(experiment_path, {
DFH.TOTAL_ACCURACY: IE.evaluate_task_total_accuracy,
DFH.LOCAL_ACCURACY: IE.evaluate_task_accuracy,
DFH.SUBCLASS_ACCURACY: IE.evaluate_subclass_accuracy,
})
print(evals)
# Stats evaluation
files = [datafile_path(experiment_name, scenario_set_name, trial_tag) for trial_tag in trial_tags]
portfolio = dict([(str(clazz), files) for clazz in predictor_builders])
if 4 in mode:
eval_stats_total = DFH.extract_stats_for_portfolio(portfolio, over_testing_set=True,
task_accuracy_type=DFH.TOTAL_ACCURACY)
table = VH.stats_into_text_table(eval_stats_total, stat_cell_format, cell_join=';', row_join='\n')
print(table)
table_path = os.path.join(RESULTS, "{}_{}_total_accuracy.csv".format(experiment_name, scenario_set_name))
with open(table_path, "w") as fil:
fil.write(table)
print("Saved stats of total accuracy into {}".format(table_path))
if 5 in mode:
figure_styles = [
[("color", "r"), ("marker", "o")],
[("color", "g"), ("marker", "^")],
[("color", "b"), ("marker", "x")],
[("color", "c"), ("marker", "s")],
[("color", "m"), ("marker", "d")],
[("color", "y"), ("marker", "+")],
[("color", "k"), ("marker", "*")],
]
classifier_style = dict(
[(str(clazz), dict([("label", clazz.__name__)] + figure_styles[i % len(figure_styles)]))
for i, clazz in enumerate(predictor_builders)]
)
eval_stats_total = DFH.extract_stats_for_portfolio(portfolio, over_testing_set=True,
task_accuracy_type=DFH.TOTAL_ACCURACY)
scenarios = list(eval_stats_total[list(eval_stats_total.keys())[0]].keys())
print(scenarios)
for i, scenario in enumerate(scenarios):
# picking subclass for tracking
scenario_obj = eval(scenario)
tracked_label = list(scenario_obj[0].keys())[0]
tracked_subclass = scenario_obj[0][tracked_label][-1]
# tracking the selected subclass label assignment
eval_stats_tracked = DFH.extract_stats_for_portfolio(
portfolio, over_testing_set=True, task_accuracy_type=None, evaluator=tracked_evaluation)
# titles and names
_scenario_str = scenario
if type(scenario) is bytes:
_scenario_str = scenario.decode('ASCII') # sometimes hdf5 returns bytes instead of strings
test_task = str(IE.get_perfect_task_map(scenario_obj, len(scenario_obj) - 1))
tracked_task = "{{{}: [{}]}}".format(tracked_label, tracked_subclass)
title = "Scenario: {}\ntest task {}(full), tracked assignment {}(dashed)".format(
_scenario_str, test_task, tracked_task)
# visualisaiton
fig_path = os.path.join(RESULTS, "{}_{}_{}_accuracy.pdf".format(experiment_name, scenario_set_name,
VH.scenario_into_filename(_scenario_str)))
VH.show_metric_evol(eval_stats_total, scenario, classifier_style,
fig_path=fig_path, tracked_eval_stats=eval_stats_tracked, title=title)
print("fig of scenario {} saved into {}".format(scenario, fig_path))
| 54.803191
| 119
| 0.625837
|
8b76c38f1e29d8bf142d3e3373941067e32aadc6
| 15,792
|
py
|
Python
|
core/models.py
|
admariner/madewithwagtail
|
a43b3263c0f151ece4994fccd561b0575db4979f
|
[
"MIT"
] | null | null | null |
core/models.py
|
admariner/madewithwagtail
|
a43b3263c0f151ece4994fccd561b0575db4979f
|
[
"MIT"
] | null | null | null |
core/models.py
|
admariner/madewithwagtail
|
a43b3263c0f151ece4994fccd561b0575db4979f
|
[
"MIT"
] | null | null | null |
import os
import re
from bs4 import BeautifulSoup
from django.core.exceptions import ObjectDoesNotExist
from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator
from django.db import models
from django.db.models import Case, Count, Q, Value, When
from django.utils.encoding import python_2_unicode_compatible
from django.utils.html import mark_safe
from modelcluster.fields import ParentalKey
from modelcluster.tags import ClusterTaggableManager
from taggit.models import Tag, TaggedItemBase
from core import panels
from core.forms import SubmitFormBuilder
from core.utilities import has_recaptcha, validate_only_one_instance
from wagtail.wagtailcore.fields import RichTextField
from wagtail.wagtailcore.models import Page
from wagtail.wagtailforms.models import AbstractEmailForm, AbstractFormField
from wagtail.wagtailsearch import index
from wagtailcaptcha.models import WagtailCaptchaEmailForm
# Main core Page model. All main content pages inherit from this class.
def get_context(self, request, *args, **kwargs):
    """Build the template context, adding a paginated list of child pages.

    Reads the requested page number from the ``page`` query parameter and
    paginates ``self.children()`` twelve items at a time. A missing or
    non-numeric page number falls back to the first page; a number past
    the end falls back to the last page.
    """
    paginator = Paginator(self.children(), 12)  # 12 child pages per page
    requested_page = request.GET.get('page')
    try:
        child_pages = paginator.page(requested_page)
    except PageNotAnInteger:
        # Missing or non-numeric page number -> first page
        child_pages = paginator.page(1)
    except EmptyPage:
        # Page number beyond the last page -> last page
        child_pages = paginator.page(paginator.num_pages)
    context = super(WagtailCompanyPage, self).get_context(request, *args, **kwargs)
    context['pages'] = child_pages
    return context
# Wagtail admin edit-handler configuration, defined centrally in core.panels.
content_panels = panels.WAGTAIL_COMPANY_PAGE_CONTENT_PANELS
settings_panels = panels.WAGTAIL_COMPANY_PAGE_SETTINGS_PANELS
| 31.967611
| 117
| 0.634435
|
8b77b588bbd23056762b56f743c2a98bf3afca31
| 868
|
py
|
Python
|
calvin/runtime/south/plugins/storage/twistedimpl/securedht/tests/cert_script.py
|
josrolgil/exjobbCalvin
|
976459eaa50246586360c049b9880d753623d574
|
[
"Apache-2.0"
] | 1
|
2016-05-10T22:36:31.000Z
|
2016-05-10T22:36:31.000Z
|
calvin/runtime/south/plugins/storage/twistedimpl/securedht/tests/cert_script.py
|
josrolgil/exjobbCalvin
|
976459eaa50246586360c049b9880d753623d574
|
[
"Apache-2.0"
] | null | null | null |
calvin/runtime/south/plugins/storage/twistedimpl/securedht/tests/cert_script.py
|
josrolgil/exjobbCalvin
|
976459eaa50246586360c049b9880d753623d574
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Python 2 helper script: creates a test certificate domain and signs
# certificate requests for a grid of runtimes plus one "evil" runtime.
from calvin.utilities import certificate
import os
print "Trying to create a new domain configuration."
testconfig = certificate.Config(domain="test")
# testconfig2 = certificate.Config(domain="evil")
print "Reading configuration successfull."
print "Creating new domain."
certificate.new_domain(testconfig)
# certificate.new_domain(testconfig2)
print "Created new domain."
# Issue and sign certificates for runtimes node1:0 .. node4:5.
for i in range(1, 5):
    for j in range(0, 6):
        name = "node{}:{}".format(i, j)
        certreq = certificate.new_runtime(testconfig, name)
        certificate.sign_req(testconfig, os.path.basename(certreq), name)
# One extra runtime named "evil", signed by the same test domain
# (presumably used as a rogue node in the security tests — confirm).
certreq = certificate.new_runtime(testconfig, "evil")
certificate.sign_req(testconfig, os.path.basename(certreq), "evil")
# certreq = certificate.new_runtime(testconfig, "evil2")
# certificate.sign_req(testconfig2, os.path.basename(certreq), "evil2")
| 36.166667
| 73
| 0.75
|
8b78243e120efed83eabeee3ef9fab1fbb90cb9c
| 2,526
|
py
|
Python
|
users/migrations/0001_initial.py
|
bhaveshpraveen/VIT-Pugaar
|
0a33b264939287071ddaffef4ab1f2ef9a38de87
|
[
"MIT"
] | null | null | null |
users/migrations/0001_initial.py
|
bhaveshpraveen/VIT-Pugaar
|
0a33b264939287071ddaffef4ab1f2ef9a38de87
|
[
"MIT"
] | 6
|
2017-11-11T08:43:55.000Z
|
2021-06-10T19:38:24.000Z
|
users/migrations/0001_initial.py
|
bhaveshpraveen/VIT-Pugaar
|
0a33b264939287071ddaffef4ab1f2ef9a38de87
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-10-23 05:30
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
| 56.133333
| 266
| 0.641726
|
8b789e81aabae4e0b2a7953dacad2a13826e5a3e
| 93
|
py
|
Python
|
runoob100/032.py
|
GenweiWu/PythonDemo
|
957bacb6fc0eb0bc37c4af7a64220d8aa58189ba
|
[
"MIT"
] | null | null | null |
runoob100/032.py
|
GenweiWu/PythonDemo
|
957bacb6fc0eb0bc37c4af7a64220d8aa58189ba
|
[
"MIT"
] | null | null | null |
runoob100/032.py
|
GenweiWu/PythonDemo
|
957bacb6fc0eb0bc37c4af7a64220d8aa58189ba
|
[
"MIT"
] | null | null | null |
# _*_ coding:utf-8 _*_
#
# Python 2 demo: build a mixed-type list, print it, then print a reversed copy.
arr=["aaa",True,100,"ccc"]
print arr
print arr[::-1]  # [::-1] produces a reversed copy without mutating arr
| 11.625
| 26
| 0.634409
|
8b790365631a765420b493cba01b292fac4bc258
| 475
|
py
|
Python
|
ArrangingCoins.py
|
Jcarlos0828/LeetCode-PracticeResults
|
73566a131629038caf2555eaf4999379227ec369
|
[
"MIT"
] | 1
|
2019-06-26T22:44:16.000Z
|
2019-06-26T22:44:16.000Z
|
ArrangingCoins.py
|
Jcarlos0828/LeetCode-PracticeResults
|
73566a131629038caf2555eaf4999379227ec369
|
[
"MIT"
] | null | null | null |
ArrangingCoins.py
|
Jcarlos0828/LeetCode-PracticeResults
|
73566a131629038caf2555eaf4999379227ec369
|
[
"MIT"
] | null | null | null |
'''
EASY 441. Arranging Coins
You have a total of n coins that you want to form in
a staircase shape, where every k-th row must have exactly k coins.
'''
| 26.388889
| 66
| 0.511579
|
8b7cf31b94df4dc51935676b554357efa86d4611
| 1,167
|
py
|
Python
|
stable_projects/predict_phenotypes/Nguyen2020_RNNAD/cbig/Nguyen2020/test_rnn.py
|
marielacour81/CBIG
|
511af756c6ddabbd3a9681ce3514b79ef5aaaf3f
|
[
"MIT"
] | 6
|
2020-03-03T22:23:07.000Z
|
2021-11-27T06:11:02.000Z
|
stable_projects/predict_phenotypes/Nguyen2020_RNNAD/cbig/Nguyen2020/test_rnn.py
|
marielacour81/CBIG
|
511af756c6ddabbd3a9681ce3514b79ef5aaaf3f
|
[
"MIT"
] | null | null | null |
stable_projects/predict_phenotypes/Nguyen2020_RNNAD/cbig/Nguyen2020/test_rnn.py
|
marielacour81/CBIG
|
511af756c6ddabbd3a9681ce3514b79ef5aaaf3f
|
[
"MIT"
] | 2
|
2020-05-27T20:24:03.000Z
|
2021-04-14T07:51:44.000Z
|
# Written by Minh Nguyen and CBIG under MIT license:
# https://github.com/ThomasYeoLab/CBIG/blob/master/LICENSE.md
import unittest
import torch
import cbig.Nguyen2020.rnn as rnn
| 33.342857
| 73
| 0.658098
|
8b7e0f2a1f8d7363c4a7045709aa260449c86b2e
| 4,816
|
py
|
Python
|
mysite/myapp/forms.py
|
MarkArren/PhotoSocial
|
bb401f465a464e7cf6a7fac184cef0d40e0a9525
|
[
"MIT"
] | null | null | null |
mysite/myapp/forms.py
|
MarkArren/PhotoSocial
|
bb401f465a464e7cf6a7fac184cef0d40e0a9525
|
[
"MIT"
] | null | null | null |
mysite/myapp/forms.py
|
MarkArren/PhotoSocial
|
bb401f465a464e7cf6a7fac184cef0d40e0a9525
|
[
"MIT"
] | null | null | null |
from django import forms
from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from django.core.validators import EmailValidator
from . import models
from .models import ProfileModel
from io import BytesIO
from PIL import Image, ExifTags
from django.core.files import File
# class PostForm(ModelForm):
# class meta:
# model = models.PostModel
# fields = ('image', 'caption', 'location')
def must_be_unique_email(value):
    """Form-field validator: reject *value* if a User with that email exists.

    Raises ``forms.ValidationError`` when the email is already taken;
    otherwise returns the value unchanged (Django validator convention).
    """
    # .exists() issues a cheap EXISTS query instead of materializing the
    # whole result set the way len(QuerySet) does.
    if User.objects.filter(email=value).exists():
        raise forms.ValidationError("Email Already Exists")
    return value
def must_be_unique_username(value):
    """Form-field validator: reject *value* if a User with that username exists.

    Raises ``forms.ValidationError`` when the username is already taken;
    otherwise returns the value unchanged (Django validator convention).
    """
    # .exists() issues a cheap EXISTS query instead of materializing the
    # whole result set the way len(QuerySet) does.
    if User.objects.filter(username=value).exists():
        raise forms.ValidationError("Username Already Exists")
    return value
# class ProfileForm(forms.Form):
# profilePicture = forms.ImageField(label="Profile Picture", required=False)
# bio = forms.CharField(label="Bio", max_length=512, required=False)
# def save(self, request):
# profileInstance = models.PostModel()
# postInstance.user = request.user
# profileInstance.profilePicture = self.cleaned_data["profilePicture"]
# profileInstance.bio = self.cleaned_data["bio"]
# profileInstance.save()
# return profileInstance
| 28.163743
| 135
| 0.706811
|
8b80a8a516beaa5b7d7dde65eb8c098754473d58
| 1,442
|
py
|
Python
|
up/tasks/sparse/models/heads/cls_head.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 196
|
2021-10-30T05:15:36.000Z
|
2022-03-30T18:43:40.000Z
|
up/tasks/sparse/models/heads/cls_head.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 12
|
2021-10-30T11:33:28.000Z
|
2022-03-31T14:22:58.000Z
|
up/tasks/sparse/models/heads/cls_head.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 23
|
2021-11-01T07:26:17.000Z
|
2022-03-27T05:55:37.000Z
|
from up.utils.general.registry_factory import MODULE_ZOO_REGISTRY
from up.tasks.cls.models.heads import BaseClsHead, ConvNeXtHead
__all__ = ['SparseBaseClsHead', 'SparseConvNeXtHead']
| 36.974359
| 109
| 0.647018
|
8b8159fcb82d3a08050148abdcf3102b1846cbb7
| 4,753
|
py
|
Python
|
app/xl/long_runner.py
|
evgeniyabrosin/anfisa
|
ac4aef1a816de05ee2a45aa5b220e2baf93574de
|
[
"Apache-2.0"
] | 8
|
2019-03-26T16:07:46.000Z
|
2021-12-30T13:38:06.000Z
|
app/xl/long_runner.py
|
evgeniyabrosin/anfisa
|
ac4aef1a816de05ee2a45aa5b220e2baf93574de
|
[
"Apache-2.0"
] | 13
|
2018-11-07T19:37:20.000Z
|
2022-02-21T17:11:45.000Z
|
app/xl/long_runner.py
|
evgeniyabrosin/anfisa
|
ac4aef1a816de05ee2a45aa5b220e2baf93574de
|
[
"Apache-2.0"
] | 15
|
2018-10-16T08:15:11.000Z
|
2022-02-21T14:07:29.000Z
|
# Copyright (c) 2019. Partners HealthCare and other members of
# Forome Association
#
# Developed by Sergey Trifonov based on contributions by Joel Krier,
# Michael Bouzinier, Shamil Sunyaev and other members of Division of
# Genetics, Brigham and Women's Hospital
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from threading import Condition
from datetime import datetime
from forome_tools.job_pool import ExecutionTask
from forome_tools.log_err import logException
from app.config.a_config import AnfisaConfig
#===============================================
| 39.608333
| 77
| 0.564696
|
8b8201f75514c47ff34e925027bea925196f4d34
| 23,209
|
py
|
Python
|
cosmos_virtual_assistant_uf.py
|
Nishit014/COSMOS
|
3042377715f6f4b0eb0a75b6b360415a965754df
|
[
"MIT"
] | 1
|
2021-06-27T11:53:43.000Z
|
2021-06-27T11:53:43.000Z
|
cosmos_virtual_assistant_uf.py
|
Aayush9027/COSMOS_VIRTUAL_ASSISTANT
|
d02aa04a66b2acdfeaf9270607059182f54e78a5
|
[
"MIT"
] | null | null | null |
cosmos_virtual_assistant_uf.py
|
Aayush9027/COSMOS_VIRTUAL_ASSISTANT
|
d02aa04a66b2acdfeaf9270607059182f54e78a5
|
[
"MIT"
] | 1
|
2021-06-25T12:04:24.000Z
|
2021-06-25T12:04:24.000Z
|
import pyttsx3
import speech_recognition as sr
import os
import subprocess
#from requests import request , session
#from pprint import pprint as pp
import json
import requests
import datetime
from datetime import date
import time
import calendar
import warnings
import random
import wikipedia
import webbrowser
from pywhatkit import sendwhatmsg_instantly
import smtplib
import sys
import pyjokes
import pyautogui
import PyPDF2
from tkinter.filedialog import *
import psutil
import speedtest
import wolframalpha
# Silence all warnings to keep the assistant's console output clean.
warnings.filterwarnings("ignore")  # ignoring all the warnings

# Initialise the text-to-speech engine with a platform-appropriate driver.
if sys.platform == "win32":
    engine=pyttsx3.init('sapi5')
    voices=engine.getProperty('voices')
    # Voice index 1 chosen from the installed SAPI5 voices — which voice this
    # is depends on the machine; TODO confirm on the target system.
    engine.setProperty('voice',voices[1].id)
else:
    # NOTE(review): 'nsss' exists only on macOS; Linux would need 'espeak'.
    # Confirm which non-Windows platforms are actually supported.
    engine=pyttsx3.init('nsss') #sapi5 - SAPI5 on Windows #nsss - NSSpeechSynthesizer on Mac OS X #espeak - eSpeak on every other platform
    voices=engine.getProperty('voices')
    #for i in range(48):
    #print(voices[i].id)
    engine.setProperty('voice',voices[10].id)#10b 17 26 28 37 39

if __name__=="__main__":
    # Entry point; TaskExecution is defined elsewhere in this file —
    # presumably the assistant's main command loop.
    TaskExecution()
| 38.553156
| 186
| 0.550605
|
8b820e62535a256f6892582e2d661efa4be1b944
| 1,748
|
py
|
Python
|
model.py
|
TilenHumar/Vislice
|
5970fb4d887a5689b906a7190fabb5405b25bbc7
|
[
"MIT"
] | null | null | null |
model.py
|
TilenHumar/Vislice
|
5970fb4d887a5689b906a7190fabb5405b25bbc7
|
[
"MIT"
] | 2
|
2021-04-19T15:51:18.000Z
|
2021-04-19T16:17:06.000Z
|
model.py
|
TilenHumar/Vislice
|
5970fb4d887a5689b906a7190fabb5405b25bbc7
|
[
"MIT"
] | null | null | null |
import random
# Constants first ("najprej konstante").
STEVILO_DOVOLJENIH_NAPAK = 10  # "number of allowed mistakes" — max wrong guesses
PRAVILNA_CRKA = "+"    # "correct letter"
PONOVLJENA_CRKA = "o"  # "repeated letter"
NAPACNA_CRKA = "-"     # "wrong letter"
ZMAGA = "W"            # "win"
PORAZ = "X"            # "loss"
bazen_besed = []
with open("besede.txt", encoding ="utf8") as input_file:
    # NOTE(review): readlines() keeps the trailing newline on each word —
    # confirm downstream code strips it before comparing letters.
    bazen_besed = input_file.readlines()
| 23
| 69
| 0.582952
|