Dataset schema (one table row per source file; "nullable" marks columns that can be null, shown as ⌀ in the viewer):

| column | type | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 11 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 251 |
| max_stars_repo_name | string | length 4 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 251 |
| max_issues_repo_name | string | length 4 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 251 |
| max_forks_repo_name | string | length 4 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 1 to 1.05M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.04M |
| alphanum_fraction | float64 | 0 to 1 |

Each record below collapses the per-row stars/issues/forks metadata blocks, which repeat the same path, repo, head and licenses within a row; differences are noted where they occur.
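A minimal sketch of consuming rows with this schema through the Hugging Face `datasets` library; the dataset path "user/python-files" is a placeholder assumption, and only the column names come from the schema above.

    from datasets import load_dataset

    # Stream rows so the up-to-1 MB content fields are not all loaded at once.
    ds = load_dataset("user/python-files", split="train", streaming=True)
    for row in ds:
        # e.g. keep short, mostly-alphanumeric files from starred repos
        if row["size"] < 10_000 and row["alphanum_fraction"] > 0.6:
            print(row["max_stars_repo_name"], row["max_stars_repo_path"])
            break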
| hexsha: efd461c9230c324e2c8e6e92be4631dc26caa578 | size: 768 | ext: py | lang: Python |
| path: DailyProgrammer/20120316A.py | repo: DayGitH/Python-Challenges | head: bc32f1332a92fcc2dfa6f5ea4d95f8a8d64c3edf | licenses: ["MIT"] |
| stars: 2 (2020-12-23T18:59:22.000Z to 2021-04-14T13:16:09.000Z) | issues: null | forks: null |
"""
you have a string "ddaaiillyypprrooggrraammeerr". We want to remove all the consecutive duplicates and put them in a
separate string, which yields two separate instances of the string "dailyprogramer".
use this list for testing:
input: "balloons"
expected output: "balons" "lo"
input: "ddaaiillyypprrooggrraammeerr"
expected output: "dailyprogramer" "dailyprogramer"
input: "aabbccddeded"
expected output: "abcdeded" "abcd"
input: "flabby aapples"
expected output: "flaby aples" "bap"
"""
inp = "ddaaiillyypprrooggrraammeerr"
org = ""
extra = ""
hold = ""
for ch in inp:
    if hold == ch:
        extra += ch
    else:
        org += ch
        hold = ch
print("original:\t", inp)
print("first:\t\t", org)
print("repeats:\t", extra)
| avg_line_length: 25.6 | max_line_length: 116 | alphanum_fraction: 0.69401 |

| hexsha: efd60ec0f5dfed774930cf3e30f7572bed405c2b | size: 6,485 | ext: py | lang: Python |
| path: src/preppipe/enginesupport/enginesupport.py | repo: PrepPipe/preppipe-python | head: 6fc547a539737ec37a7528eb97ce92e56d4f404a | licenses: ["Apache-2.0"] |
| stars: 1 (2022-02-28T03:34:57.000Z) | issues: null | forks: null |
#!/usr/bin/env python3
import typing
import PIL.Image
from enum import Enum
import re
import preppipe.commontypes
from preppipe.vnmodel import *
# we define an MIR-style infrastructure for the backend: the Engine Model (EM)
# helper functions
def _get_label_name(name : str, type_prefix : str, scope_prefix: str, name_dict : typing.Dict[str, typing.Any], prefix : str = "") -> str:
# get the base name
base_label = re.sub(r'[^a-zA-Z0-9_]', '', name.replace(" ", "_"))
# ensure the name does not start with number or underscore, or is not empty
if len(base_label) > 0:
frontchar = base_label[0]
if frontchar == '_' or frontchar.isnumeric():
base_label = type_prefix + "_" + base_label
else:
# we have no alphanumeric characters
base_label = type_prefix + "_anon"
# make sure it is unique
# we may have duplicates
# try to add scope prefix to resolve this
if prefix + base_label in name_dict and len(scope_prefix) > 0:
base_label = scope_prefix + "_" + base_label
# now add the prefix; we no longer add prefix to base label
if len(prefix) > 0:
base_label = prefix + base_label
# if not working, add a numeric suffix
numeric_suffix = 0
result = base_label
while result in name_dict:
numeric_suffix += 1
result = base_label + '_' + str(numeric_suffix)
# done
return result
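# Editor's note (illustrative, not in the original file): on a name clash the
# helper escalates through the scope prefix and then numeric suffixes, e.g.
#   _get_label_name("entry", "control_label", "main", {"entry": None})
# returns "main_entry", and a further clash would yield "main_entry_1".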
def label_branch_targets(model : VNModel, reserved_set : typing.Set[str] = frozenset(), include_basicblock : bool = True) -> typing.Dict[VNValue, str]:
"""Assign all functions (and optionally basic blocks) with a label that is:
1. alphanumeric, non-empty
2. does not start with underscore '_'
3. unique across all functions and basic blocks
We may need this labeling even when all functions already have unique names, to avoid sanitization issues and clashes with reserved keywords
"""
name_dict = {} # label -> element (used internally)
elem_dict = {} # element -> label (for returning)
# add all reserved keywords to name_dict
for reserved in reserved_set:
assert isinstance(reserved, str)
name_dict[reserved] = None
# actual work
for func in model.get_function_list():
func_label = _get_label_name(func.get_name(), "control_label", "", name_dict)
name_dict[func_label] = func
elem_dict[func] = func_label
if include_basicblock:
for bb in func.get_basicblock_list():
bbname = bb.get_name()
if len(bbname) == 0 and bb is func.get_entry_block():
bbname = "entry"
bb_label = _get_label_name(bbname, "control_label", func_label, name_dict)
name_dict[bb_label] = bb
elem_dict[bb] = bb_label
return elem_dict
def label_basicblocks(func : VNFunction, reserved_set : typing.Set[str] = frozenset()) -> typing.Dict[VNBasicBlock, str]:
"""Assign labels to basic blocks with the same criteria as label_branch_targets:
1. alphanumeric, non-empty
2. does not start with underscore '_'
3. unique
"""
name_dict = {} # label -> element (used internally)
elem_dict = {} # element -> label (for returning)
# add all reserved keywords to name_dict
for reserved in reserved_set:
assert isinstance(reserved, str)
name_dict[reserved] = None
for bb in func.get_basicblock_list():
bbname = bb.get_name()
if len(bbname) == 0 and bb is func.get_entry_block():
bbname = "entry"
bb_label = _get_label_name(bbname, "label", "", name_dict, ".")
name_dict[bb_label] = bb
elem_dict[bb] = bb_label
return elem_dict
def label_sayer_identity(model : VNModel, reserved_set : typing.Set[str] = frozenset()) -> typing.Dict[str, str]:
"""make sure all characters and sayers have (alphanumeric) labels"""
name_dict = {}
elem_dict = {}
for reserved in reserved_set:
assert isinstance(reserved, str)
name_dict[reserved] = None
for character in model.get_character_list():
name = _get_label_name(character.get_name(), "character", "", name_dict)
name_dict[name] = character
elem_dict[character] = name
for sayer in model.get_sayer_list():
character = sayer.get_identity()
character_label = elem_dict[character]
name = _get_label_name(character_label + sayer.get_name(), "sayer", "", name_dict)
name_dict[name] = sayer
elem_dict[sayer] = name
return elem_dict
| avg_line_length: 32.918782 | max_line_length: 143 | alphanum_fraction: 0.665998 |

| hexsha: efd85393ed4e8b07da224123311c11a7291f7173 | size: 190 | ext: py | lang: Python |
| path: openprocurement/tender/limited/__init__.py | repo: Leits/openprocurement.tender.limited | head: c216e5b96dc850036d94fdf21883845afee34252 | licenses: ["Apache-2.0"] |
| stars: null | issues: 2 (2021-03-26T00:30:52.000Z to 2022-03-21T22:22:09.000Z) | forks: null |
from openprocurement.tender.limited.models import Tender
| avg_line_length: 27.142857 | max_line_length: 56 | alphanum_fraction: 0.815789 |

| hexsha: efd8cec6101a750931dee27419124950274496b7 | size: 3,422 | ext: py | lang: Python |
| path: upload.py | repo: woodlords/nftmaker-pro-scripts | head: 86e1eef0d297bf9589d56272b1edea9bb3e18612 | licenses: ["Apache-2.0"] |
| stars: 2 (2022-02-09T17:48:33.000Z to 2022-02-12T08:18:42.000Z) | issues: null | forks: null |
from pprint import pprint
import requests
import base64
import json
import argparse
import sys
p = argparse.ArgumentParser(description="New")
p.add_argument('-f','--folder-name', required=True, help='Folder name of the images/metadata files')
p.add_argument('-s','--start', required=False, help='Start ID to upload')
p.add_argument('-e','--end', required=False, help='End number for IDs to upload')
p.add_argument('--ids', nargs="+", required=False, help='List of local IDs to upload')
if len(sys.argv)==1:
p.print_help(sys.stderr)
sys.exit(1)
args = p.parse_args()
# Some variables you will need
api_key = "api_key_from_nftmakerpro"
nft_project_id = "12345"
upload_url = f'https://api.nft-maker.io/UploadNft/{api_key}/{nft_project_id}'
prefixName="WoodCastleProject"
prefixDisplayName="Wood Castle: Wood Lords S1 " # Leave a space at the end as we will add the token's #number at the end.
projectDescription="Wood Castle Studios Presents Woods Lords: Season One"
# Lord details
folder_name = args.folder_name
ids_list = args.ids
# See example Metadata file to use for adding metadata
main()  # main() is presumably defined in the portion of the file omitted from this excerpt
| avg_line_length: 30.553571 | max_line_length: 136 | alphanum_fraction: 0.648159 |

| hexsha: efdbc298676774176ab064bb9bca24e8e6416478 | size: 1,613 | ext: py | lang: Python |
| path: runner/runner.py | repo: lorne-luo/quicksilver | head: 79f2b66de9ab7aa63a35f56cac800b64c202f70c | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
import logging
from queue import Queue
from redis_queue.queue import RedisQueue
from runner.base import BaseRunner
logger = logging.getLogger(__name__)
if __name__ == '__main__':
# python -m event.runner
from handler import *
r = TestRedisRunner('test_runner', [], [],
[HeartBeatHandler(), TimeFramePublisher(timezone=0)])
r.run()
| avg_line_length: 25.603175 | max_line_length: 97 | alphanum_fraction: 0.650341 |

| hexsha: efdeda6cab101f5fe55ec27079a0f853fcc20c7e | size: 6,790 | ext: py | lang: Python |
| path: ultron/sentry/Analysis/TechnicalAnalysis/__init__.py | repo: wangjiehui11235/ultron | head: ade46fdcff7eaf01187cdf9b9fb1d6a04ae972b7 | licenses: ["Apache-2.0"] |
| stars: 4 (2019-06-06T09:38:49.000Z to 2022-01-29T00:02:11.000Z) | issues: 1 (2022-02-11T03:43:10.000Z) | forks: 8 (2019-06-02T13:11:00.000Z to 2021-11-11T01:06:22.000Z) |
# -*- coding: utf-8 -*-
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecuritySignValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityAverageValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityXAverageValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityMACDValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityExpValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityLogValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecuritySqrtValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityPowValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityAbsValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityAcosValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityAcoshValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityAsinValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityAsinhValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityNormInvValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityCeilValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityFloorValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityRoundValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityDiffValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecuritySigmoidValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityTanhValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecuritySimpleReturnValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityLogReturnValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityMaximumValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatelessTechnicalAnalysers import SecurityMinimumValueHolder
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingAverage
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingDecay
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingMax
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingArgMax
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingMin
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingArgMin
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingRank
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingQuantile
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingAllTrue
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingAnyTrue
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingSum
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingVariance
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingStandardDeviation
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingCountedPositive
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingPositiveAverage
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingCountedNegative
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingNegativeAverage
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingPositiveDifferenceAverage
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingNegativeDifferenceAverage
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingRSI
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingLogReturn
from ultron.sentry.Analysis.TechnicalAnalysis.StatefulTechnicalAnalysers import SecurityMovingCorrelation
__all__ = ['SecuritySignValueHolder',
'SecurityAverageValueHolder',
'SecurityXAverageValueHolder',
'SecurityMACDValueHolder',
'SecurityExpValueHolder',
'SecurityLogValueHolder',
'SecuritySqrtValueHolder',
'SecurityPowValueHolder',
'SecurityAbsValueHolder',
'SecurityAcosValueHolder',
'SecurityAcoshValueHolder',
'SecurityAsinValueHolder',
'SecurityAsinhValueHolder',
'SecurityNormInvValueHolder',
'SecurityCeilValueHolder',
'SecurityFloorValueHolder',
'SecurityRoundValueHolder',
'SecurityDiffValueHolder',
'SecurityTanhValueHolder',
'SecuritySigmoidValueHolder',
'SecuritySimpleReturnValueHolder',
'SecurityLogReturnValueHolder',
'SecurityMaximumValueHolder',
'SecurityMinimumValueHolder',
'SecurityMovingAverage',
'SecurityMovingDecay',
'SecurityMovingMax',
'SecurityMovingArgMax',
'SecurityMovingMin',
'SecurityMovingArgMin',
'SecurityMovingRank',
'SecurityMovingQuantile',
'SecurityMovingAllTrue',
'SecurityMovingAnyTrue',
'SecurityMovingSum',
'SecurityMovingVariance',
'SecurityMovingStandardDeviation',
'SecurityMovingCountedPositive',
'SecurityMovingPositiveAverage',
'SecurityMovingCountedNegative',
'SecurityMovingNegativeAverage',
'SecurityMovingPositiveDifferenceAverage',
'SecurityMovingNegativeDifferenceAverage',
'SecurityMovingRSI',
'SecurityMovingLogReturn',
'SecurityMovingCorrelation']
| avg_line_length: 70 | max_line_length: 119 | alphanum_fraction: 0.841679 |

| hexsha: efdf05259aeb476a54f281ec506c8577fe42f662 | size: 17,015 | ext: py | lang: Python |
| path: app/common/helper.py | repo: lguobin/KB_API | head: f7180cf430cb8de2eac8fa78e3937666da950c7a | licenses: ["Apache-2.0"] |
| stars: null | issues: null | forks: null |
# from app.common.utils import *
from sqlalchemy import desc
from settings import Config
from app.models import *
from app.extensions import db
from app.models.base import _BaseModel
from app.common.message import DBError
#
# ------------------------------
# ------------------------------
# ------------------------------
from sqlalchemy.exc import IntegrityError
| avg_line_length: 33.759921 | max_line_length: 132 | alphanum_fraction: 0.53253 |

| hexsha: efe02360bc1283274b4bc2434f2af992e192e9a4 | size: 7,403 | ext: py | lang: Python |
| path: package/tests/test_cp/test_azure/test_domain/test_services/test_vm_credentials_service.py | repo: tim-spiglanin/Azure-Shell | head: 58c52994f0d6cfd798c5dca33737419ec18363d4 | licenses: ["Apache-2.0"] |
| stars: 5 (2016-09-08T08:33:47.000Z to 2020-02-10T12:31:15.000Z) | issues: 505 (2016-08-09T07:41:03.000Z to 2021-02-08T20:26:46.000Z) | forks: 5 (2016-12-21T12:52:55.000Z to 2021-07-08T09:50:42.000Z) |
from unittest import TestCase
import mock
from cloudshell.cp.azure.domain.services.vm_credentials_service import VMCredentialsService
from cloudshell.cp.azure.models.vm_credentials import VMCredentials
def test_prepare_windows_credentials_without_user_and_password(self):
"""Check that method will return default username and generate password if credentials weren't provided"""
generated_pass = mock.MagicMock()
self.vm_credentials._generate_password = mock.MagicMock(return_value=generated_pass)
username, password = self.vm_credentials._prepare_windows_credentials("", "")
self.assertEqual(username, self.vm_credentials.DEFAULT_WINDOWS_USERNAME)
self.assertEqual(password, generated_pass)
def test_prepare_linux_credentials(self):
"""Check that method will return same credentials if username and password were provided"""
username, password, ssh_key = self.vm_credentials._prepare_linux_credentials(
username=self.test_username,
password=self.test_password,
storage_service=self.test_storage_service,
key_pair_service=self.test_key_pair_service,
storage_client=self.test_storage_client,
group_name=self.test_group_name,
storage_name=self.test_storage_name)
self.assertEqual(username, self.test_username)
self.assertEqual(password, self.test_password)
self.assertIsNone(ssh_key)
def test_prepare_linux_credentials_without_user_and_password(self):
"""Check that method will return default username and ssh_key if credentials weren't provided"""
returned_ssh_key = mock.MagicMock()
self.vm_credentials._get_ssh_key = mock.MagicMock(return_value=returned_ssh_key)
username, password, ssh_key = self.vm_credentials._prepare_linux_credentials(
username="",
password="",
storage_service=self.test_storage_service,
key_pair_service=self.test_key_pair_service,
storage_client=self.test_storage_client,
group_name=self.test_group_name,
storage_name=self.test_storage_name)
self.assertEqual(username, self.vm_credentials.DEFAULT_LINUX_USERNAME)
self.assertEqual(password, "")
self.assertEqual(ssh_key, returned_ssh_key)
| avg_line_length: 51.409722 | max_line_length: 120 | alphanum_fraction: 0.685533 |

| hexsha: efe17a7b9267d6d10ac42dd61070b721d1c277ec | size: 751 | ext: py | lang: Python |
| path: src/handler/quit.py | repo: junhg0211/Kreylin | head: aae5e1e5ba5cfaadfab6708cb0bf26a75c6dcb7a | licenses: ["Apache-2.0"] |
| stars: 1 (2019-09-11T12:02:53.000Z) | issues: 8 (2019-09-11T12:06:54.000Z to 2020-02-09T04:42:13.000Z) | forks: 1 (2021-05-24T12:43:07.000Z) |
from sys import platform
import pygame
from handler.handler import Handler
| avg_line_length: 34.136364 | max_line_length: 120 | alphanum_fraction: 0.663116 |

| hexsha: efe1e27548d4a791c0325857f9e7735c777989c1 | size: 2,635 | ext: py | lang: Python |
| path: decisive/__init__.py | repo: decisive/api-demo-python | head: 58cd14e9e1f6373a3cd927536fd29f5f286940a0 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
import requests
import requests.exceptions
import datetime
import ujson as json
import logging
| avg_line_length: 38.188406 | max_line_length: 88 | alphanum_fraction: 0.603036 |

| hexsha: efe41b6dc8f659359b1e12cb86ef509b2e8e51a8 | size: 38,284 | ext: py | lang: Python |
| path: app/main/views/service_settings.py | repo: karlchillmaid/notifications-admin | head: 9ef6da4ef9e2fa97b7debb4b573cb035a5cb8880 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
from flask import (
abort,
current_app,
flash,
redirect,
render_template,
request,
session,
url_for,
)
from flask_login import current_user, login_required
from notifications_python_client.errors import HTTPError
from notifications_utils.field import Field
from notifications_utils.formatters import formatted_list
from app import (
billing_api_client,
current_service,
email_branding_client,
inbound_number_client,
organisations_client,
service_api_client,
user_api_client,
zendesk_client,
)
from app.main import main
from app.main.forms import (
BrandingOptionsEmail,
ConfirmPasswordForm,
FreeSMSAllowance,
InternationalSMSForm,
LetterBranding,
LinkOrganisationsForm,
OrganisationTypeForm,
RenameServiceForm,
RequestToGoLiveForm,
ServiceBasicViewForm,
ServiceContactLinkForm,
ServiceEditInboundNumberForm,
ServiceInboundNumberForm,
ServiceLetterContactBlockForm,
ServiceReplyToEmailForm,
ServiceSetBranding,
ServiceSmsSenderForm,
ServiceSwitchLettersForm,
SMSPrefixForm,
branding_options_dict,
)
from app.utils import (
AgreementInfo,
email_safe,
get_cdn_domain,
user_has_permissions,
user_is_platform_admin,
)
def switch_service_permissions(service_id, permission, sms_sender=None):
force_service_permission(
service_id,
permission,
on=permission not in current_service['permissions'],
sms_sender=sms_sender
)
def force_service_permission(service_id, permission, on=False, sms_sender=None):
permissions, permission = set(current_service['permissions']), {permission}
update_service_permissions(
service_id,
permissions | permission if on else permissions - permission,
sms_sender=sms_sender
)
def update_service_permissions(service_id, permissions, sms_sender=None):
current_service['permissions'] = list(permissions)
data = {'permissions': current_service['permissions']}
if sms_sender:
data['sms_sender'] = sms_sender
service_api_client.update_service_with_properties(service_id, data)
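# Editor's note (illustrative, not in the original file): the helpers above
# toggle one permission via set algebra; with current permissions
# {'email', 'sms'}, forcing 'letter' on sends {'email', 'sms', 'letter'} to
# the API, while forcing 'sms' off sends {'email'}.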
def get_branding_as_value_and_label(email_branding):
return [
(branding['id'], branding['name'])
for branding in email_branding
]
| avg_line_length: 37.132881 | max_line_length: 119 | alphanum_fraction: 0.709252 |

| hexsha: efe457cbb3f9ed9d770c24aeb1ca7014a5e1296d | size: 3,094 | ext: py | lang: Python |
| path: doctools/spelling.py | repo: Sketch98/oil | head: 2d5c51432b9699e48178236da2e5b3bf1a33d79f | licenses: ["Apache-2.0"] |
| stars: null | issues: null | forks: null |
#!/usr/bin/env python2
"""
spelling.py
Filter the output of 'lynx -dump' into a list of words to spell check.
"""
from __future__ import print_function
from collections import Counter
import optparse
import re
import sys
def Options():
"""Returns an option parser instance."""
p = optparse.OptionParser()
p.add_option(
'--known-words', dest='known_words',
help='List of words like /usr/share/dict/words')
p.add_option(
'--more-than-bash', dest='more_than_bash', type=int, default=0,
help='Expected number of cases where OSH starts more processes than bash')
return p
def main(argv):
o = Options()
opts, argv = o.parse_args(argv[1:])
action = argv[0]
if action == 'word-split':
contents = sys.stdin.read()
for w in SplitWords(contents):
print(w)
elif action == 'check':
word_files = argv[1:]
d = Counter()
for path in word_files:
with open(path) as f:
for word in WordList(f):
d[word] += 1
print('')
print('Most common words')
print('')
for word, count in d.most_common()[:20]:
print('%10d %s' % (count, word))
print('')
print('Least common words')
print('')
for word, count in d.most_common()[-20:]:
print('%10d %s' % (count, word))
log('%d word files', len(word_files))
log('%d unique words', len(d))
known_words = {}
with open(opts.known_words) as f:
for w in WordList(f):
known_words[w] = True
print('')
print('Potential Misspellings')
print('')
for path in word_files:
print()
print('\t%s' % path)
print()
with open(path) as f:
unknown = {}
for w in WordList(f):
#if d.get(word) == 1:
# print(word)
if w not in known_words:
unknown[w] = True
if unknown:
for u in sorted(unknown):
# only occurs once
if d.get(u) == 1:
print(u)
log('\t%d unknown words in %s', len(unknown), path)
# Checking algorithms:
#
# - Does it appear in the dictionary? Problem: most computer terms
# - Does it appear only once or twice in the whole corpus?
# - Is the edit distance very close to a dictionary word?
# - e.g. subsitutions is a typo
else:
raise RuntimeError('Invalid action %r' % action)
if __name__ == '__main__':
try:
main(sys.argv)
except RuntimeError as e:
print('FATAL: %s' % e, file=sys.stderr)
sys.exit(1)
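# Editor's sketch (not in the original file): a plain Levenshtein distance for
# the "edit distance very close to a dictionary word" heuristic noted above.
def EditDistance(a, b):
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1,                 # deletion
                           cur[j - 1] + 1,              # insertion
                           prev[j - 1] + (ca != cb)))   # substitution
        prev = cur
    return prev[-1]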
| avg_line_length: 21.636364 | max_line_length: 80 | alphanum_fraction: 0.591791 |

| hexsha: efe4b76066b7fc615a3d5cb419d39e72b57d7593 | size: 20,659 | ext: py | lang: Python |
| path: train_deep_ls.py | repo: Kamysek/DeepLocalShapes | head: 24ee92889381d40acbb5ad1c7c8abb512a8c26b5 | licenses: ["MIT"] |
| stars: 4 (2021-09-23T11:36:30.000Z to 2022-02-23T20:10:46.000Z) | issues: null | forks: null |
#!/usr/bin/env python3
# Based on: https://github.com/facebookresearch/DeepSDF using MIT LICENSE (https://github.com/facebookresearch/DeepSDF/blob/master/LICENSE)
# Copyright 2021-present Philipp Friedrich, Josef Kamysek. All Rights Reserved.
import functools
import json
import logging
import math
import os
import signal
import sys
import time
import warnings
import deep_ls
import deep_ls.workspace as ws
import torch
import torch.multiprocessing as mp
import torch.utils.data as data_utils
from scipy.spatial import cKDTree
import numpy as np
if not sys.warnoptions:
warnings.simplefilter("ignore")
def get_learning_rate_schedules(specs):
schedule_specs = specs["LearningRateSchedule"]
schedules = []
for schedule_specs in schedule_specs:
if schedule_specs["Type"] == "Step":
schedules.append(
StepLearningRateSchedule(
schedule_specs["Initial"],
schedule_specs["Interval"],
schedule_specs["Factor"],
)
)
elif schedule_specs["Type"] == "Warmup":
schedules.append(
WarmupLearningRateSchedule(
schedule_specs["Initial"],
schedule_specs["Final"],
schedule_specs["Length"],
)
)
elif schedule_specs["Type"] == "Constant":
schedules.append(ConstantLearningRateSchedule(schedule_specs["Value"]))
else:
raise Exception(
'no known learning rate schedule of type "{}"'.format(
schedule_specs["Type"]
)
)
return schedules
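# Editor's sketch (an assumption, not from the repo): the shape of the
# "LearningRateSchedule" entry that get_learning_rate_schedules expects.
_example_specs = {
    "LearningRateSchedule": [
        {"Type": "Step", "Initial": 5e-4, "Interval": 500, "Factor": 0.5},
        {"Type": "Warmup", "Initial": 0.0, "Final": 1e-3, "Length": 100},
        {"Type": "Constant", "Value": 1e-3},
    ]
}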
def save_model(experiment_directory, filename, decoder, epoch):
model_params_dir = ws.get_model_params_dir(experiment_directory, True)
torch.save(
{"epoch": epoch, "model_state_dict": decoder.state_dict()},
os.path.join(model_params_dir, filename),
)
def save_optimizer(experiment_directory, filename, optimizer, epoch):
optimizer_params_dir = ws.get_optimizer_params_dir(experiment_directory, True)
torch.save(
{"epoch": epoch, "optimizer_state_dict": optimizer.state_dict()},
os.path.join(optimizer_params_dir, filename),
)
def load_optimizer(experiment_directory, filename, optimizer):
full_filename = os.path.join(
ws.get_optimizer_params_dir(experiment_directory), filename
)
if not os.path.isfile(full_filename):
raise Exception(
'optimizer state dict "{}" does not exist'.format(full_filename)
)
data = torch.load(full_filename)
optimizer.load_state_dict(data["optimizer_state_dict"])
return data["epoch"]
def save_latent_vectors(experiment_directory, filename, latent_vec, epoch):
latent_codes_dir = ws.get_latent_codes_dir(experiment_directory, True)
all_latents = latent_vec.state_dict()
torch.save(
{"epoch": epoch, "latent_codes": all_latents},
os.path.join(latent_codes_dir, filename),
)
# TODO: duplicated in workspace
if __name__ == "__main__":
import argparse
arg_parser = argparse.ArgumentParser(description="Train a DeepLS autodecoder")
arg_parser.add_argument(
"--experiment",
"-e",
dest="experiment_directory",
required=True,
help="The experiment directory. This directory should include "
+ "experiment specifications in 'specs.json', and logging will be "
+ "done in this directory as well.",
)
arg_parser.add_argument(
"--continue",
"-c",
dest="continue_from",
help="A snapshot to continue from. This can be 'latest' to continue"
+ "from the latest running snapshot, or an integer corresponding to "
+ "an epochal snapshot.",
)
arg_parser.add_argument(
"--batch_split",
dest="batch_split",
default=1,
help="This splits the batch into separate subbatches which are "
+ "processed separately, with gradients accumulated across all "
+ "subbatches. This allows for training with large effective batch "
+ "sizes in memory constrained environments.",
)
deep_ls.add_common_args(arg_parser)
args = arg_parser.parse_args()
deep_ls.configure_logging(args)
main_function(args.experiment_directory, args.continue_from, int(args.batch_split))
| avg_line_length: 32.330203 | max_line_length: 182 | alphanum_fraction: 0.628588 |

| hexsha: efe7d81ac7833b8ba25967361da1b664addd861c | size: 498 | ext: py | lang: Python |
| path: setup.py | repo: nicosandller/python-ethereumrpc | head: e826f99bbb34dc3d8009ac9392677e9ae2c9fa36 | licenses: ["MIT"] |
| stars: 1 (2019-03-28T19:16:21.000Z) | issues: null | forks: null |
from distutils.core import setup
setup(
name = 'python-ethereumrpc',
packages = ['python-ethereumrpc'],
version = '0.1',
description = 'A python interface for ethereum JSON-RPC service.',
author = 'Nicolas Sandller',
author_email = 'nicosandller@gmail.com',
url = 'https://github.com/nicosandller/python-ethereumrpc',
download_url = 'https://github.com/nicosandller/python-ethereumrpc/tarball/0.1',
keywords = ['ethereum', 'rpc', 'api', 'JSON', 'JSON-RPC'],
classifiers = [],
)
| avg_line_length: 35.571429 | max_line_length: 82 | alphanum_fraction: 0.696787 |

| hexsha: efe8537711357e13e0aa907bd882c404ad86cc4e | size: 988 | ext: py | lang: Python |
| path: interface.py | repo: robotafm/motor | head: 1c0838db12514304b930aec976d7adcbc51b7c92 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
# /robotafm/motor/interface.py
# Main web interface, contains basic
# information display
# imports:
import xml.dom.minidom
from flask import Flask, render_template
# constants:
LANG = "./lang/rus.xml"
# XML: load text strings from language file
dom = xml.dom.minidom.parse(LANG)
main_title = dom.getElementsByTagName("main_title")[0].childNodes[0].nodeValue
language = dom.getElementsByTagName("language")[0].childNodes[0].nodeValue
greeting = dom.getElementsByTagName("greeting")[0].childNodes[0].nodeValue
invitation = dom.getElementsByTagName("invitation")[0].childNodes[0].nodeValue
main_page_text = dom.getElementsByTagName("main_page_text")[0].childNodes[0].nodeValue
# Flask init:
app = Flask(__name__)
# Main site page:
| avg_line_length: 29.058824 | max_line_length: 87 | alphanum_fraction: 0.709514 |

| hexsha: efe95ae7664ab58458aa225b5cb6251325f40d6d | size: 317 | ext: py | lang: Python |
| path: src/eval_codalab_offline.py | repo: bjj9/EVE_SCPT | head: c91b13f8bbfe8ea29a0e9f1df0dc016a258c904f | licenses: ["MIT"] |
| stars: 21 (2021-06-20T02:35:08.000Z to 2022-03-16T06:57:03.000Z) | issues: 2 (2021-06-20T15:43:28.000Z to 2021-08-02T08:37:02.000Z) | forks: 1 (2021-07-19T11:07:13.000Z) |
from eval_codalab_basic import eval_codalab_basic
if __name__ == '__main__':
# 1. run first round to prepare full memory
eval_codalab_basic(output_suffix='online', skip_first_round_if_memory_is_ready=True)
# 2. do offline evaluation when memory is ready
eval_codalab_basic(output_suffix='offline')
| avg_line_length: 31.7 | max_line_length: 88 | alphanum_fraction: 0.782334 |

| hexsha: efed594b93f7036fd9e0fbb23d74fff628cd47d4 | size: 922 | ext: py | lang: Python |
| path: CountingValleys/ValleyCounter.py | repo: monemonesi/TDD_Katas_Python | head: f21a4f3516b75d7618dcd044453e25be015b4251 | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
UP = "U"
DOWN = "D"
ALLOWED_PATH_I = [UP, DOWN]
def update_high_for_step(high: int, step: str) -> int:
"""Update the current high given a step"""
if step == UP:
high += 1
elif step == DOWN:
high -= 1
return high
def update_valley_count(valleys: int, high: int, previous_high: int) -> int:
    """Count a valley when the walk returns to sea level from below (helper missing from this excerpt, reconstructed)"""
    if high == 0 and previous_high < 0:
        valleys += 1
    return valleys
def count_valley(steps: int, path: str) -> int:
    """Function which returns the number of valleys encountered on a given path"""
if len(path) != steps:
raise Exception("Steps should match length of path")
valleys = 0
high = 0
previous_high = 0
for i in range(steps):
previous_high = high
high = update_high_for_step(high, path[i])
valleys = update_valley_count(valleys, high, previous_high)
return valleys
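# Editor's sketch (not in the original file): the classic example path, with a
# single valley below sea level.
if __name__ == "__main__":
    assert count_valley(8, "UDDDUDUU") == 1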
| avg_line_length: 27.117647 | max_line_length: 83 | alphanum_fraction: 0.611714 |

| hexsha: efee0f491f5feefbc9f83692582c209722451e90 | size: 84 | ext: py | lang: Python |
| path: examples/coordinates3.py | repo: r0the/gpanel | head: 34cb31ef5abf08b139330fce6b301d920b22cea4 | licenses: ["MIT"] |
| stars: 1 (2021-03-22T06:31:38.000Z) | issues: 8 (2021-03-10T09:50:04.000Z to 2021-03-22T06:33:18.000Z) | forks: null |
from gpanel import *
coordinates(-3, -3, 11, 11)
line(0, 0, 8, 8)
line(8, 0, 0, 8)
| avg_line_length: 14 | max_line_length: 27 | alphanum_fraction: 0.583333 |

| hexsha: efee470e855ae2a217e0a35720dd990d8a0f3c8b | size: 333 | ext: py | lang: Python |
| path: Ex044.py | repo: JeanPauloGarcia/Python-Exercicios | head: faff4670806c423680ee00a88d3c4c49b437e72e | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
preco = float(input('Price: '))
print('''Enter the payment method:
1 - cash
2 - card, 1 installment
3 - card, 2 installments
4 - card, 3 or more installments
''')
pagto = str(input('Payment: ')).strip()
if pagto == '1':
    preco = preco*0.9
elif pagto == '2':
    preco = preco*0.95
elif pagto == '4':
    preco = preco*1.2
print(preco)
| avg_line_length: 19.588235 | max_line_length: 43 | alphanum_fraction: 0.597598 |

| hexsha: eff27556e4f9b47dbc9ed41d42898d35ce432f5c | size: 1,264 | ext: py | lang: Python |
| path: scorebee/main.py | repo: mikeboers/ScoreBee | head: e8c3476b6401808a61b495b9c42e8cbe752906b4 | licenses: ["BSD-3-Clause"] |
| stars: null | issues: null | forks: null |
import logging
import sys
from .application import Application
from .document import Document, Track, Event
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
app = Application(sys.argv)
if '--debug' in sys.argv:
# # Load a document.
# # We absolutely MUST have the document constructed fully BEFORE
# # setting it here. There are side effects to setting it.
# # HACK: This is just a hack for now.
# # doc = Document()
doc = Document('/Users/mikeboers/Desktop/example.MOV')
# doc = Document('/Users/mikeboers/Desktop/C00000S00A20091231112932302.avi')
doc.add_track(Track(
name='A behaviour',
key='q',
group='top two',
# events=[
# Event(10, 15), Event(50, 65), Event(500, 600)
# ]
))
doc.add_track(Track(
name='Nothin here',
key='w',
group='top two',
# events=[]
))
doc.add_track(Track(
name='Better one',
key='e',
# events=[
# Event(25, 26), Event(70, 71), Event(700, 701)
# ]
))
app.doc = doc
app.run()
| avg_line_length: 28.088889 | max_line_length: 84 | alphanum_fraction: 0.511076 |

| hexsha: eff28154f7d481027598302c0ee3f1c65be8e270 | size: 45,609 | ext: py | lang: Python |
| path: ceci/stage.py | repo: eacharles/ceci | head: e52e956c9e373c9a632ad0c312770f32ceab0c8b | licenses: ["BSD-3-Clause"] |
| stars: null | issues: 1 (2022-01-05T22:04:57.000Z) | forks: null |
"""Module with core functionality for a single pipeline stage """
import pathlib
import os
import sys
from textwrap import dedent
import shutil
import cProfile
from abc import abstractmethod
from . import errors
from .monitor import MemoryMonitor
from .config import StageConfig, cast_to_streamable
SERIAL = "serial"
MPI_PARALLEL = "mpi"
DASK_PARALLEL = "dask"
IN_PROGRESS_PREFIX = "inprogress_"
#############################################
# Parallelism-related methods and properties.
#############################################
def is_parallel(self):
"""
Returns True if the code is being run in parallel.
Right now is_parallel() will return the same value as is_mpi(),
but that may change in future if we implement other forms of
parallelization.
"""
return self._parallel != SERIAL
def is_mpi(self):
"""
Returns True if the stage is being run under MPI.
"""
return self._parallel == MPI_PARALLEL
def is_dask(self):
"""
Returns True if the stage is being run in parallel with Dask.
"""
return self._parallel == DASK_PARALLEL
def start_dask(self):
"""
Prepare dask to run under MPI. After calling this method
only a single process, MPI rank 1, will continue to execute code
"""
# using the programmatic dask configuration system
# does not seem to work. Presumably the loggers have already
# been created by the time we modify the config. Doing it with
# env vars seems to work. If the user has already set this then
# we use that value. Otherwise we only want error logs
key = "DASK_LOGGING__DISTRIBUTED"
os.environ[key] = os.environ.get(key, "error")
try:
import dask
import dask_mpi
import dask.distributed
except ImportError: #pragma: no cover
print(
"ERROR: Using --mpi option on stages that use dask requires "
"dask[distributed] and dask_mpi to be installed."
)
raise
if self.size < 3: #pragma: no cover
raise ValueError(
"Dask requires at least three processes. One becomes a scheduler "
"process, one is a client that runs the code, and more are required "
"as worker processes."
)
# This requires my fork until/unless they merge the PR, to allow
# us to pass in these two arguments. In vanilla dask-mpi sys.exit
# is called at the end of the event loop without returning to us.
# After this point only a single process, MPI rank 1,
# should continue to execute code. The others enter an event
# loop and return with is_client=False, which we return here
# to tell the caller that they should not run everything.
is_client = dask_mpi.initialize(comm=self.comm, exit=False)
if is_client:
# Connect this local process to remote workers.
self.dask_client = dask.distributed.Client()
# I don't yet know how to see this dashboard link at nersc
print(f"Started dask. Diagnostics at {self.dask_client.dashboard_link}")
return is_client
##################################################
# Input and output-related methods and properties.
##################################################
def get_input(self, tag):
"""Return the path of an input file with the given tag"""
return self._inputs[tag]
def get_output(self, tag, final_name=False):
"""Return the path of an output file with the given tag
If final_name is False then use a temporary name - file will
be moved to its final name at the end
"""
path = self._outputs[tag]
# If not the final version, add a tag at the start of the filename
if not final_name:
p = pathlib.Path(path)
p = p.parent / (IN_PROGRESS_PREFIX + p.name)
path = str(p)
return path
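# Editor's note (illustrative; the filename is hypothetical): while a stage is
# still running, get_output("catalog") would return "out/inprogress_catalog.fits",
# and the file is only renamed to "out/catalog.fits" once the stage finishes.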
def open_input(self, tag, wrapper=False, **kwargs):
"""
Find and open an input file with the given tag, in read-only mode.
For general files this will simply return a standard
python file object.
For specialized file types like FITS or HDF5 it will return
a more specific object - see the types.py file for more info.
"""
path = self.get_input(tag)
input_class = self.get_input_type(tag)
obj = input_class(path, "r", **kwargs)
if wrapper: #pragma: no cover
return obj
return obj.file
def open_output(self, tag, wrapper=False, final_name=False, **kwargs): #pragma: no cover
"""
Find and open an output file with the given tag, in write mode.
If final_name is True then they will be opened using their final
target output name. Otherwise we will prepend "inprogress_" to their
file name. This means we know that if the final file exists then it
is completed.
If wrapper is True this will return an instance of the class
of the file as specified in the cls.outputs. Otherwise it will
return an open file object (standard python one or something more
specialized).
Parameters
----------
tag: str
Tag as listed in self.outputs
wrapper: bool
Default=False. Whether to return a wrapped file
final_name: bool
Default=False. Whether to save to
**kwargs:
Extra args are passed on to the file's class constructor.
"""
path = self.get_output(tag, final_name=final_name)
output_class = self.get_output_type(tag)
# HDF files can be opened for parallel writing
# under MPI. This checks if:
# - we have been told to open in parallel
# - we are actually running under MPI
# and adds the flags required if all these are true
run_parallel = kwargs.pop("parallel", False) and self.is_mpi()
if run_parallel:
kwargs["driver"] = "mpio"
kwargs["comm"] = self.comm
# XXX: This is also not a dependency, but it should be.
# Or even better would be to make it a dependency of descformats where it
# is actually used.
import h5py
if not h5py.get_config().mpi:
print(
dedent(
"""\
Your h5py installation is not MPI-enabled.
Options include:
1) Set nprocess to 1 for all stages
2) Upgrade h5py to use mpi. See instructions here:
http://docs.h5py.org/en/latest/build.html#custom-installation
Note: If using conda, the most straightforward way is to enable it is
conda install -c spectraldns h5py-parallel
"""
)
)
raise RuntimeError("h5py module is not MPI-enabled.")
# Return an opened object representing the file
obj = output_class(path, "w", **kwargs)
if wrapper:
return obj
return obj.file
def get_input_type(self, tag):
"""Return the file type class of an input file with the given tag."""
for t, dt in self.inputs_():
if t == tag:
return dt
raise ValueError(f"Tag {tag} is not a known input") #pragma: no cover
def get_output_type(self, tag):
"""Return the file type class of an output file with the given tag."""
for t, dt in self.outputs_():
if t == tag:
return dt
raise ValueError(f"Tag {tag} is not a known output") #pragma: no cover
##################################################
# Configuration-related methods and properties.
##################################################
def read_config(self, args):
"""
This function looks for the arguments of the pipeline stage using a
combination of default values, command line options and separate
configuration file.
The order for resolving config options is first looking for a default
value, then looking for a setting in the configuration file or on the command line.
In case a mandatory argument (argument with no default) is missing,
an exception is raised.
Note that we recognize arguments with no default as the ones where
self.config_options holds a type instead of a value.
"""
# Try to load configuration file if provided
import yaml
config_file = self.get_input("config")
# This is all the config information in the file, including
# things for other stages
if config_file is not None:
with open(config_file) as _config_file:
overall_config = yaml.safe_load(_config_file)
else:
overall_config = {}
# The user can define global options that are inherited by
# all the other sections if not already specified there.
input_config = overall_config.get("global", {})
# This is just the config info in the file for this stage.
# It may be incomplete - there may be things specified on the
# command line instead, or just using their default values
stage_config = overall_config.get(self.instance_name, {})
input_config.update(stage_config)
self._configs.set_config(input_config, args)
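# Editor's sketch (an assumption, not from the repo): the parsed config layout
# that read_config consumes -- a "global" section inherited by per-stage
# sections keyed on the stage's instance name.
_example_overall_config = {
    "global": {"verbose": True},
    "MyStage": {"nside": 64},
}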
def get_config_dict(self, ignore=None, reduce_config=False):
"""Write the current configuration to a dict
Parameters
----------
ignore : dict or None
Global parameters not to write
reduce_config : bool
If true, reduce the configuration by parsing out the inputs, outputs and global params
Returns
-------
out_dict : dict
The configuration
"""
out_dict = {}
if reduce_config:
ignore_keys = self.input_tags() + self.output_tags() + ['config']
else:
ignore_keys = []
ignore = ignore or {}
for key, val in self.config.items():
if reduce_config:
if key in ignore:
if ignore[key] == val:
continue
if key in ignore_keys:
continue
out_dict[key] = cast_to_streamable(val)
return out_dict
def find_inputs(self, pipeline_files):
"""Find and retrun all the inputs associated to this stage in the FileManager
These are returned as a dictionary of tag : path pairs
"""
ret_dict = {}
for tag, _ in self.inputs_():
aliased_tag = self.get_aliased_tag(tag)
ret_dict[aliased_tag] = pipeline_files[aliased_tag]
return ret_dict
def find_outputs(self, outdir):
"""Find and retrun all the outputs associated to this stage
These are returned as a dictionary of tag : path pairs
"""
ret_dict = {}
for tag, ftype in self.outputs_():
aliased_tag = self.get_aliased_tag(tag)
ret_dict[aliased_tag] = f"{outdir}/{ftype.make_name(aliased_tag)}"
return ret_dict
def print_io(self, stream=sys.stdout):
"""Print out the tags, paths and types for all the inputs and outputs of this stage"""
stream.write("Inputs--------\n")
for tag, ftype in self.inputs_():
aliased_tag = self.get_aliased_tag(tag)
stream.write(f"{tag:20} : {aliased_tag:20} :{str(ftype):20} : {self._inputs[tag]}\n")
stream.write("Outputs--------\n")
for tag, ftype in self.outputs_():
aliased_tag = self.get_aliased_tag(tag)
stream.write(f"{tag:20} : {aliased_tag:20} :{str(ftype):20} : {self._outputs[aliased_tag]}\n")
def should_skip(self, run_config):
"""Return true if we should skip a stage b/c it's outputs already exist and we are in resume mode"""
outputs = self.find_outputs(run_config["output_dir"]).values()
already_run_stage = all(os.path.exists(output) for output in outputs)
return already_run_stage and run_config["resume"]
def already_finished(self):
"""Print a warning that a stage is being skipped"""
print(f"Skipping stage {self.instance_name} because its outputs exist already")
################################
# Pipeline-related methods
################################
| avg_line_length: 35.912598 | max_line_length: 118 | alphanum_fraction: 0.568967 |

| hexsha: eff725f55234c2a2a095069fa9d26ab47ed278d3 | size: 8,400 | ext: py | lang: Python |
| path: TrinaPointAndClick/src/TrinaPointAndClick/scripts/Marker_List_Node.py | repo: mjclements/TRINA-WPI-2.0 | head: aa060819522ed9010d20e9db0cf45b19f6b083af | licenses: ["MIT"] |
| stars: null | issues: null | forks: null |
#!/usr/bin/env python
import rospy
import numpy as np
import math
from geometry_msgs.msg import PoseStamped, Transform
from TrinaPointAndClick.msg import Marker, MarkerArray
if __name__ == '__main__':
"""
Initializes node and names it
Parameters:
None
Returns:
None
"""
print "Initializing Marker_Node..."
rospy.init_node('Marker_Node')
try:
Marker_Node = Marker_Node()
except rospy.ROSInterruptException:
rospy.logerr("Failed to start server node.")
pass
| avg_line_length: 38.181818 | max_line_length: 202 | alphanum_fraction: 0.616548 |

| hexsha: eff99e10986bd9b8e0f53017db77d82913562ddf | size: 1,102 | ext: py | lang: Python |
| path: topology.py | repo: Patatone/ryu-static-load-balancing | head: 7f3508ff8b135736150ad5c38b544d6e6ba90509 | licenses: ["Apache-2.0"] |
| stars: null | issues: null | forks: null |
from mininet.topo import Topo
from mininet.link import TCLink
topos = { 'topology': ( lambda: Topology() ) }
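# Editor's note (not in the original file): the `topos` dict is Mininet's hook
# for custom topologies; it is typically exercised from the command line as
#   sudo mn --custom topology.py --topo topology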
| avg_line_length: 39.357143 | max_line_length: 81 | alphanum_fraction: 0.607985 |

| hexsha: eff9cec3835ce08f6cdd64396a53993ba845ce23 | size: 5,155 | ext: py | lang: Python |
| path: JFJB.py | repo: stevevai/JFJB-crawler | head: 182c8930e5e979ea9176452764e9494a17574b1f | licenses: ["Apache-2.0"] |
| stars: 1 (2019-04-14T16:28:28.000Z) | issues: null | forks: null |
# -*- coding: utf-8 -*-
"""
Created on Thu Apr 12 23:00:28 2018
@author: wangshuai
"""
import urllib
import urllib.request as urllib2
import http.cookiejar as cookielib
import io
import re
import gzip
from selenium import webdriver
import datetime
if __name__ == '__main__':
config = Config()
ifile = open(config.get("outputPath")+"rough_info.txt","w",encoding='utf-8')
getArticle = GetArticle(config, handler = ifile)
getArticle.index_detail()
ifile.close()
| avg_line_length: 31.625767 | max_line_length: 156 | alphanum_fraction: 0.49098 |

| hexsha: effa145f7d27636ce9979e5fe2ebebe04e1345c3 | size: 392 | ext: py | lang: Python |
| path: tensorprob/samplers/base.py | repo: ibab/tensorfit | head: 53bbb324520f34335a272dc057c3ae6e9d2c575e | licenses: ["MIT"] |
| stars: 95 (2016-02-29T08:25:07.000Z to 2021-06-02T15:33:01.000Z) | issues: 48 (2016-02-19T00:56:05.000Z to 2016-02-28T23:12:12.000Z; the issues block points at repo ibab/tensorprob, head 79efa5678f984a2bb92573fb25c17b9475baef23) | forks: 19 (2016-02-29T00:14:34.000Z to 2020-06-18T06:07:39.000Z) |
import tensorflow as tf
| avg_line_length: 21.777778 | max_line_length: 59 | alphanum_fraction: 0.67602 |

| hexsha: effaf46adea62c6d7c4589ee3471fc9f1f1bc8dc | size: 3,327 | ext: py | lang: Python |
| path: scripts/ocgis_subset.py | repo: Zeitsperre/flyingpigeon | head: 678370bf428af7ffe11ee79be3b8a89c73215e5e | licenses: ["Apache-2.0"] |
| stars: 1 (2016-12-04T18:01:49.000Z) | issues: 13 (2017-03-16T15:44:21.000Z to 2019-08-19T16:56:04.000Z) | forks: null |
from os import path, listdir
import ocgis
from flyingpigeon import subset
from flyingpigeon import utils
from flyingpigeon.ocgis_module import call
def get_prediction(gam_model, ncs_indices): # mask=None
"""
predict the probability based on the gam_model and the given climate index datasets
:param gam_model: fitted gam (output from sdm.get_gam)
:param ncs_indices: list of netCDF files containing climate indices of one dataset
:param mask: 2D array of True/False to exclude areas (e.g ocean) for prediction
:return array: 3D array with prediction values
"""
from netCDF4 import Dataset
from os.path import basename
from numpy import squeeze, ravel, array, reshape # , zeros, broadcast_arrays, nan
from flyingpigeon.utils import get_variable
from rpy2.robjects.packages import importr
import rpy2.robjects as ro
import rpy2.robjects.numpy2ri
rpy2.robjects.numpy2ri.activate()
mgcv = importr("mgcv")
stats = importr("stats")
ncs_indices.sort()
data = {}
for i, nc in enumerate(ncs_indices):
var = get_variable(nc)
agg = basename(nc).split('_')[-2]
ds = Dataset(nc)
vals = squeeze(ds.variables[var])
if i == 0:
dims = vals.shape
# if mask != None:
# mask = broadcast_arrays(vals, mask)[1]
# vals[mask==False] = nan
indice = '%s_%s' % (var, agg)
data[str(indice)] = ro.FloatVector(ravel(vals))
dataf = ro.DataFrame(data)
predict_gam = mgcv.predict_gam(gam_model, newdata=dataf,
type="response", progress="text",
newdata_guaranteed=True, na_action=stats.na_pass)
prediction = array(predict_gam).reshape(dims)
return prediction
p = "/home/nils/data/AFR-44/tas/"
ncs = [path.join(p, nc) for nc in listdir(p)]
ncd = utils.sort_by_filename(ncs)
geom = subset.get_geom('CMR')
ugid = subset.get_ugid('CMR', geom=geom)
# from ocgis import RequestDataset, OcgOperations
keys = ncd.keys()
print len(keys)
ocgis.env.OVERWRITE = True
dmap = ocgis.DimensionMap()
dmap.set_variable('x', 'lon', dimension='rlon')
dmap.set_variable('y', 'lat', dimension='rlat')
dmap.set_variable('time', 'time', dimension='time')
#
# print dmap
# rd = ocgis.RequestDataset(ncd[keys[0]][0], crs=ocgis.crs.Spherical(), )
# geos = ocgis.OcgOperations(rd, geom=geom, select_ugid=ugid, output_format='nc', prefix='one_file').execute()
# geos
for key in ncd.keys():
# rd = ocgis.RequestDataset(ncd[key], crs=ocgis.crs.Spherical(), dimension_map=dmap)
# geos = ocgis.OcgOperations(rd,
# geom=geom, select_ugid=ugid,
# output_format='nc',
# prefix=key,
# add_auxiliary_files=False).execute()
geos = call(ncd[key], geom=geom, select_ugid=ugid, output_format='nc', prefix=key,
variable='tas', crs=ocgis.crs.Spherical(), dimension_map=dmap)
print geos
#
# rd = RequestDataset(ncd[keys[0]][0])
# geos = OcgOperations(rd, geom=geom, select_ugid=ugid, output_format='nc').execute()
#
# ncd[keys[0]]
#
# rd = RequestDataset(ncd[keys[0]])
#
# geos = OcgOperations(rd, geom=geom, select_ugid=ugid, output_format='nc').execute()
| 31.386792
| 110
| 0.644725
|
effb3d0f203ab8c4e4ea27554b71aa4fcc456877
| 746
|
py
|
Python
|
jacc/migrations/0019_entrytype_identifier.py
|
bachvtuan/django-jacc
|
37cdd54d8602d25e43a433bd66ccbed61f45a112
|
[
"MIT"
] | 10
|
2019-02-25T23:30:33.000Z
|
2021-05-02T18:02:48.000Z
|
jacc/migrations/0019_entrytype_identifier.py
|
bachvtuan/django-jacc
|
37cdd54d8602d25e43a433bd66ccbed61f45a112
|
[
"MIT"
] | null | null | null |
jacc/migrations/0019_entrytype_identifier.py
|
bachvtuan/django-jacc
|
37cdd54d8602d25e43a433bd66ccbed61f45a112
|
[
"MIT"
] | 4
|
2019-09-09T09:33:55.000Z
|
2022-01-01T09:28:13.000Z
|
# Generated by Django 2.1.2 on 2018-10-18 15:36
from django.db import migrations, models
from django.db.models import F
| avg_line_length: 28.692308 | max_line_length: 116 | alphanum_fraction: 0.687668 |

| hexsha: effc0cb6fddb743089c7bdb462500e13e334b104 | size: 342 | ext: py | lang: Python |
| path: tests/test_invalid_login.py | repo: joshmgrant/Python-Pytest-Nerodia | head: 55e8d92cd21e3093e6eb434e4ab7b126c974c6f0 | licenses: ["MIT"] |
| stars: 1 (2019-03-19T08:29:02.000Z) | issues: null | forks: null |
import pytest
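# The test body is missing from this dump; a hypothetical invalid-login check in
# the nerodia style (the `browser` fixture and the page locators are assumptions):
@pytest.mark.parametrize('username, password', [('invalid_user', 'invalid_pass')])
def test_invalid_login(browser, username, password):
    browser.text_field(id='username').set(username)
    browser.text_field(id='password').set(password)
    browser.button(type='submit').click()
    assert browser.div(class_name='error').present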
| 28.5
| 70
| 0.733918
|
effc7b61a293ddc828780cd36ebddcbb6d17256b
| 403
|
py
|
Python
|
splicemachine/mlflow_support/flavors/mlflow_onnx.py
|
myles-novick/pysplice
|
96a848d4adda0a937002798865d32939f059f4d1
|
[
"Apache-2.0"
] | null | null | null |
splicemachine/mlflow_support/flavors/mlflow_onnx.py
|
myles-novick/pysplice
|
96a848d4adda0a937002798865d32939f059f4d1
|
[
"Apache-2.0"
] | null | null | null |
splicemachine/mlflow_support/flavors/mlflow_onnx.py
|
myles-novick/pysplice
|
96a848d4adda0a937002798865d32939f059f4d1
|
[
"Apache-2.0"
] | null | null | null |
from splicemachine.mlflow_support import *
from splicemachine.mlflow_support.mlflow_support import _GORILLA_SETTINGS
import gorilla
import mlflow.onnx
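# The _log_model wrapper referenced below is missing from this dump; a minimal
# sketch that simply delegates to the original mlflow.onnx.log_model via gorilla
# (the real Splice Machine version presumably adds registry bookkeeping):
def _log_model(*args, **kwargs):
    original = gorilla.get_original_attribute(mlflow.onnx, 'log_model')
    return original(*args, **kwargs)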
gorilla.apply(gorilla.Patch(mlflow.onnx, _log_model.__name__.lstrip('_'), _log_model, settings=_GORILLA_SETTINGS))
| 40.3
| 114
| 0.816377
|
effc868ba3985263b54f27c9ba1dafa032b3a960
| 351
|
py
|
Python
|
services/shortto.py
|
joshthecoder/shorty-python
|
35687d010683944d75e3f0dce7799903296172c5
|
[
"MIT"
] | 11
|
2015-05-29T04:58:28.000Z
|
2020-05-31T17:07:52.000Z
|
services/shortto.py
|
joshthecoder/shorty-python
|
35687d010683944d75e3f0dce7799903296172c5
|
[
"MIT"
] | null | null | null |
services/shortto.py
|
joshthecoder/shorty-python
|
35687d010683944d75e3f0dce7799903296172c5
|
[
"MIT"
] | 2
|
2015-03-10T06:22:31.000Z
|
2018-06-18T18:20:59.000Z
|
## Shorty
## Copyright 2009 Joshua Roesslein
## See LICENSE
## @url short.to
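## The service class body is missing from this dump; shorty services follow a
## common pattern, so here is a hypothetical sketch (the Service base class and
## request() helper are assumptions about shorty's internals):
class Shortto(Service):

    def shrink(self, bigurl):
        resp = request('http://short.to/s.txt', {'url': bigurl})
        return resp.read()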
| 21.9375
| 65
| 0.60114
|
effded4514a6e107993718820a8e681baef231bd
| 4,743
|
py
|
Python
|
spinup/examples/pg_math/1_simple_pg.py
|
MengTianjian/spinningup-pytorch
|
6b9b87ed7a8140a52f3c86cc88f61428a9fd1176
|
[
"MIT"
] | 1
|
2019-04-23T04:32:35.000Z
|
2019-04-23T04:32:35.000Z
|
spinup/examples/pg_math/1_simple_pg.py
|
MengTianjian/spinningup-pytorch
|
6b9b87ed7a8140a52f3c86cc88f61428a9fd1176
|
[
"MIT"
] | null | null | null |
spinup/examples/pg_math/1_simple_pg.py
|
MengTianjian/spinningup-pytorch
|
6b9b87ed7a8140a52f3c86cc88f61428a9fd1176
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical
import numpy as np
import gym
from gym.spaces import Discrete, Box
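# The network and training-loop definitions are missing from this dump; below is
# a reconstruction sketched from the well-known Spinning Up "simple_pg" example
# (hyperparameter defaults and exact logging format are assumptions):
def mlp(sizes, activation=nn.Tanh, output_activation=nn.Identity):
    # build a simple feedforward network
    layers = []
    for j in range(len(sizes) - 1):
        act = activation if j < len(sizes) - 2 else output_activation
        layers += [nn.Linear(sizes[j], sizes[j + 1]), act()]
    return nn.Sequential(*layers)

def train(env_name='CartPole-v0', hidden_sizes=(32,), lr=1e-2,
          epochs=50, batch_size=5000, render=False):
    env = gym.make(env_name)
    assert isinstance(env.observation_space, Box)
    assert isinstance(env.action_space, Discrete)
    obs_dim = env.observation_space.shape[0]
    n_acts = env.action_space.n

    # policy network maps observations to action logits
    logits_net = mlp([obs_dim] + list(hidden_sizes) + [n_acts])
    optimizer = torch.optim.Adam(logits_net.parameters(), lr=lr)

    def get_policy(obs):
        return Categorical(logits=logits_net(obs))

    def get_action(obs):
        return get_policy(obs).sample().item()

    def compute_loss(obs, act, weights):
        # "weights" are the whole-episode returns R(tau)
        logp = get_policy(obs).log_prob(act)
        return -(logp * weights).mean()

    for epoch in range(epochs):
        batch_obs, batch_acts, batch_weights, batch_rets = [], [], [], []
        obs, ep_rews = env.reset(), []
        while True:
            if render:
                env.render()
            batch_obs.append(obs.copy())
            act = get_action(torch.as_tensor(obs, dtype=torch.float32))
            obs, rew, done, _ = env.step(act)
            batch_acts.append(act)
            ep_rews.append(rew)
            if done:
                ep_ret = sum(ep_rews)
                batch_rets.append(ep_ret)
                batch_weights += [ep_ret] * len(ep_rews)
                obs, ep_rews = env.reset(), []
                if len(batch_obs) > batch_size:
                    break
        optimizer.zero_grad()
        loss = compute_loss(torch.as_tensor(batch_obs, dtype=torch.float32),
                            torch.as_tensor(batch_acts, dtype=torch.int32),
                            torch.as_tensor(batch_weights, dtype=torch.float32))
        loss.backward()
        optimizer.step()
        print('epoch: %3d \t loss: %.3f \t return: %.3f' %
              (epoch, loss.item(), np.mean(batch_rets)))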
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--env_name', '--env', type=str, default='CartPole-v0')
parser.add_argument('--render', action='store_true')
parser.add_argument('--lr', type=float, default=1e-2)
args = parser.parse_args()
print('\nUsing simplest formulation of policy gradient.\n')
train(env_name=args.env_name, render=args.render, lr=args.lr)
| 37.346457
| 83
| 0.609741
|
560191c793a93a302d95f1bc0f3bed7552833bd0
| 2,334
|
py
|
Python
|
examples/protobuf/protobuftools.py
|
sunjinopensource/asynmsg
|
9c1d14f859cc6702446c3bb30b9916280429bd1d
|
[
"MIT"
] | 3
|
2015-05-10T16:10:35.000Z
|
2019-02-08T12:22:27.000Z
|
examples/protobuf/protobuftools.py
|
sunjinopensource/asynmsg
|
9c1d14f859cc6702446c3bb30b9916280429bd1d
|
[
"MIT"
] | null | null | null |
examples/protobuf/protobuftools.py
|
sunjinopensource/asynmsg
|
9c1d14f859cc6702446c3bb30b9916280429bd1d
|
[
"MIT"
] | null | null | null |
import asynmsg
import struct
import google.protobuf.message
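# The helper bodies are missing from this dump; a hypothetical framing pair
# showing how protobuf messages might be packed for asynmsg (the message-id and
# length-prefix wire format are assumptions):
def pack_message(msg_id, pb_msg):
    body = pb_msg.SerializeToString()
    return struct.pack('<HI', msg_id, len(body)) + body

def unpack_message(msg_class, data):
    msg = msg_class()
    msg.ParseFromString(data)
    return msg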
| 33.826087
| 115
| 0.602828
|
56027f5cae2f8100bbcabdb3f59b412acf2181e4
| 6,402
|
py
|
Python
|
client/python/thegame/entity.py
|
afq984/thegame
|
3769fffa281b7d5e8d1336d57e73c8e8d4d2289a
|
[
"MIT"
] | 3
|
2017-08-18T00:32:54.000Z
|
2017-11-18T02:25:51.000Z
|
client/python/thegame/entity.py
|
afq984/thegame
|
3769fffa281b7d5e8d1336d57e73c8e8d4d2289a
|
[
"MIT"
] | 3
|
2017-08-15T09:59:25.000Z
|
2018-08-22T17:28:13.000Z
|
client/python/thegame/entity.py
|
afq984/thegame
|
3769fffa281b7d5e8d1336d57e73c8e8d4d2289a
|
[
"MIT"
] | 1
|
2018-08-07T12:38:48.000Z
|
2018-08-07T12:38:48.000Z
|
import collections
from thegame.abilities import Ability
Vector = collections.namedtuple('Vector', ('x', 'y'))
Vector.__doc__ = '''
A 2D vector.
Used to represent a point and velocity in thegame
'''
HeroAbility = collections.namedtuple(
'HeroAbility',
['level', 'value']
)
HeroAbilityList = collections.namedtuple(
'HeroAbilityList',
[ab.as_camel for ab in Ability]
)
| 25.710843
| 76
| 0.592002
|
4bc69e662f7af10d0c2438ee8ea0f1bb00d372e9
| 3,456
|
py
|
Python
|
services/web/project/__init__.py
|
shekharRavi/croationa_topic_api
|
a68bc69a69c5a6898b74ee0f3adf83b23d29b40b
|
[
"MIT"
] | null | null | null |
services/web/project/__init__.py
|
shekharRavi/croationa_topic_api
|
a68bc69a69c5a6898b74ee0f3adf83b23d29b40b
|
[
"MIT"
] | null | null | null |
services/web/project/__init__.py
|
shekharRavi/croationa_topic_api
|
a68bc69a69c5a6898b74ee0f3adf83b23d29b40b
|
[
"MIT"
] | null | null | null |
import os
import json
# import wget
from flask import (
Flask,
jsonify,
send_from_directory,
request,
redirect,
url_for
)
from flask_sqlalchemy import SQLAlchemy
import werkzeug
werkzeug.cached_property = werkzeug.utils.cached_property
from werkzeug.utils import secure_filename
from werkzeug.middleware.proxy_fix import ProxyFix
from flask_restx import Api, Resource, fields, abort, reqparse
from celery import Celery
import celery.states as states
from . import api_functions
from . import topic_model_classifier
# global variables
CELERY_BROKER_URL = os.environ.get('CELERY_BROKER_URL')
CELERY_RESULT_BACKEND = os.environ.get('CELERY_RESULT_BACKEND')
celery = Celery('tasks', broker=CELERY_BROKER_URL, backend=CELERY_RESULT_BACKEND)
app = Flask(__name__)
app.wsgi_app = ProxyFix(app.wsgi_app)
app.config.from_object("project.config.Config")
db = SQLAlchemy(app)
api = Api(app, version='1.0',
title='UGC API services',
description='REST APIs for processing user-generated content')
ns = api.namespace('comments_api', description='REST services API for news comments')
# input and output definitions
topic_model_single_input = api.model('TopicModelSingleInput', {
'text': fields.String(required=True, description='input text for topic')
})
topic_model_single_output = api.model('TopicModelSingleOutput', {
'suggested_label': fields.List(fields.String(), required=True, description='suggested label for topics'),
'description': fields.List(fields.String(), required=True, description='description of suggested label'),
'topic_words': fields.List(fields.String(), required=True, description='topic words')
})
topic_model_list_input = api.model('TopicModelListInput', {
'texts': fields.List(fields.String, required=True, description='input list of texts for topic')
})
topic_model_list_output = api.model('TopicModelListOutput', {
'suggested_label': fields.List(fields.String(), required=True, description='suggested label for topics'),
'description': fields.List(fields.String(), required=True, description='description of suggested label'),
'topic_words': fields.List(fields.String(), required=True, description='topic words')
})
| 35.628866
| 109
| 0.739005
|
4bc780e7bf91dc67b2e9b3c85f1b9477066d6c29
| 87
|
py
|
Python
|
opensanctions/helpers/gender.py
|
fastbone/opensanctions
|
dea7f7d073083eece26241bcade697a2b959a09e
|
[
"MIT"
] | null | null | null |
opensanctions/helpers/gender.py
|
fastbone/opensanctions
|
dea7f7d073083eece26241bcade697a2b959a09e
|
[
"MIT"
] | null | null | null |
opensanctions/helpers/gender.py
|
fastbone/opensanctions
|
dea7f7d073083eece26241bcade697a2b959a09e
|
[
"MIT"
] | null | null | null |
# Welcome to the wonderful world of police databases:
MALE = "male"
FEMALE = "female"
| 17.4
| 53
| 0.724138
|
4bc81ada6770b9d230169abfe03aa04a2271356b
| 472
|
py
|
Python
|
backups_operator/servers/views.py
|
jetchirag/backops
|
777e8d3b3b89afdc0482f71f1ecc499036c62968
|
[
"MIT"
] | null | null | null |
backups_operator/servers/views.py
|
jetchirag/backops
|
777e8d3b3b89afdc0482f71f1ecc499036c62968
|
[
"MIT"
] | null | null | null |
backups_operator/servers/views.py
|
jetchirag/backops
|
777e8d3b3b89afdc0482f71f1ecc499036c62968
|
[
"MIT"
] | null | null | null |
from django.shortcuts import render
from django.http import HttpResponse
from backups_operator.servers.models import Server
# Create your views here.
| 21.454545
| 53
| 0.711864
|
4bc9a28e7931530bacfb9f635e9e8859c38140a3
| 1,460
|
py
|
Python
|
scripts/inspect_docker.py
|
lijing1996/DockerMonitor
|
b1105e120d9079a0d24a90ef401221dfceeed7b6
|
[
"Apache-2.0"
] | 1
|
2021-04-12T09:35:08.000Z
|
2021-04-12T09:35:08.000Z
|
scripts/inspect_docker.py
|
lijing1996/DockerMonitor
|
b1105e120d9079a0d24a90ef401221dfceeed7b6
|
[
"Apache-2.0"
] | null | null | null |
scripts/inspect_docker.py
|
lijing1996/DockerMonitor
|
b1105e120d9079a0d24a90ef401221dfceeed7b6
|
[
"Apache-2.0"
] | null | null | null |
import argparse
import sys
import subprocess
import psutil
def inspect_process(pid):
    """Determine:
    1. whether the process is running in a container
    2. if so, output the container id and the user
    :return: (is_in_container, container_id, user_name)
    """
assert psutil.pid_exists(pid), "The process doesn't exist"
try:
result = subprocess.check_output(f'cat /proc/{pid}/cgroup', shell=True)
# print(result)
except subprocess.CalledProcessError as e:
return_code = e.returncode
print(f"Inspect Wrong Error Code{return_code}")
sys.exit(1)
line = result.decode('utf-8').split('\n')[0].strip()
is_in_container = 'docker' in line
container_id = ''
user_name = ''
if is_in_container:
container_id = line.split('/')[-1][:12] #Only save first 12 char of container id
container_info = subprocess.check_output(f'docker ps -a|grep {container_id}', shell=True).decode('utf-8')
user_name = container_info.strip().split()[-1]
return is_in_container, container_id, user_name
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Inspector for docker")
parser.add_argument("-p", type=int, help="the pid")
args = parser.parse_args()
is_in_container, container_id, user_name = inspect_process(args.p)
print(f"Is the process running in the container :{is_in_container}")
print(f"The container id {container_id}")
print(f"The user name {user_name}")
| 33.181818
| 113
| 0.678767
|
4bc9dc6d068e1225034cf817b0d3efa5bdeee220
| 128
|
py
|
Python
|
Aula10.py
|
rsmelocunha/Python-projects
|
1740d1cbafb0aebfffeb0bfdb4ccccf0dbd14093
|
[
"MIT"
] | null | null | null |
Aula10.py
|
rsmelocunha/Python-projects
|
1740d1cbafb0aebfffeb0bfdb4ccccf0dbd14093
|
[
"MIT"
] | null | null | null |
Aula10.py
|
rsmelocunha/Python-projects
|
1740d1cbafb0aebfffeb0bfdb4ccccf0dbd14093
|
[
"MIT"
] | null | null | null |
ano = int(input("Enter your car's model year: "))
idadecarro = 2022 - ano
print('New car' if idadecarro <= 3 else 'Old car')
| 42.666667
| 56
| 0.695313
|
4bca99ba9eda853683218d8ee0882faa531e6181
| 3,531
|
py
|
Python
|
companion_app/live_log_retrieval.py
|
MorganJamesSmith/uni-project-ideas
|
6c48d0edb526908ed95192e97ab47df1257b6036
|
[
"BSD-3-Clause"
] | 1
|
2020-09-15T15:33:33.000Z
|
2020-09-15T15:33:33.000Z
|
companion_app/live_log_retrieval.py
|
MorganJamesSmith/uni-project
|
6c48d0edb526908ed95192e97ab47df1257b6036
|
[
"BSD-3-Clause"
] | null | null | null |
companion_app/live_log_retrieval.py
|
MorganJamesSmith/uni-project
|
6c48d0edb526908ed95192e97ab47df1257b6036
|
[
"BSD-3-Clause"
] | null | null | null |
"""
implements a wrapper for loading live data from the serial connection and passing it to plotting
"""
import serial
import time
import struct
import plotly.express as px
try:
from . import log_parser
except ImportError:
import log_parser
# TODO: clean up CLI code
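# The LiveLogFile class is missing from this dump; a minimal file-like wrapper
# around the serial port, sketched from how it is used below (the baud rate and
# read interface are assumptions):
class LiveLogFile:
    def __init__(self, port, baudrate=115200):
        self._ser = serial.Serial(port, baudrate)

    def read(self, n=1):
        # block until n bytes arrive from the device
        return self._ser.read(n)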
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("input_file", nargs="?",default="/dev/ttyACM0")
ns = parser.parse_args()
for [type, *fields] in log_parser.parse_data(log_parser.parse_raw_entries(LiveLogFile(ns.input_file))):
if type != 4:
continue # ignore all but IMU data
print(*map("{:>8}".format, fields), sep=",")
| 46.460526
| 118
| 0.664118
|
4bcbbf9c4a02cc75f67572b9d3e876126fc65c10
| 313
|
py
|
Python
|
bin/bucrm.py
|
aelzenaar/bucephalus
|
49cc084a5444ffbde2f850fc1f7b230d3bb8dfbc
|
[
"MIT"
] | null | null | null |
bin/bucrm.py
|
aelzenaar/bucephalus
|
49cc084a5444ffbde2f850fc1f7b230d3bb8dfbc
|
[
"MIT"
] | 12
|
2018-11-09T03:00:28.000Z
|
2019-01-02T05:39:55.000Z
|
bin/bucrm.py
|
aelzenaar/bucephalus
|
49cc084a5444ffbde2f850fc1f7b230d3bb8dfbc
|
[
"MIT"
] | null | null | null |
import sys
import dbops
from pathlib import Path
if len(sys.argv) < 2:
print("Bucephalus Remove File Script")
print("Usage: " + sys.argv[0] + " <identifier>")
sys.exit()
sys.argv.pop(0)
ident = sys.argv.pop(0)
if dbops.remove_record_by_id(ident) is None:
print("*** Error: failed to remove record.")
| 18.411765
| 50
| 0.677316
|
4bcbcc55408d8cf46761e62d961a3d39291ace5d
| 440
|
py
|
Python
|
tests/test_get_current_os_name.py
|
c-pher/PyWinOS
|
a16a16a24abaa53a06b9365b2535c8ab31a7fdfb
|
[
"MIT"
] | 4
|
2020-04-17T15:54:43.000Z
|
2020-11-08T06:39:05.000Z
|
tests/test_get_current_os_name.py
|
c-pher/PyWinOS
|
a16a16a24abaa53a06b9365b2535c8ab31a7fdfb
|
[
"MIT"
] | 65
|
2020-01-05T21:45:17.000Z
|
2022-03-31T16:50:20.000Z
|
tests/test_get_current_os_name.py
|
c-pher/PyWinOS
|
a16a16a24abaa53a06b9365b2535c8ab31a7fdfb
|
[
"MIT"
] | null | null | null |
import os
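# The test body is missing from this dump; a trivial hypothetical placeholder
# (the real test presumably calls PyWinOS and compares against the host OS):
def test_get_current_os_name():
    assert os.name in ('nt', 'posix')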
| 29.333333
| 70
| 0.661364
|
4bcc388c3974bdfcd63888beb8ed71bb0fa61380
| 5,133
|
py
|
Python
|
GUI/GUI_windows/TranslationLanguageWindow.py
|
Chenger1/stellaris-trpack
|
5d85bbbc7374975b5da729899b5691ea77c16ea2
|
[
"MIT"
] | 3
|
2020-07-23T00:32:06.000Z
|
2020-10-09T18:05:56.000Z
|
GUI/GUI_windows/TranslationLanguageWindow.py
|
Chenger1/stellaris-trpack
|
5d85bbbc7374975b5da729899b5691ea77c16ea2
|
[
"MIT"
] | 105
|
2020-07-16T12:23:57.000Z
|
2021-01-18T18:11:40.000Z
|
GUI/GUI_windows/TranslationLanguageWindow.py
|
Letiso/Stellaris-True-Machine-Translation-Tool
|
b80431c1c9b49c2482cb9aefa02eb0de62d7cc56
|
[
"MIT"
] | 1
|
2020-07-15T13:30:57.000Z
|
2020-07-15T13:30:57.000Z
|
"""
"""
from PyQt5 import QtWidgets, QtCore
from GUI.GUI_windows_source import TranslationLanguage
from json import load, dump
from functools import partial
import copy
from scripts.stylesheets import choosen_lang_style, not_chosen_lang_style
| 37.742647
| 109
| 0.601403
|
4bcc5632c54ea11fd3756fc709d789ae83392c50
| 55
|
py
|
Python
|
dearpygui_map/__init__.py
|
mkouhia/dearpygui_map
|
8db86e6917b88c118aff94a22e383ef517c40620
|
[
"MIT"
] | null | null | null |
dearpygui_map/__init__.py
|
mkouhia/dearpygui_map
|
8db86e6917b88c118aff94a22e383ef517c40620
|
[
"MIT"
] | 21
|
2022-02-21T08:31:03.000Z
|
2022-03-08T19:27:33.000Z
|
dearpygui_map/__init__.py
|
mkouhia/dearpygui_map
|
8db86e6917b88c118aff94a22e383ef517c40620
|
[
"MIT"
] | null | null | null |
"""Map widget for Dear PyGui"""
__version__ = "0.0.1"
| 13.75
| 31
| 0.636364
|
4bcdc5c2dfab2675a93de75f43fee73049b1f7fb
| 1,347
|
py
|
Python
|
demosauruswebapp/demosaurus/subject_headings.py
|
KBNLresearch/Demosaurus
|
9235e315d9eef9d8d64f94a90ab4fc8220670ef2
|
[
"Apache-2.0"
] | 1
|
2020-06-25T16:39:35.000Z
|
2020-06-25T16:39:35.000Z
|
demosauruswebapp/demosaurus/subject_headings.py
|
KBNLresearch/Demosaurus
|
9235e315d9eef9d8d64f94a90ab4fc8220670ef2
|
[
"Apache-2.0"
] | 6
|
2020-03-06T12:31:38.000Z
|
2021-09-20T15:08:17.000Z
|
demosauruswebapp/demosaurus/subject_headings.py
|
KBNLresearch/Demosaurus
|
9235e315d9eef9d8d64f94a90ab4fc8220670ef2
|
[
"Apache-2.0"
] | null | null | null |
from flask import (
Blueprint, request)#, flash, g, redirect, render_template, get_template_attribute, url_for, jsonify
# )
# from werkzeug.exceptions import abort
import requests
# from demosaurus.db import get_db
# import pandas as pd
# from nltk.metrics import distance
# import re
# import numpy as np
bp = Blueprint('subject_headings', __name__)
annif_url = 'https://kbresearch.nl/annif/v1/'
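# The view functions are missing from this dump; a hypothetical sketch showing
# how the blueprint might call Annif's suggest endpoint (the route and the
# project id are assumptions):
@bp.route('/suggest', methods=['POST'])
def suggest():
    text = request.form.get('text', '')
    resp = requests.post(annif_url + 'projects/yso-fi/suggest',
                         data={'text': text})
    return resp.json()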
| 32.071429
| 104
| 0.697105
|
4bcf87fdcdb2f4bd16f622a1e7e79b1aeb825b7c
| 3,448
|
py
|
Python
|
server/Kusa/views.py
|
meshellchoo/senior-design-project-kusa
|
829575259c31a620c895a0f2d5654ea099298eb6
|
[
"MIT"
] | 1
|
2022-03-28T23:20:09.000Z
|
2022-03-28T23:20:09.000Z
|
server/Kusa/views.py
|
meshellchoo/senior-design-project-kusa
|
829575259c31a620c895a0f2d5654ea099298eb6
|
[
"MIT"
] | null | null | null |
server/Kusa/views.py
|
meshellchoo/senior-design-project-kusa
|
829575259c31a620c895a0f2d5654ea099298eb6
|
[
"MIT"
] | 2
|
2022-03-24T07:17:27.000Z
|
2022-03-28T23:20:18.000Z
|
import json
import requests
from bson import ObjectId
from collections import OrderedDict  # keep this line for get_user_daily_hours
from datetime import datetime
from smtplib import SMTPException
from time import gmtime, strftime
from django.core.mail import send_mail, BadHeaderError  # BadHeaderError lives in django.core.mail
from django.http import HttpResponse
from django.http.response import JsonResponse
from django.shortcuts import render, redirect
from django.views.decorators.csrf import csrf_exempt
from rest_framework import viewsets
from rest_framework.serializers import Serializer
from admin import settings
from admin.settings import FRONTEND_URL
from Kusa.models import SteamUser
from Kusa.authentication import get_token, validate_token
from Kusa.data_collection import get_steam_user
JWT_SECRET_KEY = settings.JWT_SECRET_KEY
conf = settings.CONF
def get_user_daily_hours(request):
"""
will return an array of the user's daily hours
Parameters: request
Returns: returns a list of json obj -> [{"date" : date1, "hours" : num_hours1},{"date" : date2, "hours" : num_hours2}]
"""
response = validate_token(request)
if "steamid" in response:
user = get_steam_user(response["steamid"])
daily_hours = user['daily_hours']
list_of_json = [dict(day) for day in eval(daily_hours)]
return JsonResponse(list_of_json, safe=False)
else:
return response
def get_user_achievements(request):
"""
Returns: returns a list of json obj -> [{id" : 1, "progress" : 0, "date_achieved" : "N/A"},...,{id" : 10, "progress" : 20, "date_achieved" : "03/10/2022"}]
"""
response = validate_token(request)
if "steamid" in response:
user = get_steam_user(response["steamid"])
achievements = user['achievements']
list_of_json = [dict(a) for a in eval(achievements)]
return JsonResponse(list_of_json , safe=False)
else:
return response
| 36.680851
| 231
| 0.706787
|
4bd7a0c2448cc617b69365e5bcaa51dd7caf5ceb
| 478
|
py
|
Python
|
webapp/config.py
|
sebastien6/simple-project
|
2f662c74695a7f566172330dcb7140efd6c71723
|
[
"MIT"
] | null | null | null |
webapp/config.py
|
sebastien6/simple-project
|
2f662c74695a7f566172330dcb7140efd6c71723
|
[
"MIT"
] | null | null | null |
webapp/config.py
|
sebastien6/simple-project
|
2f662c74695a7f566172330dcb7140efd6c71723
|
[
"MIT"
] | null | null | null |
import os
from redis import StrictRedis
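# The configuration body is missing from this dump; a hypothetical sketch of a
# Flask config plus a shared Redis client (names and defaults are assumptions):
class Config:
    SECRET_KEY = os.environ.get('SECRET_KEY', 'dev')
    REDIS_URL = os.environ.get('REDIS_URL', 'redis://localhost:6379/0')

redis = StrictRedis.from_url(Config.REDIS_URL)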
| 31.866667
| 73
| 0.728033
|
4bd7fb5f5d36389c2c5a61d083613ef4ed377538
| 15,928
|
py
|
Python
|
moleculegen/estimation/model.py
|
sanjaradylov/moleculegen-ml
|
4acb77244909cf8cfe4fb75461d4bed9b77f29f1
|
[
"BSD-3-Clause"
] | 3
|
2021-11-18T11:41:21.000Z
|
2022-02-08T22:01:20.000Z
|
moleculegen/estimation/model.py
|
sanjaradylov/moleculegen-ml
|
4acb77244909cf8cfe4fb75461d4bed9b77f29f1
|
[
"BSD-3-Clause"
] | 20
|
2019-12-12T11:47:32.000Z
|
2021-06-02T07:55:18.000Z
|
moleculegen/estimation/model.py
|
sanjaradylov/moleculegen-ml
|
4acb77244909cf8cfe4fb75461d4bed9b77f29f1
|
[
"BSD-3-Clause"
] | 2
|
2019-12-23T08:17:01.000Z
|
2022-02-08T22:01:21.000Z
|
"""
Generative language models.
Classes
-------
SMILESEncoderDecoder
A generative recurrent neural network to encode-decode SMILES strings.
SMILESEncoderDecoderFineTuner
The fine-tuner of SMILESEncoderDecoder model.
"""
__all__ = (
'SMILESEncoderDecoder',
'SMILESEncoderDecoderFineTuner',
)
import json
import warnings
from typing import Optional, Union
import mxnet as mx
from mxnet import gluon
from . import _gluon_common
from .base import SMILESEncoderDecoderABC
from ..description.common import OneHotEncoder
| 36.28246
| 89
| 0.597878
|
4bdd1cbdd04848eac2016e69df46179145d19903
| 2,351
|
py
|
Python
|
projects/radish_paper/run_tpch_radish_sym_gbp.py
|
bmyerz/log2slqite
|
edb6bcba061132caa545b5e46c98b86547c68b48
|
[
"MIT"
] | null | null | null |
projects/radish_paper/run_tpch_radish_sym_gbp.py
|
bmyerz/log2slqite
|
edb6bcba061132caa545b5e46c98b86547c68b48
|
[
"MIT"
] | 1
|
2015-07-15T00:00:19.000Z
|
2015-07-15T00:06:33.000Z
|
projects/radish_paper/run_tpch_radish_sym_gbp.py
|
bmyerz/log2slqite
|
edb6bcba061132caa545b5e46c98b86547c68b48
|
[
"MIT"
] | null | null | null |
from grappa import GrappaExperiment, MPIRunGrappaExperiment
tpch_bigdatann = MPIRunGrappaExperiment({
'trial': range(1, 3 + 1),
#'qn': [x for x in range(8, 20 + 1) if x!=7 and x!=9 and x!=8 and x!=10 and x!=11], # exclude 7 that runs forever
#'qn': [x for x in range(1, 20 + 1) if x!=7 and x!=10 and x!=11 and x!=20], # exclude 7 that runs forever
'qn': [x for x in range(1, 20 + 1) if x!=7], # exclude 7 that runs forever
'exe': lambda qn: "grappa_tpc_q{0}_sym_gbp.exe".format(qn),
'sf': 10,
'ppn': 16,
'nnode': 16,
'np': lambda ppn, nnode: ppn*nnode,
'query': lambda qn: 'q{0}'.format(qn),
'vtag': 'v99-noalign',
'machine': 'bigdata',
'system': 'radish-sym-gbp-noalign'
},
{
'shared_pool_memory_fraction': 0.5
})
tpch_sampa = GrappaExperiment({
'trial': range(1, 3 + 1),
#'qn': [x for x in range(8, 20 + 1) if x!=7 and x!=9 and x!=8 and x!=10 and x!=11], # exclude 7 that runs forever
'qn': [x for x in range(1, 20)], #if x!=7 and x!=10 and x!=11 and x!=20], # exclude 7 that runs forever
'exe': lambda qn: "grappa_tpc_q{0}_sym_gbp.exe".format(qn),
'sf': 10,
'ppn': 12,
'nnode': 16,
'np': lambda ppn, nnode: ppn*nnode,
'query': lambda qn: 'q{0}'.format(qn),
'vtag': 'align-fix',
'machine': 'sampa',
'system': 'radish-sym-gbp'
},
{
'shared_pool_memory_fraction': 0.5
})
#tpch_bigdatann.run()
tpch_sampa.run()
| 54.674419
| 146
| 0.35772
|
4bdf2c801d395b3543ef88d753e14f32dd4a9b4a
| 362
|
py
|
Python
|
Activation Function/Softmax/softmax_cpp/test.py
|
kaka-lin/ML-Notes
|
047b88d59346b2ec719b1b3e2fcd605e1ccfaf91
|
[
"MIT"
] | null | null | null |
Activation Function/Softmax/softmax_cpp/test.py
|
kaka-lin/ML-Notes
|
047b88d59346b2ec719b1b3e2fcd605e1ccfaf91
|
[
"MIT"
] | null | null | null |
Activation Function/Softmax/softmax_cpp/test.py
|
kaka-lin/ML-Notes
|
047b88d59346b2ec719b1b3e2fcd605e1ccfaf91
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.special import softmax
np.set_printoptions(precision=6)
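# k_softmax is missing from this dump; a NumPy reimplementation assumed to
# mirror scipy.special.softmax over the last axis, so the two printed arrays
# below should match:
def k_softmax(x):
    e = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return e / np.sum(e, axis=-1, keepdims=True)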
if __name__ == "__main__":
x = np.array([[1, 4.2, 0.6, 1.23, 4.3, 1.2, 2.5]])
print("Input Array: ", x)
print("Softmax Array: ", k_softmax(x))
print("Softmax Array: ", softmax(x))
| 21.294118
| 54
| 0.618785
|
4be3297fddc6fb6fba4bd8355331638ba8b66d70
| 707
|
py
|
Python
|
models/zeros.py
|
DawyD/illumination-preserving-rotations
|
4fb69dc2526579a7677c27e75eae3a0b0000b5de
|
[
"MIT"
] | null | null | null |
models/zeros.py
|
DawyD/illumination-preserving-rotations
|
4fb69dc2526579a7677c27e75eae3a0b0000b5de
|
[
"MIT"
] | null | null | null |
models/zeros.py
|
DawyD/illumination-preserving-rotations
|
4fb69dc2526579a7677c27e75eae3a0b0000b5de
|
[
"MIT"
] | null | null | null |
import tensorflow as tf
from tensorflow.keras.layers import Conv2D, ReLU, SeparableConv2D, Input, SpatialDropout2D, MaxPool2D, Concatenate, Conv2DTranspose, BatchNormalization
from tensorflow.keras.regularizers import l1, l2
from models.net import Net
from layers.kerasGroupNorm import GroupNormalization
| 35.35
| 151
| 0.721358
|
4be3c4c8872c7fe3765bcf529106a1cedf839f7c
| 7,008
|
py
|
Python
|
util/post_db.py
|
ReadMoa/web-service
|
f47c6cce471d97104074d403ab9ec39a08276213
|
[
"MIT"
] | null | null | null |
util/post_db.py
|
ReadMoa/web-service
|
f47c6cce471d97104074d403ab9ec39a08276213
|
[
"MIT"
] | 21
|
2020-08-19T05:05:45.000Z
|
2021-02-07T23:21:17.000Z
|
util/post_db.py
|
ReadMoa/web-service
|
f47c6cce471d97104074d403ab9ec39a08276213
|
[
"MIT"
] | 1
|
2020-09-05T03:40:45.000Z
|
2020-09-05T03:40:45.000Z
|
"""PostDB class definition.
PostDB encapsulates interactions (lookup, scan, insert) with the posts table.
Typical usage example:
from post import Post
from post_db import PostDB
post_db = PostDB(mode = "dev")
post = Post(
post_url = "https://www.example.com/",
title = "Test",
main_image_url = "https://www.example.com/foo.png",
description = "Bar")
post_db.insert(post)
"""
import logging
import sqlalchemy
from util.database import Database
from util.post import Post
# Max post index to return in scan().
MAX_POSTS_TO_START = 1000
logger = logging.getLogger()
| 36.884211
| 85
| 0.558219
|
4be4aa437d26726d4e8976afdb8dcefd45f45a42
| 9,491
|
py
|
Python
|
plugins/leading_bot_mention.py
|
YukiSinonome/guided_bot
|
3aff47c4192e9dae4ad4d95c1553a4752ce043cc
|
[
"MIT"
] | null | null | null |
plugins/leading_bot_mention.py
|
YukiSinonome/guided_bot
|
3aff47c4192e9dae4ad4d95c1553a4752ce043cc
|
[
"MIT"
] | null | null | null |
plugins/leading_bot_mention.py
|
YukiSinonome/guided_bot
|
3aff47c4192e9dae4ad4d95c1553a4752ce043cc
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from slackbot.bot import respond_to
from slacker import Slacker
import slackbot_settings
# @respond_to("")
# @respond_to("")
# def cheer(message):
# message.reply("")
import MeCab
import random
import ChatBotScript
import SentenceGenerator
import datetime
import webbrowser
import time
import sys
try:
import urllib.request as urllib2
except ImportError:
import urllib2
import json
import requests
from requests.exceptions import Timeout
import os
#
#
# def greeting():
# todaydetail = datetime.datetime.today()
# if 4 <= todaydetail.hour <= 10:
# message.reply(ChatBotScript.greeting[0] + symbol[random.randrange(2)])
# elif 11 <= todaydetail.hour <= 17:
# message.reply(ChatBotScript.greeting[1] + symbol[random.randrange(2)])
# else:
# message.reply(ChatBotScript.greeting[2])
#
#--------------
#----------
#--------------
t_count = 0
f_count = 0
count_talk = 0
# count()
symbol = ["", "", ""]
main_talk()
| 38.425101
| 599
| 0.565272
|
4be54c7f61feb9501683fa638bd0374bbe09f529
| 13,922
|
py
|
Python
|
Lib/pagebot/elements/conditions.py
|
bghryct/PageBot
|
394150c0fd399f02faec28f4576046882f4d7d39
|
[
"MIT"
] | 68
|
2018-10-22T22:42:58.000Z
|
2022-03-19T11:07:31.000Z
|
Lib/pagebot/elements/conditions.py
|
TypeNetwork/PageBot
|
394150c0fd399f02faec28f4576046882f4d7d39
|
[
"MIT"
] | 97
|
2017-07-10T23:49:30.000Z
|
2018-10-03T08:17:55.000Z
|
Lib/pagebot/elements/conditions.py
|
TypeNetwork/PageBot
|
394150c0fd399f02faec28f4576046882f4d7d39
|
[
"MIT"
] | 9
|
2017-07-11T09:59:00.000Z
|
2018-09-12T11:59:30.000Z
|
#!/usr/bin/env python3
# -*- coding: UTF-8 -*-
# -----------------------------------------------------------------------------
#
# P A G E B O T
#
# Copyright (c) 2016+ Buro Petr van Blokland + Claudia Mens
# www.pagebot.io
# Licensed under MIT conditions
#
# Supporting DrawBot, www.drawbot.com
# Supporting Flat, xxyxyz.org/flat
# -----------------------------------------------------------------------------
#
# conditions.py
#
if __name__ == '__main__':
import doctest
import sys
sys.exit(doctest.testmod()[0])
| 35.789203
| 108
| 0.617943
|
4be5a05c40ee31ef9f187f13c41d25d878a65ca6
| 7,099
|
py
|
Python
|
Pix2Pix/Streamlit_Pix2Pix_Main.py
|
NB094/LHL_Final_Project
|
5df15d7bbf33d51840ea274629591cd938f58fce
|
[
"Apache-2.0"
] | 2
|
2021-10-04T05:53:29.000Z
|
2022-01-21T12:53:43.000Z
|
Pix2Pix/Streamlit_Pix2Pix_Main.py
|
NB094/LHL_Final_Project
|
5df15d7bbf33d51840ea274629591cd938f58fce
|
[
"Apache-2.0"
] | null | null | null |
Pix2Pix/Streamlit_Pix2Pix_Main.py
|
NB094/LHL_Final_Project
|
5df15d7bbf33d51840ea274629591cd938f58fce
|
[
"Apache-2.0"
] | 1
|
2021-10-04T05:53:32.000Z
|
2021-10-04T05:53:32.000Z
|
from PIL import Image
import streamlit as st
from streamlit_drawable_canvas import st_canvas
from Streamlit_Pix2Pix_Generator import Generator
import numpy as np
import urllib.request
from keras.preprocessing.image import load_img
from keras.models import load_model
import requests
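# Two helpers used below are missing from this dump; minimal sketches (the
# caching decorator and the model filenames are assumptions):
import random

def random_num():
    """Pick which of the 5 example sketches is shown next."""
    st.session_state.random_num = random.randint(1, 5)

@st.cache(allow_output_mutation=True)
def cache_all_models():
    """Load the three pix2pix generator models once and cache them."""
    humans = load_model('humans_model.h5')
    shoes = load_model('shoes_model.h5')
    handbags = load_model('handbags_model.h5')
    return humans, shoes, handbags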
# Page intro
st.title('Pix2Pix: See Your Sketches Brought to Life!')
st.text('')
st.markdown('Sketch out an object using the canvas below, and let your computer do the rest of the heavy lifting.')
st.text('')
st.text('')
# Links and FAQ section
st.sidebar.markdown("### [SRGANs Web Page](https://share.streamlit.io/nb094/easy-gans/main/SRGAN/Streamlit_SRGAN_Main.py)")
st.sidebar.markdown("### [NumGen Web Page](https://share.streamlit.io/nb094/easy-gans/main/NumGen/Streamlit_NumGen_Main.py)")
st.sidebar.text('')
expander = st.sidebar.expander("Pix2Pix Frequently-Asked Questions", expanded=True)
expander.write("**What type of machine learning is being used?** \n\n \
The model's architecture is based on solving image-to-image translation with a Conditional Generative Adversarial Network, or cGAN. \n\n   \n\n \
**How do GANs work?** \n\n \
There are two main components to GAN models: a *discriminator* and a *generator*. \n\n \
The purpose of the discriminator is to classify images presented to it as real or fake. \
The purpose of the generator is to create plausible images to fool the discriminator. \n\n \
After many cycles of training, the skill of the generator improves enough to produce some impressive results! \n\n   \n\n \
**What is the difference between a GAN and a cGAN?** \n\n \
The basic idea behind cGANs is the same. The primary difference is way the model improves after each cycle, which is based on \
a *loss* calculation. For cGANs, this calculation optimizes the structure or joint configuration of the output. \n\n   \n\n \
**What are the possible applications of cGANs?** \n\n \
cGANs have been used in self-driving cars, creating maps from satellite images, colorizing black and white photos, and much more. \n\n   \n\n \
**Where can I read more about cGANs?** \n\n \
For more information on cGANs, check out [this paper.](https://arxiv.org/abs/1611.07004) \n\n   \n\n \
**Who developed this web page?** \n\n \
This web page and the underlying models were developed by Niklas Bergen with the help of some additional resources. \
Check out the [GitHub repo](https://github.com/NB094/Easy-GANs) for more information.")
##### CODE FOR Pix2Pix #####
# Define page layout
left_column, right_column = st.columns([2,1])
# Create selection box and logic for various sketch subjects.
subject_selection = left_column.selectbox(label = 'Select what you wish to draw...', options = ['Human', 'Shoe', 'Handbag'], index = 0)
if subject_selection == 'Human':
stroke_color = '#F44F36'
background_color='#000000'
else:
stroke_color = '#F44F36'
background_color='#FFFFFF'
# Initialize a random number in the session state. Used to randomize examples shown.
if 'random_num' not in st.session_state:
st.session_state.random_num = 1
# Change the random example number whenever the radio buttons are changed.
# Retrieve a randomly-selected example image
urllib.request.urlretrieve(f'https://github.com/NB094/Easy-GANs/raw/main/Pix2Pix/example_images_streamlit/example_{str.lower(subject_selection)}{st.session_state.random_num}.jpg?raw=true', \
'example_img.jpg')
# Create more options menus
canvas_mode = st.radio(label = 'Select canvas mode...', options = ('Draw on a blank canvas', 'View an example sketch', 'Try tracing an example sketch'), \
index = 1, help='Example sketches are chosen randomly out of 5 options.', on_change=random_num)
drawing_mode = right_column.selectbox(label = "Drawing tool:", options = ("freedraw", "line", "rect", "circle", "polygon", "transform"), index = 0)
# Create the drawing canvas
if canvas_mode == 'View an example sketch':
st.image('example_img.jpg')
else:
canvas_result = st_canvas(
fill_color="rgba(255, 255, 255, 0.0)", # Fill colors from shape objects have full transparency
stroke_width=1,
stroke_color=stroke_color,
background_color=background_color,
background_image=Image.open('example_img.jpg') if canvas_mode == 'Try tracing an example sketch' else None,
height=256,
width=256,
drawing_mode=drawing_mode,
key="canvas")
##### SKETCH PROCESSING #####
if canvas_mode == 'View an example sketch':
drawn_image = load_img('example_img.jpg')
else:
# Store canvas sketch data into a variable
drawn_image = canvas_result.image_data
# Insert try/except loop to prevent website from temporarily throwing error when unchecking the box.
try:
# Convert sketch data into parseable numpy array
drawn_image = np.array(Image.fromarray((drawn_image * 255).astype(np.uint8)).resize((256, 256)).convert('RGB'))
drawn_image = (drawn_image * 255).astype(np.uint8)
# If needed, convert black background to white before passing image to generator.
if subject_selection != 'Human':
drawn_image[drawn_image == 0] = 255
except:
pass
# Download load model files. Cache due to large file sizes
humans_model, shoes_model, handbags_model = cache_all_models()
if subject_selection=='Human':
model = humans_model
elif subject_selection=='Shoe':
model = shoes_model
elif subject_selection=='Handbag':
model = handbags_model
# Insert try/except loop to prevent website from temporarily throwing error when unchecking the box.
try:
# Pass numpy array into generator, and predict
gen = Generator(drawn_image, subject_selection)
gen_image = gen.generate_image(model)
# Display prediction
st.image(gen_image)
except:
pass
| 41.273256
| 190
| 0.720947
|
4be8b0689a8d30b24d0eb351d73f642c1be6c5a9
| 4,584
|
py
|
Python
|
rbs/rbs.py
|
dexbiobot/SML-Cogs
|
e8d3d12e5bf1d760196006f86a6c16ed95e3c964
|
[
"MIT"
] | 17
|
2017-05-30T13:21:18.000Z
|
2022-03-27T13:08:17.000Z
|
rbs/rbs.py
|
dexbiobot/SML-Cogs
|
e8d3d12e5bf1d760196006f86a6c16ed95e3c964
|
[
"MIT"
] | 16
|
2017-06-11T12:55:06.000Z
|
2019-02-20T21:00:59.000Z
|
rbs/rbs.py
|
dexbiobot/SML-Cogs
|
e8d3d12e5bf1d760196006f86a6c16ed95e3c964
|
[
"MIT"
] | 17
|
2017-05-03T16:09:46.000Z
|
2020-05-13T21:19:37.000Z
|
"""
The MIT License (MIT)
Copyright (c) 2017 SML
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
import os
from __main__ import send_cmd_help
from cogs.utils import checks
from cogs.utils.dataIO import dataIO
from discord.ext import commands
import discord
LOOP_INTERVAL = 60
SERVER_DEFAULTS = {
'autorole': {
"role_name": "Guest",
"role_id": None,
"timer": 86400
}
}
PATH = os.path.join('data', 'rbs')
JSON = os.path.join(PATH, 'settings.json')
def check_folder():
"""Check folder."""
if not os.path.exists(PATH):
os.makedirs(PATH)
def check_file():
"""Check files."""
if not dataIO.is_valid_json(JSON):
dataIO.save_json(JSON, SERVER_DEFAULTS)
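# The RBS cog class is missing from this dump; a minimal hypothetical stub so
# that setup() below resolves (the real commands and listeners are omitted):
class RBS:
    """Role-based settings cog (sketch)."""

    def __init__(self, bot):
        self.bot = bot
        self.settings = dataIO.load_json(JSON)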
def setup(bot):
"""Setup bot."""
check_folder()
check_file()
n = RBS(bot)
bot.add_cog(n)
| 29.960784
| 75
| 0.648778
|
4beabadec3de979135423c3abb7be1e6a84c41ad
| 2,845
|
py
|
Python
|
tests/nutsflow/test_iterfunction.py
|
maet3608/nuts-flow
|
0d7b8eefc80cb45c079b155ff5062d1d93ff2caf
|
[
"Apache-2.0"
] | 21
|
2017-05-01T10:15:41.000Z
|
2022-01-25T07:02:44.000Z
|
tests/nutsflow/test_iterfunction.py
|
maet3608/nuts-flow
|
0d7b8eefc80cb45c079b155ff5062d1d93ff2caf
|
[
"Apache-2.0"
] | 7
|
2017-02-09T03:36:37.000Z
|
2017-08-22T11:23:03.000Z
|
tests/nutsflow/test_iterfunction.py
|
maet3608/nuts-flow
|
0d7b8eefc80cb45c079b155ff5062d1d93ff2caf
|
[
"Apache-2.0"
] | 5
|
2017-05-30T01:56:31.000Z
|
2020-10-05T08:21:43.000Z
|
"""
.. module:: test_iterfunction
:synopsis: Unit tests for iterfunction module
"""
import time
import nutsflow.iterfunction as itf
from six.moves import range
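# The test functions are missing from this dump; one hypothetical example
# exercising itf.take (assumed to yield the first n elements):
def test_take():
    assert list(itf.take(range(10), 3)) == [0, 1, 2]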
| 23.319672
| 63
| 0.555712
|
4beb4afba8d4e82f6ec0587a4a66ce29bdfa1be9
| 6,591
|
py
|
Python
|
microcosm_flask/tests/conventions/test_upload.py
|
Sinon/microcosm-flask
|
c1404ebc94459c8156b04f5e04490a330117524c
|
[
"Apache-2.0"
] | 11
|
2017-01-30T21:53:20.000Z
|
2020-05-29T22:39:19.000Z
|
microcosm_flask/tests/conventions/test_upload.py
|
Sinon/microcosm-flask
|
c1404ebc94459c8156b04f5e04490a330117524c
|
[
"Apache-2.0"
] | 139
|
2016-03-09T19:09:59.000Z
|
2021-09-03T17:14:00.000Z
|
microcosm_flask/tests/conventions/test_upload.py
|
Sinon/microcosm-flask
|
c1404ebc94459c8156b04f5e04490a330117524c
|
[
"Apache-2.0"
] | 10
|
2016-12-19T22:39:42.000Z
|
2021-03-09T19:23:15.000Z
|
"""
Upload convention tests.
"""
from io import BytesIO
from json import loads
from uuid import uuid4
from hamcrest import (
all_of,
anything,
assert_that,
contains,
equal_to,
has_entries,
has_entry,
has_item,
has_key,
is_,
is_not,
)
from marshmallow import Schema, fields
from microcosm.api import create_object_graph
from microcosm_flask.conventions.base import EndpointDefinition
from microcosm_flask.conventions.swagger import configure_swagger
from microcosm_flask.conventions.upload import configure_upload
from microcosm_flask.namespaces import Namespace
from microcosm_flask.operations import Operation
from microcosm_flask.swagger.definitions import build_path
from microcosm_flask.tests.conventions.fixtures import Person
| 29.823529
| 103
| 0.576847
|
4becdb4fe42c069830f83a3d86842e13caf2edcf
| 135
|
py
|
Python
|
molecool/io/__init__.py
|
nitrosx/molecool
|
58ce78aceb707ff92b26bf6c90b3703714c09786
|
[
"BSD-3-Clause"
] | null | null | null |
molecool/io/__init__.py
|
nitrosx/molecool
|
58ce78aceb707ff92b26bf6c90b3703714c09786
|
[
"BSD-3-Clause"
] | null | null | null |
molecool/io/__init__.py
|
nitrosx/molecool
|
58ce78aceb707ff92b26bf6c90b3703714c09786
|
[
"BSD-3-Clause"
] | null | null | null |
'''
molecool.io package
configure access to subpackage functions
'''
from .pdb import open_pdb
from .xyz import open_xyz, write_xyz
| 13.5
| 40
| 0.77037
|
4bf119d7edb9acf18b1f1e428e435fcd728fc1f4
| 866
|
py
|
Python
|
tests/check-result.py
|
getupcloud/tiny-controllers
|
e896b2015a9e29eab421225cb5a5f0d488df9e37
|
[
"Apache-2.0"
] | null | null | null |
tests/check-result.py
|
getupcloud/tiny-controllers
|
e896b2015a9e29eab421225cb5a5f0d488df9e37
|
[
"Apache-2.0"
] | null | null | null |
tests/check-result.py
|
getupcloud/tiny-controllers
|
e896b2015a9e29eab421225cb5a5f0d488df9e37
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
import sys
import json
from flatten_dict import flatten as _flatten
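# A local flatten() helper is used below but missing from this dump; a minimal
# sketch (the reducer choice is an assumption):
def flatten(d):
    return _flatten(d, reducer='dot')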
try:
data = json.load(sys.stdin)['object']
except Exception as ex:
print("Missing or invalid test data:", ex)
sys.exit(1)
try:
results = json.load(open(sys.argv[1], "r"))['results']
except Exception as ex:
print("Missing or invalid test results:", ex)
sys.exit(1)
data = flatten(data)
ok = True
for r in [ flatten(i) for i in results ]:
for k, v in r.items():
if k not in data:
print(f'{k} not found in {data}')
ok = False
elif v != data[k]:
print(f'{k}={data[k]} do not matches {k}={v}')
ok = False
else:
print(f"Match: {r}")
sys.exit(0 if ok else 1)
| 23.405405
| 88
| 0.590069
|
4bf224e8c8f4fa354c35d1431a9957707b55eb9b
| 331
|
py
|
Python
|
thriftpy2_httpx_client/__init__.py
|
hans00/ThriftPy2-HTTPX-Client
|
e94944218915bcec6b2e0c00200f5d5e6f823053
|
[
"MIT"
] | null | null | null |
thriftpy2_httpx_client/__init__.py
|
hans00/ThriftPy2-HTTPX-Client
|
e94944218915bcec6b2e0c00200f5d5e6f823053
|
[
"MIT"
] | 5
|
2021-07-13T13:56:17.000Z
|
2022-03-02T02:43:46.000Z
|
thriftpy2_httpx_client/__init__.py
|
hans00/ThriftPy2-HTTPX-Client
|
e94944218915bcec6b2e0c00200f5d5e6f823053
|
[
"MIT"
] | 2
|
2021-07-13T06:08:59.000Z
|
2022-03-16T22:15:57.000Z
|
__all__ = [
'make_aio_client',
'make_sync_client',
'TAsyncHTTPXClient',
'THTTPXClient',
]
from .aio import TAsyncHTTPXClient, make_client as make_aio_client
from .sync import THTTPXClient, make_client as make_sync_client
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
| 23.642857
| 66
| 0.770393
|
4bf41bde14de2173375d4d1e4381757de1699557
| 3,553
|
py
|
Python
|
kalc/model/kinds/Node.py
|
KellyGriffin/kalc
|
9b78c4177ed9ffccbf1ecfbf9a7946286cd7c583
|
[
"Apache-2.0"
] | null | null | null |
kalc/model/kinds/Node.py
|
KellyGriffin/kalc
|
9b78c4177ed9ffccbf1ecfbf9a7946286cd7c583
|
[
"Apache-2.0"
] | null | null | null |
kalc/model/kinds/Node.py
|
KellyGriffin/kalc
|
9b78c4177ed9ffccbf1ecfbf9a7946286cd7c583
|
[
"Apache-2.0"
] | null | null | null |
import sys
import random
from kalc.model.system.base import ModularKind
from typing import Set
from kalc.model.system.primitives import Label, StatusNode
from kalc.model.system.base import HasLabel
from kalc.misc.util import cpuConvertToAbstractProblem, memConvertToAbstractProblem
from kalc.misc.const import STATUS_NODE
from kalc.model.system.globals import GlobalVar
# def __repr__(self):
# return 'Nodename : ' + str(self._get_value())
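# The Node class definition is missing from this dump (the commented __repr__
# above presumably belonged to it). A minimal stand-in consistent with the
# attributes assigned below; the real class subclasses ModularKind and uses
# typed attributes, so everything here is a sketch:
class Node:
    def __init__(self, name):
        self.metadata_name = name
        self.isNull = False
        self.searchable = True
        self.status = None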
Node.NODE_NULL = Node("NULL")
Node.NODE_NULL.isNull = True
Node.NODE_NULL.status = STATUS_NODE["Inactive"]
Node.NODE_NULL.metadata_name = "Null-Node"
Node.NODE_NULL.searchable = False
| 33.838095
| 83
| 0.690684
|
4bf46aef0cec7975f957c42ac0e9212705e2eac4
| 6,154
|
py
|
Python
|
Betsy/Betsy/modules/summarize_fastqc_results.py
|
jefftc/changlab
|
11da8c415afefcba0b0216238387c75aeb3a56ac
|
[
"MIT"
] | 9
|
2017-01-13T02:38:41.000Z
|
2021-04-08T00:44:39.000Z
|
Betsy/Betsy/modules/summarize_fastqc_results.py
|
jefftc/changlab
|
11da8c415afefcba0b0216238387c75aeb3a56ac
|
[
"MIT"
] | null | null | null |
Betsy/Betsy/modules/summarize_fastqc_results.py
|
jefftc/changlab
|
11da8c415afefcba0b0216238387c75aeb3a56ac
|
[
"MIT"
] | 4
|
2017-01-05T16:25:25.000Z
|
2019-12-12T20:07:38.000Z
|
from Module import AbstractModule
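# The FastQCResults container is missing from this dump; a hypothetical
# equivalent matching how it is constructed in read_fastqc_results below:
from collections import namedtuple
FastQCResults = namedtuple(
    "FastQCResults",
    ["sample", "total_sequences", "filtered_sequences", "sequence_length",
     "percent_gc", "statistics", "statistics_order"])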
def read_fastqc_results(fastqc_path):
import os
from genomicode import filelib
summary_file = os.path.join(fastqc_path, "summary.txt")
data_file = os.path.join(fastqc_path, "fastqc_data.txt")
filelib.assert_exists_nz(summary_file)
filelib.assert_exists_nz(data_file)
summary = read_fastqc_summary(summary_file)
data = read_fastqc_data(data_file)
# Figure out the sample names from the filenames.
samples = sorted([x[-1] for x in summary])
assert samples[0] == samples[-1], "%s %s" % (samples[0], samples[-1])
sample = samples[0]
if sample.lower().endswith(".gz"):
sample = sample[:-3]
if sample.lower().endswith(".fq"):
sample = sample[:-3]
if sample.lower().endswith(".fastq"):
sample = sample[:-6]
# Make the statistics dictionary.
statistics = {}
statistics_order = []
for x in summary:
status, statistic, x = x
assert statistic not in statistics
statistics[statistic] = status
statistics_order.append(statistic)
x = FastQCResults(
sample, data["total_sequences"], data["filtered_sequences"],
data["sequence_length"], data["percent_gc"],
statistics, statistics_order)
return x
def read_fastqc_summary(filename):
# Return list of (<status>, <statistic>, <filename>)
import os
from genomicode import filelib
assert os.path.exists(filename)
data = []
for x in filelib.read_cols(filename):
assert len(x) == 3
status, statistic, filename = x
data.append((status, statistic, filename))
return data
def read_fastqc_data(filename):
# Return a dictionary of:
# total_sequences <int>
# filtered_sequences <int>
# sequence_length <str> "205", "15-205"
# percent_gc <float>
from genomicode import parselib
data = {}
for line in open(filename):
# Line seems to end with:
# 'Total Sequences\t1056547\t\n'
# Not enough just to strip \r\n.
#cols = line.rstrip("\r\n").split("\t")
cols = line.rstrip().split("\t")
if line.startswith("Total Sequences"):
assert len(cols) == 2, repr(line)
data["total_sequences"] = int(cols[1])
elif line.startswith("Filtered Sequences"):
assert len(cols) == 2
data["filtered_sequences"] = int(cols[1])
elif line.startswith("Sequences flagged as poor quality"):
# Seems to be alternative to "Filtered Sequences".
assert len(cols) == 2
data["filtered_sequences"] = int(cols[1])
elif line.startswith("Sequence length"):
assert len(cols) == 2
data["sequence_length"] = cols[1]
elif line.startswith("%GC"):
assert len(cols) == 2
data["percent_gc"] = float(cols[1])/100
expected = [
"total_sequences", "filtered_sequences", "sequence_length",
"percent_gc"]
x = [x for x in expected if x not in data]
assert not x, "Missing (%s) from fastqc_data: %s" % (
parselib.pretty_list(x), filename)
return data
| 34.573034
| 76
| 0.614722
|
4bf674c2dd9e1aaac9f80a20682c800896278be3
| 792
|
py
|
Python
|
propnet/models/__init__.py
|
nile0316/propnet
|
3e1f1476c70a878c6eb43587c328d108b0e2a410
|
[
"BSD-3-Clause-LBNL"
] | 57
|
2018-01-09T14:56:20.000Z
|
2022-02-24T11:44:42.000Z
|
propnet/models/__init__.py
|
ruriboshi/propnet
|
770703fb4fc344f785f89c02f26b31ea5733d2bd
|
[
"BSD-3-Clause-LBNL"
] | 214
|
2017-09-26T23:31:09.000Z
|
2022-03-14T04:50:58.000Z
|
propnet/models/__init__.py
|
nile0316/propnet
|
3e1f1476c70a878c6eb43587c328d108b0e2a410
|
[
"BSD-3-Clause-LBNL"
] | 26
|
2017-10-29T21:34:22.000Z
|
2022-01-12T05:59:12.000Z
|
# noinspection PyUnresolvedReferences
import propnet.symbols
from propnet.models import serialized, python, composite
from propnet.core.registry import Registry
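# _update_globals() is missing from this dump; a hypothetical sketch that
# re-exports every registered model as a module-level attribute:
def _update_globals():
    globals().update(Registry("models"))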
# This is just to enable importing the model directly from this module for example code generation
_update_globals()
| 33
| 98
| 0.792929
|
4bf6a8cffebce41ae5095ad681541b2d2a477027
| 1,369
|
py
|
Python
|
python/clean_dataset.py
|
catarinaacsilva/user_mapping_twitter
|
7350ed35b465a7db6747c4035e7b119bff23131d
|
[
"MIT"
] | null | null | null |
python/clean_dataset.py
|
catarinaacsilva/user_mapping_twitter
|
7350ed35b465a7db6747c4035e7b119bff23131d
|
[
"MIT"
] | null | null | null |
python/clean_dataset.py
|
catarinaacsilva/user_mapping_twitter
|
7350ed35b465a7db6747c4035e7b119bff23131d
|
[
"MIT"
] | null | null | null |
import csv
import re
regex = re.compile('[^a-zA-Z]')
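# clean_dataset() is missing from this dump; a hypothetical sketch that strips
# non-alphabetic characters from each field of <user>.csv (file layout and
# output naming are assumptions):
def clean_dataset(user):
    with open(f'{user}.csv') as src, \
            open(f'{user}_clean.csv', 'w', newline='') as dst:
        writer = csv.writer(dst)
        for row in csv.reader(src):
            writer.writerow([regex.sub(' ', col) for col in row])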
if __name__ == '__main__':
for user in ['katyperry', 'TheEllenShow', 'YouTube', 'realDonaldTrump', 'BillGates',
'nytimes', 'CNN', 'espn', 'NASA', 'aliciakeys']:
clean_dataset(user)
| 30.422222
| 89
| 0.519357
|
4bf72918258e1f5f04c1079f6fc0ade0637b2962
| 4,690
|
py
|
Python
|
kpext/kp_crfsuite.py
|
snovd/sdavid-tests
|
c5f7e60f83ecb2d4cbaec18fff84861907f59c27
|
[
"MIT"
] | null | null | null |
kpext/kp_crfsuite.py
|
snovd/sdavid-tests
|
c5f7e60f83ecb2d4cbaec18fff84861907f59c27
|
[
"MIT"
] | null | null | null |
kpext/kp_crfsuite.py
|
snovd/sdavid-tests
|
c5f7e60f83ecb2d4cbaec18fff84861907f59c27
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
import sys
import os
from nltk.tokenize import TreebankWordTokenizer as Tokenizer
from nltk.tag.perceptron import PerceptronTagger
import operator
from itertools import chain
import nltk
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.preprocessing import LabelBinarizer
import sklearn
import pycrfsuite
import re
import kpcommon as kpc
import mdb_common_lib as mdbcl
if __name__ == "__main__":
try:
debug = True if sys.argv[-1] == "debug" else False
debug_tests = 3
file_count = 0
dir_corpus = sys.argv[1]
dir_output = sys.argv[2]
try:
training_crfsuite = sys.argv[3]
except:
training_crfsuite = 'keyphrase.crfsuite'
tokenizer = Tokenizer()
#pos
tagger = PerceptronTagger()
extra_features = True
qr = mdbcl.QueryResources()
crftagger = pycrfsuite.Tagger()
crftagger.open(training_crfsuite)
#test_sents = []
for (dirname, _, filenames) in os.walk(dir_corpus):
for f in filenames:
ext = f[-4:]
if ext == '.ann':
file_count += 1
if debug and file_count > debug_tests:
break
file_text = os.path.join(dirname, f[:-4] + ".txt")
text_file = open(file_text, "r")
file_kpe = os.path.join(dir_output, f[:-4] + ".ann")
kpe_file = open(file_kpe, "w")
raw_text = unicode(text_file.read(), encoding="utf-8")
tokens = tokenizer.tokenize(raw_text)
tagged_text = [t + ("None",) for t in tagger.tag(tokens)]
text_file.close()
#test_sents.append(tagged_text)
if extra_features:
X_test = kpc.sent2features_extra(tagged_text, qr)
else:
X_test = kpc.sent2features(tagged_text)
is_not_kp = "None"
tmp_label = is_not_kp
new_kp = []
kp_list = []
for kp in zip(crftagger.tag(X_test), [tt[0] for tt in tagged_text]):
if debug and False:
print >> sys.stderr, " ---- ", kp
if kp[0][0:2] == "B-":
if new_kp and tmp_label != is_not_kp:
kp_list.append((tmp_label, " ".join(new_kp)))
tmp_label = kp[0][2:]
new_kp = []
new_kp.append(kp[1])
if new_kp:
kp_list.append((tmp_label, " ".join(new_kp)))
if debug and False:
print >> sys.stderr, raw_text
kp_index = 0
for kp in kp_list:
print kp
kp_iter_counter = 0
for m in re.finditer("\W?(" + re.escape(kp[1]) + ")\W", raw_text):
kp_iter_counter += 1
kp_index += 1
#print kp_iter_counter, m.groups()
start = m.start(1)
end = m.end(1)
term_string = "T" + str(kp_index) + "\t" + kp[0] + " " + str(start) + " " + str(end) + "\t" + raw_text[start:end]
term_string = term_string.encode("utf-8")
print >> kpe_file, term_string
#tmp_kps_candidates.append((start, end, m.span(1), kp, raw_text[start:end]))
if debug and kp_iter_counter == 0:
"""
There is an error here and in the projections.
The match is made on tokens: when a semicolon, comma or a
parenthesis is involved, an extra space is introduced.
"""
#print >> sys.stderr, raw_text
print >> sys.stderr, kp_iter_counter, ": ", kp[1].encode("utf-8")
kpe_file.close()
except:
print >> sys.stderr
print >> sys.stderr, "usage: python", sys.argv[0], "<corpus_dir_path> <output_dir_path>"
print >> sys.stderr, "example:"
print >> sys.stderr, " python", sys.argv[0], "some/path/to/corpus/ some/path/to/output/"
print >> sys.stderr, "Error: ", sys.exc_info()
| 41.875
| 141
| 0.465885
|
4bf9bd37e91a5feca68c63420808cdbf5f96022e
| 6,736
|
py
|
Python
|
models/analysis_transform.py
|
LiuLei95/PyTorch-Learned-Image-Compression-with-GMM-and-Attention
|
484aced5bea25fbc1ba1380f4ab81bda9b099c1e
|
[
"Apache-2.0"
] | 27
|
2021-07-28T01:33:02.000Z
|
2022-03-18T04:01:02.000Z
|
models/analysis_transform.py
|
LiuLei95/PyTorch-Learned-Image-Compression-with-GMM-and-Attention
|
484aced5bea25fbc1ba1380f4ab81bda9b099c1e
|
[
"Apache-2.0"
] | 5
|
2021-11-13T05:58:51.000Z
|
2022-02-13T09:07:44.000Z
|
models/analysis_transform.py
|
LiuLei95/PyTorch-Learned-Image-Compression-with-GMM-and-Attention
|
484aced5bea25fbc1ba1380f4ab81bda9b099c1e
|
[
"Apache-2.0"
] | 1
|
2021-08-21T13:14:28.000Z
|
2021-08-21T13:14:28.000Z
|
#!/Library/Frameworks/Python.framework/Versions/3.5/bin/python3.5
import math
import torch.nn as nn
import torch
from .GDN import GDN
from .attention import Attention
# class Analysis_transform(nn.Module):
# def __init__(self, num_filters=128):
# super(Analysis_transform, self).__init__()
# self.conv_shortcut0 = nn.Conv2d(3, num_filters, 1, stride=2, padding=0)
# self.conv0 = nn.Conv2d(3, num_filters, 3, stride=2, padding=1)
# self.conv1 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
# self.leaky_relu1 = nn.LeakyReLU()
# self.conv2 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
# self.leaky_relu2 = nn.LeakyReLU()
# self.conv_shortcut = nn.Conv2d(num_filters, num_filters, 1, stride=2, padding=0)
# self.conv3 = nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1)
# self.leaky_relu3 = nn.LeakyReLU()
# self.conv4 = nn.Conv2d(num_filters, num_filters, 3, stride=1, padding=1)
# self.gdn = GDN(num_filters)
# # self.leaky_relu4 = nn.LeakyReLU()
# self.conv5 = nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1, bias=False)
# self.attention1 = Attention(num_filters)
# self.attention2 = Attention(num_filters)
#
#
# def forward(self, x):
# for i in range(4):
# if i > 0:
# x2 = self.conv1(x)
# x2 = self.leaky_relu1(x2)
# # print("a 3x3 1")
# # print("%d"%(i), x2.shape)
# x2 = self.conv2(x2)
# x2 = self.leaky_relu2(x2)
# # print("b 3x3 1")
# # print("%d"%(i), x2.shape)
# x = x + x2
# # print("resblock result: ", x.shape)
#
#
# if i == 0:
# shortcut_tensor = self.conv_shortcut0(x)
# x = self.conv0(x)
# x = self.leaky_relu3(x)
# # print("c 3x3 2")
# # print("%d"%(i), x.shape)
# x = self.conv4(x)
# # x = self.leaky_relu4(x)
# x = self.gdn(x)
# # print("d 3x3 1")
# # print("%d"%(i), x.shape)
# x = x + shortcut_tensor
# # print("resblock result: ", x.shape)
# elif i < 3:
# shortcut_tensor = self.conv_shortcut(x)
# x = self.conv3(x)
# x = self.leaky_relu3(x)
# # print("c 3x3 2")
# # print("%d"%(i), x.shape)
# x = self.conv4(x)
# # x = self.leaky_relu4(x)
# x = self.gdn(x)
# # print("d 3x3 1")
# # print("%d"%(i), x.shape)
# x = x + shortcut_tensor
# # print("resblock result: ", x.shape)
# if i == 1:
# # Attenation
# x = self.attention1(x)
#
# else:
# x = self.conv5(x)
# x = self.attention2(x)
#
# return x
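# The active Analysis_transform definition is missing from this dump; a minimal
# stand-in matching the commented draft above (four stride-2 stages with GDN;
# the exact layer layout is an assumption):
class Analysis_transform(nn.Module):
    def __init__(self, num_filters=128):
        super(Analysis_transform, self).__init__()
        self.conv0 = nn.Conv2d(3, num_filters, 3, stride=2, padding=1)
        self.gdn0 = GDN(num_filters)
        self.conv1 = nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1)
        self.gdn1 = GDN(num_filters)
        self.conv2 = nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1)
        self.gdn2 = GDN(num_filters)
        self.conv3 = nn.Conv2d(num_filters, num_filters, 3, stride=2, padding=1)

    def forward(self, x):
        x = self.gdn0(self.conv0(x))
        x = self.gdn1(self.conv1(x))
        x = self.gdn2(self.conv2(x))
        return self.conv3(x)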
if __name__ == "__main__":
analysis_transform = Analysis_transform()
input_image = torch.zeros([1,3,256,256])
feature = analysis_transform(input_image)
print(feature.shape)
| 38.936416
| 96
| 0.55478
|
4bf9cae86ed3b64532d63a132ed50c966d6bd0b4
| 826
|
py
|
Python
|
app/models.py
|
Katze2/Flask-template
|
99925f6bfbaf92ace9b0fd7c792b989ed90a7e00
|
[
"MIT"
] | null | null | null |
app/models.py
|
Katze2/Flask-template
|
99925f6bfbaf92ace9b0fd7c792b989ed90a7e00
|
[
"MIT"
] | null | null | null |
app/models.py
|
Katze2/Flask-template
|
99925f6bfbaf92ace9b0fd7c792b989ed90a7e00
|
[
"MIT"
] | null | null | null |
# -*- encoding: utf-8 -*-
from app import db
| 24.294118
| 75
| 0.634383
|
4bfb4d961bec58ff15fe5b25777f51138ea3c5dc
| 1,516
|
py
|
Python
|
tests/dataset_balancer_test.py
|
MarinkoBa/Hate-Speech-Classification
|
72f6bbe93b823daefa138df4f81a3a4df5b34c4c
|
[
"MIT"
] | null | null | null |
tests/dataset_balancer_test.py
|
MarinkoBa/Hate-Speech-Classification
|
72f6bbe93b823daefa138df4f81a3a4df5b34c4c
|
[
"MIT"
] | null | null | null |
tests/dataset_balancer_test.py
|
MarinkoBa/Hate-Speech-Classification
|
72f6bbe93b823daefa138df4f81a3a4df5b34c4c
|
[
"MIT"
] | 1
|
2020-12-14T13:56:50.000Z
|
2020-12-14T13:56:50.000Z
|
# -*- coding: utf-8 -*-
from src.utils.get_data import load_data
from src.utils.get_data import get_datasets
from src.utils.get_data import concatenate_datasets
from src.utils.dataset_balancer import balance_data
import os
import pandas as pd
import unittest
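# The TestCase body is missing from this dump; a hypothetical example (the
# balance_data signature and the column names are assumptions):
class DatasetBalancerTest(unittest.TestCase):

    def test_balanced_classes(self):
        df = pd.DataFrame({'text': list('abcdef'),
                           'label': [0, 0, 0, 0, 1, 1]})
        balanced = balance_data(df)
        counts = balanced['label'].value_counts()
        self.assertEqual(counts.min(), counts.max())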
if __name__ == "__main__":
unittest.main()
| 35.255814
| 110
| 0.550792
|
4bfb89534390da200300df58f33c846fbb2cba39
| 12,695
|
py
|
Python
|
gptorch/models/sparse_gpr.py
|
cics-nd/gptorch
|
80c62a227c466bb7fa29e11263e94c41f96ff93f
|
[
"MIT"
] | 28
|
2018-11-05T03:01:18.000Z
|
2021-04-02T18:11:05.000Z
|
gptorch/models/sparse_gpr.py
|
cics-nd/gptorch
|
80c62a227c466bb7fa29e11263e94c41f96ff93f
|
[
"MIT"
] | 7
|
2019-06-04T21:43:40.000Z
|
2021-11-04T04:19:26.000Z
|
gptorch/models/sparse_gpr.py
|
cics-nd/gptorch
|
80c62a227c466bb7fa29e11263e94c41f96ff93f
|
[
"MIT"
] | 8
|
2019-04-03T12:28:05.000Z
|
2021-12-23T10:15:34.000Z
|
#
# Yinhao Zhu, May 01, 2017
#
"""
Sparse GP regression, including variational GP and others.
"""
from __future__ import absolute_import
import torch
import numpy as np
from torch.utils.data import TensorDataset, DataLoader
from torch.distributions.transforms import LowerCholeskyTransform
from ..model import Param
from ..functions import cholesky, trtrs
from ..mean_functions import Zero
from ..likelihoods import Gaussian
from ..util import TensorType, torch_dtype, as_tensor, kmeans_centers
from .gpr import GPR
from .base import GPModel
def minibatch(loss_func):
    """
    Decorator to use minibatching for a loss function (e.g. SVGP)
    """
    # The wrapped() definition is missing here; this minimal sketch simply
    # forwards the call (the original presumably subsamples a minibatch of
    # the data before evaluating loss_func).
    from functools import wraps

    @wraps(loss_func)
    def wrapped(self, *args, **kwargs):
        return loss_func(self, *args, **kwargs)

    return wrapped
| 33.232984
| 87
| 0.59228
|
4bfe8f82bf9964afbee833e2a996e71d61b97873
| 1,638
|
py
|
Python
|
Code/list.py
|
sunjinshuai/Python
|
b4d76bc20e9d740108c98cb8d023ca5da3e6c070
|
[
"MIT"
] | null | null | null |
Code/list.py
|
sunjinshuai/Python
|
b4d76bc20e9d740108c98cb8d023ca5da3e6c070
|
[
"MIT"
] | null | null | null |
Code/list.py
|
sunjinshuai/Python
|
b4d76bc20e9d740108c98cb8d023ca5da3e6c070
|
[
"MIT"
] | null | null | null |
list1 = ['physics', 'chemistry', 1997, 2000]
list2 = [1, 2, 3, 4, 5 ]
list3 = ["a", "b", "c", "d"]
print list1, list2, list3
# Accessing values in a list by index and by slice:
print "list1[0]: ", list1[0]
print "list2[1:5]: ", list2[1:5]
# Updating a list: append() adds a single element to the end.
lst = []                # empty list (renamed to avoid shadowing the built-in `list`)
lst.append('Google')
lst.append('Python')
print lst
# Deleting a list element with the del statement:
list1 = ['Python', 'iOS', 'Java', 'C++']
print list1
del list1[2]
print "After deleting value at index 2 : "
print list1
# List operations in Python:
# length (len), concatenation (+), repetition (*), membership (in), iteration.
list1 = ['Python', 'iOS', 'Java', 'C++']
print len(list1)
list2 = ['C', 'Ruby', 'Javastript']
print list1 + list2
print ['Python'] * 4
print 'iOS' in list1
for str in list1:
print str
# Indexing, negative indexing, and slicing:
list1 = ['Python', 'iOS', 'Java', 'C++']
print list1[2]
print list1[-2]
print list1[1:]
# Comparing lists with cmp() (Python 2 only):
# cmp(list1, list2) compares the two lists element by element.
# - Elements are compared with the usual cmp() rules for their types:
#   numbers numerically, strings lexicographically ("a" < "b").
# - The first unequal pair of elements decides the result.
# - If all compared elements are equal and one list is a prefix of the
#   other, the longer list is the greater one.
# - Returns -1, 0 or 1 as list1 is less than, equal to, or greater than list2.
list1, list2 = [123, 'xyz'], [456, 'abc']
print cmp(list1, list2)
print cmp(list2, list1)
list3 = list2 + [786]
list4 = [123, 'xyz']
print cmp(list2, list3)
print cmp(list1, list4)
# extend()
# extend()
# list.extend(seq)
#
aList = [123, 'xyz', 'zara', 'abc', 123];
bList = [2009, 'manni'];
aList.extend(bList)
print "Extended List : ", aList
| 20.222222
| 49
| 0.651404
|
ef0025261578f6f3b594dd1953fdfd38e1b064c9
| 10,015
|
py
|
Python
|
xyw_macro/notify.py
|
xue0228/keyboard
|
dcb0def1d87a9197676c0f405b980a67e128ab24
|
[
"MIT"
] | null | null | null |
xyw_macro/notify.py
|
xue0228/keyboard
|
dcb0def1d87a9197676c0f405b980a67e128ab24
|
[
"MIT"
] | null | null | null |
xyw_macro/notify.py
|
xue0228/keyboard
|
dcb0def1d87a9197676c0f405b980a67e128ab24
|
[
"MIT"
] | null | null | null |
import tkinter as tk
import tkinter.font as tf
from tkinter import ttk
from tkinter import messagebox
from tkinter.filedialog import askopenfilename, askdirectory
import time
import threading
from functools import wraps
from xyw_macro.utils import SingletonType
from xyw_macro.contants import SLEEP_TIME
class InputBox:
    """
    Simple tkinter dialog that collects one value per field name.
    """
    def __init__(self, title='', *args):
        """
        :param title: title of the dialog window
        :param args: names of the fields to prompt for
        """
        self.title = title
        self.__args = args
        self.top = None
        self.vars = []
        self.values = []
    def show(self):
        """
        Display the dialog and return the entered values.
        :return: list of entered values
        """
        # top_window() builds and runs the Toplevel dialog; its body was
        # truncated in this dump.
        return self.top_window()
def input_box(*ags, title=''):
    """
    Decorator factory: pop up an InputBox before calling the wrapped
    function and pass the entered values through.
    :param title: title of the dialog window
    :return: the decorator
    """
    def decorator(func):
        # Minimal sketch restoring the truncated body; the exact calling
        # convention of the original is an assumption.
        @wraps(func)
        def wrapper(*args, **kwargs):
            values = InputBox(title, *ags).show()
            return func(*values, *args, **kwargs)
        return wrapper
    return decorator
def confirm_box(message=''):
    """
    Decorator factory: ask for confirmation before calling the wrapped
    function.
    :param message: confirmation prompt to display
    :return: the decorator
    """
    def decorator(func):
        # Minimal sketch restoring the truncated body.
        @wraps(func)
        def wrapper(*args, **kwargs):
            if messagebox.askokcancel('Confirm', message):
                return func(*args, **kwargs)
        return wrapper
    return decorator
if __name__ == '__main__':
# notify = Notification()
# threading.Thread(target=auto_hide).start()
# notify.start()
# thd = threading.Thread(target=sub)
# thd.start()
# def auto_hide():
# time.sleep(2)
# # notify.destroy()
# # flag = False
# notify.hide()
notify = Notification('xyw_macro\n')
threading.Thread(target=sub).start()
notify.run()
# notify.show(0.2)
# print('end')
# time.sleep(2)
# notify.set_text('changed')
# notify.show()
# notify.start()
# print('xue')
# print(type(notify.get_window()))
# notify.start()
# flag = True
# while flag:
# # notify.get_window().update_idletasks()
# notify.get_window().update()
| 30.348485
| 120
| 0.563155
|
ef015b72b0d9f9a36582b5d4563b3165aa3bb897
| 1,206
|
py
|
Python
|
tests/test_utils.py
|
yiannisha/dbmanage
|
9e1e36e2b59e7e369595f4804bef2c2a7ec0ec56
|
[
"Apache-2.0"
] | null | null | null |
tests/test_utils.py
|
yiannisha/dbmanage
|
9e1e36e2b59e7e369595f4804bef2c2a7ec0ec56
|
[
"Apache-2.0"
] | 10
|
2021-11-06T18:12:54.000Z
|
2021-12-01T18:49:29.000Z
|
tests/test_utils.py
|
yiannisha/dbmanage
|
9e1e36e2b59e7e369595f4804bef2c2a7ec0ec56
|
[
"Apache-2.0"
] | null | null | null |
""" Utilities for testing """
import os
import json
import time
TESTDATADIR = os.path.join(os.path.dirname(__file__), 'testdata')
def get_pass(pass_name : str) -> str:
    """ Returns pass from test_credentials.json """
    creds_path = os.path.join(os.path.dirname(__file__), 'test_credentials.json')
    with open(creds_path, 'r', encoding='utf-8') as f:
        # Parse the whole file at once instead of line by line.
        creds = json.load(f)
    return creds[pass_name]
def read_temp_file(filename: str, delete = True, stdout: str = '', stderr: str = '') -> str:
""" Reads temp file and returns contents """
# wait for file to be generated
print(f'Waiting for {filename} file...')
try:
        while not os.path.exists(filename):
            time.sleep(0.05)  # poll without burning a full CPU core
except KeyboardInterrupt as e:
error_msg = f'Stdout: {stdout}\nStderr: {stderr}\n'
raise Exception(error_msg)
# read file
with open(filename, 'r', encoding='utf-8') as f:
out_str = ''.join([line for line in f.readlines()])
# delete file
if delete and os.path.exists(filename):
try:
os.remove(filename)
except:
print(f'{filename} file already removed')
return out_str
| 28.046512
| 93
| 0.619403
|
ef0261d204ca26d250b0a03064510e798b9c7feb
| 152
|
py
|
Python
|
ballistics/collision/dispatch/__init__.py
|
flupke/ballistics
|
844ef7dd9fd55f6f7d0be04df6b564beaa5aaa1a
|
[
"Zlib"
] | null | null | null |
ballistics/collision/dispatch/__init__.py
|
flupke/ballistics
|
844ef7dd9fd55f6f7d0be04df6b564beaa5aaa1a
|
[
"Zlib"
] | null | null | null |
ballistics/collision/dispatch/__init__.py
|
flupke/ballistics
|
844ef7dd9fd55f6f7d0be04df6b564beaa5aaa1a
|
[
"Zlib"
] | 1
|
2020-04-29T13:52:31.000Z
|
2020-04-29T13:52:31.000Z
|
from ballistics.collision.dispatch.config import DefaultCollisionConfiguration
from ballistics.collision.dispatch.dispatcher import CollisionDispatcher
| 50.666667
| 78
| 0.907895
|
ef032589a15b54709c2cc0f764228c621cd157d2
| 750
|
py
|
Python
|
venv/lib/python3.7/site-packages/webdriver_manager/chrome.py
|
wayshon/pylogin
|
12ecfddc3ceaf552a42f62608027924541c63254
|
[
"Apache-2.0"
] | null | null | null |
venv/lib/python3.7/site-packages/webdriver_manager/chrome.py
|
wayshon/pylogin
|
12ecfddc3ceaf552a42f62608027924541c63254
|
[
"Apache-2.0"
] | 7
|
2019-12-04T23:08:08.000Z
|
2022-02-10T12:47:38.000Z
|
venv/lib/python3.7/site-packages/webdriver_manager/chrome.py
|
wayshon/pylogin
|
12ecfddc3ceaf552a42f62608027924541c63254
|
[
"Apache-2.0"
] | null | null | null |
import os
from webdriver_manager.driver import ChromeDriver
from webdriver_manager.manager import DriverManager
from webdriver_manager import utils
| 34.090909
| 72
| 0.652
|
ef038b82c703bdd42d7eb00adaf52c73105e5c39
| 321
|
py
|
Python
|
polling_stations/apps/data_importers/management/commands/import_brent.py
|
danielgriffin48/UK-Polling-Stations
|
0e5273357a4fdc00c2af794c71558b6f8f2a0a49
|
[
"BSD-3-Clause"
] | null | null | null |
polling_stations/apps/data_importers/management/commands/import_brent.py
|
danielgriffin48/UK-Polling-Stations
|
0e5273357a4fdc00c2af794c71558b6f8f2a0a49
|
[
"BSD-3-Clause"
] | null | null | null |
polling_stations/apps/data_importers/management/commands/import_brent.py
|
danielgriffin48/UK-Polling-Stations
|
0e5273357a4fdc00c2af794c71558b6f8f2a0a49
|
[
"BSD-3-Clause"
] | null | null | null |
from data_importers.management.commands import BaseDemocracyCountsCsvImporter
| 35.666667
| 77
| 0.76947
|
ef0469d45705f95287d4ed042d4ea25304eabf8c
| 3,217
|
py
|
Python
|
tests/test_data/movies.py
|
jmolinski/traktpy
|
e6ff22acaf273b7b45070a4f8938c210fe4d63d7
|
[
"MIT"
] | null | null | null |
tests/test_data/movies.py
|
jmolinski/traktpy
|
e6ff22acaf273b7b45070a4f8938c210fe4d63d7
|
[
"MIT"
] | 1
|
2019-04-13T10:15:48.000Z
|
2019-04-13T10:15:48.000Z
|
tests/test_data/movies.py
|
jmolinski/traktpy
|
e6ff22acaf273b7b45070a4f8938c210fe4d63d7
|
[
"MIT"
] | null | null | null |
MOVIE1 = {
"title": "Guardians of the Galaxy",
"year": 2014,
"ids": {
"trakt": 28,
"slug": "guardians-of-the-galaxy-2014",
"imdb": "tt2015381",
"tmdb": 118340,
},
}
MOVIE2 = {
"title": "Guardians of the Galaxy",
"year": 2014,
"ids": {
"trakt": 28,
"slug": "guardians-of-the-galaxy-2014",
"imdb": "tt2015381",
"tmdb": 118340,
},
}
MOVIE_PREMIERES = [
{"released": "2014-08-01", "movie": MOVIE1},
{"released": "2014-08-01", "movie": MOVIE2},
]
MOVIES = [MOVIE1, MOVIE2]
TRENDING_MOVIES = [{"watchers": 21, "movie": MOVIE1}, {"watchers": 17, "movie": MOVIE2}]
PLAYED_MOVIES = [
{
"watcher_count": 66667,
"play_count": 109736,
"collected_count": 27584,
"movie": MOVIE1,
},
{
"watcher_count": 76254,
"play_count": 104242,
"collected_count": 31877,
"movie": MOVIE2,
},
]
ANTICIPATED_MOVIES = [
{"list_count": 5362, "movie": MOVIE1},
{"list_count": 4405, "movie": MOVIE2},
]
BOX_OFFICE = [
{"revenue": 48464322, "movie": MOVIE1},
{"revenue": 17728313, "movie": MOVIE2},
]
UPDATED_MOVIES = [{"updated_at": "2014-09-22T21:56:03.000Z", "movie": MOVIE1}]
EXTENDED_MOVIE = {
"title": "TRON: Legacy",
"year": 2010,
"ids": {
"trakt": 343,
"slug": "tron-legacy-2010",
"imdb": "tt1104001",
"tmdb": 20526,
},
"tagline": "The Game Has Changed.",
"overview": "Sam Flynn, the tech-savvy and daring son of Kevin Flynn, investigates his father's disappearance and is pulled into The Grid. With the help of a mysterious program named Quorra, Sam quests to stop evil dictator Clu from crossing into the real world.",
"released": "2010-12-16",
"runtime": 125,
"country": "us",
"updated_at": "2014-07-23T03:21:46.000Z",
"trailer": None,
"homepage": "http://disney.go.com/tron/",
"rating": 8,
"votes": 111,
"comment_count": 92,
"language": "en",
"available_translations": ["en"],
"genres": ["action"],
"certification": "PG-13",
}
ALIASES = [
{"title": "Batman 1 - Batman Begins", "country": "ca"},
{"title": "Batman 5 Begins", "country": "br"},
]
RELEASES = [
{
"country": "us",
"certification": "PG",
"release_date": "2010-12-16",
"release_type": "theatrical",
"note": None,
},
{
"country": "gb",
"certification": "PG",
"release_date": "2010-12-17",
"release_type": "theatrical",
"note": None,
},
]
TRANSLATIONS = [
{
"title": "Batman Begins",
"overview": "...",
"tagline": "Das Bse frchtet den Ritter.",
"language": "de",
}
]
RATINGS = {
"rating": 7.33778,
"votes": 7866,
"distribution": {
"1": 298,
"2": 46,
"3": 87,
"4": 178,
"5": 446,
"6": 1167,
"7": 1855,
"8": 1543,
"9": 662,
"10": 1583,
},
}
RELATED_MOVIES = [MOVIE1, MOVIE2]
MOVIE_STATS = {
"watchers": 39204,
"plays": 51033,
"collectors": 27379,
"comments": 36,
"lists": 4561,
"votes": 7866,
}
| 22.496503
| 269
| 0.520361
|
ef051797168d89a7cce543aa7efcba75f787978c
| 2,689
|
py
|
Python
|
azext_iot/digitaltwins/common.py
|
v-andreaco/azure-iot-cli-extension
|
18b20b0a6ba9f75556979eb905e6d2271eb27ddd
|
[
"MIT"
] | null | null | null |
azext_iot/digitaltwins/common.py
|
v-andreaco/azure-iot-cli-extension
|
18b20b0a6ba9f75556979eb905e6d2271eb27ddd
|
[
"MIT"
] | null | null | null |
azext_iot/digitaltwins/common.py
|
v-andreaco/azure-iot-cli-extension
|
18b20b0a6ba9f75556979eb905e6d2271eb27ddd
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
"""
shared: Define shared data types(enums) and constant strings.
"""
from enum import Enum
# Retry constants
MAX_ADT_CREATE_RETRIES = 5
ADT_CREATE_RETRY_AFTER = 60
MAX_ADT_DH_CREATE_RETRIES = 20
# Data History strings
DT_IDENTITY_ERROR = "Digital Twins instance does not have System-Assigned Identity enabled. Please enable and try again."
FINISHED_CHECK_RESOURCE_LOG_MSG = "Finished checking the {0} resource."
ERROR_PREFIX = "Unable to"
FAIL_GENERIC_MSG = ERROR_PREFIX + " assign {0}. Please assign this role manually."
FAIL_RBAC_MSG = ERROR_PREFIX + " assign {0}. Please assign this role manually with the command `az {1}`."
ABORT_MSG = "Command was aborted."
CONT_INPUT_MSG = "Continue with Data History connection creation anyway?"
ADX_ROLE_MSG = "'Database Admin' permission on the Digital Twins instance for the Azure Data Explorer database '{0}'"
RBAC_ROLE_MSG = "'{0}' role on the Digital Twins instance for the scope '{1}'"
# Messages to be used with ADX_ROLE_MSG or RBAC_ROLE_MSG
# Example: "Trying to add the '{0}' role on the Digital Twins instance for the scope '{1}'.
TRY_ADD_ROLE_LOG_MSG = "Trying to add the {0}."
PRESENT_ADD_ROLE_LOG_MSG = "The {0} is already present."
FINISHED_ADD_ROLE_LOG_MSG = "Finished adding the {0}."
ADD_ROLE_INPUT_MSG = "Add the {0}?"
SKIP_ADD_ROLE_MSG = "Skipping addition of the {0}. This may prevent creation of the data history connection."
# Enums
| 31.267442
| 121
| 0.670881
|
ef05389e99b6d9f3d5e451c4f3f4a586cd843bd5
| 7,580
|
py
|
Python
|
lib/FeatureSetUtils/Utils/AveExpressionMatrixBuilder.py
|
mclark58/FeatureSetUtils
|
2b84bc40d6a8f8aec878aa965ca567537c67267e
|
[
"MIT"
] | 1
|
2020-01-13T19:38:50.000Z
|
2020-01-13T19:38:50.000Z
|
lib/FeatureSetUtils/Utils/AveExpressionMatrixBuilder.py
|
mclark58/FeatureSetUtils
|
2b84bc40d6a8f8aec878aa965ca567537c67267e
|
[
"MIT"
] | 6
|
2017-09-19T17:46:03.000Z
|
2020-06-09T04:28:36.000Z
|
lib/FeatureSetUtils/Utils/AveExpressionMatrixBuilder.py
|
mclark58/FeatureSetUtils
|
2b84bc40d6a8f8aec878aa965ca567537c67267e
|
[
"MIT"
] | 9
|
2017-06-30T16:01:48.000Z
|
2020-08-13T20:19:42.000Z
|
import json
import time
import uuid
from installed_clients.DataFileUtilClient import DataFileUtil
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.WorkspaceClient import Workspace as Workspace
def log(message, prefix_newline=False):
"""Logging function, provides a hook to suppress or redirect log messages."""
print(('\n' if prefix_newline else '') + '{0:.2f}'.format(time.time()) + ': ' + str(message))
| 41.648352
| 98
| 0.61504
|
ef055217f03abbaf7fba6a972f73a617fc132c0f
| 838
|
py
|
Python
|
src/python/modules/TensorflowCommon/utils.py
|
dsyme/ADBench
|
87af0219a568807f8432754688ceb636efac12c6
|
[
"MIT"
] | 58
|
2019-12-30T16:22:01.000Z
|
2022-01-23T12:26:51.000Z
|
src/python/modules/TensorflowCommon/utils.py
|
dsyme/ADBench
|
87af0219a568807f8432754688ceb636efac12c6
|
[
"MIT"
] | 112
|
2019-05-25T07:26:58.000Z
|
2019-12-28T13:55:33.000Z
|
src/python/modules/TensorflowCommon/utils.py
|
dsyme/ADBench
|
87af0219a568807f8432754688ceb636efac12c6
|
[
"MIT"
] | 22
|
2020-03-12T16:37:55.000Z
|
2022-02-23T10:14:37.000Z
|
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import tensorflow as tf
def to_tf_tensor(ndarray, dtype = tf.float64):
'''Converts the given multidimensional array to a tensorflow tensor.
Args:
ndarray (ndarray-like): parameter for conversion.
dtype (type, optional): defines a type of tensor elements. Defaults to
tf.float64.
Returns:
tensorflow tensor
'''
return tf.convert_to_tensor(ndarray, dtype = dtype)
def shape(tf_tensor):
'''Returns shape of a tensorflow tensor like a list if integers.'''
return tf_tensor.get_shape().as_list()
def flatten(tf_tensor, column_major = False):
'''Returns the flaten tensor.'''
if column_major:
tf_tensor = tf.transpose(tf_tensor)
return tf.reshape(tf_tensor, [-1])
| 22.648649
| 78
| 0.674224
|
ef066c9d7e1e24986e561e37f408aef403cdc52a
| 127
|
py
|
Python
|
learning_sets.py
|
guppikan/PythonLearning
|
b1674b7187c783b682da26c2190e2b47938faa16
|
[
"MIT"
] | null | null | null |
learning_sets.py
|
guppikan/PythonLearning
|
b1674b7187c783b682da26c2190e2b47938faa16
|
[
"MIT"
] | null | null | null |
learning_sets.py
|
guppikan/PythonLearning
|
b1674b7187c783b682da26c2190e2b47938faa16
|
[
"MIT"
] | null | null | null |
# this file describes the sets data structure in python
thisSet = {"Car", "Bike", "Truck"}
# Printing sets on terminal
print(thisSet)
| 21.166667
| 52
| 0.740157
|
ef071178a07b347765b3a959b7f835718f3934a3
| 588
|
py
|
Python
|
s3bro/pool_map.py
|
rsavordelli/s3bro
|
e5b1d41052fd2491c08589b8a2bffeb6aae7cf33
|
[
"MIT"
] | 22
|
2018-03-13T18:46:33.000Z
|
2021-11-03T09:41:39.000Z
|
s3bro/pool_map.py
|
rsavordelli/s3bro
|
e5b1d41052fd2491c08589b8a2bffeb6aae7cf33
|
[
"MIT"
] | 5
|
2018-06-26T21:39:06.000Z
|
2020-08-03T12:53:10.000Z
|
s3bro/pool_map.py
|
rsavordelli/s3bro
|
e5b1d41052fd2491c08589b8a2bffeb6aae7cf33
|
[
"MIT"
] | 2
|
2019-09-04T06:40:09.000Z
|
2020-07-06T01:56:44.000Z
|
from multiprocessing import Pool
import logging
| 32.666667
| 114
| 0.690476
|
ef07256f31589e2d434bffa64e958f93097dc4b3
| 11,290
|
py
|
Python
|
htmlmth/utils.py
|
ZwCreatePhoton/htmlmth
|
74d23ca2fa53e11b2587251d2f71c8f275548182
|
[
"MIT"
] | null | null | null |
htmlmth/utils.py
|
ZwCreatePhoton/htmlmth
|
74d23ca2fa53e11b2587251d2f71c8f275548182
|
[
"MIT"
] | null | null | null |
htmlmth/utils.py
|
ZwCreatePhoton/htmlmth
|
74d23ca2fa53e11b2587251d2f71c8f275548182
|
[
"MIT"
] | null | null | null |
import os
import yaml
from HTMLScriptExtractor import HTMLScriptExtractor
MIME_TYPE_MAP = {
'.htm': 'text/html',
'.html': 'text/html',
'.js': 'text/javascript',
'.vbs': 'text/vbscript',
'.txt': 'text/plain',
'.jpg': 'image/jpeg',
'.jpeg': 'image/jpeg'
}
# input:
#   "mime_type_function_dict": a dictionary (mime type -> f) where "f" is a
#   function that accepts the tuple (string, MetaData) and returns the tuple
#   (string, MetaData)
# output:
#   a function "g" that accepts a single argument of type list of tuple
#   (string, MetaData); for each tuple in the list, g calls
#   mime_type_function_dict[tuple[1].mime_type] with the tuple as argument
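# A minimal sketch of the dispatcher described above; the original body was
# truncated in this dump and the name "make_mime_dispatcher" is assumed.
def make_mime_dispatcher(mime_type_function_dict):
    def g(pairs):
        out = []
        for content, metadata in pairs:
            f = mime_type_function_dict.get(metadata.mime_type)
            out.append(f((content, metadata)) if f is not None else (content, metadata))
        return out
    return g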
# for use with TransformFunctionArgument.content
# function(string) -> function(TransformFunctionArgument)
# for use with TransformFunctionArgument.metadata.http.normalized_headers
# function(list of headers) -> function(TransformFunctionArgument)
# for use with TransformFunctionArgument.metadata.http.payload
# function(bytes) -> function(TransformFunctionArgument)
def IsYaml(filepath):
return os.path.splitext(filepath)[-1].lower() == ".yaml"
# returns list of baseline
# baseline := dictionary of "host", "path", "filepath", "content"
# returns list of testcase
# testcase := dictionary of "host", "path", "casename"
| 35.84127
| 184
| 0.615766
|
ef094d452aa651937866c8d859cce7f5a8e866fa
| 1,265
|
py
|
Python
|
examples/235. Lowest Common Ancestor of a Binary Search Tree.py
|
yehzhang/RapidTest
|
2302fc10ddafba1d16ef1d7448d46c66f5a05da2
|
[
"MIT"
] | null | null | null |
examples/235. Lowest Common Ancestor of a Binary Search Tree.py
|
yehzhang/RapidTest
|
2302fc10ddafba1d16ef1d7448d46c66f5a05da2
|
[
"MIT"
] | null | null | null |
examples/235. Lowest Common Ancestor of a Binary Search Tree.py
|
yehzhang/RapidTest
|
2302fc10ddafba1d16ef1d7448d46c66f5a05da2
|
[
"MIT"
] | null | null | null |
from rapidtest import Test, Case, TreeNode
from solutions.lowest_common_ancestor_of_a_binary_search_tree import Solution
with Test(Solution, post_proc=TreeNode.get_val) as test:
root = TreeNode.from_iterable([6, 2, 8, 0, 4, 7, 9, None, None, 3, 5])
Case(root, TreeNode(2), TreeNode(4), result=TreeNode(2))
Case(root, TreeNode(4), TreeNode(2), result=TreeNode(2))
Case(root, TreeNode(2), TreeNode(8), result=TreeNode(6))
Case(root, TreeNode(8), TreeNode(2), result=TreeNode(6))
Case(root, TreeNode(3), TreeNode(7), result=TreeNode(6))
Case(root, TreeNode(0), TreeNode(4), result=TreeNode(2))
Case(root, TreeNode(0), TreeNode(5), result=TreeNode(2))
Case(root, TreeNode(2), TreeNode(6), result=TreeNode(6))
Case(root, TreeNode(6), TreeNode(2), result=TreeNode(6))
Case(root, TreeNode(6), TreeNode(2), result=TreeNode(6))
Case(root, TreeNode(0), TreeNode(0), result=TreeNode(0))
| 43.62069
| 91
| 0.660079
|
ef09cb460708054f80c71807033f5ec91f1f2963
| 12,087
|
py
|
Python
|
proto/npu_utilization_pb2.py
|
akaczm/jun-telemetry
|
84c7208669f4f1749f8db45f4815dafefdbec083
|
[
"MIT"
] | 4
|
2019-12-02T12:20:47.000Z
|
2021-08-25T12:52:26.000Z
|
proto/npu_utilization_pb2.py
|
akaczm/jun-telemetry
|
84c7208669f4f1749f8db45f4815dafefdbec083
|
[
"MIT"
] | null | null | null |
proto/npu_utilization_pb2.py
|
akaczm/jun-telemetry
|
84c7208669f4f1749f8db45f4815dafefdbec083
|
[
"MIT"
] | 1
|
2021-08-25T12:47:44.000Z
|
2021-08-25T12:47:44.000Z
|
# -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: npu_utilization.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
import telemetry_top_pb2 as telemetry__top__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='npu_utilization.proto',
package='',
syntax='proto2',
serialized_options=None,
serialized_pb=_b('\n\x15npu_utilization.proto\x1a\x13telemetry_top.proto\"C\n\x1bNetworkProcessorUtilization\x12$\n\x0enpu_util_stats\x18\x01 \x03(\x0b\x32\x0c.Utilization\"q\n\x0bUtilization\x12\x12\n\nidentifier\x18\x01 \x02(\t\x12\x13\n\x0butilization\x18\x02 \x01(\r\x12\x1c\n\x07packets\x18\x03 \x03(\x0b\x32\x0b.PacketLoad\x12\x1b\n\x06memory\x18\x04 \x03(\x0b\x32\x0b.MemoryLoad\"\xba\x01\n\nMemoryLoad\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x14\n\x0c\x61verage_util\x18\x02 \x01(\r\x12\x14\n\x0chighest_util\x18\x03 \x01(\r\x12\x13\n\x0blowest_util\x18\x04 \x01(\r\x12\x1e\n\x16\x61verage_cache_hit_rate\x18\x05 \x01(\r\x12\x1e\n\x16highest_cache_hit_rate\x18\x06 \x01(\r\x12\x1d\n\x15lowest_cache_hit_rate\x18\x07 \x01(\r\"\xa2\x01\n\nPacketLoad\x12\x12\n\nidentifier\x18\x01 \x02(\t\x12\x0c\n\x04rate\x18\x02 \x01(\x04\x12\'\n\x1f\x61verage_instructions_per_packet\x18\x03 \x01(\r\x12&\n\x1e\x61verage_wait_cycles_per_packet\x18\x04 \x01(\r\x12!\n\x19\x61verage_cycles_per_packet\x18\x05 \x01(\r:W\n\x18jnpr_npu_utilization_ext\x12\x17.JuniperNetworksSensors\x18\x0c \x01(\x0b\x32\x1c.NetworkProcessorUtilization')
,
dependencies=[telemetry__top__pb2.DESCRIPTOR,])
JNPR_NPU_UTILIZATION_EXT_FIELD_NUMBER = 12
jnpr_npu_utilization_ext = _descriptor.FieldDescriptor(
name='jnpr_npu_utilization_ext', full_name='jnpr_npu_utilization_ext', index=0,
number=12, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=True, extension_scope=None,
serialized_options=None, file=DESCRIPTOR)
_NETWORKPROCESSORUTILIZATION = _descriptor.Descriptor(
name='NetworkProcessorUtilization',
full_name='NetworkProcessorUtilization',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='npu_util_stats', full_name='NetworkProcessorUtilization.npu_util_stats', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=46,
serialized_end=113,
)
_UTILIZATION = _descriptor.Descriptor(
name='Utilization',
full_name='Utilization',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='identifier', full_name='Utilization.identifier', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='utilization', full_name='Utilization.utilization', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='packets', full_name='Utilization.packets', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='memory', full_name='Utilization.memory', index=3,
number=4, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=115,
serialized_end=228,
)
_MEMORYLOAD = _descriptor.Descriptor(
name='MemoryLoad',
full_name='MemoryLoad',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='MemoryLoad.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_util', full_name='MemoryLoad.average_util', index=1,
number=2, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='highest_util', full_name='MemoryLoad.highest_util', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lowest_util', full_name='MemoryLoad.lowest_util', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_cache_hit_rate', full_name='MemoryLoad.average_cache_hit_rate', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='highest_cache_hit_rate', full_name='MemoryLoad.highest_cache_hit_rate', index=5,
number=6, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='lowest_cache_hit_rate', full_name='MemoryLoad.lowest_cache_hit_rate', index=6,
number=7, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=231,
serialized_end=417,
)
_PACKETLOAD = _descriptor.Descriptor(
name='PacketLoad',
full_name='PacketLoad',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='identifier', full_name='PacketLoad.identifier', index=0,
number=1, type=9, cpp_type=9, label=2,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='rate', full_name='PacketLoad.rate', index=1,
number=2, type=4, cpp_type=4, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_instructions_per_packet', full_name='PacketLoad.average_instructions_per_packet', index=2,
number=3, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_wait_cycles_per_packet', full_name='PacketLoad.average_wait_cycles_per_packet', index=3,
number=4, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='average_cycles_per_packet', full_name='PacketLoad.average_cycles_per_packet', index=4,
number=5, type=13, cpp_type=3, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto2',
extension_ranges=[],
oneofs=[
],
serialized_start=420,
serialized_end=582,
)
_NETWORKPROCESSORUTILIZATION.fields_by_name['npu_util_stats'].message_type = _UTILIZATION
_UTILIZATION.fields_by_name['packets'].message_type = _PACKETLOAD
_UTILIZATION.fields_by_name['memory'].message_type = _MEMORYLOAD
DESCRIPTOR.message_types_by_name['NetworkProcessorUtilization'] = _NETWORKPROCESSORUTILIZATION
DESCRIPTOR.message_types_by_name['Utilization'] = _UTILIZATION
DESCRIPTOR.message_types_by_name['MemoryLoad'] = _MEMORYLOAD
DESCRIPTOR.message_types_by_name['PacketLoad'] = _PACKETLOAD
DESCRIPTOR.extensions_by_name['jnpr_npu_utilization_ext'] = jnpr_npu_utilization_ext
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
NetworkProcessorUtilization = _reflection.GeneratedProtocolMessageType('NetworkProcessorUtilization', (_message.Message,), {
'DESCRIPTOR' : _NETWORKPROCESSORUTILIZATION,
'__module__' : 'npu_utilization_pb2'
# @@protoc_insertion_point(class_scope:NetworkProcessorUtilization)
})
_sym_db.RegisterMessage(NetworkProcessorUtilization)
Utilization = _reflection.GeneratedProtocolMessageType('Utilization', (_message.Message,), {
'DESCRIPTOR' : _UTILIZATION,
'__module__' : 'npu_utilization_pb2'
# @@protoc_insertion_point(class_scope:Utilization)
})
_sym_db.RegisterMessage(Utilization)
MemoryLoad = _reflection.GeneratedProtocolMessageType('MemoryLoad', (_message.Message,), {
'DESCRIPTOR' : _MEMORYLOAD,
'__module__' : 'npu_utilization_pb2'
# @@protoc_insertion_point(class_scope:MemoryLoad)
})
_sym_db.RegisterMessage(MemoryLoad)
PacketLoad = _reflection.GeneratedProtocolMessageType('PacketLoad', (_message.Message,), {
'DESCRIPTOR' : _PACKETLOAD,
'__module__' : 'npu_utilization_pb2'
# @@protoc_insertion_point(class_scope:PacketLoad)
})
_sym_db.RegisterMessage(PacketLoad)
jnpr_npu_utilization_ext.message_type = _NETWORKPROCESSORUTILIZATION
telemetry__top__pb2.JuniperNetworksSensors.RegisterExtension(jnpr_npu_utilization_ext)
# @@protoc_insertion_point(module_scope)
| 40.972881
| 1,125
| 0.755522
|
ef0a465c711275ee344dd982144bb689f29fa28c
| 4,409
|
py
|
Python
|
tests/test_models.py
|
rramaa/pynnotate
|
7cf983dd16726032d3d53340415a823c9e8bd76c
|
[
"MIT"
] | 1
|
2019-07-24T12:56:16.000Z
|
2019-07-24T12:56:16.000Z
|
tests/test_models.py
|
rramaa/pynnotate
|
7cf983dd16726032d3d53340415a823c9e8bd76c
|
[
"MIT"
] | 14
|
2019-03-12T08:49:34.000Z
|
2019-04-04T09:51:16.000Z
|
tests/test_models.py
|
rramaa/pynnotate
|
7cf983dd16726032d3d53340415a823c9e8bd76c
|
[
"MIT"
] | 2
|
2019-10-13T14:45:11.000Z
|
2019-12-24T22:22:46.000Z
|
import os
import sys
# Make the project root importable before pulling in annotatelib.
sys.path.insert(0, os.path.abspath(
    os.path.join(os.path.dirname(__file__), '../')))
from annotatelib.models import (
    models, class_from_filename,
    table_name_from_filename, _get_column_description_from_object,
    _get_indices_description_from_oject
)
import sqlite3
from orator import DatabaseManager
| 41.205607
| 148
| 0.577682
|
ef0b1e90a414cd10b99ab947636c1ca2151cab55
| 430
|
py
|
Python
|
flatlist/__init__.py
|
dwabece/flatlist
|
61b6f7f70bf9db2bf14f8bfdebce2c4f9a95811f
|
[
"WTFPL"
] | null | null | null |
flatlist/__init__.py
|
dwabece/flatlist
|
61b6f7f70bf9db2bf14f8bfdebce2c4f9a95811f
|
[
"WTFPL"
] | null | null | null |
flatlist/__init__.py
|
dwabece/flatlist
|
61b6f7f70bf9db2bf14f8bfdebce2c4f9a95811f
|
[
"WTFPL"
] | null | null | null |
__version__ = '0.0.1'
def flatten_list(input_list):
"""
Flattens list with many nested lists.
>>> flatten_list([1, [2, [3], [4]]])
[1, 2, 3, 4]
"""
result = []
for item in input_list:
if isinstance(item, list):
result.extend(flatten_list(item))
# yield from flatten_list(item)
else:
result.append(item)
# yield item
return result
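# Generator variant hinted at by the commented-out yields above; a sketch,
# not part of the published API.
def iter_flatten(input_list):
    for item in input_list:
        if isinstance(item, list):
            yield from iter_flatten(item)
        else:
            yield item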
| 20.47619
| 45
| 0.532558
|
ef0dbb4129bccb5de4e10f51b60990b9ac3393bb
| 607
|
py
|
Python
|
slackcast/token.py
|
rbdixon/slackcast
|
ac4ac4591bbcf62d64ec05b5479e6e8315f92a69
|
[
"MIT"
] | null | null | null |
slackcast/token.py
|
rbdixon/slackcast
|
ac4ac4591bbcf62d64ec05b5479e6e8315f92a69
|
[
"MIT"
] | 1
|
2021-11-15T17:47:27.000Z
|
2021-11-15T17:47:27.000Z
|
slackcast/token.py
|
rbdixon/slackcast
|
ac4ac4591bbcf62d64ec05b5479e6e8315f92a69
|
[
"MIT"
] | null | null | null |
import os
import keyring
from prompt_toolkit import prompt
KEY = ('slackcast', 'token')
SLACKCAST_INSTALL_URL = os.environ.get(
'SLACKCAST_INSTALL_URL', 'https://slackcast.devtestit.com/install'
)
| 22.481481
| 88
| 0.667216
|
ef0f41777334766f27b085f4b278863d8beee416
| 790
|
py
|
Python
|
baidupan.py
|
iSteveyang/GraduateDesign-pyqt
|
ce4e6c8b0de2398081a83c63fb98cc03126bc6d0
|
[
"MIT"
] | null | null | null |
baidupan.py
|
iSteveyang/GraduateDesign-pyqt
|
ce4e6c8b0de2398081a83c63fb98cc03126bc6d0
|
[
"MIT"
] | null | null | null |
baidupan.py
|
iSteveyang/GraduateDesign-pyqt
|
ce4e6c8b0de2398081a83c63fb98cc03126bc6d0
|
[
"MIT"
] | null | null | null |
import progressbar
from baidupcsapi import PCS
pcs = PCS('username','password')
test_file = open('bigfile.pdf','rb').read()
ret = pcs.upload('/',test_file,'bigfile.pdf',callback=ProgressBar())
| 34.347826
| 115
| 0.611392
|
ef0f95f25a14e3a1c31217d9a079a1f1c52c743d
| 541
|
py
|
Python
|
pps/message.py
|
SeungUkLee/preview-pipfile-script
|
d28d963f1feee9ed1621a04b25c02d34a0919829
|
[
"MIT"
] | null | null | null |
pps/message.py
|
SeungUkLee/preview-pipfile-script
|
d28d963f1feee9ed1621a04b25c02d34a0919829
|
[
"MIT"
] | null | null | null |
pps/message.py
|
SeungUkLee/preview-pipfile-script
|
d28d963f1feee9ed1621a04b25c02d34a0919829
|
[
"MIT"
] | null | null | null |
"""
messages
"""
from .color import ENDC, FAIL, OKBLUE, YELLOW
EXE_SCRIPT_ERR_MSG = '{0}[!]{1} An error occurred while executing script in Pipfile'.format(
FAIL, ENDC
)
KEYWORD_NOT_FOUND_MSG = "{0}[!]{1} {2}Pipfile{1} in {3}[scripts]{1} keyword not found!".format(
FAIL, ENDC, OKBLUE, YELLOW
)
FILE_NOT_FOUND_MSG = "{0}[!]{1} {2}Pipfile{1} not found!".format(
FAIL, ENDC, OKBLUE
)
KEYBOARD_INTERRUPT_MSG = "{0}[!]{1} KeyboardInterrupt".format(FAIL, ENDC)
INQUIRER_MSG = "{0}Select Pipfile script to run{1}".format(YELLOW, ENDC)
| 31.823529
| 95
| 0.685767
|
ef1093497c62d32b5e459bb8bfbe26c27ca18a49
| 2,101
|
py
|
Python
|
lambdafunctions/LogEvent/LogEvent.py
|
rpetrina/slack-sentiment-bot
|
47969d8a8c476aa60939fab88f0af793a24a4acc
|
[
"MIT"
] | null | null | null |
lambdafunctions/LogEvent/LogEvent.py
|
rpetrina/slack-sentiment-bot
|
47969d8a8c476aa60939fab88f0af793a24a4acc
|
[
"MIT"
] | null | null | null |
lambdafunctions/LogEvent/LogEvent.py
|
rpetrina/slack-sentiment-bot
|
47969d8a8c476aa60939fab88f0af793a24a4acc
|
[
"MIT"
] | null | null | null |
import sys
import logging
import pymysql
import json
import os
#rds settings - Lambda role must have RDS access
rds_host = os.environ['RDS_HOST'] # Set in Lambda Dashboard
name = os.environ['DB_USERNAME']
password = os.environ['DB_PW']
db_name = os.environ['DB_NAME']
db_table = os.environ['DB_TABLE']
logger = logging.getLogger()
logger.setLevel(logging.INFO)
def handler(event, context):
"""
This function handles SNS posts from Amazon SNS. Currently it:
1) Inserts the request into an RDS MySQL DB
Current Assumptions:
1) Messages don't contain special characters - i.e: '
2) Requests are correctly formated (contain body and event, and event contains the expected values)
"""
print("In logevent: ", event)
try:
slackevent = json.loads(event["Records"][0]["Sns"]["Message"])
writemessagetodb(slackevent)
        response = {
"statusCode": 200,
"body": event
}
except Exception as e:
''' Just a stub. Please make this better in real use :) '''
logger.error(f"ERROR: {e}")
response = {
"statusCode": 400,
"body": event
}
return response
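# (Hedged) shape of the SNS envelope this handler expects, for reference:
# {"Records": [{"Sns": {"Message": "<json-encoded slack event>"}}]}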
| 29.180556
| 107
| 0.619229
|
ef124d3ce81475f29c8f62fc6238715aeebcf110
| 764
|
py
|
Python
|
ACCNTS/migrations/0012_auto_20190329_0554.py
|
domambia/csdigital-gs1kenya-internal-erp
|
6736d0e9a3a51653689f8ae921cf811f378d9d8e
|
[
"MIT"
] | 12
|
2019-08-02T07:58:16.000Z
|
2022-01-31T23:45:08.000Z
|
ACCNTS/migrations/0012_auto_20190329_0554.py
|
domambia/csdigital-gs1kenya-internal-erp
|
6736d0e9a3a51653689f8ae921cf811f378d9d8e
|
[
"MIT"
] | 8
|
2019-08-02T08:06:18.000Z
|
2022-03-11T23:45:17.000Z
|
ACCNTS/migrations/0012_auto_20190329_0554.py
|
domambia/csdigital-gs1kenya-internal-erp
|
6736d0e9a3a51653689f8ae921cf811f378d9d8e
|
[
"MIT"
] | 11
|
2019-07-31T16:23:36.000Z
|
2022-01-29T08:30:07.000Z
|
# Generated by Django 2.1.5 on 2019-03-29 05:54
import datetime
from django.db import migrations, models
| 25.466667
| 66
| 0.590314
|
ef1252f9351ea7758743cb386119d19cc1470cf1
| 171
|
py
|
Python
|
doacao/forms.py
|
CyberDagger/quatropatas
|
7fd9b51dd65d6242112ab40c834a66c4cc8c8c73
|
[
"MIT"
] | null | null | null |
doacao/forms.py
|
CyberDagger/quatropatas
|
7fd9b51dd65d6242112ab40c834a66c4cc8c8c73
|
[
"MIT"
] | null | null | null |
doacao/forms.py
|
CyberDagger/quatropatas
|
7fd9b51dd65d6242112ab40c834a66c4cc8c8c73
|
[
"MIT"
] | 1
|
2019-04-16T19:19:10.000Z
|
2019-04-16T19:19:10.000Z
|
from django import forms
from .models import Doacao
| 21.375
| 37
| 0.631579
|
ef12df78f36f2adabef28423fa54313ee1270534
| 1,707
|
py
|
Python
|
data/build_wd_elastic_index.py
|
flaneuse/reframedb-backend
|
863423fb9fad547aa8c2f826dc2d39939fe1b991
|
[
"MIT"
] | null | null | null |
data/build_wd_elastic_index.py
|
flaneuse/reframedb-backend
|
863423fb9fad547aa8c2f826dc2d39939fe1b991
|
[
"MIT"
] | null | null | null |
data/build_wd_elastic_index.py
|
flaneuse/reframedb-backend
|
863423fb9fad547aa8c2f826dc2d39939fe1b991
|
[
"MIT"
] | null | null | null |
import requests
from elasticsearch import Elasticsearch, client
from elasticsearch.exceptions import RequestError
es = Elasticsearch()
# retrieve all QIDs from the populated reframe ES index
body = {
"_source": {
"includes": ["qid"],
},
"query": {
"query_string": {
"query": "Q*",
"fields": ['qid']
}
},
"from": 0, "size": 10000,
}
es.indices.refresh(index="reframe")
r = es.search(index="reframe", body=body)
bd = {
'mapping': {
'total_fields': {
'limit': 30000
}
}
}
c = client.IndicesClient(es)
# check if index exists, otherwise, create
if c.exists(index='wikidata'):
c.put_settings(index='wikidata', body=bd)
else:
c.create(index='wikidata', body=bd)
session = requests.Session()
for count, hit in enumerate(r['hits']['hits']):
qid = hit['_source']['qid']
header = {
'Accept': 'application/json'
}
r = session.get('http://www.wikidata.org/entity/{}'.format(qid), headers=header).json()
# print(r)
obj = r['entities'][qid]
del obj['descriptions']
for claim, value in obj['claims'].items():
# print(claim, value)
for x in value:
if 'references' in x:
del x['references']
if es.exists(index='wikidata', doc_type='compound', id=qid):
# print('this exists!!')
es.update(index='wikidata', id=qid, doc_type='compound', body={'doc': obj})
# pass
else:
try:
res = es.index(index="wikidata", doc_type='compound', id=qid, body=obj)
except RequestError as e:
print(e)
if count % 100 == 0:
print('imported ', count)
| 21.884615
| 91
| 0.565319
|
ef135d999c596568c19df6fc41a299bbb48ab07f
| 3,049
|
py
|
Python
|
dj_twitter_clone_app/blog/views.py
|
ivanprytula/dj_demo_app
|
49ca506b22d3d99608e192b28787e185b39d3c24
|
[
"MIT"
] | null | null | null |
dj_twitter_clone_app/blog/views.py
|
ivanprytula/dj_demo_app
|
49ca506b22d3d99608e192b28787e185b39d3c24
|
[
"MIT"
] | null | null | null |
dj_twitter_clone_app/blog/views.py
|
ivanprytula/dj_demo_app
|
49ca506b22d3d99608e192b28787e185b39d3c24
|
[
"MIT"
] | null | null | null |
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.urls import reverse_lazy
from django.views.generic import (ListView, CreateView, TemplateView, )
from django.views.generic.detail import DetailView
from django.views.generic.edit import (UpdateView, DeleteView, )
from blog.models import Post
# def post_detail(request, pk):
# post = Post.objects.get(pk=pk)
#
# # We create empty form when user visits a page
# form = CommentForm()
# if request.method == 'POST':
# form = CommentForm(request.POST)
# if form.is_valid():
# comment = Comment(
# author=form.cleaned_data['author'],
# content=form.cleaned_data['content'],
# post=post
# )
# comment.save()
#
# comments = Comment.objects.filter(post=post)
# context = {
# 'post': post,
# 'comments': comments,
# 'form': form,
# }
# return render(request, 'blog/post_detail.html', context)
| 29.601942
| 73
| 0.648081
|
ef1431dac95c7e69b7262a40569f5236bf33fd8b
| 4,217
|
py
|
Python
|
code/service/flaskapp/flaskapp.py
|
Wyss/evolvulator
|
c026b9e7425cabba8b7a5b49024173c6f9667337
|
[
"MIT"
] | 1
|
2016-01-05T20:10:10.000Z
|
2016-01-05T20:10:10.000Z
|
code/service/flaskapp/flaskapp.py
|
Wyss/evolvulator
|
c026b9e7425cabba8b7a5b49024173c6f9667337
|
[
"MIT"
] | null | null | null |
code/service/flaskapp/flaskapp.py
|
Wyss/evolvulator
|
c026b9e7425cabba8b7a5b49024173c6f9667337
|
[
"MIT"
] | null | null | null |
"""
Copyright (c) 2012 Wyss Institute at Harvard University
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
http://www.opensource.org/licenses/mit-license.php
"""
"""
flaskapp.py
"""
from flask import Flask, g, request, render_template, jsonify, make_response, send_from_directory
from werkzeug.exceptions import HTTPException, NotFound
from os.path import dirname, basename, split, abspath
from os.path import join as op_join
import random
import sys
from experimentcore.exp_dbifc import setupExperiment
exp_dict = setupExperiment('evolvulator')
app = Flask(__name__)
app.config.from_object(__name__)
app.wsport = 9000 # default
#end def
# end def
# end def
# end def
# end def
# @app.before_request
# def before_request():
# """Make sure we are connected to the database each request."""
# g.db = edb.connectToDB(thedatabase)
#
#
# @app.teardown_request
# def teardown_request(exception):
# """Closes the database again at the end of the request."""
# if hasattr(g, 'db'):
# g.db.close()
# # end def
| 35.436975
| 109
| 0.708798
|
ef1825ce5af0c1bb4c24887ac8d1e612fd32ac97
| 5,383
|
py
|
Python
|
ena-dts/framework/rst.py
|
amzn/amzn-ec2-ena-utilities
|
99502ff5bb025dc71727d4991ea5e29a4e9388c6
|
[
"MIT-0"
] | 7
|
2021-04-29T05:23:56.000Z
|
2022-03-23T02:26:55.000Z
|
ena-dts/framework/rst.py
|
amzn/amzn-ec2-ena-utilities
|
99502ff5bb025dc71727d4991ea5e29a4e9388c6
|
[
"MIT-0"
] | null | null | null |
ena-dts/framework/rst.py
|
amzn/amzn-ec2-ena-utilities
|
99502ff5bb025dc71727d4991ea5e29a4e9388c6
|
[
"MIT-0"
] | 4
|
2021-06-10T19:02:57.000Z
|
2021-12-06T01:31:06.000Z
|
# BSD LICENSE
#
# Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
import re
from exception import VerifyFailure
"""
Generate Rst Test Result Report
Example:
import rst
rst.write_title("Test Case: " + test_case.__name__)
out = table.draw()
rst.write_text('\n' + out + '\n\n')
rst.write_result("PASS")
Result:
<copyright>
<Prerequisites>
Test Case: CASE
---------------
Result: PASS
"""
path2Plan = 'test_plans'
path2Result = 'output'
| 33.228395
| 75
| 0.583132
|
ef191d6989e1e630c43331526ecc6be3b87686af
| 331
|
py
|
Python
|
tests/test_models.py
|
kajigga/dj-pylti
|
2388719ee799b3033a9ab7ccf28667e69bcd8cd6
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_models.py
|
kajigga/dj-pylti
|
2388719ee799b3033a9ab7ccf28667e69bcd8cd6
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_models.py
|
kajigga/dj-pylti
|
2388719ee799b3033a9ab7ccf28667e69bcd8cd6
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_dj-pylti
------------
Tests for `dj-pylti` models module.
"""
from django.test import TestCase
from dj_pylti import models
| 12.730769
| 35
| 0.607251
|
ef19d273749fc5c7cda4c1d9c7f1b0e4fb378f5e
| 30,467
|
py
|
Python
|
mutation.py
|
nklapste/mutation
|
28eb3eaa3173f0a9cfcd22c2cabe6d0c87f50dfa
|
[
"MIT"
] | null | null | null |
mutation.py
|
nklapste/mutation
|
28eb3eaa3173f0a9cfcd22c2cabe6d0c87f50dfa
|
[
"MIT"
] | null | null | null |
mutation.py
|
nklapste/mutation
|
28eb3eaa3173f0a9cfcd22c2cabe6d0c87f50dfa
|
[
"MIT"
] | null | null | null |
"""Mutation.
Usage:
mutation play [--verbose] [--exclude=<globs>] [--only-deadcode-detection] [--include=<globs>] [--sampling=<s>] [--randomly-seed=<n>] [--max-workers=<n>] [<file-or-directory> ...] [-- TEST-COMMAND ...]
mutation replay [--verbose] [--max-workers=<n>]
mutation list
mutation show MUTATION
mutation apply MUTATION
mutation (-h | --help)
mutation --version
Options:
--verbose Show more information.
-h --help Show this screen.
--version Show version.
"""
import asyncio
import fnmatch
import functools
import itertools
import os
import random
import re
import shlex
import sys
import time
from ast import Constant
from concurrent import futures
from contextlib import contextmanager
from copy import deepcopy
from datetime import timedelta
from difflib import unified_diff
from uuid import UUID
import lexode
import parso
import pygments
import pygments.formatters
import pygments.lexers
import zstandard as zstd
from aiostream import pipe, stream
from astunparse import unparse
from coverage import Coverage
from docopt import docopt
from humanize import precisedelta
from loguru import logger as log
from lsm import LSM
from pathlib3x import Path
from termcolor import colored
from tqdm import tqdm
from ulid import ULID
__version__ = (0, 4, 4)
MINUTE = 60 # seconds
HOUR = 60 * MINUTE
DAY = 24 * HOUR
MONTH = 31 * DAY
PRONOTION = "https://youtu.be/ihZEaj9ml4w?list=PLOSNaPJYYhrtliZqyEWDWL0oqeH0hOHnj"
log.remove()
if os.environ.get("DEBUG", False):
log.add(
sys.stdout,
format="<level>{level}</level> {message}",
level="TRACE",
colorize=True,
enqueue=True,
)
else:
log.add(
sys.stdout,
format="<level>{level}</level> {message}",
level="INFO",
colorize=True,
enqueue=True,
)
# The function patch was taken somewhere over the rainbow...
_hdr_pat = re.compile(r"^@@ -(\d+),?(\d+)? \+(\d+),?(\d+)? @@$")
def patch(diff, source):
"""Apply unified diff patch to string s to recover newer string. If
revert is True, treat s as the newer string, recover older string.
"""
s = source.splitlines(True)
p = diff.splitlines(True)
t = ""
i = sl = 0
(midx, sign) = (1, "+")
while i < len(p) and p[i].startswith(("---", "+++")):
i += 1 # skip header lines
while i < len(p):
m = _hdr_pat.match(p[i])
if not m:
raise Exception("Cannot process diff")
i += 1
l = int(m.group(midx)) - 1 + (m.group(midx + 1) == "0")
t += "".join(s[sl:l])
sl = l
while i < len(p) and p[i][0] != "@":
if i + 1 < len(p) and p[i + 1][0] == "\\":
line = p[i][:-1]
i += 2
else:
line = p[i]
i += 1
if len(line) > 0:
if line[0] == sign or line[0] == " ":
t += line[1:]
sl += line[0] != sign
t += "\n" + "".join(s[sl:])
return t
def chunks(iterable, n):
"""Yield successive n-sized chunks from iterable."""
it = iter(iterable)
while chunk := tuple(itertools.islice(it, n)):
yield chunk
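# Usage sketch: list(chunks(range(5), 2)) == [(0, 1), (2, 3), (4,)]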
def diff(source, target, filename=""):
lines = unified_diff(
source.split("\n"), target.split("\n"), filename, filename, lineterm=""
)
out = "\n".join(lines)
return out
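# Round-trip sketch for diff()/patch() above (hedged): applying the unified
# diff of source -> target back onto source recovers target, e.g.
#   patched = patch(diff(old, new, "f.py"), old)  # patched == new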
def mutate(node, index, mutations):
for mutation in mutations:
if not mutation.predicate(node):
continue
yield from mutation.mutate(node, index)
def mutation_create(item):
path, source, coverage, mutation_predicate = item
if not coverage:
msg = "Ignoring file {} because there is no associated coverage."
log.trace(msg, path)
return []
log.trace("Mutating file: {}...", path)
mutations = [m for m in Mutation.ALL if mutation_predicate(m)]
deltas = deltas_compute(source, path, coverage, mutations)
# return the compressed deltas to save some time in the
# mainthread.
out = [(path, zstd.compress(x.encode("utf8"))) for x in deltas]
log.trace("There is {} mutations for the file `{}`", len(out), path)
return out
def install_module_loader(uid):
db = LSM(".mutation.okvslite")
mutation_show(uid.hex)
path, diff = lexode.unpack(db[lexode.pack([1, uid])])
diff = zstd.decompress(diff).decode("utf8")
with open(path) as f:
source = f.read()
patched = patch(diff, source)
import imp
    components = path[:-3].split("/")
    module_path = None  # remains None when no matching file exists on sys.path
while components:
for pythonpath in sys.path:
filepath = os.path.join(pythonpath, "/".join(components))
filepath += ".py"
ok = os.path.exists(filepath)
if ok:
module_path = ".".join(components)
break
else:
components.pop()
continue
break
if module_path is None:
raise Exception("sys.path oops!")
patched_module = imp.new_module(module_path)
try:
exec(patched, patched_module.__dict__)
except Exception:
# TODO: syntaxerror, do not produce those mutations
exec("", patched_module.__dict__)
sys.modules[module_path] = patched_module
def pytest_configure(config):
mutation = config.getoption("mutation", default=None)
if mutation is not None:
uid = UUID(hex=mutation)
install_module_loader(uid)
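# (Hedged) the companion hook registering --mutation was elided from this
# dump; a minimal version would look like:
# def pytest_addoption(parser):
#     parser.addoption("--mutation", dest="mutation", default=None)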
PYTEST = "pytest --exitfirst --no-header --tb=no --quiet --assert=plain"
PYTEST = shlex.split(PYTEST)
# TODO: the `command` is a hack, maybe there is a way to avoid the
# following code: `if command is not None.
def check_tests(root, seed, arguments, command=None):
max_workers = arguments["--max-workers"] or (os.cpu_count() - 1) or 1
max_workers = int(max_workers)
log.info("Let's check that the tests are green...")
if arguments["<file-or-directory>"] and arguments["TEST-COMMAND"]:
log.error("<file-or-directory> and TEST-COMMAND are exclusive!")
sys.exit(1)
if command is not None:
command = list(command)
if max_workers > 1:
command.extend(
[
# Use pytest-xdist to make sure it is possible to run the
# tests in parallel
"--numprocesses={}".format(max_workers),
]
)
else:
if arguments["TEST-COMMAND"]:
command = list(arguments["TEST-COMMAND"])
else:
command = list(PYTEST)
command.extend(arguments["<file-or-directory>"])
if max_workers > 1:
command.append(
# Use pytest-xdist to make sure it is possible to run
# the tests in parallel
"--numprocesses={}".format(max_workers)
)
command.extend(
[
# Setup coverage options to only mutate what is tested.
"--cov=.",
"--cov-branch",
"--no-cov-on-fail",
# Pass random seed
"--randomly-seed={}".format(seed),
]
)
with timeit() as alpha:
out = run(command)
if out == 0:
log.info("Tests are green ")
alpha = alpha() * max_workers
else:
msg = "Tests are not green... return code is {}..."
log.warning(msg, out)
log.warning("I tried the following command: `{}`", " ".join(command))
# Same command without parallelization
if arguments["TEST-COMMAND"]:
command = list(arguments["TEST-COMMAND"])
else:
command = list(PYTEST)
command.extend(arguments["<file-or-directory>"])
command += [
# Setup coverage options to only mutate what is tested.
"--cov=.",
"--cov-branch",
"--no-cov-on-fail",
# Pass random seed
"--randomly-seed={}".format(seed),
]
with timeit() as alpha:
out = run(command)
if out != 0:
msg = "Tests are definitly red! Return code is {}!!"
log.error(msg, out)
log.error("I tried the following command: `{}`", " ".join(command))
sys.exit(2)
# Otherwise, it is possible to run the tests but without
# parallelization.
msg = "Setting max_workers=1 because tests do not pass in parallel"
log.warning(msg)
max_workers = 1
alpha = alpha()
msg = "Time required to run the tests once: {}..."
log.info(msg, humanize(alpha))
return alpha, max_workers
def mutation_only_deadcode(x):
return getattr(x, "deadcode_detection", False)
def mutation_all(x):
return True
def mutation_diff_size(db, uid):
_, diff = lexode.unpack(db[lexode.pack([1, uid])])
out = len(zstd.decompress(diff))
return out
def replay_mutation(db, uid, alpha, seed, max_workers, command):
log.info("* Use Ctrl+C to exit.")
command = list(command)
command.append("--randomly-seed={}".format(seed))
max_workers = 1
if max_workers > 1:
command.append("--numprocesses={}".format(max_workers))
timeout = alpha * 2
while True:
ok = mutation_pass((command, uid, timeout))
if not ok:
mutation_show(uid.hex)
msg = "* Type 'skip' to go to next mutation or just enter to retry."
log.info(msg)
skip = input().startswith("s")
if skip:
db[lexode.pack([2, uid])] = b"\x01"
return
# Otherwise loop to re-test...
else:
del db[lexode.pack([2, uid])]
return
if __name__ == "__main__":
main()
| 28.961027
| 202
| 0.573046
|
ef1a0f68bf7e4627785fe119d1363f10a767d348
| 1,058
|
py
|
Python
|
main.py
|
bijilap/ColorRecognition
|
a070645e5bda40c0d06d03db468f31c79b63d0bd
|
[
"Apache-2.0"
] | 2
|
2018-03-29T12:15:04.000Z
|
2019-01-09T02:09:41.000Z
|
main.py
|
bijilap/ColorRecognition
|
a070645e5bda40c0d06d03db468f31c79b63d0bd
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
bijilap/ColorRecognition
|
a070645e5bda40c0d06d03db468f31c79b63d0bd
|
[
"Apache-2.0"
] | null | null | null |
import argparse
from ColorDetector import ColorDetector
if __name__ == "__main__":
main()
| 30.228571
| 111
| 0.660681
|
ef1bad6bf6953bfcc6d21e0a6fe6026bfa17d421
| 286
|
py
|
Python
|
desafio64.py
|
DantonMatheus/desafios-python
|
709a3f1774596fc536dd4b882c78a6b951c92a9c
|
[
"MIT"
] | null | null | null |
desafio64.py
|
DantonMatheus/desafios-python
|
709a3f1774596fc536dd4b882c78a6b951c92a9c
|
[
"MIT"
] | null | null | null |
desafio64.py
|
DantonMatheus/desafios-python
|
709a3f1774596fc536dd4b882c78a6b951c92a9c
|
[
"MIT"
] | null | null | null |
print('===== DESAFIO 64 =====')
num = 0
cont = 0
soma = 0
num = int(input('Digite um número [999 para SAIR]: '))
while num != 999:
    soma += num
    cont += 1
    num = int(input('Digite um número [999 para SAIR]: '))
print(f'Você digitou {cont} números! A soma entre eles é {soma}')
| 26
| 65
| 0.594406
|