hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a72993531283fe9cd45b23f3481f393933bdc390 | 15,777 | py | Python | main.py | chilipolygon/Amazon-Requests-Module | 20fcfa9b9764e097bc107aa9dc5b0db772ce3ad9 | [
"Apache-2.0"
] | 3 | 2022-01-18T20:54:08.000Z | 2022-02-05T23:27:13.000Z | main.py | chilipolygon/Amazon-Requests-Module | 20fcfa9b9764e097bc107aa9dc5b0db772ce3ad9 | [
"Apache-2.0"
] | null | null | null | main.py | chilipolygon/Amazon-Requests-Module | 20fcfa9b9764e097bc107aa9dc5b0db772ce3ad9 | [
"Apache-2.0"
] | null | null | null | # ---------------------
from bs4 import BeautifulSoup as bs
import requests
import urllib3
import urllib
from urllib.parse import unquote
import re
import os
import sys
import json
import time
from colorama import Fore, init
from pprint import pprint
from datetime import datetime
import uuid
import threading
# ----------------------
from dhooks import Webhook
from dhooks import Webhook, Embed
# ---------------------
init()
init(autoreset=True)
urllib3.disable_warnings()
os.system('cls' if os.name == 'nt' else 'clear')
# ---------------------
# MUST HAVE PRIME
# MUST HAVE ONE CLICK
# MUST SELECT "Keep me signed in"
# MUST USE AGED ACCOUNT
# ====================================
# MUST HAVE THESE FOR BEST SUCCESS
if __name__ == "__main__":
f = open(f'./appdata/config.json')
account = json.load(f)['account']
callback(account)
# asin, promo code, email
# if you don't have a promocode, leave it as ''
| 41.518421 | 272 | 0.548647 |
a72b62dfb661d28b942c1bbe2cd44f6d11909efd | 10,504 | py | Python | tests/test_word_distance.py | hasibaasma/alfpy | c8c0c1300108015746320cede2207ac57e630d3e | [
"MIT"
] | 19 | 2017-02-20T17:42:02.000Z | 2021-12-16T19:07:17.000Z | tests/test_word_distance.py | eggleader/alfpy | e0782e9551458ef17ab29df8af13fc0f8925e894 | [
"MIT"
] | 3 | 2018-03-12T23:54:27.000Z | 2020-12-09T21:53:19.000Z | tests/test_word_distance.py | eggleader/alfpy | e0782e9551458ef17ab29df8af13fc0f8925e894 | [
"MIT"
] | 6 | 2016-12-06T09:12:04.000Z | 2021-09-24T14:40:47.000Z | import unittest
from alfpy import word_pattern
from alfpy import word_vector
from alfpy import word_distance
from alfpy.utils import distmatrix
from . import utils
if __name__ == '__main__':
unittest.main()
| 43.949791 | 77 | 0.58035 |
a72d7496d5e3f428cdf8342b764e52a9a68ac6a0 | 3,092 | py | Python | cdparser/Features.py | opengulf/nyc-directories-support-scripts | e22582b8f4cb3c365e9aac1d860d9c36831277a5 | [
"MIT"
] | 1 | 2021-09-07T20:41:00.000Z | 2021-09-07T20:41:00.000Z | cdparser/Features.py | opengulf/nyc-directories-support-scripts | e22582b8f4cb3c365e9aac1d860d9c36831277a5 | [
"MIT"
] | null | null | null | cdparser/Features.py | opengulf/nyc-directories-support-scripts | e22582b8f4cb3c365e9aac1d860d9c36831277a5 | [
"MIT"
] | 2 | 2021-09-07T20:49:14.000Z | 2021-11-05T02:03:47.000Z | from functools import partial | 28.366972 | 88 | 0.559185 |
a73018c4b01cc941e04ea8bb39a52a6d8c243fb6 | 10,631 | py | Python | IRIS_data_download/IRIS_download_support/obspy/core/tests/test_util_attribdict.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-03-05T01:03:01.000Z | 2020-12-17T05:04:07.000Z | IRIS_data_download/IRIS_download_support/obspy/core/tests/test_util_attribdict.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 4 | 2021-03-31T19:25:55.000Z | 2021-12-13T20:32:46.000Z | IRIS_data_download/IRIS_download_support/obspy/core/tests/test_util_attribdict.py | earthinversion/Fnet_IRIS_data_automated_download | 09a6e0c992662feac95744935e038d1c68539fa1 | [
"MIT"
] | 2 | 2020-09-08T19:33:40.000Z | 2021-04-05T09:47:50.000Z | # -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from future.builtins import * # NOQA @UnusedWildImport
import unittest
from obspy.core import AttribDict
if __name__ == '__main__':
unittest.main(defaultTest='suite')
| 35.555184 | 77 | 0.577462 |
a730e555a53175f843e80e26bb1889169e4678c3 | 458 | py | Python | data/datasetFactory.py | dcsgfl/acceleratefl | 9c928ff06dd4dd02eb27cb71d7d539ba4527ec58 | [
"MIT"
] | null | null | null | data/datasetFactory.py | dcsgfl/acceleratefl | 9c928ff06dd4dd02eb27cb71d7d539ba4527ec58 | [
"MIT"
] | null | null | null | data/datasetFactory.py | dcsgfl/acceleratefl | 9c928ff06dd4dd02eb27cb71d7d539ba4527ec58 | [
"MIT"
] | null | null | null | from cifar10 import CIFAR10
from mnist import MNIST | 28.625 | 66 | 0.676856 |
a73131170f5bdfaf1161caf237d671d9dbf5663d | 253 | py | Python | jsonresume/__init__.py | kelvintaywl/jsonresume-validator | 73ac162cb30ca70699c942def629188f7dfd4d3c | [
"MIT"
] | 42 | 2016-06-03T18:17:24.000Z | 2021-12-09T04:13:14.000Z | jsonresume/__init__.py | kelvintaywl/jsonresume-validator | 73ac162cb30ca70699c942def629188f7dfd4d3c | [
"MIT"
] | 3 | 2016-04-27T12:32:41.000Z | 2020-09-29T16:43:35.000Z | jsonresume/__init__.py | kelvintaywl/jsonresume-validator | 73ac162cb30ca70699c942def629188f7dfd4d3c | [
"MIT"
] | 9 | 2016-05-08T15:31:53.000Z | 2021-04-28T09:17:47.000Z | # -*- coding: utf-8 -*-
"""
JSON Resume Validator
~~~~~~
JSON Resume Validator helps validate python dictionaries to
ensure they are valid representation of a JSON Resume.
"""
from jsonresume.resume import Resume
__all__ = ['Resume']
| 19.461538 | 63 | 0.675889 |
a731c3353defbbffeebffba89c597908966a9fbc | 936 | py | Python | Catchphrase.py | YaruKatsaros/Catchphrase | 5d674cc251be226e233fd427f9533a56f1a24284 | [
"MIT"
] | null | null | null | Catchphrase.py | YaruKatsaros/Catchphrase | 5d674cc251be226e233fd427f9533a56f1a24284 | [
"MIT"
] | null | null | null | Catchphrase.py | YaruKatsaros/Catchphrase | 5d674cc251be226e233fd427f9533a56f1a24284 | [
"MIT"
] | null | null | null | import glob
import os
import sys
import re
savedlines = []
startreading()
| 21.767442 | 96 | 0.573718 |
a733182bb7d063e48b371c3b9b8871a0afe48521 | 19,712 | py | Python | dashboard/api/config.py | x3niasweden/fomalhaut-panel | 8b4b3d81e2c91bef8f24ccbaf9cf898a47ac38a6 | [
"MIT"
] | 14 | 2017-08-01T08:28:00.000Z | 2020-08-29T06:55:16.000Z | dashboard/api/config.py | x3niasweden/fomalhaut-panel | 8b4b3d81e2c91bef8f24ccbaf9cf898a47ac38a6 | [
"MIT"
] | 1 | 2021-03-29T06:16:34.000Z | 2021-03-29T06:16:34.000Z | dashboard/api/config.py | x3niasweden/fomalhaut-panel | 8b4b3d81e2c91bef8f24ccbaf9cf898a47ac38a6 | [
"MIT"
] | 12 | 2017-07-18T02:59:03.000Z | 2021-03-23T04:04:58.000Z | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# created by restran on 2016/1/2
from __future__ import unicode_literals, absolute_import
import traceback
from django.views.decorators.http import require_http_methods
from django.views.decorators.csrf import csrf_protect
from django.db import transaction
from cerberus import Validator
import redis
from fomalhaut import settings
from ..forms import *
from common.utils import http_response_json, json_dumps, json_loads
from accounts.decorators import login_required
from common.utils import error_404
logger = logging.getLogger(__name__)
def do_import_config(upload_file):
"""
json
:param upload_file:
:return:
"""
file_contents = upload_file.read()
try:
json_data = json_loads(file_contents)
except Exception as e:
logger.error(e.message)
return False, u'JSON', []
json_data_schema = {
'clients': {
'type': 'list',
'required': True,
'schema': {
'type': 'dict',
'schema': {
'id': {
'type': 'integer',
'required': True,
},
'name': {
'type': 'string',
'required': True,
},
'app_id': {
'type': 'string',
'required': True,
},
'secret_key': {
'type': 'string',
'required': True,
},
'enable': {
'type': 'boolean',
'required': True,
},
'memo': {
'type': 'string',
'required': True,
}
}
}
},
'client_endpoints': {
'type': 'list',
'required': True,
'schema': {
'type': 'dict',
'schema': {
'id': {
'type': 'integer',
'required': True,
},
'client_id': {
'type': 'integer',
'required': True,
},
'endpoint_id': {
'type': 'integer',
'required': True,
},
'enable': {
'type': 'boolean',
'required': True,
}
}
}
},
'endpoints': {
'type': 'list',
'required': True,
'schema': {
'type': 'dict',
'schema': {
'id': {
'type': 'integer',
'required': True,
},
'unique_name': {
'type': 'string',
'required': True,
},
'name': {
'type': 'string',
'required': True,
},
'version': {
'type': 'string',
'required': True,
},
'url': {
'type': 'string',
'required': True,
},
'memo': {
'type': 'string',
'required': True,
},
'async_http_connect_timeout': {
'type': 'integer',
'required': True,
},
'async_http_request_timeout': {
'type': 'integer',
'required': True,
},
'enable_acl': {
'type': 'boolean',
'required': True,
},
'acl_rules': {
'type': 'list',
'required': True,
'schema': {
'type': 'dict',
'schema': {
'is_permit': {
'type': 'boolean',
'required': True,
},
're_uri': {
'type': 'string',
'required': True,
}
}
}
}
}
}
}
}
validator = Validator(json_data_schema, allow_unknown=True)
if not validator.validate(json_data):
errors = []
for (k, v) in validator.errors.items():
errors.append('%s: %s' % (k, v))
return False, ' JSON JSON ', errors
else:
success, msg, errors = False, '', []
try:
#
with transaction.atomic():
# Client Endpoint
ClientEndpoint.objects.all().delete()
ACLRule.objects.all().delete()
old_client_list = Client.objects.all()
old_client_dict = {}
for t in old_client_list:
old_client_dict[t.app_id] = t
old_endpoint_list = Endpoint.objects.all()
old_endpoint_dict = {}
for t in old_endpoint_list:
old_endpoint_dict[t.unique_name] = t
new_client_dict = {}
for t in json_data['clients']:
# del t['id']
old_client = old_client_dict.get(t['app_id'])
#
if old_client is not None:
form = ClientForm(t, instance=old_client)
del old_client_dict[t['app_id']]
else:
form = ClientForm(t)
if not form.is_valid():
errors = []
form_errors = form.get_form_json()
for (k, v) in form_errors.items():
if v['has_error']:
errors.append('%s: %s' % (k, v['errors']))
msg, errors = ' JSON JSON ', errors
raise Exception('error')
client = form.save()
new_client_dict[t['id']] = client
new_endpoint_dict = {}
for t in json_data['endpoints']:
# del t['id']
old_endpoint = old_endpoint_dict.get(t['unique_name'])
#
if old_endpoint is not None:
form = EndpointForm(t, instance=old_endpoint)
del old_endpoint_dict[t['unique_name']]
else:
form = EndpointForm(t)
if not form.is_valid():
errors = []
form_errors = form.get_form_json()
for (k, v) in form_errors.items():
if v['has_error']:
errors.append('%s: %s' % (k, v['errors']))
msg, errors = ' JSON JSON ', errors
raise Exception('error')
endpoint = form.save(commit=False)
endpoint.save()
new_endpoint_dict[t['id']] = endpoint
acl_rules = t['acl_rules']
for y in acl_rules:
# del t['id']
tf = ACLRuleForm(y)
if not tf.is_valid():
msg, errors = ' JSON JSON ', \
['']
raise Exception('error')
acl_rules = [ACLRule(endpoint_id=endpoint.id,
re_uri=t['re_uri'], is_permit=t['is_permit'])
for t in acl_rules]
# ACLRule
ACLRule.objects.bulk_create(acl_rules)
# id client_endpoint
client_endpoint_list = []
for t in json_data['client_endpoints']:
client = new_client_dict.get(t['client_id'])
endpoint = new_endpoint_dict.get(t['endpoint_id'])
enable = t['enable']
ce = ClientEndpoint(client=client, endpoint=endpoint, enable=enable)
client_endpoint_list.append(ce)
ClientEndpoint.objects.bulk_create(client_endpoint_list)
# Client
Client.objects.filter(id__in=[t.id for t in old_client_dict.values()]).delete()
# Endpoint
Endpoint.objects.filter(id__in=[t.id for t in old_endpoint_dict.values()]).delete()
success, msg = True, u''
except Exception as e:
logger.error(e.message)
return success, msg, errors
| 33.241147 | 122 | 0.491985 |
a733c76add330a704c87d51a39a3121429990715 | 2,209 | py | Python | WX_BG.py | boristown/WX_BG | c715d1f3ffeef60187be0289f26549204d6b963f | [
"MIT"
] | 1 | 2019-08-17T23:21:28.000Z | 2019-08-17T23:21:28.000Z | WX_BG.py | boristown/WX_BG | c715d1f3ffeef60187be0289f26549204d6b963f | [
"MIT"
] | null | null | null | WX_BG.py | boristown/WX_BG | c715d1f3ffeef60187be0289f26549204d6b963f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# filename: WX_BG.py
import prices
import glob
import prediction
import os
import time
import random
#
prices_file_pattern = "Output\\prices\\*.csv"
#
predict_file_pattern = "Output\\predict\\*.csv"
#
prices_file_second_pattern = "Output\\prices_second\\*.csv"
#
predict_file_second_pattern = "Output\\predict_second\\*.csv"
modeStr = {0: "v1", 1:"v2"}
predict_batch_size = 10000
while True:
'''
randint = random.randint(0, 9)
if randint == 0:
modeType = 0
else:
modeType = 1
'''
modeType = 1
print( "mode = " + modeStr[modeType] )
#
prices_files = glob.glob(prices_file_pattern)
for prices_file in prices_files:
os.remove(prices_file)
prices_files_second = glob.glob(prices_file_second_pattern)
for prices_file_second in prices_files_second:
os.remove(prices_file_second)
#
predict_files = glob.glob(predict_file_pattern)
for predict_file in predict_files:
os.remove(predict_file)
predict_files_second = glob.glob(predict_file_second_pattern)
for predict_file_second in predict_files_second:
os.remove(predict_file_second)
time.sleep(10)
print("")
#
if modeType == 0:
symbol_id_list = prices.read_prices()
else:
symbol_id_list = prices.read_pricehistory(predict_batch_size)
try:
if len(symbol_id_list) == 0:
continue
except:
continue
print("")
#
while True:
time.sleep(1)
predict_files = glob.glob(predict_file_pattern)
predict_files_second = glob.glob(predict_file_second_pattern)
if len(predict_files) == 0 or len(predict_files_second) == 0:
continue
print("", predict_files[0])
print("2", predict_files_second[0])
time.sleep(2)
if modeType == 0:
prediction.get_prediction(symbol_id_list, predict_files[0])
else:
prediction.get_predictionhistory(symbol_id_list, predict_files[0], predict_files_second[0])
break
print("")
time.sleep(20)
| 26.939024 | 103 | 0.663649 |
a734a04a2790536248f0af4b3c7aedde27c72873 | 929 | py | Python | hyppo/d_variate/tests/test_dhsic.py | zdbzdb123123/hyppo | c22dcfb7bdf25c9945e6d4ddd7c6bfe5fcdd0cde | [
"MIT"
] | 116 | 2020-02-28T10:29:22.000Z | 2022-03-22T12:19:39.000Z | hyppo/d_variate/tests/test_dhsic.py | zdbzdb123123/hyppo | c22dcfb7bdf25c9945e6d4ddd7c6bfe5fcdd0cde | [
"MIT"
] | 253 | 2020-02-17T16:18:56.000Z | 2022-03-30T16:55:02.000Z | hyppo/d_variate/tests/test_dhsic.py | zdbzdb123123/hyppo | c22dcfb7bdf25c9945e6d4ddd7c6bfe5fcdd0cde | [
"MIT"
] | 27 | 2020-03-02T21:07:41.000Z | 2022-03-08T08:33:23.000Z | import numpy as np
import pytest
from numpy.testing import assert_almost_equal
from ...tools import linear, power
from .. import dHsic # type: ignore
| 27.323529 | 77 | 0.620022 |
a7351f98fb299d1d929cbe7b4a8c9742f60b725d | 2,844 | py | Python | Pages/showHistory.py | ajaydeepsingh/ATLZoo | ab5ba27dc8602da39ce8bb47c4a050ff09d79b82 | [
"MIT"
] | null | null | null | Pages/showHistory.py | ajaydeepsingh/ATLZoo | ab5ba27dc8602da39ce8bb47c4a050ff09d79b82 | [
"MIT"
] | null | null | null | Pages/showHistory.py | ajaydeepsingh/ATLZoo | ab5ba27dc8602da39ce8bb47c4a050ff09d79b82 | [
"MIT"
] | null | null | null | from tkinter import *
from PIL import ImageTk, Image
import pymysql
from tkinter import messagebox
from tkinter import ttk
from datetime import datetime, timedelta
import decimal
a = ATLzooShowHistory()
| 37.92 | 120 | 0.688819 |
a738885fc845ac09ce24d938e1de039911e09569 | 6,061 | py | Python | python/federatedml/protobuf/generated/sample_weight_model_param_pb2.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 715 | 2019-01-24T10:52:03.000Z | 2019-10-31T12:19:22.000Z | python/federatedml/protobuf/generated/sample_weight_model_param_pb2.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 270 | 2019-02-11T02:57:36.000Z | 2019-08-29T11:22:33.000Z | python/federatedml/protobuf/generated/sample_weight_model_param_pb2.py | rubenlozanoaht3m/DataDogm | cd605e8072cca31e8418830c3300657ae2fa5b16 | [
"Apache-2.0"
] | 200 | 2019-01-26T14:21:35.000Z | 2019-11-01T01:14:36.000Z | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: sample-weight-model-param.proto
import sys
_b = sys.version_info[0] < 3 and (lambda x: x) or (lambda x: x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(name='sample-weight-model-param.proto', package='com.webank.ai.fate.core.mlmodel.buffer', syntax='proto3', serialized_options=_b('B\033SampleWeightModelParamProto'), serialized_pb=_b(
'\n\x1fsample-weight-model-param.proto\x12&com.webank.ai.fate.core.mlmodel.buffer\"\xd8\x01\n\x16SampleWeightModelParam\x12\x0e\n\x06header\x18\x01 \x03(\t\x12\x13\n\x0bweight_mode\x18\x02 \x01(\t\x12\x65\n\x0c\x63lass_weight\x18\x03 \x03(\x0b\x32O.com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.ClassWeightEntry\x1a\x32\n\x10\x43lassWeightEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\x42\x1d\x42\x1bSampleWeightModelParamProtob\x06proto3'))
_SAMPLEWEIGHTMODELPARAM_CLASSWEIGHTENTRY = _descriptor.Descriptor(
name='ClassWeightEntry',
full_name='com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.ClassWeightEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key',
full_name='com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.ClassWeightEntry.key',
index=0,
number=1,
type=9,
cpp_type=9,
label=1,
has_default_value=False,
default_value=_b("").decode('utf-8'),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='value',
full_name='com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.ClassWeightEntry.value',
index=1,
number=2,
type=1,
cpp_type=5,
label=1,
has_default_value=False,
default_value=float(0),
message_type=None,
enum_type=None,
containing_type=None,
is_extension=False,
extension_scope=None,
serialized_options=None,
file=DESCRIPTOR),
],
extensions=[],
nested_types=[],
enum_types=[],
serialized_options=_b('8\001'),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[],
serialized_start=242,
serialized_end=292,
)
_SAMPLEWEIGHTMODELPARAM = _descriptor.Descriptor(
name='SampleWeightModelParam',
full_name='com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='header', full_name='com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.header', index=0,
number=1, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='weight_mode', full_name='com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.weight_mode', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='class_weight', full_name='com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.class_weight', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[_SAMPLEWEIGHTMODELPARAM_CLASSWEIGHTENTRY, ],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=76,
serialized_end=292,
)
_SAMPLEWEIGHTMODELPARAM_CLASSWEIGHTENTRY.containing_type = _SAMPLEWEIGHTMODELPARAM
_SAMPLEWEIGHTMODELPARAM.fields_by_name['class_weight'].message_type = _SAMPLEWEIGHTMODELPARAM_CLASSWEIGHTENTRY
DESCRIPTOR.message_types_by_name['SampleWeightModelParam'] = _SAMPLEWEIGHTMODELPARAM
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
SampleWeightModelParam = _reflection.GeneratedProtocolMessageType('SampleWeightModelParam', (_message.Message,), {
'ClassWeightEntry': _reflection.GeneratedProtocolMessageType('ClassWeightEntry', (_message.Message,), {
'DESCRIPTOR': _SAMPLEWEIGHTMODELPARAM_CLASSWEIGHTENTRY,
'__module__': 'sample_weight_model_param_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam.ClassWeightEntry)
}),
'DESCRIPTOR': _SAMPLEWEIGHTMODELPARAM,
'__module__': 'sample_weight_model_param_pb2'
# @@protoc_insertion_point(class_scope:com.webank.ai.fate.core.mlmodel.buffer.SampleWeightModelParam)
})
_sym_db.RegisterMessage(SampleWeightModelParam)
_sym_db.RegisterMessage(SampleWeightModelParam.ClassWeightEntry)
DESCRIPTOR._options = None
_SAMPLEWEIGHTMODELPARAM_CLASSWEIGHTENTRY._options = None
# @@protoc_insertion_point(module_scope)
| 42.985816 | 502 | 0.707144 |
a739bd10614848db1a73028a77c6c885008e1463 | 63,679 | py | Python | postprocessing/pyplotgen/config/Case_definitions.py | larson-group/clubb_release | b4d671e3e238dbe00752c0dead6a0d4f9897350a | [
"Intel",
"Unlicense",
"NetCDF"
] | null | null | null | postprocessing/pyplotgen/config/Case_definitions.py | larson-group/clubb_release | b4d671e3e238dbe00752c0dead6a0d4f9897350a | [
"Intel",
"Unlicense",
"NetCDF"
] | null | null | null | postprocessing/pyplotgen/config/Case_definitions.py | larson-group/clubb_release | b4d671e3e238dbe00752c0dead6a0d4f9897350a | [
"Intel",
"Unlicense",
"NetCDF"
] | 1 | 2022-01-28T22:22:04.000Z | 2022-01-28T22:22:04.000Z | """
:author: Nicolas Strike
:date: Early 2019
This file is mostly a definition of Cases. Each case is defined in the following format
using python dictionaries (values surrounded with < > must have the < > removed to be valid).
.. code-block:: python
:linenos:
CASENAME = {'name': 'casename',
'description': "",
'start_time': <numeric value>, 'end_time': <numeric value>,
'height_min_value': <numeric value>, 'height_max_value': <numeric value>,
'blacklisted_vars': ['list', 'of', 'variable', 'names', 'to', 'exclude', 'from', 'plotting'],
'sam_benchmark_file': <path to sam file>",
'clubb_file': {'zm': <path to file>,
'zt': <path to file>,
'sfc': <path to file>},
'coamps_benchmark_file': {'sm': <path to file>,
'sw': <path to file>},
'clubb_r408_benchmark_file': {'zm': <path to file>,
'zt': <path to file>,
'sfc': <path to file>},
'clubb_hoc_benchmark_file': {'zm': <path to file>',
'zt': <path to file>',
'sfc': <path to file>},
'e3sm_file': <path to file>,
'cam_file': <path to file>,
'sam_file': <path to file>,
'wrf_file': {'zm': <path to file>,
'zt': <path to file>,
'sfc': <path to file>},
'var_groups': [VariableGroupBase, <other variable groups to plot>]}
**Important note**:
When creating a new case, add it to the CASES_TO_PLOT list at the bottom of the file. Additionally, please add it in
alphabetical order.
**Case Definition values explained**:
*name*: must be the same as the filename without the extention.
E.g. to use lba_zt.nc and lba_zm.nc the case's name must be 'lba'. Extensions are determined
by the last instance of _
*start_time*: An integer value representing which timestep to begin the time-averaging interval.
Valid options are from 1 -> list minute value. Give in terms of clubb minutes.
*end_time*: An integer value representing which timestep to end the time-averaging interval.
Valid options are from 1 -> list minute value. Give in terms of clubb minutes.
Also used to determine where to stop timeseries plots
*height_min_value*: The elevation to begin height plots at
*height_max_value*: The elevation to end height plots at
*blacklisted_vars*: List of variables to avoid plotting for this case. Names must use the clubb-name version
*<model name>_file*: The path(s) to nc files for the given model.
(please use the <model name>_OUTPUT_ROOT variables as the beginning of the path).
*var_groups*: These are the groups of variables to be plotted for the given case. var_groups is defined as a
list of python class names, where the classes use the naming scheme VariableGroup____.py and define a variable
group. An example would be: 'var_groups': [VariableGroupBase, VariableGroupWs].
The variables inside a VariableGroup can be found in the file with the same name,
i.e. config/VariableGroupBase.py. An example would be thlm in VariableGroupBase.
"""
import os
from config.VariableGroupBase import VariableGroupBase
from config.VariableGroupCorrelations import VariableGroupCorrelations
from config.VariableGroupIceMP import VariableGroupIceMP
from config.VariableGroupKKMP import VariableGroupKKMP
from config.VariableGroupLiquidMP import VariableGroupLiquidMP
from config.VariableGroupSamProfiles import VariableGroupSamProfiles
from config.VariableGroupScalars import VariableGroupScalars
from config.VariableGroupWs import VariableGroupWs
from config.VariableGroupTaus import VariableGroupTaus
from config.VariableGroupNondimMoments import VariableGroupNondimMoments
from config.VariableGroupNormalizedVariations import VariableGroupNormalizedVariations
# ---------------------------
# Default location of the LES/CLUBB benchmark output. If that directory is
# absent (and not a symlink), fall back to a folder sitting next to this file.
BENCHMARK_OUTPUT_ROOT = "/home/pub/les_and_clubb_benchmark_runs/"
if not (os.path.isdir(BENCHMARK_OUTPUT_ROOT) or os.path.islink(BENCHMARK_OUTPUT_ROOT)):
    _local_benchmark_dir = os.path.dirname(os.path.realpath(__file__)) + "/../les_and_clubb_benchmark_runs/"
    print("Benchmark output was not found in " + BENCHMARK_OUTPUT_ROOT +
          ".\n\tChecking local location: " + _local_benchmark_dir)
    BENCHMARK_OUTPUT_ROOT = _local_benchmark_dir
# Per-model benchmark roots, all derived from BENCHMARK_OUTPUT_ROOT.
SAM_BENCHMARK_OUTPUT_ROOT = BENCHMARK_OUTPUT_ROOT + "sam_benchmark_runs"
COAMPS_BENCHMARK_OUTPUT_ROOT = BENCHMARK_OUTPUT_ROOT + "les_runs"
WRF_LASSO_BENCHMARK_OUTPUT_ROOT = BENCHMARK_OUTPUT_ROOT + "wrf_lasso_runs"
ARCHIVED_CLUBB_OUTPUT_ROOT = BENCHMARK_OUTPUT_ROOT + "archived_clubb_runs"
R408_OUTPUT_ROOT = BENCHMARK_OUTPUT_ROOT  # r408 files live directly under the root
HOC_OUTPUT_ROOT = BENCHMARK_OUTPUT_ROOT + "HOC_20051217"
# These folders are passed in as command line parameters.
# They are not capitalized because they are not intended to
# be final, i.e. they change depending on the cmd line args.
e3sm_output_root = ""
sam_output_root = ""
wrf_output_root = ""
cam_output_root = ""
clubb_output_root = ""
# ---------------------------
# All the names that may represent the height variable in different models
HEIGHT_VAR_NAMES = ['z', 'Z3', 'altitude', 'lev', 'CSP_Zm', 'CSP_Z8Wm']  # CSP_* added for WRF-LASSO cases
TIME_VAR_NAMES = ['time', 'XTIME']
"""
To plot only a subset of cases, regardless of what output exists
in the clubb folder, uncomment the last line of this file and
fill that array with the cases you'd like to plot. This overwrites the
CASES_TO_PLOT variable such that pyplotgen will only know about cases in that
list and ignore all others. The name must match the python variable name
below (all caps).
For example, to plot only bomex and fire:
CASES_TO_PLOT = [BOMEX, FIRE]
"""
# ARM case: minutes 481-540, 0-3500 m; CLUBB zm/zt/sfc output compared against
# SAM and COAMPS benchmarks, r408/HOC archives, E3SM, SAM-CLUBB, and WRF runs.
ARM = {'name': 'arm',
       'description': "Output may differ from plotgen in some models (e.g. WRF) due to a difference in the time "
                      "averaging interval.",
       'start_time': 481, 'end_time': 540,
       'height_min_value': 0, 'height_max_value': 3500,
       'blacklisted_vars': ['radht'],
       'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT + "/JULY_2017/ARM_96x96x110/GCSSARM_96x96x110_67m_40m_1s.nc"},
       'clubb_file': {'zm': clubb_output_root + '/arm_zm.nc',
                      'zt': clubb_output_root + '/arm_zt.nc',
                      'sfc': clubb_output_root + '/arm_sfc.nc'},
       'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/arm_coamps_sm.nc",
                                 'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/arm_coamps_sw.nc"},
       'wrf_benchmark_file': None,
       'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/arm_zm.nc',
                                     'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/arm_zt.nc',
                                     'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/arm_sfc.nc'},
       'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/arm_zm.nc',
                                    'zt': HOC_OUTPUT_ROOT + '/arm_zt.nc',
                                    'sfc': HOC_OUTPUT_ROOT + '/arm_sfc.nc'},
       'e3sm_file': { 'e3sm': e3sm_output_root + "/arm.nc"},
       'cam_file': None,
       'sam_file': {'sam': sam_output_root + "/GCSSARM_96x96x110_67m_40m_1s.nc"},
       'wrf_file': {'zm': wrf_output_root + "/arm_zm_wrf.nc",
                    'zt': wrf_output_root + "/arm_zt_wrf.nc",
                    'sfc': wrf_output_root + "/arm_sfc_wrf.nc"
                    },
       'var_groups': [VariableGroupBase, VariableGroupWs]}
# ARM 97 case: minutes 4321-5580, 0-18000 m; includes a SILHS subcolumn file
# and blacklists several higher-order moments.
ARM_97 = {'name': 'arm_97',
          'description': "",
          'start_time': 4321, 'end_time': 5580,
          'height_min_value': 0, 'height_max_value': 18000,
          'blacklisted_vars': ['rtp3', 'Skrt_zt', 'Skthl_zt', 'thlp3', 'rtpthvp', 'thlpthvp'],
          'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                  "/ARM97_r1315_128x128x128_1km_Morrison/ARM9707.nc"},
          'clubb_file': {'zm': clubb_output_root + '/arm_97_zm.nc',
                         'zt': clubb_output_root + '/arm_97_zt.nc',
                         'sfc': clubb_output_root + '/arm_97_sfc.nc',
                         'subcolumns': clubb_output_root + '/arm_97_nl_lh_sample_points_2D.nc'},
          'coamps_benchmark_file': None,
          'wrf_benchmark_file': None,
          'clubb_r408_benchmark_file': None,
          'clubb_hoc_benchmark_file': None,
          'e3sm_file': None,
          'cam_file': None,
          'sam_file': {'sam': sam_output_root + "/ARM9707_SAM_CLUBB.nc"},
          'wrf_file': None,
          'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupIceMP]}
# ASTEX A209 case: minutes 2340-2400, 0-6000 m; CLUBB output only (no benchmarks).
ASTEX_A209 = {'name': 'astex_a209',
              'description': "",
              'start_time': 2340, 'end_time': 2400,
              'height_min_value': 0, 'height_max_value': 6000,
              'blacklisted_vars': [],
              'sam_benchmark_file': None,
              'clubb_file': {'zm': clubb_output_root + '/astex_a209_zm.nc',
                             'zt': clubb_output_root + '/astex_a209_zt.nc',
                             'sfc': clubb_output_root + '/astex_a209_sfc.nc'},
              'coamps_benchmark_file': None,
              'wrf_benchmark_file': None,
              'clubb_r408_benchmark_file': None,
              'clubb_hoc_benchmark_file': None,
              'e3sm_file': None,
              'cam_file': None,
              'sam_file': None,
              'wrf_file': None,
              'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupCorrelations,
                             VariableGroupKKMP]}
# ATEX case: minutes 421-480, 0-2500 m; COAMPS/r408/HOC benchmarks plus CAM and WRF output.
ATEX = {'name': 'atex',
        'description': "",
        'start_time': 421, 'end_time': 480,
        'height_min_value': 0, 'height_max_value': 2500,
        'blacklisted_vars': [],
        'sam_benchmark_file': None,
        'clubb_file': {'zm': clubb_output_root + '/atex_zm.nc',
                       'zt': clubb_output_root + '/atex_zt.nc',
                       'sfc': clubb_output_root + '/atex_sfc.nc'},
        'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/atex_coamps_sm.nc",
                                  'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/atex_coamps_sw.nc"},
        'wrf_benchmark_file': None,
        'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/atex_zm.nc',
                                      'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/atex_zt.nc',
                                      'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/atex_sfc.nc'},
        'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/atex_zm.nc',
                                     'zt': HOC_OUTPUT_ROOT + '/atex_zt.nc',
                                     'sfc': HOC_OUTPUT_ROOT + '/atex_sfc.nc'},
        'e3sm_file': None,
        'cam_file': {'cam': cam_output_root + "/atex_cam.nc"},
        'sam_file': None,
        'wrf_file': {'zm': wrf_output_root + "/atex_zm_wrf.nc",
                     'zt': wrf_output_root + "/atex_zt_wrf.nc",
                     'sfc': wrf_output_root + "/atex_sfc_wrf.nc"
                     },
        'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupIceMP]}
# BOMEX trade-wind cumulus case: minutes 181-360, 0-2500 m; full benchmark set
# (SAM, COAMPS, r408, HOC) plus E3SM, SAM-CLUBB, and WRF output.
BOMEX = {'name': 'bomex',
         'description': "",
         'start_time': 181, 'end_time': 360,
         'height_min_value': 0, 'height_max_value': 2500,
         'blacklisted_vars': [],
         'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                 "/JULY_2017/BOMEX_64x64x75/BOMEX_64x64x75_100m_40m_1s.nc"},
         'clubb_file': {'zm': clubb_output_root + '/bomex_zm.nc',
                        'zt': clubb_output_root + '/bomex_zt.nc',
                        'sfc': clubb_output_root + '/bomex_sfc.nc'},
         'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/bomex_coamps_sm.nc",
                                   'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/bomex_coamps_sw.nc"},
         'wrf_benchmark_file': None,
         'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/bomex_zm.nc',
                                       'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/bomex_zt.nc',
                                       'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/bomex_sfc.nc'},
         'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/bomex_zm.nc',
                                      'zt': HOC_OUTPUT_ROOT + '/bomex_zt.nc',
                                      'sfc': HOC_OUTPUT_ROOT + '/bomex_sfc.nc'},
         'e3sm_file': { 'e3sm': e3sm_output_root + '/bomex.nc'},
         'cam_file': None,
         'sam_file': {'sam': sam_output_root + "/BOMEX_SAM_CLUBB.nc"},
         'wrf_file': {'zm': wrf_output_root + '/bomex_zm_wrf.nc',
                      'zt': wrf_output_root + '/bomex_zt_wrf.nc',
                      'sfc': wrf_output_root + '/bomex_sfc_wrf.nc'},
         'var_groups': [VariableGroupBase, VariableGroupWs]}
# CGILS s6 case: minutes 12960-14400, 0-5950 m; SAM benchmark only.
CGILS_S6 = {'name': 'cgils_s6',
            'description': "",
            'start_time': 12960, 'end_time': 14400,
            'height_min_value': 0, 'height_max_value': 5950,
            'blacklisted_vars': ['Ngm', 'rgm', 'Skrt_zt', 'Skthl_zt', 'thlp3',
                                 'rtpthvp', 'thlpthvp', 'wprrp', 'wpNrp'],
            'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                    "/SAM6.6/CLOUD_FEEDBACK_s6/ctl_s6_96x96x128_100m_DRZ_N100_tqndg.nc"},
            'clubb_file': {'zm': clubb_output_root + '/cgils_s6_zm.nc',
                           'zt': clubb_output_root + '/cgils_s6_zt.nc',
                           'sfc': clubb_output_root + '/cgils_s6_sfc.nc'},
            'coamps_benchmark_file': None,
            'wrf_benchmark_file': None,
            'clubb_r408_benchmark_file': None,
            'clubb_hoc_benchmark_file': None,
            'e3sm_file': None,
            'cam_file': None,
            'sam_file': None,
            'wrf_file': None,
            'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
# CGILS s11 case: same window/heights as s6 but with its own SAM benchmark file.
CGILS_S11 = {'name': 'cgils_s11',
             'description': "",
             'start_time': 12960, 'end_time': 14400,
             'height_min_value': 0, 'height_max_value': 5950,
             'blacklisted_vars': ['Ngm', 'rgm', 'Skthl_zt', 'Skrt_zt', 'rtpthvp', 'thlpthvp', 'wprrp', 'wpNrp'],
             'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                     "/SAM6.6/CLOUD_FEEDBACK_s11/ctl_s11_96x96x320_50m_DRZ_N100_ref.nc"},
             'clubb_file': {'zm': clubb_output_root + '/cgils_s11_zm.nc',
                            'zt': clubb_output_root + '/cgils_s11_zt.nc',
                            'sfc': clubb_output_root + '/cgils_s11_sfc.nc'},
             'coamps_benchmark_file': None,
             'wrf_benchmark_file': None,
             'clubb_r408_benchmark_file': None,
             'clubb_hoc_benchmark_file': None,
             'e3sm_file': None,
             'cam_file': None,
             'sam_file': None,
             'wrf_file': None,
             'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
# CGILS s12 case: same window/heights as s6/s11 with its own SAM benchmark file.
CGILS_S12 = {'name': 'cgils_s12',
             'description': "",
             'start_time': 12960, 'end_time': 14400,
             'height_min_value': 0, 'height_max_value': 5950,
             'blacklisted_vars': ['Ngm', 'rgm', 'Skrt_zt', 'Skthl_zt', 'rtpthvp', 'thlpthvp', 'wprrp', 'wpNrp'],
             'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                     "/SAM6.6/CLOUD_FEEDBACK_s12/ctl_s12_96x96x192_25m_DRZ_N100_fixnudge.nc"},
             'clubb_file': {'zm': clubb_output_root + '/cgils_s12_zm.nc',
                            'zt': clubb_output_root + '/cgils_s12_zt.nc',
                            'sfc': clubb_output_root + '/cgils_s12_sfc.nc'},
             'coamps_benchmark_file': None,
             'wrf_benchmark_file': None,
             'clubb_r408_benchmark_file': None,
             'clubb_hoc_benchmark_file': None,
             'e3sm_file': None,
             'cam_file': None,
             'sam_file': None,
             'wrf_file': None,
             'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
# CLEX-9 Nov 02 altocumulus case: minutes 181-240, 4000-6072 m; COAMPS benchmark only.
CLEX9_NOV02 = {'name': 'clex9_nov02',
               'description': "",
               'start_time': 181, 'end_time': 240,
               'height_min_value': 4000, 'height_max_value': 6072,
               'blacklisted_vars': ['Ngm'],
               'sam_benchmark_file': None,
               'clubb_file': {'zm': clubb_output_root + '/clex9_nov02_zm.nc',
                              'zt': clubb_output_root + '/clex9_nov02_zt.nc',
                              'sfc': clubb_output_root + '/clex9_nov02_sfc.nc'},
               'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/clex9_nov02_coamps_sm.nc",
                                         'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/clex9_nov02_coamps_sw.nc"},
               'wrf_benchmark_file': None,
               'clubb_r408_benchmark_file': None,
               'clubb_hoc_benchmark_file': None,
               'e3sm_file': None,
               'cam_file': None,
               'sam_file': None,
               'wrf_file': None,
               'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
# CLEX-9 Oct 14 altocumulus case: minutes 181-240, 2230-6688 m; COAMPS benchmark only.
CLEX9_OCT14 = {'name': 'clex9_oct14',
               'description': "",
               'start_time': 181, 'end_time': 240,
               'height_min_value': 2230, 'height_max_value': 6688,
               'blacklisted_vars': ['Ngm'],
               'sam_benchmark_file': None,
               'clubb_file': {'zm': clubb_output_root + '/clex9_oct14_zm.nc',
                              'zt': clubb_output_root + '/clex9_oct14_zt.nc',
                              'sfc': clubb_output_root + '/clex9_oct14_sfc.nc'},
               'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/clex9_oct14_coamps_sm.nc",
                                         'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/clex9_oct14_coamps_sw.nc"},
               'wrf_benchmark_file': None,
               'clubb_r408_benchmark_file': None,
               'clubb_hoc_benchmark_file': None,
               'e3sm_file': None,
               'cam_file': None,
               'sam_file': None,
               'wrf_file': None,
               'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
# DYCOMS-II RF01 stratocumulus case: minutes 181-240, 0-1200 m; SAM/r408/HOC
# benchmarks plus E3SM output.
DYCOMS2_RF01 = {'name': 'dycoms2_rf01',
                'description': "",
                'start_time': 181, 'end_time': 240,
                'height_min_value': 0, 'height_max_value': 1200,
                'blacklisted_vars': [],
                'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                        "/JULY_2017/DYCOMS_RF01_96x96x320/DYCOMS_RF01_96x96x320.nc"},
                'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf01_zm.nc',
                               'zt': clubb_output_root + '/dycoms2_rf01_zt.nc',
                               'sfc': clubb_output_root + '/dycoms2_rf01_sfc.nc'},
                'coamps_benchmark_file': None,
                'wrf_benchmark_file': None,
                'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf01_zm.nc',
                                              'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf01_zt.nc',
                                              'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf01_sfc.nc'},
                'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf01_zm.nc',
                                             'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf01_zt.nc',
                                             'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf01_sfc.nc'},
                'e3sm_file': { 'e3sm': e3sm_output_root + "/dycoms2_rf01.nc"},
                'cam_file': None,
                'sam_file': None,
                'wrf_file': None,
                'var_groups': [VariableGroupBase, VariableGroupWs]}
# DYCOMS-II RF01 fixed-SST variant: minutes 2520-2700, 0-1200 m; SAM benchmark only.
DYCOMS2_RF01_FIXED_SST = {'name': 'dycoms2_rf01_fixed_sst',
                          'description': "Copied from plotgen: Ran with a 5 min timestep and a 48-level grid",
                          'start_time': 2520, 'end_time': 2700,
                          'height_min_value': 0, 'height_max_value': 1200,
                          'blacklisted_vars': ['rtp3', 'Skrt_zt', 'Skthl_zt', 'rtpthvp', 'thlpthvp'],
                          'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                                  "/SAM6.6/DYCOMS_RF01_fixed_sst/DYCOMS_RF01_96x96x320_LES_fixed_sst.nc"},
                          'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf01_fixed_sst_zm.nc',
                                         'zt': clubb_output_root + '/dycoms2_rf01_fixed_sst_zt.nc',
                                         'sfc': clubb_output_root + '/dycoms2_rf01_fixed_sst_sfc.nc'},
                          'coamps_benchmark_file': None,
                          'wrf_benchmark_file': None,
                          'clubb_r408_benchmark_file': None,
                          'clubb_hoc_benchmark_file': None,
                          'e3sm_file': None,
                          'cam_file': None,
                          'sam_file': None,
                          'wrf_file': None,
                          'var_groups': [VariableGroupBase]}
# DYCOMS-II RF02 drizzling/non-sedimenting case: minutes 301-360, 0-1200 m;
# SAM/r408/HOC benchmarks plus SAM-CLUBB output.
DYCOMS2_RF02_DO = {'name': 'dycoms2_rf02_do',
                   'description': "",
                   'start_time': 301, 'end_time': 360,
                   'height_min_value': 0, 'height_max_value': 1200,
                   'blacklisted_vars': [],
                   'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                           "/JULY_2017/DYCOMS_RF02_128x128x96_dr_nosed/DYCOMS_RF02_128x128x96_dr_nosed.nc"},
                   'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf02_do_zm.nc',
                                  'zt': clubb_output_root + '/dycoms2_rf02_do_zt.nc',
                                  'sfc': clubb_output_root + '/dycoms2_rf02_do_sfc.nc'},
                   'coamps_benchmark_file': None,
                   'wrf_benchmark_file': None,
                   'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_do_zm.nc',
                                                 'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_do_zt.nc',
                                                 'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_do_sfc.nc'},
                   'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf02_do_zm.nc',
                                                'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf02_do_zt.nc',
                                                'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf02_do_sfc.nc'},
                   'e3sm_file': None,
                   'cam_file': None,
                   'sam_file': {'sam': sam_output_root + "/DYCOMS_RF02_SAM_CLUBB.nc"},
                   'wrf_file': None,
                   'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupCorrelations,
                                  VariableGroupKKMP]}
# DYCOMS-II RF02 drizzling/sedimenting case: minutes 301-360, 0-1200 m;
# SAM/HOC benchmarks plus E3SM output.
DYCOMS2_RF02_DS = {'name': 'dycoms2_rf02_ds',
                   'description': "",
                   'start_time': 301, 'end_time': 360,
                   'height_min_value': 0, 'height_max_value': 1200,
                   'blacklisted_vars': [],
                   'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                           "/JULY_2017/DYCOMS_RF02_128x128x96_dr_sed/DYCOMS_RF02_128x128x96_dr_sed.nc"},
                   'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf02_ds_zm.nc',
                                  'zt': clubb_output_root + '/dycoms2_rf02_ds_zt.nc',
                                  'sfc': clubb_output_root + '/dycoms2_rf02_ds_sfc.nc'},
                   'coamps_benchmark_file': None,
                   'wrf_benchmark_file': None,
                   'clubb_r408_benchmark_file': None,
                   'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_zm.nc',
                                                'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_zt.nc',
                                                'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_sfc.nc'},
                   'e3sm_file': {'e3sm': e3sm_output_root + "/dycoms2_rf02_ds.nc"},
                   'cam_file': None,
                   'sam_file': None,
                   'wrf_file': None,
                   'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupCorrelations,
                                  VariableGroupKKMP]}
# DYCOMS-II RF02 non-drizzling/non-sedimenting restart case: minutes 301-360,
# 0-1200 m; SAM/r408/HOC benchmarks.
DYCOMS2_RF02_ND = {'name': 'dycoms2_rf02_nd',
                   'description': "Copied from plotgen: ** Generated by doing a restart run after 7200 seconds. Note: "
                                  "t = 0 corresponds to start time of the restart run, not the original run. ** ",
                   'start_time': 301, 'end_time': 360,
                   'height_min_value': 0, 'height_max_value': 1200,
                   'blacklisted_vars': ['wprrp', 'wpNrp', 'corr_w_rr_1', 'corr_w_Nr_1'],
                   'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                           "/JULY_2017/DYCOMS_RF02_128x128x96_nodr_nosed/DYCOMS_RF02_128x128x96_nodr_nosed.nc"},
                   'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf02_nd_zm.nc',
                                  'zt': clubb_output_root + '/dycoms2_rf02_nd_zt.nc',
                                  'sfc': clubb_output_root + '/dycoms2_rf02_nd_sfc.nc'},
                   'coamps_benchmark_file': None,
                   'wrf_benchmark_file': None,
                   'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_nd_zm.nc',
                                                 'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_nd_zt.nc',
                                                 'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_nd_sfc.nc'},
                   'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf02_nd_zm.nc',
                                                'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf02_nd_zt.nc',
                                                'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf02_nd_sfc.nc'},
                   'e3sm_file': None,
                   'cam_file': None,
                   'sam_file': None,
                   'wrf_file': None,
                   'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupKKMP]}
# DYCOMS-II RF02 drizzling/sedimenting restart case: minutes 181-240 of the
# restarted run, 0-1200 m; reuses the dr_sed SAM benchmark and the r408/HOC
# dycoms2_rf02_ds archives.
# Fix: corrected the typo "verticle" -> "vertical" in the user-facing description.
DYCOMS2_RF02_DS_RESTART = {'name': 'dycoms2_rf02_ds_restart',
                           'description': "Copied from plotgen: ** Uniform, coarse vertical grid spacing of 40 m. **",
                           'start_time': 181, 'end_time': 240,
                           'height_min_value': 0, 'height_max_value': 1200,
                           'blacklisted_vars': [],
                           'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                                   "/JULY_2017/DYCOMS_RF02_128x128x96_dr_sed/DYCOMS_RF02_128x128x96_dr_sed.nc"},
                           'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf02_ds_restart_zm.nc',
                                          'zt': clubb_output_root + '/dycoms2_rf02_ds_restart_zt.nc',
                                          'sfc': clubb_output_root + '/dycoms2_rf02_ds_restart_sfc.nc'},
                           'coamps_benchmark_file': None,
                           'wrf_benchmark_file': None,
                           'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_ds_zm.nc',
                                                         'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_ds_zt.nc',
                                                         'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_ds_sfc.nc'},
                           'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_zm.nc',
                                                        'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_zt.nc',
                                                        'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf02_ds_sfc.nc'},
                           'e3sm_file': None,
                           'cam_file': None,
                           'sam_file': None,
                           'wrf_file': None,
                           'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP,
                                          VariableGroupCorrelations, VariableGroupKKMP]}
# DYCOMS-II RF02 non-drizzling/sedimenting case: minutes 301-360, 0-1200 m;
# SAM/r408/HOC benchmarks plus SAM-CLUBB output.
DYCOMS2_RF02_SO = {'name': 'dycoms2_rf02_so',
                   'description': "Copied from plotgen: " +
                                  "** WRF-type stretched (unevenly spaced) grid (grid_type = 3) ** ",
                   'start_time': 301, 'end_time': 360,
                   'height_min_value': 0, 'height_max_value': 1200,
                   'blacklisted_vars': ['wprrp', 'wpNrp'],
                   'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                           "/JULY_2017/DYCOMS_RF02_128x128x96_nodr_sed/DYCOMS_RF02_128x128x96_nodr_sed.nc"},
                   'clubb_file': {'zm': clubb_output_root + '/dycoms2_rf02_so_zm.nc',
                                  'zt': clubb_output_root + '/dycoms2_rf02_so_zt.nc',
                                  'sfc': clubb_output_root + '/dycoms2_rf02_so_sfc.nc'},
                   'coamps_benchmark_file': None,
                   'wrf_benchmark_file': None,
                   'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_so_zm.nc',
                                                 'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_so_zt.nc',
                                                 'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/dycoms2_rf02_so_sfc.nc'},
                   'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/dycoms2_rf02_so_zm.nc',
                                                'zt': HOC_OUTPUT_ROOT + '/dycoms2_rf02_so_zt.nc',
                                                'sfc': HOC_OUTPUT_ROOT + '/dycoms2_rf02_so_sfc.nc'},
                   'e3sm_file': None,
                   'cam_file': None,
                   'sam_file': {'sam': sam_output_root + "/DYCOMS_RF02_SAM_CLUBB.nc"},
                   'wrf_file': None,
                   'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupKKMP]}
# FIRE stratocumulus case: minutes 61-120, 0-1000 m; COAMPS/r408/HOC benchmarks
# plus WRF output.
FIRE = {'name': 'fire',
        'description': "",
        'start_time': 61, 'end_time': 120,
        'height_min_value': 0, 'height_max_value': 1000,
        'blacklisted_vars': [],
        'sam_benchmark_file': None,
        'clubb_file': {'zm': clubb_output_root + '/fire_zm.nc',
                       'zt': clubb_output_root + '/fire_zt.nc',
                       'sfc': clubb_output_root + '/fire_sfc.nc'},
        'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/fire_coamps_sm.nc",
                                  'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/fire_coamps_sw.nc"},
        'wrf_benchmark_file': None,
        'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/fire_zm.nc',
                                      'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/fire_zt.nc',
                                      'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/fire_sfc.nc'},
        'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + "/fire_zm.nc",
                                     'zt': HOC_OUTPUT_ROOT + '/fire_zt.nc',
                                     'sfc': HOC_OUTPUT_ROOT + '/fire_sfc.nc'},
        'e3sm_file': None,
        'cam_file': None,
        'sam_file': None,
        'wrf_file': {'zm': wrf_output_root + "/fire_zm_wrf.nc",
                     'zt': wrf_output_root + "/fire_zt_wrf.nc",
                     'sfc': wrf_output_root + "/fire_sfc_wrf.nc"
                     },
        'var_groups': [VariableGroupBase, VariableGroupWs]}
# No budgets
# GABLS2 stable-boundary-layer case: minutes 2101-2160, 0-2500 m; COAMPS
# benchmark (incl. an sfc file) and a long blacklist.
GABLS2 = {'name': 'gabls2',
          'description': "",
          'start_time': 2101, 'end_time': 2160,
          'height_min_value': 0, 'height_max_value': 2500,
          'blacklisted_vars': ['tau_zm', 'radht', 'Skw_zt', 'Skrt_zt', 'Skthl_zt', 'corr_w_chi_1', 'corr_chi_eta_1',
                               'rcp2', 'thlpthvp', 'rtpthvp'],
          'sam_benchmark_file': None,
          'clubb_file': {'zm': clubb_output_root + '/gabls2_zm.nc',
                         'zt': clubb_output_root + '/gabls2_zt.nc',
                         'sfc': clubb_output_root + '/gabls2_sfc.nc'},
          'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/gabls2_coamps_sm.nc",
                                    'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/gabls2_coamps_sw.nc",
                                    'sfc': COAMPS_BENCHMARK_OUTPUT_ROOT + "/gabls2_coamps_sfc.nc"},
          'wrf_benchmark_file': None,
          'clubb_r408_benchmark_file': None,
          'clubb_hoc_benchmark_file': None,
          'e3sm_file': None,
          'cam_file': None,
          'sam_file': None,
          'wrf_file': None,
          'var_groups': [VariableGroupBase]}
# GABLS2 nightly variant: same window as GABLS2 but no benchmarks, no blacklist,
# and the scalar variable group added. Reads the same gabls2 CLUBB files.
GABLS2_NIGHTLY = {'name': 'gabls2_nightly',
                  'description': "",
                  'start_time': 2101, 'end_time': 2160,
                  'height_min_value': 0, 'height_max_value': 2500,
                  'blacklisted_vars': [],
                  'sam_benchmark_file': None,
                  'clubb_file': {'zm': clubb_output_root + '/gabls2_zm.nc',
                                 'zt': clubb_output_root + '/gabls2_zt.nc',
                                 'sfc': clubb_output_root + '/gabls2_sfc.nc'},
                  'coamps_benchmark_file': None,
                  'wrf_benchmark_file': None,
                  'clubb_r408_benchmark_file': None,
                  'clubb_hoc_benchmark_file': None,
                  'e3sm_file': None,
                  'cam_file': None,
                  'sam_file': None,
                  'wrf_file': None,
                  'var_groups': [VariableGroupBase, VariableGroupScalars]}
# GABLS3 case: minutes 1081-1200, 0-4970 m; CLUBB output only.
GABLS3 = {'name': 'gabls3',
          'description': "",
          'start_time': 1081, 'end_time': 1200,
          'height_min_value': 0, 'height_max_value': 4970,
          'blacklisted_vars': [],
          'sam_benchmark_file': None,
          'clubb_file': {'zm': clubb_output_root + '/gabls3_zm.nc',
                         'zt': clubb_output_root + '/gabls3_zt.nc',
                         'sfc': clubb_output_root + '/gabls3_sfc.nc'},
          'coamps_benchmark_file': None,
          'wrf_benchmark_file': None,
          'clubb_r408_benchmark_file': None,
          'clubb_hoc_benchmark_file': None,
          'e3sm_file': None,
          'cam_file': None,
          'sam_file': None,
          'wrf_file': None,
          'var_groups': [VariableGroupBase]}
# GABLS3 nocturnal case: minutes 421-480, 0-800 m; SAM benchmark only.
GABLS3_NIGHT = {'name': 'gabls3_night',
                'description': "Copied from plotgen: Uses a 5-min timestep with 48 levels",
                'start_time': 421, 'end_time': 480,
                'height_min_value': 0, 'height_max_value': 800,
                'blacklisted_vars': [],
                'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                        "/SAM6.6/GABLS3_NIGHT/gabls3_night.nc"},
                'clubb_file': {'zm': clubb_output_root + '/gabls3_night_zm.nc',
                               'zt': clubb_output_root + '/gabls3_night_zt.nc',
                               'sfc': clubb_output_root + '/gabls3_night_sfc.nc'},
                'coamps_benchmark_file': None,
                'wrf_benchmark_file': None,
                'clubb_r408_benchmark_file': None,
                'clubb_hoc_benchmark_file': None,
                'e3sm_file': None,
                'cam_file': None,
                'sam_file': None,
                'wrf_file': None,
                'var_groups': [VariableGroupBase]}
# GATE shear case: minutes 540-720, 0-24000 m; SAM benchmark and SAM-CLUBB
# output; no CLUBB files ('clubb_file' is None).
GATE_SHEAR_RLSF = {'name': 'gate_shear_rlsf',
                   'description': "",
                   'start_time': 540, 'end_time': 720,
                   'height_min_value': 0, 'height_max_value': 24000,
                   'blacklisted_vars': [],
                   'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                           "/SAM6.6/GATE_shear_rlsf/GATE_shear_rlsf_64x64x128_1km_5s.nc"},
                   'clubb_file': None,
                   'coamps_benchmark_file': None,
                   'wrf_benchmark_file': None,
                   'clubb_r408_benchmark_file': None,
                   'clubb_hoc_benchmark_file': None,
                   'e3sm_file': None,
                   'cam_file': None,
                   'sam_file': {'sam': sam_output_root + "/GATE_SAM_CLUBB.nc"},
                   'wrf_file': None,
                   'var_groups': [VariableGroupBase]}
# Use to plot IOP forced SAM runs
# Generic IOP case shell: minutes 181-1440, 0-27750 m, no model files wired in.
# NOTE(review): unlike every other case dict in this file, IOP keeps a legacy
# 'clubb_datasets' key and omits the 'sam_file' and 'wrf_file' keys -- confirm
# that downstream code tolerates the missing keys before relying on this case.
IOP = {'name': 'iop',
       'description': "",
       'start_time': 181, 'end_time': 1440,
       'height_min_value': 0, 'height_max_value': 27750,
       'blacklisted_vars': [],
       'clubb_datasets': None,
       'sam_benchmark_file': None,
       'clubb_file': None,
       'coamps_benchmark_file': None,
       'wrf_benchmark_file': None,
       'clubb_r408_benchmark_file': None,
       'clubb_hoc_benchmark_file': None,
       'e3sm_file': None,
       'cam_file': None,
       'var_groups': [VariableGroupBase, VariableGroupSamProfiles]}
# June 25 altocumulus case: minutes 181-240, 4825-7290 m; COAMPS benchmark only.
JUN25_ALTOCU = {'name': 'jun25_altocu',
                'description': "",
                'start_time': 181, 'end_time': 240,
                'height_min_value': 4825, 'height_max_value': 7290,
                'blacklisted_vars': ['Ngm', 'wprrp', 'wpNrp'],
                'sam_benchmark_file': None,
                'clubb_file': {'zm': clubb_output_root + '/jun25_altocu_zm.nc',
                               'zt': clubb_output_root + '/jun25_altocu_zt.nc',
                               'sfc': clubb_output_root + '/jun25_altocu_sfc.nc'},
                'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/jun25_altocu_qc3_coamps_sm.nc",
                                          'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/jun25_altocu_qc3_coamps_sw.nc"},
                'wrf_benchmark_file': None,
                'clubb_r408_benchmark_file': None,
                'clubb_hoc_benchmark_file': None,
                'e3sm_file': None,
                'cam_file': None,
                'sam_file': None,
                'wrf_file': None,
                'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
# LBA deep-convection case: minutes 300-360, 0-14000 m; SAM benchmark,
# SAM-CLUBB output, and a SILHS subcolumn file.
LBA = {'name': 'lba',
       'description': "Note that sam-plotgen plots up to a height of 16000 not 12000.\n"
                      "Copied from plotgen: SAM-LES uses Morrison microphysics " +
                      "and CLUBB standalone uses COAMPS microphysics",
       'start_time': 300, 'end_time': 360,
       'height_min_value': 0, 'height_max_value': 14000,
       'blacklisted_vars': ['wprrp', 'wpNrp', 'Ngm'],
       'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                               "/JULY_2017/LBA_128kmx128kmx128_1km_Morrison/LBA_128kmx128kmx128_1km_Morrison.nc"},
       'clubb_file': {'zm': clubb_output_root + '/lba_zm.nc',
                      'zt': clubb_output_root + '/lba_zt.nc',
                      'sfc': clubb_output_root + '/lba_sfc.nc',
                      'subcolumns': clubb_output_root + '/lba_nl_lh_sample_points_2D.nc'},
       'coamps_benchmark_file': None,
       'wrf_benchmark_file': None,
       'clubb_r408_benchmark_file': None,
       'clubb_hoc_benchmark_file': None,
       'e3sm_file': None,
       'cam_file': None,
       'sam_file': {'sam': sam_output_root + "/LBA_SAM_CLUBB.nc"},
       'wrf_file': None,
       'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP, VariableGroupWs]}
# MC3E case: minutes 60-64800, 0-18000 m; SAM benchmark and a SILHS subcolumn file.
MC3E = {'name': 'mc3e',
        'description': "",
        'start_time': 60, 'end_time': 64800,
        'height_min_value': 0, 'height_max_value': 18000,
        'blacklisted_vars': ['rtp3', 'Skrt_zt', 'Skthl_zt', 'rtpthvp', 'thlpthvp', 'wprrp', 'wpNrp'],
        'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                "/MC3E_r1359_128x128x128_1km_Morrison/MC3E.nc"},
        'clubb_file': {'zm': clubb_output_root + '/mc3e_zm.nc',
                       'zt': clubb_output_root + '/mc3e_zt.nc',
                       'sfc': clubb_output_root + '/mc3e_sfc.nc',
                       'subcolumns': clubb_output_root + '/mc3e_nl_lh_sample_points_2D.nc'},
        'coamps_benchmark_file': None,
        'wrf_benchmark_file': None,
        'clubb_r408_benchmark_file': None,
        'clubb_hoc_benchmark_file': None,
        'e3sm_file': None,
        'cam_file': None,
        'sam_file': None,
        'wrf_file': None,
        'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
# M-PACE A mixed-phase arctic case: minutes 4141-4320, 0-10000 m; SAM benchmark only.
MPACE_A = {'name': 'mpace_a',
           'description': "Copied from plotgen: SAM-LES and CLUBB standalone use Morrison microphysics",
           'start_time': 4141, 'end_time': 4320,
           'height_min_value': 0, 'height_max_value': 10000,
           'blacklisted_vars': ['Skrt_zt', 'Skthl_zt', 'rtpthvp', 'thlpthvp', 'Ngm', 'wpNrp'],
           'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                   "/SAM6.6/MPACE_A/MPACE_A_128x128x69_morr_CEM.nc"},
           'clubb_file': {'zm': clubb_output_root + '/mpace_a_zm.nc',
                          'zt': clubb_output_root + '/mpace_a_zt.nc',
                          'sfc': clubb_output_root + '/mpace_a_sfc.nc'},
           'coamps_benchmark_file': None,
           'wrf_benchmark_file': None,
           'clubb_r408_benchmark_file': None,
           'clubb_hoc_benchmark_file': None,
           'e3sm_file': None,
           'cam_file': None,
           'sam_file': None,
           'wrf_file': None,
           'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
# M-PACE B case: minutes 541-720, 0-2750 m; COAMPS benchmark (incl. sfc file).
MPACE_B = {'name': 'mpace_b',
           'description': "Copied from plotgen: **The nightly simulation uses COAMPS microphysics**",
           'start_time': 541, 'end_time': 720,
           'height_min_value': 0, 'height_max_value': 2750,
           'blacklisted_vars': ['Ngm', 'wpNrp'],
           'sam_benchmark_file': None,
           'clubb_file': {'zm': clubb_output_root + '/mpace_b_zm.nc',
                          'zt': clubb_output_root + '/mpace_b_zt.nc',
                          'sfc': clubb_output_root + '/mpace_b_sfc.nc'},
           'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/mpace_b_coamps_sm.nc",
                                     'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/mpace_b_coamps_sw.nc",
                                     'sfc': COAMPS_BENCHMARK_OUTPUT_ROOT + "/mpace_b_coamps_sfc.nc"},
           'wrf_benchmark_file': None,
           'clubb_r408_benchmark_file': None,
           'clubb_hoc_benchmark_file': None,
           'e3sm_file': None,
           'cam_file': None,
           'sam_file': None,
           'wrf_file': None,
           'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
# M-PACE B SILHS variant: same window/heights as MPACE_B, with a SILHS
# subcolumn file and the COAMPS sm/sw benchmark (no sfc file).
MPACE_B_SILHS = {'name': 'mpace_b_silhs',
                 'description': "",
                 'start_time': 541, 'end_time': 720,
                 'height_min_value': 0, 'height_max_value': 2750,
                 'blacklisted_vars': ['Ngm', 'wpNrp'],
                 'sam_benchmark_file': None,
                 'clubb_file': {'zm': clubb_output_root + '/mpace_b_silhs_zm.nc',
                                'zt': clubb_output_root + '/mpace_b_silhs_zt.nc',
                                'sfc': clubb_output_root + '/mpace_b_silhs_sfc.nc',
                                'subcolumns': clubb_output_root + '/mpace_b_silhs_nl_lh_sample_points_2D.nc'},
                 'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/mpace_b_coamps_sm.nc",
                                           'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/mpace_b_coamps_sw.nc"},
                 'wrf_benchmark_file': None,
                 'clubb_r408_benchmark_file': None,
                 'clubb_hoc_benchmark_file': None,
                 'e3sm_file': None,
                 'cam_file': None,
                 'sam_file': None,
                 'wrf_file': None,
                 'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
# Nov 11 altocumulus case: minutes 91-150, 4160-6150 m; COAMPS/r408/HOC benchmarks.
NOV11_ALTOCU = {'name': 'nov11_altocu',
                'description': "",
                'start_time': 91, 'end_time': 150,
                'height_min_value': 4160, 'height_max_value': 6150,
                'blacklisted_vars': ['Ngm'],
                'sam_benchmark_file': None,
                'clubb_file': {'zm': clubb_output_root + '/nov11_altocu_zm.nc',
                               'zt': clubb_output_root + '/nov11_altocu_zt.nc',
                               'sfc': clubb_output_root + '/nov11_altocu_sfc.nc'},
                'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/nov11_altocu_coamps_sm.nc",
                                          'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/nov11_altocu_coamps_sw.nc"},
                'wrf_benchmark_file': None,
                'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/nov11_altocu_zm.nc',
                                              'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/nov11_altocu_zt.nc',
                                              'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/nov11_altocu_sfc.nc'},
                'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/nov11_altocu_zm.nc',
                                             'zt': HOC_OUTPUT_ROOT + '/nov11_altocu_zt.nc',
                                             'sfc': HOC_OUTPUT_ROOT + '/nov11_altocu_sfc.nc'},
                'e3sm_file': None,
                'cam_file': None,
                'sam_file': None,
                'wrf_file': None,
                'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupIceMP]}
# RICO precipitating trade-cumulus case: minutes 4201-4320, 0-5000 m; SAM and
# COAMPS benchmarks plus E3SM, CAM, and SAM output.
RICO = {'name': 'rico',
        'description': "Cam output may differ from plotgen due to a difference in time averaging.",
        'start_time': 4201, 'end_time': 4320,
        'height_min_value': 0, 'height_max_value': 5000,
        'blacklisted_vars': [],
        'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                "/JULY_2017/RICO_256x256x100_drizzle/RICO_256x256x100_drizzle.nc"},
        'clubb_file': {'zm': clubb_output_root + '/rico_zm.nc',
                       'zt': clubb_output_root + '/rico_zt.nc',
                       'sfc': clubb_output_root + '/rico_sfc.nc'},
        'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/rico_coamps_sm.nc",
                                  'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/rico_coamps_sw.nc"},
        'wrf_benchmark_file': None,
        'clubb_r408_benchmark_file': None,
        'clubb_hoc_benchmark_file': None,
        'e3sm_file': {'e3sm': e3sm_output_root + "/rico.nc"},
        'cam_file': {'cam': cam_output_root + "/rico_cam.nc"},
        'sam_file': {'sam': sam_output_root + "/RICO_256x256x100_drizzle.nc"},
        'wrf_file': None,
        'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupWs, VariableGroupCorrelations,
                       VariableGroupKKMP]}
# SILHS variant of the RICO case: identical benchmark files, but the CLUBB
# output additionally provides a 'subcolumns' sample-points file, and the
# plotted height range and blacklist differ ('wpNrp' is excluded).
RICO_SILHS = {'name': 'rico_silhs',
              'description': "Copied from plotgen: CLUBB and SAM use Khairoutdinov-Kogan microphysics",
              'start_time': 4201, 'end_time': 4320,
              'height_min_value': 0, 'height_max_value': 4500,
              'blacklisted_vars': ['wpNrp'],
              'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                      "/JULY_2017/RICO_256x256x100_drizzle/RICO_256x256x100_drizzle.nc"},
              'clubb_file': {'zm': clubb_output_root + '/rico_silhs_zm.nc',
                             'zt': clubb_output_root + '/rico_silhs_zt.nc',
                             'sfc': clubb_output_root + '/rico_silhs_sfc.nc',
                             'subcolumns': clubb_output_root + '/rico_silhs_nl_lh_sample_points_2D.nc'},
              'coamps_benchmark_file': {'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/rico_coamps_sm.nc",
                                        'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/rico_coamps_sw.nc"},
              'wrf_benchmark_file': None,
              'clubb_r408_benchmark_file': None,
              'clubb_hoc_benchmark_file': None,
              'e3sm_file': None,
              'cam_file': None,
              'sam_file': None,
              'wrf_file': None,
              'var_groups': [VariableGroupBase, VariableGroupLiquidMP, VariableGroupWs, VariableGroupCorrelations,
                             VariableGroupKKMP]}
# Case definition for the 'neutral' case: CLUBB output compared against a SAM
# LES benchmark only (all other model/benchmark entries are None).
NEUTRAL = {'name': 'neutral',
           'description': "",
           'start_time': 181, 'end_time': 360,
           'height_min_value': 0, 'height_max_value': 1500,
           'blacklisted_vars': [],
           'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                   "/NEUTRAL/NEUTRAL_96x96x96_32m_10m_LES.nc"},
           'clubb_file': {'zm': clubb_output_root + '/neutral_zm.nc',
                          'zt': clubb_output_root + '/neutral_zt.nc',
                          'sfc': clubb_output_root + '/neutral_sfc.nc'},
           'coamps_benchmark_file': None,
           'wrf_benchmark_file': None,
           'clubb_r408_benchmark_file': None,
           'clubb_hoc_benchmark_file': None,
           'e3sm_file': None,
           'cam_file': None,
           'sam_file': None,
           'wrf_file': None,
           'var_groups': [VariableGroupBase, VariableGroupWs]}
# Case definition for the 'twp_ice' case; note the larger blacklist and the
# deep plotted height range (0-19000) compared to the other cases.
TWP_ICE = {'name': 'twp_ice',
           'description': "Copied from plotgen: Both vertical and horizontal fluxes applied to THLM and RTM for LES. "
                          "LES nudged U, V, RTM and THLM toward observed values. Forcings for LES derived from 10mb "
                          "forcing data.",
           'start_time': 60, 'end_time': 9900,
           'height_min_value': 0, 'height_max_value': 19000,
           'blacklisted_vars': ['rtp3', 'Skrt_zt', 'Skthl_zt', 'rtpthvp', 'thlpthvp', 'wprrp', 'wpNrp'],
           'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                   "/TWP_ICE_r1315_128x128x128_1km_Morrison/TWP_ICE.nc"},
           'clubb_file': {'zm': clubb_output_root + '/twp_ice_zm.nc',
                          'zt': clubb_output_root + '/twp_ice_zt.nc',
                          'sfc': clubb_output_root + '/twp_ice_sfc.nc',
                          'subcolumns': clubb_output_root + '/twp_ice_nl_lh_sample_points_2D.nc'},
           'coamps_benchmark_file': None,
           'wrf_benchmark_file': None,
           'clubb_r408_benchmark_file': None,
           'clubb_hoc_benchmark_file': None,
           'e3sm_file': None,
           'cam_file': None,
           'sam_file': None,
           'wrf_file': None,
           'var_groups': [VariableGroupBase, VariableGroupWs, VariableGroupLiquidMP, VariableGroupIceMP]}
# Case definition for the 'wangara' case; this is one of the few cases that
# also has WRF model output and r408/HOC benchmark files.
WANGARA = {'name': 'wangara',
           'description': "Note that COAMPS benchmark data is actually RAMS data by default.",
           'start_time': 181, 'end_time': 240,
           'height_min_value': 0, 'height_max_value': 1900,
           'blacklisted_vars': ['Ngm'],
           'sam_benchmark_file': {'sam_benchmark': SAM_BENCHMARK_OUTPUT_ROOT +
                                                   "/WANGARA/WANGARA_64x64x80_100m_40m_LES.nc"},
           'clubb_file': {'zm': clubb_output_root + '/wangara_zm.nc',
                          'zt': clubb_output_root + '/wangara_zt.nc',
                          'sfc': clubb_output_root + '/wangara_sfc.nc'},
           # NOTE: both the 'sw' and 'sm' entries intentionally point at the
           # same RAMS file (see the case description above).
           'coamps_benchmark_file': {'sw': COAMPS_BENCHMARK_OUTPUT_ROOT + "/wangara_rams.nc",
                                     'sm': COAMPS_BENCHMARK_OUTPUT_ROOT + "/wangara_rams.nc"},
           'wrf_benchmark_file': None,
           'clubb_r408_benchmark_file': {'zm': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/wangara_zm.nc',
                                         'zt': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/wangara_zt.nc',
                                         'sfc': R408_OUTPUT_ROOT + '/Chris_Golaz_best_ever/wangara_sfc.nc'},
           'clubb_hoc_benchmark_file': {'zm': HOC_OUTPUT_ROOT + '/wangara_zm.nc',
                                        'zt': HOC_OUTPUT_ROOT + '/wangara_zt.nc',
                                        'sfc': HOC_OUTPUT_ROOT + '/wangara_sfc.nc'},
           'e3sm_file': None,
           'cam_file': None,
           'sam_file': None,
           'wrf_file': {'zm': wrf_output_root + "/wangara_zm_wrf.nc",
                        'zt': wrf_output_root + "/wangara_zt_wrf.nc",
                        'sfc': wrf_output_root + "/wangara_sfc_wrf.nc"
                        },
           'var_groups': [VariableGroupBase, VariableGroupWs]}
# The eight LASSO comparison cases below are identical except for the run
# date, so they are generated from one helper instead of eight copy-pasted
# dicts (the previous hand-written dicts differed only in the date strings).
def _make_lasso_case(date):
    """Build the case-definition dict for one WRF-CLUBB vs. WRF-LASSO comparison case.

    :param date: ISO-formatted date string 'YYYY-MM-DD' identifying the LASSO
        run; it appears in the case name (with dashes stripped) and in every
        file path of the case.
    :return: a case-definition dict with the same key schema as the other
        cases defined in this file.
    """
    return {'name': 'lasso_' + date.replace('-', ''),
            'description': "Comparing WRF-CLUBB output to WRF-LASSO output.",
            'start_time': 301, 'end_time': 600,
            'height_min_value': 0, 'height_max_value': 4000,
            'blacklisted_vars': [],
            'e3sm_file': None,
            'cam_file': None,
            'sam_file': None,
            'wrf_benchmark_file': {'lasso_benchmark': WRF_LASSO_BENCHMARK_OUTPUT_ROOT +
                                   "/" + date + "/wrf_lasso_stats_" + date + ".nc"},
            'sam_benchmark_file': None,
            'coamps_benchmark_file': None,
            'clubb_r408_benchmark_file': None,
            'clubb_hoc_benchmark_file': None,
            'clubb_file': None,
            # NOTE: as in the original definitions, the WRF-CLUBB files live
            # under clubb_output_root (not wrf_output_root).
            'wrf_file': {'zm': clubb_output_root + '/lasso_' + date + '_zm_wrf.nc',
                         'zt': clubb_output_root + '/lasso_' + date + '_zt_wrf.nc',
                         'sfc': clubb_output_root + '/lasso_' + date + '_sfc_wrf.nc',
                         'subcolumns': clubb_output_root + '/lasso_' + date + '_nl_lh_sample_points_2D.nc'},
            'var_groups': [VariableGroupBase, VariableGroupWs]}


LASSO_20170627 = _make_lasso_case('2017-06-27')
LASSO_20170717 = _make_lasso_case('2017-07-17')
LASSO_20170728 = _make_lasso_case('2017-07-28')
LASSO_20170923 = _make_lasso_case('2017-09-23')
LASSO_20180911 = _make_lasso_case('2018-09-11')
LASSO_20180917 = _make_lasso_case('2018-09-17')
LASSO_20180918 = _make_lasso_case('2018-09-18')
LASSO_20181002 = _make_lasso_case('2018-10-02')
# DO NOT EDIT THIS LIST UNLESS YOU ARE ADDING A NEW CASE. NEVER REMOVE CASES FROM THIS LIST.
# You may define a subset of cases at the end of this file.
ALL_CASES = [ARM, ARM_97, ASTEX_A209, ATEX,
             BOMEX,
             CGILS_S6, CGILS_S11, CGILS_S12, CLEX9_NOV02, CLEX9_OCT14,
             DYCOMS2_RF01, DYCOMS2_RF01_FIXED_SST, DYCOMS2_RF02_DO,
             DYCOMS2_RF02_DS, DYCOMS2_RF02_DS_RESTART,
             DYCOMS2_RF02_ND, DYCOMS2_RF02_SO,
             FIRE,
             GABLS2, GABLS2_NIGHTLY, GABLS3, GABLS3_NIGHT, GATE_SHEAR_RLSF,
             # IOP,
             JUN25_ALTOCU,
             LBA,
             MC3E, MPACE_A, MPACE_B, MPACE_B_SILHS,
             NEUTRAL, NOV11_ALTOCU,
             RICO, RICO_SILHS,
             TWP_ICE,
             WANGARA,
             LASSO_20170627, LASSO_20170717, LASSO_20170728, LASSO_20170923,
             LASSO_20180911, LASSO_20180917, LASSO_20180918, LASSO_20181002
             ]
# By default every case above is plotted; redefine CASES_TO_PLOT (examples
# below) to restrict a run to a subset of the cases.
CASES_TO_PLOT = ALL_CASES
# If uncommented, this line will override the real CASES_TO_PLOT given above, forcing pyplotgen to only plot some cases.
# CASES_TO_PLOT = [ARM]
# CASES_TO_PLOT = CASES_TO_PLOT[:3]
| 55.181109 | 135 | 0.56254 |
a739e22b895dd7f5b68d4cbbe585f6f9e1e16131 | 305 | py | Python | docker_sdk_api/domain/services/contracts/abstract_dataset_validator_service.py | BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI | 902f35a7e367e635898f687b16a830db892fbaa5 | [
"Apache-2.0"
] | 20 | 2021-07-13T13:08:57.000Z | 2022-03-29T09:38:00.000Z | docker_sdk_api/domain/services/contracts/abstract_dataset_validator_service.py | BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI | 902f35a7e367e635898f687b16a830db892fbaa5 | [
"Apache-2.0"
] | null | null | null | docker_sdk_api/domain/services/contracts/abstract_dataset_validator_service.py | BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI | 902f35a7e367e635898f687b16a830db892fbaa5 | [
"Apache-2.0"
] | 2 | 2021-07-12T08:42:53.000Z | 2022-03-04T18:41:25.000Z | from abc import ABC, ABCMeta, abstractmethod
from domain.models.datase_information import DatasetInformation
| 27.727273 | 99 | 0.816393 |
a739f43b0588186a90f5d8f8245209820d58a6a6 | 1,683 | py | Python | setup.py | eltonn/toki | 22efd9ce84414380904e3a5ac84e84de9bdb5bce | [
"Apache-2.0"
] | 1 | 2020-11-30T16:52:50.000Z | 2020-11-30T16:52:50.000Z | setup.py | eltonn/toki | 22efd9ce84414380904e3a5ac84e84de9bdb5bce | [
"Apache-2.0"
] | 7 | 2020-05-29T23:22:21.000Z | 2020-11-30T20:49:37.000Z | setup.py | eltonn/toki | 22efd9ce84414380904e3a5ac84e84de9bdb5bce | [
"Apache-2.0"
] | 1 | 2020-04-29T21:59:25.000Z | 2020-04-29T21:59:25.000Z | """The setup script."""
from setuptools import find_packages, setup
with open('README.md') as readme_file:
readme = readme_file.read()
with open('docs/release-notes.md') as history_file:
history = history_file.read()
requirements = []
dev_requirements = [
# lint and tools
'black',
'flake8',
'isort',
'mypy',
'pre-commit',
'seed-isort-config',
# publishing
're-ver',
'twine',
# docs
'jupyter-book',
'Sphinx>=2.0,<3',
# tests
'responses',
# devops
'docker-compose',
]
extra_requires = {'dev': requirements + dev_requirements}
setup(
author="Ivan Ogasawara",
author_email='ivan.ogasawara@gmail.com',
classifiers=[
'Development Status :: 2 - Pre-Alpha',
'Intended Audience :: Developers',
'License :: OSI Approved :: Apache Software License',
'Natural Language :: English',
"Programming Language :: Python :: 2",
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
description="Toki: Database Expression API",
install_requires=requirements,
license="Apache Software License 2.0",
long_description=readme + '\n\n' + history,
include_package_data=True,
keywords='toki',
name='toki',
packages=find_packages(include=['toki']),
test_suite='tests',
extras_require=extra_requires,
url='https://github.com/toki-project/toki',
version='0.0.1',
zip_safe=False,
)
| 26.296875 | 61 | 0.616756 |
a73aed88b329c068d8782d3c38cdfcf8ff4be7a3 | 3,109 | py | Python | dq0/sdk/estimators/data_handler/csv.py | gradientzero/dq0-sdk | 90856dd5ac56216971ffe33004447fd037a21660 | [
"0BSD"
] | 2 | 2020-09-16T09:28:00.000Z | 2021-03-18T21:26:29.000Z | dq0/sdk/estimators/data_handler/csv.py | gradientzero/dq0-sdk | 90856dd5ac56216971ffe33004447fd037a21660 | [
"0BSD"
] | 22 | 2020-04-15T10:19:33.000Z | 2022-03-12T00:20:57.000Z | dq0/sdk/estimators/data_handler/csv.py | gradientzero/dq0-sdk | 90856dd5ac56216971ffe33004447fd037a21660 | [
"0BSD"
] | null | null | null | # -*- coding: utf-8 -*-
"""Base data handler.
Copyright 2021, Gradient Zero
All rights reserved
"""
import logging
import dq0.sdk
from dq0.sdk.estimators.data_handler.base import BasicDataHandler
import pandas as pd
from sklearn.model_selection import train_test_split
logger = logging.getLogger(__name__)
| 42.013514 | 184 | 0.690254 |
595209a149b488a190b55a28e227e0653341e30a | 407 | py | Python | core/utils/template_updater.py | blockomat2100/vulnman | 835ff3aae1168d8e2fa5556279bc86efd2e46472 | [
"MIT"
] | 3 | 2021-12-22T07:02:24.000Z | 2022-01-27T20:19:11.000Z | core/utils/template_updater.py | vulnman/vulnman | d48ee022bc0e4368060a990a527b1c7a5e437504 | [
"MIT"
] | 44 | 2021-12-14T07:24:29.000Z | 2022-03-23T07:01:16.000Z | core/utils/template_updater.py | blockomat2100/vulnman | 835ff3aae1168d8e2fa5556279bc86efd2e46472 | [
"MIT"
] | 1 | 2022-01-21T16:29:56.000Z | 2022-01-21T16:29:56.000Z | import os
from django.conf import settings
from git import Repo
| 27.133333 | 75 | 0.712531 |
5952c5d9520173eb54626c3cf8e791dbdc5d7f03 | 656 | py | Python | pages/basket_page.py | Espad/stepik_autotests_final_tasks | 2d9e3408766cc00387a8ddd656006556cce567b4 | [
"MIT"
] | null | null | null | pages/basket_page.py | Espad/stepik_autotests_final_tasks | 2d9e3408766cc00387a8ddd656006556cce567b4 | [
"MIT"
] | null | null | null | pages/basket_page.py | Espad/stepik_autotests_final_tasks | 2d9e3408766cc00387a8ddd656006556cce567b4 | [
"MIT"
] | null | null | null | from .base_page import BasePage
from .locators import BasketPageLocators
| 41 | 135 | 0.745427 |
5955db7626231d3711353993b2796474b288c67c | 169 | py | Python | tests/collaboration/factories.py | cad106uk/market-access-api | a357c33bbec93408b193e598a5628634126e9e99 | [
"MIT"
] | null | null | null | tests/collaboration/factories.py | cad106uk/market-access-api | a357c33bbec93408b193e598a5628634126e9e99 | [
"MIT"
] | 51 | 2018-05-31T12:16:31.000Z | 2022-03-08T09:36:48.000Z | tests/collaboration/factories.py | cad106uk/market-access-api | a357c33bbec93408b193e598a5628634126e9e99 | [
"MIT"
] | 2 | 2019-12-24T09:47:42.000Z | 2021-02-09T09:36:51.000Z | import factory
from api.collaboration.models import TeamMember
| 18.777778 | 59 | 0.781065 |
595945cb1c25f789695dd2fae8ba200ee3b77c80 | 1,454 | py | Python | trypython/extlib/aiohttp/aiohttp01.py | devlights/try-python-extlib | 9bfb649d3f5b249b67991a30865201be794e29a9 | [
"MIT"
] | null | null | null | trypython/extlib/aiohttp/aiohttp01.py | devlights/try-python-extlib | 9bfb649d3f5b249b67991a30865201be794e29a9 | [
"MIT"
] | null | null | null | trypython/extlib/aiohttp/aiohttp01.py | devlights/try-python-extlib | 9bfb649d3f5b249b67991a30865201be794e29a9 | [
"MIT"
] | null | null | null | """
aiohttp
REFERENCES:: http://bit.ly/2O2lmeU
http://bit.ly/2O08oy3
"""
import asyncio
from asyncio import Future
from typing import List, Dict
import aiohttp
from trypython.common.commoncls import SampleBase
| 25.068966 | 97 | 0.592847 |
595abb6fdb13a008e2f80cf057085a05a97b14a8 | 1,860 | py | Python | models.py | camerongray1515/HackDee-2015 | 6459c5bd3ad895e0a216ff61342eb73877dc9ee5 | [
"MIT"
] | null | null | null | models.py | camerongray1515/HackDee-2015 | 6459c5bd3ad895e0a216ff61342eb73877dc9ee5 | [
"MIT"
] | 1 | 2015-04-04T20:55:52.000Z | 2015-12-17T23:35:08.000Z | models.py | camerongray1515/HackDee-2015 | 6459c5bd3ad895e0a216ff61342eb73877dc9ee5 | [
"MIT"
] | null | null | null | from sqlalchemy import Column, String, Boolean, ForeignKey, Integer
from sqlalchemy.orm import relationship
from database import Base
from string import ascii_letters
from random import choice
| 29.0625 | 89 | 0.614516 |
595b940d98d4c9ba62ad1e7789fd5ad05f9b32ef | 3,270 | py | Python | Python3/726.py | rakhi2001/ecom7 | 73790d44605fbd51e8f7e804b9808e364fcfc680 | [
"MIT"
] | 854 | 2018-11-09T08:06:16.000Z | 2022-03-31T06:05:53.000Z | Python3/726.py | rakhi2001/ecom7 | 73790d44605fbd51e8f7e804b9808e364fcfc680 | [
"MIT"
] | 29 | 2019-06-02T05:02:25.000Z | 2021-11-15T04:09:37.000Z | Python3/726.py | rakhi2001/ecom7 | 73790d44605fbd51e8f7e804b9808e364fcfc680 | [
"MIT"
] | 347 | 2018-12-23T01:57:37.000Z | 2022-03-12T14:51:21.000Z | __________________________________________________________________________________________________
sample 24 ms submission
__________________________________________________________________________________________________
sample 13188 kb submission
__________________________________________________________________________________________________
| 30 | 98 | 0.45107 |
595ecf0b3419dbc932591ff7beb5487e3db35f47 | 932 | py | Python | script/rmLinebyIndFile.py | ASLeonard/danbing-tk | 15540124ff408777d0665ace73698b0c2847d1cc | [
"BSD-3-Clause"
] | 17 | 2020-08-16T14:28:11.000Z | 2022-03-23T23:30:47.000Z | script/rmLinebyIndFile.py | ASLeonard/danbing-tk | 15540124ff408777d0665ace73698b0c2847d1cc | [
"BSD-3-Clause"
] | 7 | 2021-01-25T15:26:18.000Z | 2022-03-31T14:30:46.000Z | script/rmLinebyIndFile.py | ASLeonard/danbing-tk | 15540124ff408777d0665ace73698b0c2847d1cc | [
"BSD-3-Clause"
] | 2 | 2020-11-01T20:41:38.000Z | 2021-05-29T03:22:24.000Z | #!/usr/bin/env python3
import sys
import numpy as np
if len(sys.argv) == 1 or sys.argv[1] == "-h" or sys.argv[1] == "--help":
print(
"""
Remove line indices (0-based) specified in 'index.txt'
usage: program [-k] index.txt inFile
-k Keep line indices in 'index.txt' instead of removing them.
""")
sys.exit()
rm = True
idxf = ""
infile = ""
for i, v in enumerate(sys.argv):
if i == 0:
continue
elif v == "-k":
rm = False
elif not idxf:
idxf = v
elif not infile:
infile = v
else:
assert False, f"too many arguments {v}"
if not idxf:
assert False, "index.txt not specified"
if not infile:
assert False, "inFile not specified"
ids = set(np.loadtxt(idxf, dtype=int, ndmin=1).tolist())
with open(infile) as f:
ind = 0
for line in f:
if (ind not in ids) == rm:
print(line, end='')
ind += 1
| 22.731707 | 78 | 0.55794 |
595f827df47c5f2bdd1ecfb6bc095d61ca198a03 | 538 | py | Python | dynaban/tests/postion.py | laukik-hase/imitation_of_human_arm_on_robotic_manipulator | 995beb1ab41597ca6cbecd0baecdef1ef13450f9 | [
"MIT"
] | 3 | 2021-11-13T16:54:31.000Z | 2021-11-13T20:50:18.000Z | dynaban/tests/postion.py | laukik-hase/human_arm_imitation | 995beb1ab41597ca6cbecd0baecdef1ef13450f9 | [
"MIT"
] | null | null | null | dynaban/tests/postion.py | laukik-hase/human_arm_imitation | 995beb1ab41597ca6cbecd0baecdef1ef13450f9 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import arm_control_utils
DURATION = 30000
TRAJ_POLY1 = [1000, 100, 100]
TORQUE_POLY1 = [1000, 100, 100]
MODE = 3
arm_control_utils.initialize_motors()
arm_control_utils.enable_state_torque()
arm_control_utils.set_debug(1, 0)
print("Ready to move")
arm_control_utils.set_position_trajectory(1, DURATION, TRAJ_POLY1, TORQUE_POLY1)
arm_control_utils.set_mode(1, MODE)
arm_control_utils.disable_state_torque()
arm_control_utils.stop_motors() | 28.315789 | 80 | 0.702602 |
595fa12df823f48a76595c65b488cfd3266708e8 | 5,758 | py | Python | google-datacatalog-connectors-commons/tests/google/datacatalog_connectors/commons/prepare/base_entry_factory_test.py | mesmacosta/datacatalog-connectors | 74a4b6272cb00f2831b669d1a41133913f3df3fa | [
"Apache-2.0"
] | 53 | 2020-04-27T21:50:47.000Z | 2022-02-18T22:08:49.000Z | google-datacatalog-connectors-commons/tests/google/datacatalog_connectors/commons/prepare/base_entry_factory_test.py | mesmacosta/datacatalog-connectors | 74a4b6272cb00f2831b669d1a41133913f3df3fa | [
"Apache-2.0"
] | 20 | 2020-05-26T13:51:45.000Z | 2022-01-25T00:06:19.000Z | google-datacatalog-connectors-commons/tests/google/datacatalog_connectors/commons/prepare/base_entry_factory_test.py | mesmacosta/datacatalog-connectors | 74a4b6272cb00f2831b669d1a41133913f3df3fa | [
"Apache-2.0"
] | 12 | 2020-04-30T22:14:02.000Z | 2021-10-09T03:44:39.000Z | #!/usr/bin/python
# coding=utf-8
#
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import mock
from google.datacatalog_connectors.commons import prepare
| 42.029197 | 93 | 0.70719 |
5960088035b5df4aefdc1abf2b6dd9894a0c53be | 5,978 | py | Python | estimators.py | RakitinDen/pytorch-recursive-gumbel-max-trick | 44f9854020e727946a074a6e53b20dd593f96cc1 | [
"Apache-2.0"
] | 20 | 2021-12-03T13:20:17.000Z | 2022-03-20T18:58:06.000Z | estimators.py | RakitinDen/pytorch-recursive-gumbel-max-trick | 44f9854020e727946a074a6e53b20dd593f96cc1 | [
"Apache-2.0"
] | null | null | null | estimators.py | RakitinDen/pytorch-recursive-gumbel-max-trick | 44f9854020e727946a074a6e53b20dd593f96cc1 | [
"Apache-2.0"
] | null | null | null | # Estimators are partially based on the "estimators.py" from the following repositories:
# https://github.com/agadetsky/pytorch-pl-variance-reduction
# https://github.com/sdrobert/pydrobert-pytorch
import torch
def uniform_to_exp(logits, uniform=None, enable_grad=False):
    """Map independent Uniform(0, 1) samples to independent Exponential samples.

    `logits` holds the log-means of the target exponentials; each rate is
    lambda = exp(-logit), since the expected value equals 1 / lambda.
    When `uniform` is omitted, fresh uniform noise with the same shape as
    `logits` is drawn (clamped away from 0 and 1 for numerical safety).
    With `enable_grad`, the returned tensor is marked as requiring grad.
    """
    if uniform is None:
        uniform = torch.distributions.utils.clamp_probs(torch.rand_like(logits))
    else:
        assert uniform.size() == logits.size()
    samples = torch.exp(logits + torch.log(-torch.log(uniform)))
    if enable_grad:
        samples.requires_grad_(True)
    return samples
def reattach_exp_to_new_logits(logits, exp):
    """Rebuild `exp` so that it is differentiable with respect to `logits`.

    Returns a tensor whose value equals `exp` but whose dependence on
    `logits` matches the one produced by `uniform_to_exp`; used in `relax`
    to obtain gradients for the detached copy of the logits.
    """
    log_exp = torch.log(exp.detach())
    logit_shift = logits - logits.detach()  # zero-valued, but carries the gradient
    return torch.exp(log_exp + logit_shift)
def E_reinforce(loss_value, logits, exp, plus_samples=1, mask_unused_values=None, **kwargs):
    '''
    Returns the REINFORCE [williams1992] gradient estimate with respect to the exponential score
    grad = loss(X) * (d / d logits) log p(E ; logits)
    If plus_samples > 1, the estimate is E-REINFORCE+ / E-REINFORCE with LOO baseline [kool2019buy, richter2020vargrad]

    loss_value -- per-sample losses; viewed as (batch_size, plus_samples) when plus_samples > 1
    logits -- log-means of the exponentials; the gradient is returned w.r.t. this tensor
        (dim 0 is assumed to stack plus_samples replicates per input -- see the .view below)
    exp -- exponential samples, same shape as logits
    mask_unused_values -- optional callable applied to the elementwise log-density
        before summation (presumably to zero out unused positions -- confirm with callers)
    '''
    batch_size = logits.shape[0] // plus_samples
    loss_value = loss_value.detach()
    exp = exp.detach()
    # Elementwise log-density of Exponential(rate=exp(-logit)) at the samples:
    # log p(e) = -logit - e * exp(-logit).
    log_prob = -logits - torch.exp(torch.log(exp) - logits)
    if mask_unused_values is not None:
        log_prob = mask_unused_values(log_prob, **kwargs)
    # Sum the log-density over every dimension except the batch dimension.
    dims_except_batch = tuple(-i for i in range(1, logits.ndimension()))
    log_prob = log_prob.sum(dim=dims_except_batch)
    # Score function (d / d logits) log p(E ; logits), obtained via autograd.
    score = torch.autograd.grad([log_prob], [logits], grad_outputs=torch.ones_like(log_prob))[0]
    if plus_samples > 1:
        # Leave-one-out baseline: center each loss by the mean over its group
        # of plus_samples replicates, then normalise by (plus_samples - 1).
        score_shape = (batch_size, plus_samples) + logits.shape[1:]
        score = score.view(score_shape)
        loss_value = loss_value.view(batch_size, plus_samples)
        loss_value = loss_value - loss_value.mean(dim=-1)[:, None]
        for i in range(logits.ndimension() - 1):
            loss_value = loss_value.unsqueeze(-1)
        grad = (loss_value * score).sum(dim=1) / (plus_samples - 1)
    else:
        # Broadcast each scalar loss over the trailing dimensions of score.
        for i in range(logits.ndimension() - 1):
            loss_value = loss_value.unsqueeze(-1)
        grad = loss_value * score
    return grad
def T_reinforce(loss_value, struct_var, logits, f_log_prob, plus_samples=1, **kwargs):
    '''
    Returns the REINFORCE [williams1992] gradient estimate with respect to the score function of the execution trace
    grad = loss(X) * (d / d logits) log p(T ; logits)
    If plus_samples > 1, the estimate is T-REINFORCE+ / T-REINFORCE with LOO baseline [kool2019buy, richter2020vargrad]

    Differs from E_reinforce in that the log-probability of the trace is
    supplied by the caller via f_log_prob(struct_var, logits, **kwargs)
    instead of being computed from exponential samples.
    '''
    batch_size = logits.shape[0] // plus_samples
    loss_value = loss_value.detach()
    struct_var = struct_var.detach()
    log_prob = f_log_prob(struct_var, logits, **kwargs)
    # Score function (d / d logits) log p(T ; logits), obtained via autograd.
    score = torch.autograd.grad([log_prob], [logits], grad_outputs=torch.ones_like(log_prob))[0]
    if plus_samples > 1:
        # Leave-one-out baseline: center each loss by the mean over its group
        # of plus_samples replicates, then normalise by (plus_samples - 1).
        score_shape = (batch_size, plus_samples) + logits.shape[1:]
        score = score.view(score_shape)
        loss_value = loss_value.view(batch_size, plus_samples)
        loss_value = loss_value - loss_value.mean(dim=-1)[:, None]
        for i in range(logits.ndimension() - 1):
            loss_value = loss_value.unsqueeze(-1)
        grad = (loss_value * score).sum(dim=1) / (plus_samples - 1)
    else:
        # Broadcast each scalar loss over the trailing dimensions of score.
        for i in range(logits.ndimension() - 1):
            loss_value = loss_value.unsqueeze(-1)
        grad = loss_value * score
    return grad
def relax(loss_value, struct_var, logits, exp, critic, f_log_prob, f_cond, uniform=None, **kwargs):
    '''
    Returns the RELAX [grathwohl2017backpropagation] gradient estimate
    grad = (loss(X(T)) - c(e_2)) * (d / d logits) log p(T ; logits) - (d / d logits) c(e_2) + (d / d logits) c(e_1)

    e_1 ~ p(E ; logits) - exponential sample
    T = T(e_1) - execution trace of the algorithm
    X = X(T) - structured variable, obtained as the output of the algorithm
    e_2 ~ p(E | T ; logits) - conditional exponential sample
    c(.) - critic (typically, a neural network)

    e_1 and e_2 are sampled using the reparameterization trick
    (d / d logits) c(e_1) and (d / d logits) c(e_2) are the reparameterization gradients
    In code, exp := e_1, cond_exp := e_2
    '''
    loss_value = loss_value.detach()
    struct_var = struct_var.detach()
    # Work on a detached copy of the logits so all gradients below are taken
    # with respect to this local tensor only.
    logits = logits.detach().requires_grad_(True)
    # Re-express e_1 as a function of the detached logits copy.
    exp = reattach_exp_to_new_logits(logits, exp)
    # e_2 ~ p(E | T ; logits): conditional sample produced by the caller-supplied f_cond.
    cond_exp = f_cond(struct_var, logits, uniform, **kwargs)
    baseline_exp = critic(exp)
    baseline_cond = critic(cond_exp).squeeze()
    # Centered learning signal: loss(X(T)) - c(e_2).
    diff = loss_value - baseline_cond
    log_prob = f_log_prob(struct_var, logits, **kwargs)
    # Score function (d / d logits) log p(T ; logits).
    score, = torch.autograd.grad(
        [log_prob],
        [logits],
        grad_outputs = torch.ones_like(log_prob)
    )
    # Reparameterization gradients of the critic terms; create_graph=True
    # keeps them differentiable (e.g. so the critic itself can be trained).
    d_baseline_exp, = torch.autograd.grad(
        [baseline_exp],
        [logits],
        create_graph=True,
        retain_graph=True,
        grad_outputs=torch.ones_like(baseline_exp)
    )
    d_baseline_cond, = torch.autograd.grad(
        [baseline_cond],
        [logits],
        create_graph=True,
        retain_graph=True,
        grad_outputs=torch.ones_like(baseline_cond)
    )
    # Broadcast the scalar learning signal over the trailing dims of score.
    for i in range(logits.ndimension() - 1):
        diff = diff.unsqueeze(-1)
    grad = diff * score + d_baseline_exp - d_baseline_cond
    assert grad.size() == logits.size()
    return grad
| 36.674847 | 119 | 0.666109 |
596098c174bcd92a072f4a63dcf655eaaf7c83e8 | 1,332 | py | Python | squareroot.py | martinaobrien/pands-problem-sets | 5928f9ed2a743f46a9615f41192fd6dfb810b73c | [
"CNRI-Python"
] | null | null | null | squareroot.py | martinaobrien/pands-problem-sets | 5928f9ed2a743f46a9615f41192fd6dfb810b73c | [
"CNRI-Python"
] | null | null | null | squareroot.py | martinaobrien/pands-problem-sets | 5928f9ed2a743f46a9615f41192fd6dfb810b73c | [
"CNRI-Python"
] | null | null | null | #Martina O'Brien 10/3/2019
#Problem Set 7 - squareroots
#Programming Code to determining the squareroots of positive floating point numbers
## Reference for try and expect https://www.w3schools.com/python/python_try_except.asp
while True: # this loop will run to allow the user to input a value again if they do not enter a positive integer
try:
num = input("Please enter a positive number: ") # Here the user will enter positive number.
number = float(num) # using a float(num) to allow numbers with decimal points
except ValueError:
print('Sorry this is not a number. Can you please try again and enter a positive number.')
# If the value is entered is correct then the value will move to the next statement.
continue #continue to the next interation of the loop
if number <= 0:
print('Please enter a number greater than zero')
# to ensure that the user inputs a positive number
break
# break from the while loop to the next variable
number_sqrt = (number ** 0.5)
# Using ** 0.5 gives the squareroot of the num inputted
# Using %0.1f returns the answers to one decimal point
print("The square root of %0.1f is approx %0.1f" %(number, number_sqrt))
# print the result of the variable to one decimal place.
| 45.931034 | 114 | 0.693694 |
596187b54ca231442ef296c49a1a09d46c903d01 | 2,843 | py | Python | tests/org_group_tests.py | JonLMyers/MetroTransitAPI | d8f467570368cd563d69564b680cfdd47ad6b622 | [
"MIT"
] | null | null | null | tests/org_group_tests.py | JonLMyers/MetroTransitAPI | d8f467570368cd563d69564b680cfdd47ad6b622 | [
"MIT"
] | null | null | null | tests/org_group_tests.py | JonLMyers/MetroTransitAPI | d8f467570368cd563d69564b680cfdd47ad6b622 | [
"MIT"
] | null | null | null | import requests
import json

# Smoke-test script: exercises the login + group-management endpoints of a
# locally running API and prints every response it receives.
BASE_URL = 'http://127.0.0.1:5000'
JSON_HEADERS = {'Content-type': 'application/json', 'Accept': 'text/plain'}

token = ''
email_token = ''


def _banner():
    print("######## Pass ########")


def _show(response):
    print(response.status_code, response.reason)
    print(response.text)


def _bearer_headers(jwt):
    """Plain JSON headers plus an Authorization bearer token."""
    headers = dict(JSON_HEADERS)
    headers['authorization'] = 'Bearer ' + jwt
    return headers


# --- Log in and capture the access token used by all later requests. ---
_banner()
login_response = requests.post(
    BASE_URL + '/login',
    data=json.dumps({'username': 'jon@aaxus.com', 'password': 'password125'}),
    headers=JSON_HEADERS,
)
_show(login_response)
token = json.loads(login_response.text)['access_token']
print(token)

# --- Authenticated group-management calls, executed in order. ---
_calls = [
    ('post', '/group/manage', {
        'name': 'Dev Ops',
        'description': 'Devops',
        'org_name': 'Aaxus',
    }),
    ('put', '/group/manage', {
        'org_name': 'Aaxus',
        'id': 'Dev Ops',
        'description': 'Developer Operations Organization',
        'member_username': ['spiro@aaxus.com', 'anthony@aaxus.com', 'ben@aaxus.com'],
        'admin_username': ['spiro@aaxus.com', 'anthony@aaxus.com'],
    }),
    ('put', '/group/manage', {
        'org_name': 'Aaxus',
        'id': 'Dev Ops',
        'remove_admin': ['spiro@aaxus.com'],
        'remove_member': ['ben@aaxus.com'],
    }),
    ('post', '/group/manage', {
        'name': 'Executives',
        'description': 'Devops',
        'org_name': 'Aaxus',
    }),
    ('post', '/group/view', {
        'org_name': 'Aaxus',
        'id': 'Dev Ops',
    }),
]
for _method, _path, _payload in _calls:
    _banner()
    _response = getattr(requests, _method)(
        BASE_URL + _path,
        data=json.dumps(_payload),
        headers=_bearer_headers(token),
    )
    _show(_response)

# --- Listing variant: org_name passed as a query-string parameter. ---
# ``r`` must stay bound here: the final print(r.text) below relies on it.
_banner()
r = requests.get(BASE_URL + '/group/view?org_name=Aaxus', headers=_bearer_headers(token))
print(r.status_code, r.reason)
print(r.text) | 33.05814 | 106 | 0.638762 |
5961e885fedcd68b3653416c363d4e461726bdc8 | 5,578 | py | Python | pywbemtools/pywbemlistener/_context_obj.py | pywbem/pywbemtools | 6b7c3f124324fd3ab7cffb82bc98c8f9555317e4 | [
"Apache-2.0"
] | 8 | 2017-04-01T13:55:00.000Z | 2022-03-15T18:28:47.000Z | pywbemtools/pywbemlistener/_context_obj.py | pywbem/pywbemtools | 6b7c3f124324fd3ab7cffb82bc98c8f9555317e4 | [
"Apache-2.0"
] | 918 | 2017-03-03T14:29:03.000Z | 2022-03-29T15:32:16.000Z | pywbemtools/pywbemlistener/_context_obj.py | pywbem/pywbemtools | 6b7c3f124324fd3ab7cffb82bc98c8f9555317e4 | [
"Apache-2.0"
] | 2 | 2020-01-17T15:56:46.000Z | 2020-02-12T18:49:30.000Z | # (C) Copyright 2021 Inova Development Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Click context object for the pybemlistener command.
"""
from __future__ import absolute_import, print_function, unicode_literals
import os
import click_spinner
def spinner_stop(self):
    """Stop the progress spinner; a no-op when the spinner is disabled."""
    if not self.spinner_enabled:
        return
    self._spinner_obj.stop()
def execute_cmd(self, cmd):
    """
    Call the command function for a command, after enabling the spinner
    (except when in debug mode) and after entering debug mode if desired.
    """
    # The spinner is only started outside debug mode -- presumably so its
    # terminal output cannot garble the interactive pdb prompt.
    if not self.pdb:
        self.spinner_start()
    try:
        if self.pdb:
            import pdb  # pylint: disable=import-outside-toplevel
            pdb.set_trace()  # pylint: disable=forgotten-debug-statement
        cmd()  # The command function for the pywbemlistener command
    finally:
        # NOTE(review): self.pdb is read again here; if cmd() ever toggled
        # it, the spinner could be left running or stopped without having
        # been started -- presumably commands never do that; confirm.
        if not self.pdb:
            self.spinner_stop()
| 31.693182 | 80 | 0.620115 |
5962222919ba8cf295722ccc3d990ff5fdab4dcc | 1,704 | py | Python | ota_xml_api/util/xml_base.py | mihira/opentravel-xml-api | 24d1ea4d24cf2575de474becaa665f6fc0d1971d | [
"MIT"
] | 3 | 2016-01-14T01:12:06.000Z | 2021-04-16T04:00:47.000Z | ota_xml_api/util/xml_base.py | mihira/opentravel-xml-api | 24d1ea4d24cf2575de474becaa665f6fc0d1971d | [
"MIT"
] | null | null | null | ota_xml_api/util/xml_base.py | mihira/opentravel-xml-api | 24d1ea4d24cf2575de474becaa665f6fc0d1971d | [
"MIT"
] | 2 | 2017-09-04T13:02:09.000Z | 2018-06-09T11:10:03.000Z | #!/usr/bin/env python
"""
This module contains the base xml Node and Period classes
"""
from xml.dom.minidom import getDOMImplementation
from date import Period
from constants import START, END
| 27.483871 | 70 | 0.661385 |
59629f7a0c5633f940aafc1f0319ef57490ea9f2 | 9,441 | py | Python | phl_courts_scraper/court_summary/schema.py | PhiladelphiaController/phl-courts-scraper | 0c3c915a7fa355538c43a138fa7b104b8bf6ef1e | [
"MIT"
] | null | null | null | phl_courts_scraper/court_summary/schema.py | PhiladelphiaController/phl-courts-scraper | 0c3c915a7fa355538c43a138fa7b104b8bf6ef1e | [
"MIT"
] | 4 | 2020-12-09T18:25:53.000Z | 2021-03-19T22:30:18.000Z | phl_courts_scraper/court_summary/schema.py | PhiladelphiaController/phl-courts-scraper | 0c3c915a7fa355538c43a138fa7b104b8bf6ef1e | [
"MIT"
] | null | null | null | """Define the schema for the court summary report."""
import datetime
from dataclasses import dataclass, field, fields
from typing import Any, Iterator, List, Optional, Union
import desert
import marshmallow
import pandas as pd
from ..utils import DataclassSchema
__all__ = ["CourtSummary", "Docket", "Charge", "Sentence"]
| 26.594366 | 72 | 0.586696 |
5962e0c96855173baf9ead74168b62eef51ee37e | 216 | py | Python | Day_43/json_dump_python.py | kiranrraj/100Days_Of_Coding | ab75d83be9be87fb7bc83a3f3b72a4638dab22a1 | [
"MIT"
] | null | null | null | Day_43/json_dump_python.py | kiranrraj/100Days_Of_Coding | ab75d83be9be87fb7bc83a3f3b72a4638dab22a1 | [
"MIT"
] | null | null | null | Day_43/json_dump_python.py | kiranrraj/100Days_Of_Coding | ab75d83be9be87fb7bc83a3f3b72a4638dab22a1 | [
"MIT"
] | null | null | null | # Title : Json Module Module
# Author : Kiran Raj R.
# Date : 26/11/2020
# Demonstrates serializing a Python dict to a JSON string with json.dumps.
import json  # moved above first use: imports belong at the top of the module

# Plain dict that will be serialized; all values are strings.
python_json = {"name": "kiran", "email": "kiran@gmail.com", "isHappy": "Yes"}
string_j = json.dumps(python_json)  # JSON text representation of the dict
print(string_j) | 24 | 75 | 0.680556 |
5963d226f34e95078375678dfe6099b78982408c | 573 | py | Python | userbot/modules/trd.py | LUCKYRAJPUTOP/VibeXUserbot | 257c86ff1775592688815435d8c5ce91e1dd299e | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/trd.py | LUCKYRAJPUTOP/VibeXUserbot | 257c86ff1775592688815435d8c5ce91e1dd299e | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | userbot/modules/trd.py | LUCKYRAJPUTOP/VibeXUserbot | 257c86ff1775592688815435d8c5ce91e1dd299e | [
"Naumen",
"Condor-1.1",
"MS-PL"
] | null | null | null | import asyncio
from asyncio import sleep
from random import choice
from userbot.events import register
# Telegram-style @usernames; presumably one is picked at random via the
# `choice` import above, elsewhere in this module -- TODO confirm.
T_R_D = [
    "@PrajjuS",
    "@Vin02vin",
    "@Iamsaisharan",
    "@venomsamurai",
]
| 22.92 | 74 | 0.602094 |
596512b76ad497342148f69daf0ea980f36bbf49 | 2,384 | py | Python | collectors/nct/collector.py | almeidaah/collectors | f03096855b8d702969d22af0b20a4d6a0d820bd0 | [
"MIT"
] | 17 | 2016-06-28T21:20:21.000Z | 2022-03-02T16:31:25.000Z | collectors/nct/collector.py | almeidaah/collectors | f03096855b8d702969d22af0b20a4d6a0d820bd0 | [
"MIT"
] | 41 | 2016-04-04T10:36:45.000Z | 2017-04-24T10:04:57.000Z | collectors/nct/collector.py | kenferrara/collectors | e6c1f45df3a1ffd5d60dada1816484812eb51417 | [
"MIT"
] | 25 | 2016-05-18T09:27:42.000Z | 2021-03-21T14:44:31.000Z | # -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import zipfile
import logging
import requests
import tempfile
import contextlib
from .parser import parse_record
from .. import base
logger = logging.getLogger(__name__)
# Module API
def collect(conf, conn, nct_xml_dump_url):
    '''
    Download the NCT XML dump at *nct_xml_dump_url* and load every record.

    For data from 2017-01-01 until 2017-02-01 the XML dump can be fetched
    from:
    https://clinicaltrials.gov/search?resultsxml=True&rcv_s=01/01/2017&rcv_e=01/02/2017
    '''
    base.helpers.start(conf, 'nct', {'url': nct_xml_dump_url})
    with tempfile.TemporaryFile() as dump_file:
        _download_to_file(nct_xml_dump_url, dump_file)
        collected = 0
        for identifier, record_file in _iter_nct_dump_files(dump_file):
            # Attach request context so any crash report pinpoints the record.
            base.config.SENTRY.extra_context({
                'url': nct_xml_dump_url,
                'identifier': identifier,
            })
            record = parse_record(record_file)
            # Reuse the stored identifier when this trial is already known.
            warehouse = conn['warehouse']
            if record.table in warehouse.tables:
                previous = warehouse[record.table].find_one(nct_id=record['nct_id'])
                if previous:
                    record['nct_id'] = previous['nct_id']
            record.write(conf, conn)
            collected += 1
        logger.info('Collected %s NCT records', collected)
    base.helpers.stop(conf, 'nct', {
        'url': nct_xml_dump_url,
        'collected': collected,
    })
| 32.657534 | 87 | 0.633389 |
5968638622036a0684e095d3de7062e4e3ce8115 | 292 | py | Python | bigcode-fetcher/tests/fixtures/__init__.py | sourcery-ai-bot/bigcode-tools | 87aaa609998017d0312b7f4f102d41cc2942fa9d | [
"MIT"
] | 6 | 2017-10-15T08:21:27.000Z | 2018-05-17T12:57:41.000Z | bigcode-fetcher/tests/fixtures/__init__.py | bdqnghi/bigcode-tools | 94ce416fbb40b9b25d49bf88284bf7ccb6132bd3 | [
"MIT"
] | 2 | 2017-12-17T19:02:06.000Z | 2018-03-01T04:00:26.000Z | bigcode-fetcher/tests/fixtures/__init__.py | bdqnghi/bigcode-tools | 94ce416fbb40b9b25d49bf88284bf7ccb6132bd3 | [
"MIT"
] | 2 | 2017-10-18T08:17:54.000Z | 2018-06-28T09:57:36.000Z | from os import path
import json
from bigcode_fetcher.project import Project
FIXTURES_DIR = path.dirname(__file__)
PROJECTS_PATH = path.join(FIXTURES_DIR, "projects.json")
with open(PROJECTS_PATH, "r") as f:
JSON_PROJECTS = json.load(f)
PROJECTS = [Project(p) for p in JSON_PROJECTS]
| 20.857143 | 56 | 0.763699 |
59692f082625d38c4980a6276af160523062869b | 1,465 | py | Python | examples/timeflies/timeflies_qt.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2018-11-16T09:07:13.000Z | 2018-11-16T09:07:13.000Z | examples/timeflies/timeflies_qt.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | examples/timeflies/timeflies_qt.py | yutiansut/RxPY | c3bbba77f9ebd7706c949141725e220096deabd4 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-11-04T11:13:49.000Z | 2021-11-04T11:13:49.000Z | from rx.subjects import Subject
from rx.concurrency import QtScheduler
import sys
# Import the Qt bindings with a graceful fallback chain: try PyQt4 first,
# then PyQt5 (which moved the widget classes into QtWidgets), and finally
# PySide as a last resort.
try:
    from PyQt4 import QtCore
    from PyQt4.QtGui import QWidget, QLabel
    from PyQt4.QtGui import QApplication
except ImportError:
    try:
        from PyQt5 import QtCore
        from PyQt5.QtWidgets import QApplication, QWidget, QLabel
    except ImportError:
        from PySide import QtCore
        from PySide.QtGui import QWidget, QLabel
        from PySide.QtGui import QApplication
# NOTE(review): main() is defined elsewhere in the original file; it is not
# visible in this excerpt.
if __name__ == '__main__':
    main()
| 24.416667 | 77 | 0.647099 |
5969ba0b61715dcc3c0755544d810b16a9ba7f4b | 6,116 | py | Python | src/contexts/context_local_structure.py | aindrila-ghosh/SmartReduce | b2b28055bc0b269155270c1f8206445e405e8d9b | [
"MIT"
] | null | null | null | src/contexts/context_local_structure.py | aindrila-ghosh/SmartReduce | b2b28055bc0b269155270c1f8206445e405e8d9b | [
"MIT"
] | null | null | null | src/contexts/context_local_structure.py | aindrila-ghosh/SmartReduce | b2b28055bc0b269155270c1f8206445e405e8d9b | [
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
from sklearn.manifold import Isomap
from scipy.spatial.distance import pdist
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score, LeaveOneOut
RANDOM_STATE = 42
def calculate_pairwise_distances(df_for_Box_Plot_features, points, distance='euclidean'):
    """
    Compute condensed pairwise-distance vectors for the original features
    and for their embedding.

    Parameters
    ----------
    df_for_Box_Plot_features : array-like
        original features, one row per sample
    points : nD array
        embedding coordinates, one row per sample
    distance : str
        any metric name accepted by scipy's pdist; defaults to "euclidean"

    Returns
    ----------
    tuple of two 1-D condensed distance arrays:
    (distances in the original space, distances in the embedding)
    """
    return (
        pdist(df_for_Box_Plot_features, metric=distance),
        pdist(points, metric=distance),
    )
def calculate_geodesic_distance(df_for_Box_Plot_features, points):
    """
    Compute condensed pairwise geodesic distances (via Isomap's internal
    distance matrix) for the original features and for the embedding.

    Parameters
    ----------
    df_for_Box_Plot_features : array-like
        original features, one row per sample
    points : nD array
        embedding coordinates, one row per sample

    Returns
    ----------
    tuple of two 1-D arrays:
    (geodesic distances in the original space,
     geodesic distances in the embedding)
    """
    def _condensed_upper(matrix):
        # Keep only the strictly-upper-triangular, non-zero entries of the
        # square distance matrix, as a flat vector.
        return matrix[np.nonzero(np.triu(matrix, 1))]

    iso_original = Isomap(n_components=2)
    iso_original.fit(df_for_Box_Plot_features)
    geo_distance_original = _condensed_upper(iso_original.dist_matrix_)

    iso_embedded = Isomap(n_components=2)
    iso_embedded.fit(points)
    embedded_matrix = iso_embedded.dist_matrix_
    # Sentinel trick: genuine zero distances would be dropped by the
    # triu/nonzero filter, so mark them, extract, then restore them to 0.
    embedded_matrix[embedded_matrix == 0] = -9999
    geo_distance_embeddings = _condensed_upper(embedded_matrix)
    geo_distance_embeddings[geo_distance_embeddings == -9999] = 0
    return geo_distance_original, geo_distance_embeddings
def generate_histograms(distance_original, distance_embeddings, no_of_bins):
    """
    Plot histograms of the pairwise distances in the original space and in
    the embedding, and return the bin edges of the original-space histogram.

    Parameters
    ----------
    distance_original : nD array
        original distances
    distance_embeddings : nD array
        embedding distances
    no_of_bins : integer
        number of bins in the histogram
    Returns
    ----------
    bin_edges_original : list
        bin edges
    """
    counts_original, bin_edges_original = np.histogram(distance_original, bins=no_of_bins)
    counts_embedding, bin_edges_embedding = np.histogram(distance_embeddings, bins=no_of_bins)
    # BUG FIX: the old code called plt.title() *after* plt.show(), so the
    # titles were never visible, and the second histogram was drawn onto
    # the first figure's axes.  Each distribution now gets its own figure
    # and the title is set before showing it.
    plt.figure()
    plt.hist(distance_original, bins=no_of_bins)
    plt.title("Pairwise distances in original data")
    plt.show()
    plt.figure()
    plt.hist(distance_embeddings, bins=no_of_bins)
    plt.title("Pairwise distances in embeddings")
    plt.show()
    return bin_edges_original
def calculate_box_plot_details(distance_original, distance_embeddings, bin_edges_original):
    """
    Group embedding-space distances by the histogram bin of the matching
    original-space distance.

    Parameters
    ----------
    distance_original : 1-D array-like
        condensed pairwise distances in the original space
    distance_embeddings : 1-D array-like
        condensed pairwise distances in the embedding (same ordering)
    bin_edges_original : array-like
        histogram bin edges for the original distances (52 edges -> 51 bins)

    Returns
    ----------
    list of 51 lists: entry ``i`` holds the embedding distances whose
    original distance fell into histogram bin ``i + 1`` (np.digitize
    returns 1-based bin indices).
    """
    inds_original = np.digitize(distance_original, bins=bin_edges_original)
    # One bucket per bin, keyed by the 1-based digitize index.  This replaces
    # the previous implementation, which injected 51 module-level globals
    # (array1..array51) via globals() -- a namespace-polluting defect.  A
    # dict lookup still fails loudly (KeyError) for out-of-range bin
    # indices, just as the old code raised NameError.
    buckets = {index: [] for index in range(1, 52)}
    for j, bin_index in enumerate(inds_original):
        buckets[bin_index].append(distance_embeddings[j])
    return [buckets[index] for index in range(1, 52)]
def generate_box_plots(data_to_plot):
    """
    Draw the per-bin distance lists as a box plot: an unstyled version is
    saved to 'fig1.png', then the plot is redrawn with filled, coloured
    artists.
    """
    figure = plt.figure(1, figsize=(14, 10))
    axes = figure.add_subplot(111)
    # First pass: plain box plot, written to disk before any styling.
    axes.boxplot(data_to_plot)
    figure.savefig('fig1.png', bbox_inches='tight')
    # Second pass: patch_artist=True turns the boxes into fillable patches.
    parts = axes.boxplot(data_to_plot, patch_artist=True)
    for box in parts['boxes']:
        box.set(color='#7570b3', linewidth=2)  # outline
        box.set(facecolor='#1b9e77')           # fill
    # Whiskers and caps share the same line style as the box outlines.
    for line in parts['whiskers'] + parts['caps']:
        line.set(color='#7570b3', linewidth=2)
    for median in parts['medians']:
        median.set(color='#b2df8a', linewidth=2)
    for flier in parts['fliers']:
        flier.set(marker='o', color='#e7298a', alpha=0.5)
def gen_error_1_NN(embedding, labels):
    """
    Leave-one-out generalization error of a 1-nearest-neighbour classifier.

    Parameters
    ----------
    embedding : nD array
        embedding coordinates, one row per sample
    labels : list
        original labels

    Returns
    ----------
    float
        1 minus the mean leave-one-out accuracy
    """
    classifier = KNeighborsClassifier(n_neighbors=1)
    splitter = LeaveOneOut()
    splitter.get_n_splits(embedding)  # kept for parity with the original; the value is unused
    fold_accuracies = cross_val_score(classifier, X=embedding, y=labels, cv=splitter)
    return 1 - np.mean(fold_accuracies)
| 28.985782 | 137 | 0.680347 |
596ab002529af664473cf2cc0c9a6d46e4922281 | 849 | py | Python | ADAMTR.py | akashsuper2000/codechef-archive | e0e4a7daf66812ab7aa3fe42132c3d067a72457b | [
"bzip2-1.0.6"
] | null | null | null | ADAMTR.py | akashsuper2000/codechef-archive | e0e4a7daf66812ab7aa3fe42132c3d067a72457b | [
"bzip2-1.0.6"
] | null | null | null | ADAMTR.py | akashsuper2000/codechef-archive | e0e4a7daf66812ab7aa3fe42132c3d067a72457b | [
"bzip2-1.0.6"
] | null | null | null |
# Competitive-programming solution (ADAMTR): for each test case, decide
# whether matrix p can be turned into matrix q, apparently by swapping
# symmetric off-diagonal pairs.  Input comes from stdin.
for i in range(int(input())):  # number of test cases
    n = int(input())  # matrix dimension
    p,q = [],[]
    # Read the two n x n integer matrices row by row.
    for j in range(n):
        p.append([int(k) for k in input().split()])
    for j in range(n):
        q.append([int(k) for k in input().split()])
    f = 0  # failure flag: 1 means the matrices cannot be matched
    for j in range(n):
        for k in range(n):
            if(p[j][k]!=q[j][k] and p[j][k]==q[k][j]):
                # NOTE(review): swap() is not defined in this excerpt; it is
                # presumably a helper elsewhere in the original file that
                # swaps p[j][k] with p[k][j] -- confirm.
                swap(p,j,k,n)
            elif(p[j][k]==q[j][k]):
                continue
            else:
                f = 1
    # Second pass: verify that the (possibly swapped) p now equals q.
    for j in range(n):
        for k in range(n):
            if(p[j][k]!=q[j][k]):
                f = 1
                break
        if(f==1):
            break
    if(f==1):
        print('No')
    else:
        print('Yes')
| 22.342105 | 54 | 0.366313 |
596bbf6cce06d70f6a325d7a5bf75a3e2280c89c | 1,110 | py | Python | hparams.py | TanUkkii007/vqvae | 6ac433490fd827174e5b925780d32bea14bfb097 | [
"MIT"
] | 2 | 2019-03-30T16:49:11.000Z | 2019-12-18T22:50:56.000Z | hparams.py | TanUkkii007/vqvae | 6ac433490fd827174e5b925780d32bea14bfb097 | [
"MIT"
] | null | null | null | hparams.py | TanUkkii007/vqvae | 6ac433490fd827174e5b925780d32bea14bfb097 | [
"MIT"
] | 1 | 2020-01-06T12:37:00.000Z | 2020-01-06T12:37:00.000Z | import tensorflow as tf
# Default hyper-parameters: encoder/decoder sizes, vector-quantizer
# settings, and training/evaluation loop knobs (consumed via
# tf.contrib.training.HParams).
default_params = tf.contrib.training.HParams(
    # Encoder
    encoder_num_hiddens=128,
    encoder_num_residual_hiddens=32,
    encoder_num_residual_layers=2,
    # Decoder
    decoder_num_hiddens=128,
    decoder_num_residual_hiddens=32,
    decoder_num_residual_layers=2,
    embedding_dim=64,
    num_embeddings=512,
    commitment_cost=0.25,
    # VectorQuantizer
    vector_quantizer="VectorQuantizer",
    sampling_count=10,
    # Training
    batch_size=32,
    learning_rate=3e-4,
    save_summary_steps=100,
    save_checkpoints_steps=500,
    keep_checkpoint_max=200,
    keep_checkpoint_every_n_hours=1,
    log_step_count_steps=1,
    shuffle_buffer_size=4,
    # Validation
    num_evaluation_steps=32,
    eval_start_delay_secs=3600, # 1h: disable time based evaluation
    eval_throttle_secs=86400, # 24h: disable time based evaluation
    # Misc
    logfile="log.txt",
)
| 23.617021 | 71 | 0.711712 |
596db7d21a1d0b9384a4b3ba2a66f7f8e7dbfeba | 1,080 | py | Python | coroutines.py | PraveenMathew92/python-chatroom-asyncio | 8b3048f17b76e649aff6bcbb7d084362cab32b58 | [
"MIT"
] | null | null | null | coroutines.py | PraveenMathew92/python-chatroom-asyncio | 8b3048f17b76e649aff6bcbb7d084362cab32b58 | [
"MIT"
] | null | null | null | coroutines.py | PraveenMathew92/python-chatroom-asyncio | 8b3048f17b76e649aff6bcbb7d084362cab32b58 | [
"MIT"
] | null | null | null | """
File to demonstrate the coroutines api in python
"""
import asyncio
# NOTE(review): coroutine(), task_runner() and gather_runner() are defined
# elsewhere in the original file; only the driver calls are visible here.
# The bare triple-quoted strings below are no-op expression statements that
# the original author used as block comments; they are kept as-is.
"""
asyncio.run takes a coroutine and
A RuntimeWarning is generated if the coroutine is not awaited
Eg: coroutine('without_run')
"""
asyncio.run(coroutine('coroutine_call'))
"""
create_task creates a task which runs a coroutine in the event loop
"""
asyncio.run(task_runner())
print("""
\t\t\tRunning with gather task
""")
asyncio.run(gather_runner())
"""
OUTPUT:
entering $coroutine_call
exited coroutine_call
entering $task_call
exited task_call
Running with gather task
entering $gather
entering $task_call
exited gather
exited task_call
""" | 16.363636 | 76 | 0.694444 |
5970d34126fb063a7fca4ff450fce1eed6c84c32 | 494 | py | Python | projects/tornado_projects/tord/tord/urls.py | bigfoolliu/liu_aistuff | aa661d37c05c257ee293285dd0868fb7e8227628 | [
"MIT"
] | 1 | 2019-11-25T07:23:42.000Z | 2019-11-25T07:23:42.000Z | projects/tornado_projects/tord/tord/urls.py | bigfoolliu/liu_aistuff | aa661d37c05c257ee293285dd0868fb7e8227628 | [
"MIT"
] | 13 | 2020-01-07T16:09:47.000Z | 2022-03-02T12:51:44.000Z | projects/tornado_projects/tord/tord/urls.py | bigfoolliu/liu_aistuff | aa661d37c05c257ee293285dd0868fb7e8227628 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding:utf-8 -*-
# author: bigfoolliu
from tord.handlers import (block_test, gocron, index, media, upload)
# Tornado-style URL routing table: maps request paths to handler classes
# imported from tord.handlers above.
url_patterns = [
    (r"/", index.IndexHandler),
    (r"/books", upload.BooksHandler),
    (r"/images", media.ImageHandler),
    (r"/videos", media.VideoHandler),
    # (r"/async/test", async_test.Handler),
    (r"/block/test", block_test.BlockHandler),
    # (r"/async/(?P<url>/.*)", async_demo.Handler), # FIXME:
    (r"/test", gocron.TestHandler),
    ]
| 26 | 68 | 0.629555 |
597101821b26dde66f369e5d6c9ba4029fcb1428 | 140 | py | Python | util/emojis.py | Lithimlin/TeaWaiter | fef8d6ef19b8bd10fcd48a2bb320f6cda3ac7156 | [
"MIT"
] | null | null | null | util/emojis.py | Lithimlin/TeaWaiter | fef8d6ef19b8bd10fcd48a2bb320f6cda3ac7156 | [
"MIT"
] | null | null | null | util/emojis.py | Lithimlin/TeaWaiter | fef8d6ef19b8bd10fcd48a2bb320f6cda3ac7156 | [
"MIT"
] | null | null | null | statusEmojis = {'yes':'', 'no':''}
numEmojis = {1:'1', 2:'2', 3:'3', 4:'4', 5:'5', 6:'6', 7:'7', 8:'8', 9:'9', 0:'0'}
| 46.666667 | 102 | 0.328571 |
59728e393c4e17abe11271bfcc3dd74f28baee1f | 28 | py | Python | platehunter/platehunter/module/__init__.py | ZombieIce/A-Stock-Plate-Crawling | e0478c720513876562ebe2a48b9f3131dad63e47 | [
"MIT"
] | 20 | 2018-10-09T18:53:01.000Z | 2022-02-20T13:26:43.000Z | platehunter/platehunter/module/__init__.py | ZombieIce/A-Stock-Plate-Crawling | e0478c720513876562ebe2a48b9f3131dad63e47 | [
"MIT"
] | 36 | 2018-09-20T19:27:54.000Z | 2022-01-23T14:41:39.000Z | insta_hashtag_crawler/__init__.py | point1304/insta-hashtag-crawler | ee056f91d14e19404335fcc49360942acc2e15e8 | [
"MIT"
] | 6 | 2021-09-25T14:03:57.000Z | 2022-03-19T14:44:04.000Z | from .crawler import Crawler | 28 | 28 | 0.857143 |
5972ea55ea758af92089d41c09629539cc06ea40 | 12,048 | py | Python | test/test_subprocess.py | python-useful-helpers/exec-helpers | 3e0adfa7dded72ac1c9c93bd88db070f4c9050b6 | [
"Apache-2.0"
] | 12 | 2018-03-23T23:37:40.000Z | 2021-07-16T16:07:28.000Z | test/test_subprocess.py | penguinolog/exec-helpers | 0784a4772f6e9937540b266fdbb1f5a060fd4b76 | [
"Apache-2.0"
] | 111 | 2018-03-26T14:10:52.000Z | 2021-07-12T07:12:45.000Z | test/test_subprocess.py | penguinolog/exec-helpers | 0784a4772f6e9937540b266fdbb1f5a060fd4b76 | [
"Apache-2.0"
] | 6 | 2018-03-26T13:37:21.000Z | 2018-09-07T03:35:09.000Z | # Copyright 2018 - 2020 Alexey Stepanov aka penguinolog.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Standard Library
import logging
import random
import subprocess
import typing
from unittest import mock
# External Dependencies
import pytest
# Package Implementation
import exec_helpers
from exec_helpers import _subprocess_helpers
from exec_helpers import proc_enums
from exec_helpers.subprocess import SubprocessExecuteAsyncResult
# The whole module is currently disabled: every test below is skipped
# until the execute tests are rewritten.
pytestmark = pytest.mark.skip("Rewrite whole execute tests.")
# All test coroutines will be treated as marked.
command = "ls ~\nline 2\nline 3\nline "
command_log = f"Executing command:\n{command.rstrip()!r}\n"
print_stdin = 'read line; echo "$line"'
default_timeout = 60 * 60  # 1 hour
# Scenario table: each entry fixes the expected exit code (ec), the
# captured stdout/stderr chunk tuples, the stdin payload, and whether the
# stdout/stderr pipes are opened.  Presumably resolved by a
# ``run_parameters`` fixture defined elsewhere (parametrized indirectly by
# pytest_generate_tests below) -- confirm against conftest.
configs = {
    "positive_simple": dict(
        ec=0, stdout=(b" \n", b"2\n", b"3\n", b" \n"), stderr=(), stdin=None, open_stdout=True, open_stderr=True
    ),
    "with_stderr": dict(
        ec=0,
        stdout=(b" \n", b"2\n", b"3\n", b" \n"),
        stderr=(b" \n", b"0\n", b"1\n", b" \n"),
        stdin=None,
        open_stdout=True,
        open_stderr=True,
    ),
    "negative": dict(
        ec=1,
        stdout=(b" \n", b"2\n", b"3\n", b" \n"),
        stderr=(b" \n", b"0\n", b"1\n", b" \n"),
        stdin=None,
        open_stdout=True,
        open_stderr=True,
    ),
    "with_stdin_str": dict(
        ec=0, stdout=(b" \n", b"2\n", b"3\n", b" \n"), stderr=(), stdin="stdin", open_stdout=True, open_stderr=True
    ),
    "with_stdin_bytes": dict(
        ec=0, stdout=(b" \n", b"2\n", b"3\n", b" \n"), stderr=(), stdin=b"stdin", open_stdout=True, open_stderr=True
    ),
    "with_stdin_bytearray": dict(
        ec=0,
        stdout=(b" \n", b"2\n", b"3\n", b" \n"),
        stderr=(),
        stdin=bytearray(b"stdin"),
        open_stdout=True,
        open_stderr=True,
    ),
    "no_stderr": dict(
        ec=0, stdout=(b" \n", b"2\n", b"3\n", b" \n"), stderr=(), stdin=None, open_stdout=True, open_stderr=False
    ),
    "no_stdout": dict(ec=0, stdout=(), stderr=(), stdin=None, open_stdout=False, open_stderr=False),
}
def pytest_generate_tests(metafunc):
    """Parametrize every test that uses the ``run_parameters`` fixture.

    The scenario names listed here are keys of the module-level ``configs``
    mapping; ``indirect=True`` routes them through the fixture.
    """
    if "run_parameters" not in metafunc.fixturenames:
        return
    scenario_names = [
        "positive_simple",
        "with_stderr",
        "negative",
        "with_stdin_str",
        "with_stdin_bytes",
        "with_stdin_bytearray",
        "no_stderr",
        "no_stdout",
    ]
    metafunc.parametrize("run_parameters", scenario_names, indirect=True)
def test_001_execute_async(popen, subprocess_logger, run_parameters) -> None:
    """Test low level API."""
    runner = exec_helpers.Subprocess()
    res = runner._execute_async(
        command,
        stdin=run_parameters["stdin"],
        open_stdout=run_parameters["open_stdout"],
        open_stderr=run_parameters["open_stderr"],
    )
    assert isinstance(res, SubprocessExecuteAsyncResult)
    assert res.interface.wait() == run_parameters["ec"]
    assert res.interface.returncode == run_parameters["ec"]
    stdout = run_parameters["stdout"]
    stderr = run_parameters["stderr"]
    # Streams are only compared when the scenario opened them; otherwise
    # the result must expose the very same sentinel object.
    if stdout is not None:
        # NOTE(review): read_stream() is a helper defined elsewhere in the
        # original file; it is not visible in this excerpt.
        assert read_stream(res.stdout) == stdout
    else:
        assert res.stdout is stdout
    if stderr is not None:
        assert read_stream(res.stderr) == stderr
    else:
        assert res.stderr is stderr
    # Normalize the scenario's stdin payload to bytes, mirroring what the
    # implementation is expected to write into the child's stdin pipe.
    if run_parameters["stdin"] is None:
        stdin = None
    elif isinstance(run_parameters["stdin"], bytes):
        stdin = run_parameters["stdin"]
    elif isinstance(run_parameters["stdin"], str):
        stdin = run_parameters["stdin"].encode(encoding="utf-8")
    else:
        stdin = bytes(run_parameters["stdin"])
    if stdin:
        assert res.stdin is None
    # The patched subprocess.Popen must have been invoked exactly once with
    # the pipe configuration implied by the scenario.
    popen.assert_called_once_with(
        args=[command],
        stdout=subprocess.PIPE if run_parameters["open_stdout"] else subprocess.DEVNULL,
        stderr=subprocess.PIPE if run_parameters["open_stderr"] else subprocess.DEVNULL,
        stdin=subprocess.PIPE,
        shell=True,
        cwd=run_parameters.get("cwd", None),
        env=run_parameters.get("env", None),
        universal_newlines=False,
        **_subprocess_helpers.subprocess_kw,
    )
    if stdin is not None:
        res.interface.stdin.write.assert_called_once_with(stdin)
        res.interface.stdin.close.assert_called_once()
def test_002_execute(popen, subprocess_logger, exec_result, run_parameters) -> None:
    """Test API without checkers."""
    runner = exec_helpers.Subprocess()
    res = runner.execute(
        command,
        stdin=run_parameters["stdin"],
        open_stdout=run_parameters["open_stdout"],
        open_stderr=run_parameters["open_stderr"],
    )
    assert isinstance(res, exec_helpers.ExecResult)
    assert res == exec_result
    # The child must be waited on with the module-wide default timeout, and
    # the first log record must announce the executed command.
    popen().wait.assert_called_once_with(timeout=default_timeout)
    assert subprocess_logger.mock_calls[0] == mock.call.log(level=logging.DEBUG, msg=command_log)
def test_003_context_manager(mocker, popen, subprocess_logger, exec_result, run_parameters) -> None:
    """Test context manager for threads synchronization."""
    lock_mock = mocker.patch("threading.RLock")
    with exec_helpers.Subprocess() as runner:
        res = runner.execute(command, stdin=run_parameters["stdin"])
        # NOTE(review): the two calls below look like misspelled mock
        # assertions (`lock_mock.acquire.assert_called_once()` /
        # `lock_mock.release.assert_called_once()`).  As written they merely
        # create child mocks and therefore never assert anything.
        lock_mock.acquire_assert_called_once()
        lock_mock.release_assert_called_once()
    assert isinstance(res, exec_helpers.ExecResult)
    assert res == exec_result
def test_004_check_call(execute, exec_result, subprocess_logger) -> None:
    """Test exit code validator."""
    runner = exec_helpers.Subprocess()
    if exec_result.exit_code == exec_helpers.ExitCodes.EX_OK:
        # Success path: check_call returns the result like execute().
        assert runner.check_call(command, stdin=exec_result.stdin) == exec_result
    else:
        # Failure path: a CalledProcessError carrying the full result must
        # be raised, and the mismatch must be logged as an error.
        with pytest.raises(exec_helpers.CalledProcessError) as e:
            runner.check_call(command, stdin=exec_result.stdin)
        exc: exec_helpers.CalledProcessError = e.value
        assert exc.cmd == exec_result.cmd
        assert exc.returncode == exec_result.exit_code
        assert exc.stdout == exec_result.stdout_str
        assert exc.stderr == exec_result.stderr_str
        assert exc.result == exec_result
        assert exc.expected == (proc_enums.EXPECTED,)
        assert subprocess_logger.mock_calls[-1] == mock.call.error(
            msg=f"Command {exc.result.cmd!r} returned exit code {exc.result.exit_code!s} "
            f"while expected {exc.expected!r}"
        )
def test_005_check_call_no_raise(execute, exec_result, subprocess_logger) -> None:
    """Test exit code validator in permissive mode."""
    runner = exec_helpers.Subprocess()
    res = runner.check_call(command, stdin=exec_result.stdin, raise_on_err=False)
    assert res == exec_result
    # Even in permissive mode an unexpected exit code must still be logged.
    if exec_result.exit_code != exec_helpers.ExitCodes.EX_OK:
        expected = (proc_enums.EXPECTED,)
        assert subprocess_logger.mock_calls[-1] == mock.call.error(
            msg=f"Command {res.cmd!r} returned exit code {res.exit_code!s} while expected {expected!r}"
        )
def test_006_check_call_expect(execute, exec_result, subprocess_logger) -> None:
    """check_call succeeds when the actual exit code is listed as expected."""
    result = exec_helpers.Subprocess().check_call(
        command, stdin=exec_result.stdin, expected=[exec_result.exit_code]
    )
    assert result == exec_result
def test_007_check_stderr(execute, exec_result, subprocess_logger) -> None:
    """Test STDERR content validator."""
    runner = exec_helpers.Subprocess()
    if not exec_result.stderr:
        # No stderr produced: the call behaves like check_call.
        assert runner.check_stderr(command, stdin=exec_result.stdin, expected=[exec_result.exit_code]) == exec_result
    else:
        # Any stderr output must raise and be logged as an error.
        with pytest.raises(exec_helpers.CalledProcessError) as e:
            runner.check_stderr(command, stdin=exec_result.stdin, expected=[exec_result.exit_code])
        exc: exec_helpers.CalledProcessError = e.value
        assert exc.result == exec_result
        assert exc.cmd == exec_result.cmd
        assert exc.returncode == exec_result.exit_code
        assert exc.stdout == exec_result.stdout_str
        assert exc.stderr == exec_result.stderr_str
        # NOTE(review): exc.result is asserted twice (above and here); the
        # second check is redundant.
        assert exc.result == exec_result
        assert subprocess_logger.mock_calls[-1] == mock.call.error(
            msg=f"Command {exc.result.cmd!r} output contains STDERR while not expected\n"
            f"\texit code: {exc.result.exit_code!s}"
        )
def test_008_check_stderr_no_raise(execute, exec_result, subprocess_logger) -> None:
    """check_stderr with raise_on_err=False returns the result untouched."""
    runner = exec_helpers.Subprocess()
    result = runner.check_stderr(
        command,
        stdin=exec_result.stdin,
        expected=[exec_result.exit_code],
        raise_on_err=False,
    )
    assert result == exec_result
def test_009_call(popen, subprocess_logger, exec_result, run_parameters) -> None:
    """A Subprocess instance is callable and forwards the stream flags."""
    runner = exec_helpers.Subprocess()
    result = runner(
        command,
        stdin=run_parameters["stdin"],
        open_stdout=run_parameters["open_stdout"],
        open_stderr=run_parameters["open_stderr"],
    )
    # The call wraps everything in an ExecResult and waits with the default timeout.
    assert isinstance(result, exec_helpers.ExecResult)
    assert result == exec_result
    popen().wait.assert_called_once_with(timeout=default_timeout)
| 34.820809 | 117 | 0.664011 |
59733ab215ceaed85b6503b5568828c87eda4e73 | 1,943 | py | Python | Code/v1.0/message.py | arik-le/Chips-Bits | fa343ea79f13ce3172292871cebd1144b2c3c1c5 | [
"MIT"
] | 4 | 2017-11-06T15:12:07.000Z | 2020-12-20T13:44:05.000Z | Code/v1.0/message.py | arik-le/Chips-Bits | fa343ea79f13ce3172292871cebd1144b2c3c1c5 | [
"MIT"
] | 36 | 2017-11-03T12:07:40.000Z | 2018-06-22T11:59:59.000Z | Code/v1.0/message.py | arik-le/Chips-Bits | fa343ea79f13ce3172292871cebd1144b2c3c1c5 | [
"MIT"
] | null | null | null | import pickle
import os
from constant_variable import *
# class Message
# get message from queue
def get():
    """Return the first message in the queue file, deserialized via pickle.

    The whole queue file is read, split on the ``BUFFER`` delimiter, and the
    first entry is unpickled.

    Returns:
        The deserialized object at the head of the queue.
    """
    # BUG FIX: the original leaked the file handle (open() without close);
    # a context manager closes it deterministically.
    # NOTE(review): pickle.loads() requires bytes on Python 3 -- if this module
    # targets Python 3 the file should be opened in "rb" mode and BUFFER must
    # be a bytes delimiter; confirm against constant_variable.
    with open(MESSAGE_QUEUE_FILE, "r") as queue_file:
        message_list = queue_file.read().split(BUFFER)
    return pickle.loads(message_list[0])
# take from file and cast it to object
# remove the message from queue
# check if there is a message in the queue
| 26.616438 | 77 | 0.65054 |
597345ee49817e67d67ebede702d14893a6e8c4d | 4,732 | py | Python | Lib/featureMan/familyFeatures.py | typoman/featureman | f115ea8d3faae042845cfca9502d91da88405c68 | [
"MIT"
] | 13 | 2019-07-21T14:00:49.000Z | 2019-07-29T21:43:03.000Z | Lib/featureMan/familyFeatures.py | typoman/featureman | f115ea8d3faae042845cfca9502d91da88405c68 | [
"MIT"
] | 1 | 2019-07-28T12:06:23.000Z | 2019-07-28T12:06:23.000Z | Lib/featureMan/familyFeatures.py | typoman/featureman | f115ea8d3faae042845cfca9502d91da88405c68 | [
"MIT"
] | null | null | null | from featureMan.otSingleSubFeatures import *
from featureMan.otNumberFeatures import *
from featureMan.otLanguages import *
from featureMan.otLocalized import *
from featureMan.otLigatureFeatures import *
from featureMan.otMark import mark
from featureMan.otSyntax import fontDic, GDEF
from featureMan.otKern import kern
from featureMan.otCursive import cursive
if __name__ == '__main__':
    # CLI entry point: generate OpenType features for a single UFO file.
    import argparse
    from fontParts.fontshell.font import RFont
    parser = argparse.ArgumentParser()
    parser.add_argument("-u", "--ufo", help="Path to the ufo file.", type=str)
    parser.add_argument("-b", "--base", help="Base features to include in the begining. It can be used to add some manual features at top of the feature file.", type=str, default="")
    parser.add_argument("-o", "--only", help="Only unclude the comma seperated feature tags written here. For example: mark,gdef", type=str)
    parser.add_argument("-p", "--path", help="Path to save the feature file at, default path is next to the UFO.", type=str)
    args = parser.parse_args()
    if args.ufo is not None:
        # Open the UFO and hand it to the feature generator.
        # NOTE(review): generateFeatures is expected to come from one of the
        # star-imports above -- its definition is not visible in this chunk.
        f = RFont(args.ufo)
        generateFeatures(f, marksToSkip=None, base=args.base, include=args.only, path=args.path)
    else:
        # No UFO given: print usage guidance instead of failing.
        print('You need a UFO for the familyFeatures module to work. Use the following command for help:\npython3 "/path/to/repo/Lib/featureMan/familyFeatures.py" -h')
| 40.793103 | 391 | 0.674134 |
5975a408ae1c989c338845f71aa3900205bb24fd | 15,265 | py | Python | FFSP/FFSP_MatNet/FFSPModel.py | MinahPark/MatNet | 63342de76f6a982bdfb5c1e8d5930d64ec3efa61 | [
"MIT"
] | 18 | 2021-11-22T09:37:52.000Z | 2022-03-31T03:48:00.000Z | FFSP/FFSP_MatNet/FFSPModel.py | MinahPark/MatNet | 63342de76f6a982bdfb5c1e8d5930d64ec3efa61 | [
"MIT"
] | 1 | 2021-12-04T05:14:26.000Z | 2021-12-14T03:04:55.000Z | FFSP/FFSP_MatNet/FFSPModel.py | MinahPark/MatNet | 63342de76f6a982bdfb5c1e8d5930d64ec3efa61 | [
"MIT"
] | 5 | 2021-12-15T01:56:02.000Z | 2022-03-07T13:13:05.000Z |
"""
The MIT License
Copyright (c) 2021 MatNet
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from FFSPModel_SUB import AddAndInstanceNormalization, FeedForward, MixedScore_MultiHeadAttention
########################################
# ENCODER
########################################
########################################
# Decoder
########################################
########################################
# NN SUB FUNCTIONS
########################################
def reshape_by_heads(qkv, head_num):
    """Split the last dimension into attention heads and move heads forward.

    Args:
        qkv: tensor of shape (batch, n, head_num * key_dim); n may be 1 or
            the problem size.
        head_num: number of attention heads.

    Returns:
        Tensor of shape (batch, head_num, n, key_dim).
    """
    batch_size = qkv.size(0)
    seq_len = qkv.size(1)
    # (batch, n, head_num * key_dim) -> (batch, n, head_num, key_dim)
    per_head = qkv.reshape(batch_size, seq_len, head_num, -1)
    # Swap the head and sequence axes: (batch, head_num, n, key_dim).
    return per_head.transpose(1, 2)
| 39.141026 | 123 | 0.648411 |
5975bf51cf6b40314443cbac07c50fa49c107d36 | 1,697 | py | Python | compose.py | lvyufeng/mindspore_poems | 2f46afa290a8065cd1c774c26a96be76da30873e | [
"MIT"
] | null | null | null | compose.py | lvyufeng/mindspore_poems | 2f46afa290a8065cd1c774c26a96be76da30873e | [
"MIT"
] | null | null | null | compose.py | lvyufeng/mindspore_poems | 2f46afa290a8065cd1c774c26a96be76da30873e | [
"MIT"
] | null | null | null | import os
import numpy as np
import mindspore
from mindspore import Tensor
from mindspore import load_checkpoint, load_param_into_net
from src.model import RNNModel, RNNModelInfer
from src.utils import process_poems
# Sentinel characters wrapped around each poem in the corpus
# (presumably added during preprocessing in ``process_poems`` -- confirm there).
start_token = 'B'
end_token = 'E'
model_dir = './ckpt/'  # directory holding trained model checkpoints
corpus_file = './data/poems.txt'  # raw poem corpus
if __name__ == '__main__':
    # Interactive entry point: prompt for the poem's first character;
    # typing 'quit' exits immediately.
    begin_char = input('## quit please input the first character: ')
    if begin_char == 'quit':
        exit()
    # NOTE(review): gen_poem is not defined in this visible chunk --
    # presumably defined elsewhere in this file.
    poem = gen_poem(begin_char)
print(poem) | 30.303571 | 83 | 0.669417 |
5976b5eadcdfa649651a6db9b9bd714639c5b347 | 1,523 | py | Python | pychemia/core/from_file.py | petavazohi/PyChemia | e779389418771c25c830aed360773c63bb069372 | [
"MIT"
] | 67 | 2015-01-31T07:44:55.000Z | 2022-03-21T21:43:34.000Z | pychemia/core/from_file.py | petavazohi/PyChemia | e779389418771c25c830aed360773c63bb069372 | [
"MIT"
] | 13 | 2016-06-03T19:07:51.000Z | 2022-03-31T04:20:40.000Z | pychemia/core/from_file.py | petavazohi/PyChemia | e779389418771c25c830aed360773c63bb069372 | [
"MIT"
] | 37 | 2015-01-22T15:37:23.000Z | 2022-03-21T15:38:10.000Z | import os
import sys
from pychemia import HAS_PYMATGEN, pcm_log
from .structure import Structure
from pychemia.code.vasp import read_poscar
from pychemia.code.abinit import AbinitInput
def structure_from_file(structure_file):
    """
    Attempts to reconstruct a PyChemia Structure from the contents of any given file.

    The format is guessed from the file name: JSON, CIF (when pymatgen is
    available), POSCAR/CONTCAR and abinit input files are recognized; any
    other file is tried as a POSCAR as a last resort.

    :param structure_file: The path to a file where the structure can be reconstructed
    :type structure_file: str
    :return: PyChemia Structure if succeed, None otherwise
    """
    st = None
    basename = os.path.basename(structure_file)
    if not os.path.isfile(structure_file):
        raise ValueError("ERROR: Could not open file '%s'" % structure_file)
    lower_name = basename.lower()
    if lower_name.endswith('json'):
        st = Structure.load_json(structure_file)
    elif lower_name.endswith('cif') and HAS_PYMATGEN:
        import pychemia.external.pymatgen
        st = pychemia.external.pymatgen.cif2structure(structure_file)[0]
    elif 'poscar' in lower_name or 'contcar' in lower_name:
        # POSCAR and CONTCAR share the same VASP format/reader.
        st = read_poscar(structure_file)
    elif 'abinit' in lower_name:
        av = AbinitInput(structure_file)
        st = av.get_structure()
    else:
        # Unknown name: last resort is trying to parse it as a POSCAR.
        try:
            st = read_poscar(structure_file)
        except ValueError:
            # BUG FIX: the original message read "ould not convert file as POSCAR".
            raise ValueError("Could not convert file '%s' as POSCAR" % structure_file)
    if st is None:
        pcm_log.debug("ERROR: Could not extract structure from file '%s'" % structure_file)
    return st
| 37.146341 | 99 | 0.692055 |
59792e136f9480b5e034aa6d01981255bd1bfdd7 | 992 | py | Python | snptools/vc_matrix.py | pvanheus/variant_exploration_with_tralynca | 4ffadc29c19d68909beed2254646e36513311847 | [
"MIT"
] | null | null | null | snptools/vc_matrix.py | pvanheus/variant_exploration_with_tralynca | 4ffadc29c19d68909beed2254646e36513311847 | [
"MIT"
] | null | null | null | snptools/vc_matrix.py | pvanheus/variant_exploration_with_tralynca | 4ffadc29c19d68909beed2254646e36513311847 | [
"MIT"
] | null | null | null |
from os import listdir
import os.path
import pandas as pd
from .count_variants_per_gene import process_vcf
from .genetree import make_gene_tree | 41.333333 | 93 | 0.676411 |
5979cf5bed5000445a52e27786a6829f4458f888 | 481 | py | Python | oarepo_records_draft/merge.py | oarepo/invenio-records-draft | 6d77309996c58fde7731e5f182e9cd5400f81f14 | [
"MIT"
] | 1 | 2020-06-03T14:44:49.000Z | 2020-06-03T14:44:49.000Z | oarepo_records_draft/merge.py | oarepo/invenio-records-draft | 6d77309996c58fde7731e5f182e9cd5400f81f14 | [
"MIT"
] | 7 | 2020-06-02T14:45:48.000Z | 2021-11-16T08:38:47.000Z | oarepo_records_draft/merge.py | oarepo/invenio-records-draft | 6d77309996c58fde7731e5f182e9cd5400f81f14 | [
"MIT"
] | 1 | 2019-08-15T07:59:48.000Z | 2019-08-15T07:59:48.000Z | from deepmerge import Merger
# Deepmerge Merger used to merge draft records.
draft_merger = Merger(
    # Per-type strategies: lists are merged by the custom ``list_merge``
    # helper (defined elsewhere in this module -- not visible in this chunk);
    # dicts are merged recursively key-by-key via the built-in "merge" strategy.
    [
        (list, [list_merge]),
        (dict, ["merge"])
    ],
    # Fallback strategy for every other type: the incoming value wins.
    ["override"],
    # Strategy when the two sides disagree on type: the incoming value also wins.
    ["override"]
)
| 20.913043 | 52 | 0.534304 |
597bfa5b6f7cdb21349ef3d1cce73227ae2c86fc | 4,951 | py | Python | source/01_make_coordinates/make_coordinates.py | toshi-k/kaggle-airbus-ship-detection-challenge | 872a160057592022488b1772b6c7a8982677d1dc | [
"Apache-2.0"
] | 90 | 2018-11-17T21:37:41.000Z | 2021-11-24T11:55:34.000Z | source/01_make_coordinates/make_coordinates.py | jackweiwang/kaggle-airbus-ship-detection-challenge | 872a160057592022488b1772b6c7a8982677d1dc | [
"Apache-2.0"
] | 3 | 2018-11-27T14:23:15.000Z | 2020-03-09T09:23:25.000Z | source/01_make_coordinates/make_coordinates.py | jackweiwang/kaggle-airbus-ship-detection-challenge | 872a160057592022488b1772b6c7a8982677d1dc | [
"Apache-2.0"
] | 14 | 2018-11-17T21:37:44.000Z | 2020-11-30T02:22:28.000Z | import os
import numpy as np
import pandas as pd
from tqdm import tqdm
from PIL import Image
from lib.img2_coord_ica import img2_coord_iter, coord2_img
from lib.log import Logger
# ref: https://www.kaggle.com/paulorzp/run-length-encode-and-decode
def rle_decode(mask_rle, shape=(768, 768)):
    """Decode a run-length-encoded mask string into a uint8 image.

    Args:
        mask_rle: run-length string of space-separated "start length" pairs
            (1-based pixel indices, column-major order).
        shape: (height, width) of the array to return.

    Returns:
        numpy uint8 array of the given shape, 255 on mask pixels, 0 elsewhere.
    """
    tokens = mask_rle.split()
    # Even tokens are 1-based start pixels, odd tokens are run lengths.
    first_pixels = np.asarray(tokens[0::2], dtype=int) - 1
    run_lengths = np.asarray(tokens[1::2], dtype=int)

    flat = np.zeros(shape[0] * shape[1], dtype=np.uint8)
    for begin, run in zip(first_pixels, run_lengths):
        flat[begin:begin + run] = 255

    # RLE is stored column-major, hence the final transpose.
    return flat.reshape(shape).T
if __name__ == '__main__':
    # Load the ship-segmentation ground truth (RLE-encoded masks -- see
    # rle_decode above).
    segmentations = pd.read_csv('../../dataset/train_ship_segmentations_v2.csv')
    print(segmentations.head())
    # Rows for images without a mask contain NaN; normalize to empty strings.
    segmentations = segmentations.fillna('')
    # main_test()
    main()  # defined elsewhere in this file, outside the visible chunk
| 30.006061 | 119 | 0.626338 |
597ddcf7272429172b7edee0cb03c0de356cd799 | 127 | py | Python | tests/test_main.py | skypaw/rconcrete | 30bc7e5ada2afa975caabcd38461707e094d695b | [
"MIT"
] | null | null | null | tests/test_main.py | skypaw/rconcrete | 30bc7e5ada2afa975caabcd38461707e094d695b | [
"MIT"
] | 2 | 2022-02-05T18:49:44.000Z | 2022-02-06T01:11:07.000Z | tests/test_main.py | skypaw/rconcrete | 30bc7e5ada2afa975caabcd38461707e094d695b | [
"MIT"
] | null | null | null | from src.main import sample_function | 21.166667 | 36 | 0.692913 |
597e7da85300fb6bd6d365c07bb2ba1dbac55565 | 1,598 | py | Python | scripts/combine_errors.py | nbren12/nn_atmos_param | cb138f0b211fd5743e56ad659aec38c082d2b3ac | [
"MIT"
] | 4 | 2018-09-16T20:55:57.000Z | 2020-12-06T11:27:50.000Z | scripts/combine_errors.py | nbren12/nn_atmos_param | cb138f0b211fd5743e56ad659aec38c082d2b3ac | [
"MIT"
] | 5 | 2018-04-07T07:40:39.000Z | 2018-06-20T06:56:08.000Z | scripts/combine_errors.py | nbren12/nn_atmos_param | cb138f0b211fd5743e56ad659aec38c082d2b3ac | [
"MIT"
] | null | null | null | import numpy as np
import re
import json
import xarray as xr
import pandas as pd
def read_train_loss(epoch, fname,
                    variables=None):
    """Read the loss.json file for the current epochs test and train loss.

    The JSON file must contain records with an ``epoch`` column plus the
    requested loss columns; values are averaged per epoch.

    Args:
        epoch: current epoch number; the means for ``epoch - 1`` are returned
            because the logged data is accumulated over the whole previous epoch.
        fname: path to the loss.json file.
        variables: loss columns to extract; defaults to
            ``['test_loss', 'train_loss']``.

    Returns:
        dict mapping each variable to its epoch mean, or NaNs for epoch 0.
    """
    if variables is None:
        # BUG FIX: avoid a mutable default argument; effective default unchanged.
        variables = ['test_loss', 'train_loss']
    df = pd.read_json(fname)
    epoch_means = df.groupby('epoch').mean()
    # need to look for epoch-1 because this data is accumulated over the whole first epoch
    if epoch > 0:
        return epoch_means.loc[epoch - 1][variables].to_dict()
    # NOTE: keys here are fixed regardless of `variables`, matching the
    # original behavior.
    return {'test_loss': np.nan, 'train_loss': np.nan}
# Collect one error dataset per (model, seed, epoch) run found among the
# Snakemake inputs, tagging each with its run arguments and its losses.
errors = []
dims = []
pattern = re.compile("data/output/model.(.*?)/(.*?)/(.*?)/error.nc")
for f in snakemake.input:
    m = pattern.search(f)
    if m:
        model, seed, epoch = m.groups()
        ds = xr.open_dataset(f)
        # Attach the hyper-parameters this run was trained with.
        arg_file = f"data/output/model.{model}/{seed}/arguments.json"
        args = json.load(open(arg_file))
        # nhidden is a list, so need to just take the first element
        # since all the neural networks I fit are single layer
        args['nhidden'] = args['nhidden'][0]
        args.pop('seed', None)
        ds = ds.assign(**args)
        # Attach this epoch's train/test loss read from the run's loss log.
        loss_file = f"data/output/model.{model}/{seed}/loss.json"
        train_error = read_train_loss(int(epoch), loss_file)
        ds = ds.assign(**train_error)
        # append to lists
        dims.append((model, seed, int(epoch)))
        errors.append(ds)
# Concatenate along a (model, seed, epoch) MultiIndex, then unstack it into
# separate dimensions before writing the combined dataset to netCDF.
names = ['model', 'seed', 'epoch']
dim = pd.MultiIndex.from_tuples(dims, names=names)
dim.name = 'tmp'
ds = xr.concat(errors, dim=dim).unstack('tmp')
ds.to_netcdf(snakemake.output[0])
| 30.150943 | 90 | 0.627034 |
59801917a885910b96ef72a02bd5c83398abe7ef | 705 | py | Python | tests/acceptance/selene_collection_should_test.py | KalinkinaMaria/selene | 859e1102c85740b52af8d0f08dd6b6490b4bd2ff | [
"MIT"
] | null | null | null | tests/acceptance/selene_collection_should_test.py | KalinkinaMaria/selene | 859e1102c85740b52af8d0f08dd6b6490b4bd2ff | [
"MIT"
] | 1 | 2021-06-02T04:21:17.000Z | 2021-06-02T04:21:17.000Z | tests/acceptance/selene_collection_should_test.py | vkarpenko/selene | 4776357430c940be38f38be9981006dd156f9730 | [
"MIT"
] | null | null | null | import pytest
from selenium.common.exceptions import TimeoutException
from selene.browser import *
from selene.support.conditions import have
from selene.support.jquery_style_selectors import ss
from tests.acceptance.helpers.helper import get_test_driver
from tests.acceptance.helpers.todomvc import given_active
| 25.178571 | 77 | 0.741844 |
5980640bb02c2631ecc30d2c519d9ed76e0a3bab | 2,422 | py | Python | genomics_data_index/test/unit/variant/service/test_SQLQueryInBatcher.py | apetkau/genomics-data-index | d0cc119fd57b8cbd701affb1c84450cf7832fa01 | [
"Apache-2.0"
] | 12 | 2021-05-03T20:56:05.000Z | 2022-01-04T14:52:19.000Z | genomics_data_index/test/unit/variant/service/test_SQLQueryInBatcher.py | apetkau/thesis-index | 6c96e9ed75d8e661437effe62a939727a0b473fc | [
"Apache-2.0"
] | 30 | 2021-04-26T23:03:40.000Z | 2022-02-25T18:41:14.000Z | genomics_data_index/test/unit/variant/service/test_SQLQueryInBatcher.py | apetkau/genomics-data-index | d0cc119fd57b8cbd701affb1c84450cf7832fa01 | [
"Apache-2.0"
] | null | null | null | from genomics_data_index.storage.service import SQLQueryInBatcherDict, SQLQueryInBatcherList
| 36.69697 | 92 | 0.676301 |
5980a13b88db20b5e773819c926a4981f53bb21e | 1,611 | py | Python | mu.py | cool2645/shadowsocksrr | 0a594857f4c3125ab14d27d7fd8143291b7c9fee | [
"Apache-2.0"
] | 2 | 2018-05-14T10:41:38.000Z | 2020-05-22T12:40:57.000Z | mu.py | cool2645/shadowsocksrr | 0a594857f4c3125ab14d27d7fd8143291b7c9fee | [
"Apache-2.0"
] | null | null | null | mu.py | cool2645/shadowsocksrr | 0a594857f4c3125ab14d27d7fd8143291b7c9fee | [
"Apache-2.0"
] | 1 | 2018-09-22T16:15:14.000Z | 2018-09-22T16:15:14.000Z | import db_transfer
import config
import logging
from musdk.client import Client
| 28.767857 | 76 | 0.590937 |
598126ffcc8da7b8ff9a91f8f601f2ef5306a660 | 2,001 | py | Python | tests/test_json.py | NyntoFive/data_extractor | 965e12570d6b7549aa2f8b3bd1951e06b010c444 | [
"MIT"
] | null | null | null | tests/test_json.py | NyntoFive/data_extractor | 965e12570d6b7549aa2f8b3bd1951e06b010c444 | [
"MIT"
] | null | null | null | tests/test_json.py | NyntoFive/data_extractor | 965e12570d6b7549aa2f8b3bd1951e06b010c444 | [
"MIT"
] | null | null | null | # Standard Library
import json
# Third Party Library
import pytest
from jsonpath_rw.lexer import JsonPathLexerError
# First Party Library
from data_extractor.exceptions import ExprError, ExtractError
from data_extractor.json import JSONExtractor
| 23.267442 | 82 | 0.590705 |
59814b4554d683700762543937d73f8de4e2078a | 938 | py | Python | demo/predictions/visualize.py | qixuxiang/maskrcnn_tianchi_stage2 | 52023b64268dc91f0b5b9f085203ab00a542458a | [
"MIT"
] | null | null | null | demo/predictions/visualize.py | qixuxiang/maskrcnn_tianchi_stage2 | 52023b64268dc91f0b5b9f085203ab00a542458a | [
"MIT"
] | null | null | null | demo/predictions/visualize.py | qixuxiang/maskrcnn_tianchi_stage2 | 52023b64268dc91f0b5b9f085203ab00a542458a | [
"MIT"
] | null | null | null | import numpy as np
from PIL import Image
import os
# Inspect saved prediction masks: report their raw pixel sums, then scale
# each one by 50 so near-binary masks become visible as 8-bit grayscale PNGs.
prediction_paths = ['./prediction/1110_%d.npy' % i for i in range(1, 6)]
arrays = [np.load(path) for path in prediction_paths]

for arr in arrays:
    print(sum(sum(arr)))

for index, arr in enumerate(arrays, start=1):
    image = Image.fromarray(50 * arr).convert("L")
    image.save("./test_pic/test%d.png" % index)
| 26.055556 | 41 | 0.715352 |
59821d30d6e5bb63ead4e418643ab63f3b0a5f6b | 1,125 | py | Python | examples/gbdt_classifier_example.py | tushushu/Imilu | 121c79574d3e6ca35b569dd58661175e5c3668e2 | [
"Apache-2.0"
] | 407 | 2018-08-22T05:58:33.000Z | 2022-03-31T11:44:48.000Z | examples/gbdt_classifier_example.py | tushushu/Imilu | 121c79574d3e6ca35b569dd58661175e5c3668e2 | [
"Apache-2.0"
] | 9 | 2018-11-07T07:44:02.000Z | 2021-12-10T11:59:47.000Z | examples/gbdt_classifier_example.py | tushushu/Imilu | 121c79574d3e6ca35b569dd58661175e5c3668e2 | [
"Apache-2.0"
] | 286 | 2018-08-22T08:00:19.000Z | 2022-03-30T00:59:20.000Z | # -*- coding: utf-8 -*-
"""
@Author: tushushu
@Date: 2018-08-21 14:33:11
@Last Modified by: tushushu
@Last Modified time: 2019-05-22 15:41:11
"""
import os
os.chdir(os.path.split(os.path.realpath(__file__))[0])
import sys
sys.path.append(os.path.abspath(".."))
from imylu.ensemble.gbdt_classifier import GradientBoostingClassifier
from imylu.utils.load_data import load_breast_cancer
from imylu.utils.model_selection import train_test_split, model_evaluation
from imylu.utils.utils import run_time
if __name__ == "__main__":
    # Script entry point; ``main`` is defined elsewhere in this file,
    # outside the visible chunk.
    main()
| 28.846154 | 99 | 0.731556 |
5985441293e6489af243c2cd16aa10e62e49c056 | 16,658 | py | Python | gamestonk_terminal/cryptocurrency/due_diligence/pycoingecko_view.py | clairvoyant/GamestonkTerminal | 7b40cfe61b32782e36f5de8a08d075532a08c294 | [
"MIT"
] | null | null | null | gamestonk_terminal/cryptocurrency/due_diligence/pycoingecko_view.py | clairvoyant/GamestonkTerminal | 7b40cfe61b32782e36f5de8a08d075532a08c294 | [
"MIT"
] | null | null | null | gamestonk_terminal/cryptocurrency/due_diligence/pycoingecko_view.py | clairvoyant/GamestonkTerminal | 7b40cfe61b32782e36f5de8a08d075532a08c294 | [
"MIT"
] | null | null | null | """CoinGecko view"""
__docformat__ = "numpy"
import argparse
from typing import List, Tuple
import pandas as pd
from pandas.plotting import register_matplotlib_converters
import matplotlib.pyplot as plt
from tabulate import tabulate
import mplfinance as mpf
from gamestonk_terminal.helper_funcs import (
parse_known_args_and_warn,
plot_autoscale,
)
from gamestonk_terminal.feature_flags import USE_ION as ion
import gamestonk_terminal.cryptocurrency.due_diligence.pycoingecko_model as gecko
from gamestonk_terminal.cryptocurrency.dataframe_helpers import wrap_text_in_df
register_matplotlib_converters()
# pylint: disable=inconsistent-return-statements
# pylint: disable=R0904, C0302
def load(other_args: List[str]):
    """Load a cryptocurrency from CoinGecko by symbol or coin id.

    Parameters
    ----------
    other_args : List[str]
        argparse arguments

    Returns
    -------
    gecko.Coin or None
        The loaded coin, or None when loading failed.
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="load",
        description="""Load cryptocurrency, from CoinGecko.
        You will have access to a lot of statistics on that coin like price data,
        coin development stats, social media and many others. Loading coin
        also will open access to technical analysis menu.""",
    )
    parser.add_argument(
        "-c",
        "--coin",
        required="-h" not in other_args,
        type=str,
        dest="coin",
        help="Coin to load data for (symbol or coin id). You can use either symbol of the coin or coinId"
        "You can find all coins using command `coins` or visit https://www.coingecko.com/en. "
        "To use load a coin use command load -c [symbol or coinId]",
    )

    try:
        # Allow "load btc" shorthand by injecting the -c flag.
        if other_args and "-" not in other_args[0]:
            other_args.insert(0, "-c")

        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        loaded_coin = gecko.Coin(ns_parser.coin)
        print("")
        return loaded_coin

    except KeyError:
        print(f"Could not find coin with the id: {ns_parser.coin}", "\n")
    except SystemExit:
        print("")
    except Exception as e:
        print(e, "\n")
    return None
def chart(coin: gecko.Coin, other_args: List[str]):
    """Plot a daily OHLC candlestick chart for the loaded cryptocurrency.

    Parameters
    ----------
    coin : gecko.Coin
        Cryptocurrency
    other_args : List[str]
        argparse arguments (``--vs`` quote currency, ``--days`` lookback)
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="chart",
        description="""
        Display chart for loaded coin. You can specify currency vs which you want
        to show chart and also number of days to get data for.
        By default currency: usd and days: 30.
        E.g. if you loaded in previous step Bitcoin and you want to see it's price vs ethereum
        in last 90 days range use `chart --vs eth --days 90`
        """,
    )
    parser.add_argument(
        "--vs", default="usd", dest="vs", help="Currency to display vs coin"
    )
    parser.add_argument(
        "-d", "--days", default=30, dest="days", help="Number of days to get data for"
    )

    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        if not ns_parser:
            return

        df = coin.get_coin_market_chart(ns_parser.vs, ns_parser.days)
        # Resample tick prices into daily OHLC candles; forward-fill gaps.
        df = df["price"].resample("1D").ohlc().ffill()
        df.columns = [
            "Open",
            "High",
            "Low",
            "Close",
        ]

        # BUG FIX: the original built `title` as a 1-tuple (trailing comma)
        # and then unwrapped it with an isinstance() check at the call site.
        # Build the plain string directly instead.
        title = (
            f"\n{coin.coin_symbol}/{ns_parser.vs} from {df.index[0].strftime('%Y/%m/%d')} "
            f"to {df.index[-1].strftime('%Y/%m/%d')}"
        )

        mpf.plot(
            df,
            type="candle",
            volume=False,
            title=title,
            xrotation=20,
            style="binance",
            figratio=(10, 7),
            figscale=1.10,
            figsize=(plot_autoscale()),
            update_width_config=dict(
                candle_linewidth=1.0, candle_width=0.8, volume_linewidth=1.0
            ),
        )

        if ion:
            plt.ion()

        plt.show()
        print("")

    except SystemExit:
        print("")
    except Exception as e:
        print(e, "\n")
def load_ta_data(coin: gecko.Coin, other_args: List[str]) -> Tuple[pd.DataFrame, str]:
    """Fetch daily OHLC price data for the technical-analysis menu.

    Parameters
    ----------
    coin : gecko.Coin
        Cryptocurrency
    other_args : List[str]
        argparse arguments

    Returns
    ----------
    Tuple[pd.DataFrame, str]
        dataframe with prices
        quoted currency
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="ta",
        description="""
        Loads data for technical analysis. You can specify currency vs which you want
        to show chart and also number of days to get data for.
        By default currency: usd and days: 30.
        E.g. if you loaded in previous step Bitcoin and you want to see it's price vs ethereum
        in last 90 days range use `ta --vs eth --days 90`
        """,
    )
    parser.add_argument(
        "--vs", default="usd", dest="vs", help="Currency to display vs coin"
    )
    parser.add_argument(
        "-d", "--days", default=30, dest="days", help="Number of days to get data for"
    )

    try:
        parsed = parse_known_args_and_warn(parser, other_args)
        if not parsed:
            return pd.DataFrame(), ""

        prices = coin.get_coin_market_chart(parsed.vs, parsed.days)
        # Resample tick prices into daily candles; forward-fill any gaps.
        candles = prices["price"].resample("1D").ohlc().ffill()
        candles.columns = [
            "Open",
            "High",
            "Low",
            "Close",
        ]
        candles.index.name = "date"
        return candles, parsed.vs

    except SystemExit:
        print("")
    except Exception as e:
        print(e, "\n")
    return pd.DataFrame(), ""
def info(coin: gecko.Coin, other_args: List[str]):
    """Print a table of basic facts about the loaded coin.

    Parameters
    ----------
    coin : gecko.Coin
        Cryptocurrency
    other_args : List[str]
        argparse arguments
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="info",
        description="""
            Shows basic information about loaded coin like:
            Name, Symbol, Description, Market Cap, Public Interest, Supply, and Price related metrics
            """,
    )

    try:
        parsed = parse_known_args_and_warn(parser, other_args)
        if not parsed:
            return

        # Wrap long text cells (e.g. the description) so the table stays readable.
        table = wrap_text_in_df(coin.base_info, w=80)
        rendered = tabulate(
            table,
            headers=table.columns,
            floatfmt=".2f",
            showindex=False,
            tablefmt="fancy_grid",
        )
        print(rendered, "\n")
    except SystemExit:
        print("")
    except Exception as e:
        print(e, "\n")
def web(coin: gecko.Coin, other_args: List[str]):
    """Print the websites (homepage, forum, announcements, ...) of the loaded coin.

    Parameters
    ----------
    coin : gecko.Coin
        Cryptocurrency
    other_args : List[str]
        argparse arguments
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="web",
        description="""Websites found for given Coin. You can find there urls to
        homepage, forum, announcement site and others.""",
    )

    try:
        parsed = parse_known_args_and_warn(parser, other_args)
        if not parsed:
            return

        table = coin.websites
        rendered = tabulate(
            table,
            headers=table.columns,
            floatfmt=".2f",
            showindex=False,
            tablefmt="fancy_grid",
        )
        print(rendered, "\n")
    except SystemExit:
        print("")
    except Exception as e:
        print(e, "\n")
def social(coin: gecko.Coin, other_args: List[str]):
    """Print the social-media channels of the loaded coin.

    Parameters
    ----------
    coin : gecko.Coin
        Cryptocurrency
    other_args : List[str]
        argparse arguments
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="social",
        description="""Shows social media corresponding to loaded coin. You can find there name of
        telegram channel, urls to twitter, reddit, bitcointalk, facebook and discord.""",
    )

    try:
        parsed = parse_known_args_and_warn(parser, other_args)
        if not parsed:
            return

        table = coin.social_media
        rendered = tabulate(
            table,
            headers=table.columns,
            floatfmt=".2f",
            showindex=False,
            tablefmt="fancy_grid",
        )
        print(rendered, "\n")
    except SystemExit:
        print("")
    except Exception as e:
        print(e, "\n")
def dev(coin: gecko.Coin, other_args: List[str]):
    """Print GitHub development statistics for the loaded coin.

    Parameters
    ----------
    coin : gecko.Coin
        Cryptocurrency
    other_args : List[str]
        argparse arguments
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="dev",
        description="""Developers data for loaded coin. If the development data is available you can see
        how the code development of given coin is going on.
        There are some statistics that shows number of stars, forks, subscribers, pull requests,
        commits, merges, contributors on github.""",
    )

    try:
        parsed = parse_known_args_and_warn(parser, other_args)
        if not parsed:
            return

        table = coin.developers_data
        rendered = tabulate(
            table,
            headers=table.columns,
            floatfmt=".2f",
            showindex=False,
            tablefmt="fancy_grid",
        )
        print(rendered, "\n")
    except SystemExit:
        print("")
    except Exception as e:
        print(e, "\n")
def ath(coin: gecko.Coin, other_args: List[str]):
    """Print the all-time-high statistics of the loaded coin.

    Parameters
    ----------
    coin : gecko.Coin
        Cryptocurrency
    other_args : List[str]
        argparse arguments (``--vs`` quote currency: usd or btc)
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="ath",
        description="""All time high data for loaded coin""",
    )
    parser.add_argument(
        "--vs", dest="vs", help="currency", default="usd", choices=["usd", "btc"]
    )

    try:
        parsed = parse_known_args_and_warn(parser, other_args)
        if not parsed:
            return

        table = coin.all_time_high(currency=parsed.vs)
        rendered = tabulate(
            table,
            headers=table.columns,
            floatfmt=".2f",
            showindex=False,
            tablefmt="fancy_grid",
        )
        print(rendered, "\n")
    except SystemExit:
        print("")
    except Exception as e:
        print(e, "\n")
def atl(coin: gecko.Coin, other_args: List[str]):
    """Print the all-time-low statistics of the loaded coin.

    Parameters
    ----------
    coin : gecko.Coin
        Cryptocurrency
    other_args : List[str]
        argparse arguments (``--vs`` quote currency: usd or btc)
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="atl",
        description="""All time low data for loaded coin""",
    )
    parser.add_argument(
        "--vs", dest="vs", help="currency", default="usd", choices=["usd", "btc"]
    )

    try:
        parsed = parse_known_args_and_warn(parser, other_args)
        if not parsed:
            return

        table = coin.all_time_low(currency=parsed.vs)
        rendered = tabulate(
            table,
            headers=table.columns,
            floatfmt=".2f",
            showindex=False,
            tablefmt="fancy_grid",
        )
        print(rendered, "\n")
    except SystemExit:
        print("")
    except Exception as e:
        print(e, "\n")
def score(coin: gecko.Coin, other_args: List[str]):
    """Print the different score metrics of the loaded coin.

    Parameters
    ----------
    coin : gecko.Coin
        Cryptocurrency
    other_args : List[str]
        argparse arguments
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="score",
        description="""
            In this view you can find different kind of scores for loaded coin.
            Those scores represents different rankings, sentiment metrics, some user stats and others.
            You will see CoinGecko scores, Developer Scores, Community Scores, Sentiment, Reddit scores
            and many others.
            """,
    )

    try:
        parsed = parse_known_args_and_warn(parser, other_args)
        if not parsed:
            return

        table = coin.scores
        rendered = tabulate(
            table,
            headers=table.columns,
            floatfmt=".2f",
            showindex=False,
            tablefmt="fancy_grid",
        )
        print(rendered, "\n")
    except SystemExit:
        print("")
    except Exception as e:
        print(e, "\n")
def bc(coin: gecko.Coin, other_args: List[str]):
    """Print blockchain-explorer URLs for the loaded coin.

    Parameters
    ----------
    coin : gecko.Coin
        Cryptocurrency
    other_args : List[str]
        argparse arguments
    """
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="bc",
        description="""
            Blockchain explorers URLs for loaded coin. Those are sites like etherescan.io or polkascan.io
            in which you can see all blockchain data e.g. all txs, all tokens, all contracts...
            """,
    )

    try:
        parsed = parse_known_args_and_warn(parser, other_args)
        if not parsed:
            return

        table = coin.blockchain_explorers
        rendered = tabulate(
            table,
            headers=table.columns,
            floatfmt=".2f",
            showindex=False,
            tablefmt="fancy_grid",
        )
        print(rendered, "\n")
    except SystemExit:
        print("")
    except Exception as e:
        print(e, "\n")
def market(coin: gecko.Coin, other_args: List[str]):
    """Shows market data for loaded coin

    Pretty-prints the ``market_data`` frame exposed by the coin object
    (the description mentions market cap, supply, price, volume, ...).

    Parameters
    ----------
    coin : gecko_coin.Coin
        Cryptocurrency
    other_args : List[str]
        argparse arguments
    """
    # The parser declares no options; it only carries the program name and
    # the description text for the command.
    parser = argparse.ArgumentParser(
        add_help=False,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
        prog="market",
        description="""
                        Market data for loaded coin. There you find metrics like:
                        Market Cap, Supply, Circulating Supply, Price, Volume and many others.
                        """,
    )
    try:
        ns_parser = parse_known_args_and_warn(parser, other_args)
        # A falsy result means parsing did not succeed; bail out quietly.
        if not ns_parser:
            return
        df = coin.market_data
        # Render the frame as a fancy-grid table, hiding the index and
        # formatting floats with two decimals.
        print(
            tabulate(
                df,
                headers=df.columns,
                floatfmt=".2f",
                showindex=False,
                tablefmt="fancy_grid",
            ),
            "\n",
        )
    except SystemExit:
        # SystemExit (presumably raised by argparse inside the helper) is
        # swallowed so the surrounding shell keeps running.
        print("")
    except Exception as e:
        # Broad catch mirrors the sibling commands (score, bc): errors are
        # reported but never propagated.
        print(e, "\n")
| 27.308197 | 117 | 0.551327 |
5985716e3511f569993e2ea970c450df3042b443 | 701 | py | Python | source/loaders/tploaders.py | rodsom22/gcn_refinement | b1b76811b145a2fa7e595cc6d131d75c0553d5a3 | [
"MIT"
] | 24 | 2020-05-04T20:24:35.000Z | 2022-03-21T07:57:02.000Z | source/loaders/tploaders.py | rodsom22/gcn_refinement | b1b76811b145a2fa7e595cc6d131d75c0553d5a3 | [
"MIT"
] | 3 | 2020-09-02T15:54:10.000Z | 2021-05-27T03:09:31.000Z | source/loaders/tploaders.py | rodsom22/gcn_refinement | b1b76811b145a2fa7e595cc6d131d75c0553d5a3 | [
"MIT"
] | 6 | 2020-08-03T21:01:37.000Z | 2021-02-04T02:24:46.000Z | """
Data loaders based on tensorpack
"""
import numpy as np
from utilities import nparrays as arrtools
| 29.208333 | 71 | 0.706134 |
5986324fbdcbaeae05e084715dcadf5d8b4991a3 | 1,199 | py | Python | app/stages/management/commands/import_stages_from_csv.py | guilloulouis/stage_medecine | 7ec9067402e510d812a375bbfe46f2ab545587f9 | [
"MIT"
] | null | null | null | app/stages/management/commands/import_stages_from_csv.py | guilloulouis/stage_medecine | 7ec9067402e510d812a375bbfe46f2ab545587f9 | [
"MIT"
] | null | null | null | app/stages/management/commands/import_stages_from_csv.py | guilloulouis/stage_medecine | 7ec9067402e510d812a375bbfe46f2ab545587f9 | [
"MIT"
] | 1 | 2021-04-30T16:38:19.000Z | 2021-04-30T16:38:19.000Z | # from django.core.management import BaseCommand
# import pandas as pd
#
# from stages.models import Category, Stage
#
#
# class Command(BaseCommand):
# help = 'Import a list of stage in the database'
#
# def add_arguments(self, parser):
# super(Command, self).add_arguments(parser)
# parser.add_argument(
# '--csv', dest='csv', default=None,
# help='Specify the csv file to parse',
# )
#
# def handle(self, *args, **options):
# csv = options.get('csv')
# csv_reader = pd.read_csv(csv)
# stages_to_create = []
# for index, item in csv_reader.iterrows():
# stage_raw = item['Stage']
# split = stage_raw.split('(')
# stage_name = split[0].strip()
# if len(split) > 1:
# category_name = split[1].replace(')', '').strip()
# category_object, created = Category.objects.get_or_create(name=category_name)
# else:
# category_object = None
# stages_to_create.append(Stage(name=stage_name, place_max=item['places'], category=category_object))
# Stage.objects.bulk_create(stages_to_create)
| 37.46875 | 113 | 0.584654 |
5986b5465c4c37fe33e19dc8df090df96c8f030d | 3,137 | py | Python | deep_learning/dl.py | remix-yh/moneycount | e8f35549ef96b8ebe6ca56417f0833f519179173 | [
"MIT"
] | null | null | null | deep_learning/dl.py | remix-yh/moneycount | e8f35549ef96b8ebe6ca56417f0833f519179173 | [
"MIT"
] | 7 | 2020-09-26T00:46:23.000Z | 2022-02-10T01:08:15.000Z | deep_learning/dl.py | remix-yh/moneycount | e8f35549ef96b8ebe6ca56417f0833f519179173 | [
"MIT"
] | null | null | null | import os
import io
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
from matplotlib.figure import Figure
from keras.applications.imagenet_utils import preprocess_input
from keras.backend.tensorflow_backend import set_session
from keras.preprocessing import image
import numpy as np
from scipy.misc import imread
import tensorflow as tf
from ssd_v2 import SSD300v2
from ssd_utils import BBoxUtility
voc_classes = ['10', '100', '5', 'Boat', 'Bottle',
'Bus', 'Car', 'Cat', 'Chair', 'Cow', 'Diningtable',
'Dog', 'Horse','Motorbike', 'Person', 'Pottedplant',
'Sheep', 'Sofa', 'Train', 'Tvmonitor']
NUM_CLASSES = len(voc_classes) + 1 | 31.37 | 95 | 0.667198 |
598974722569cb3c84cf300f7c787f22839c151a | 2,255 | py | Python | authors/tests/test_article_filters.py | andela/ah-backend-odin | 0e9ef1a10c8a3f6736999a5111736f7bd7236689 | [
"BSD-3-Clause"
] | null | null | null | authors/tests/test_article_filters.py | andela/ah-backend-odin | 0e9ef1a10c8a3f6736999a5111736f7bd7236689 | [
"BSD-3-Clause"
] | 43 | 2018-10-25T10:14:52.000Z | 2022-03-11T23:33:46.000Z | authors/tests/test_article_filters.py | andela/ah-backend-odin | 0e9ef1a10c8a3f6736999a5111736f7bd7236689 | [
"BSD-3-Clause"
] | 4 | 2018-10-29T07:04:58.000Z | 2020-04-02T14:15:10.000Z | from . import BaseAPITestCase
| 36.370968 | 75 | 0.640355 |
598d5551f035952fc6ef820f0bbd414d1bb129f0 | 720 | py | Python | myexporter/tcpexporter.py | abh15/flower | 7e1ab9393e0494f23df65bfa4f858cc35fea290e | [
"Apache-2.0"
] | null | null | null | myexporter/tcpexporter.py | abh15/flower | 7e1ab9393e0494f23df65bfa4f858cc35fea290e | [
"Apache-2.0"
] | null | null | null | myexporter/tcpexporter.py | abh15/flower | 7e1ab9393e0494f23df65bfa4f858cc35fea290e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
import subprocess
import time
from prometheus_client import start_http_server, Gauge
start_http_server(9200)
latencygauge = Gauge('tcprtt', 'provides rtt to fed server using ss',['cohort'])
while True:
stat, lbl= getstat()
latencygauge.labels(cohort=lbl).set(stat)
time.sleep(2) | 32.727273 | 112 | 0.590278 |
598f144f73e5a69e09521df868c498cc54751d48 | 516 | py | Python | tests/features/steps/roman.py | TestowanieAutomatyczneUG/laboratorium_14-maciejSzcz | b92186c574d3f21acd9f3e913e1a8ddcb5ec81fd | [
"MIT"
] | null | null | null | tests/features/steps/roman.py | TestowanieAutomatyczneUG/laboratorium_14-maciejSzcz | b92186c574d3f21acd9f3e913e1a8ddcb5ec81fd | [
"MIT"
] | null | null | null | tests/features/steps/roman.py | TestowanieAutomatyczneUG/laboratorium_14-maciejSzcz | b92186c574d3f21acd9f3e913e1a8ddcb5ec81fd | [
"MIT"
] | null | null | null | from behave import *
use_step_matcher("re")
| 24.571429 | 83 | 0.672481 |
599099e8cbd4ce7be2457cb90f171f8cb872d8d1 | 1,266 | py | Python | main.py | AbirLOUARD/AspiRobot | 0ea78bfd7c20f1371c01a0e912f5e92bed6648b7 | [
"MIT"
] | 1 | 2022-03-31T18:37:11.000Z | 2022-03-31T18:37:11.000Z | main.py | AbirLOUARD/AspiRobot | 0ea78bfd7c20f1371c01a0e912f5e92bed6648b7 | [
"MIT"
] | null | null | null | main.py | AbirLOUARD/AspiRobot | 0ea78bfd7c20f1371c01a0e912f5e92bed6648b7 | [
"MIT"
] | null | null | null | import functions
import Aspirobot
import time
import os
import Manoir
import Capteur
import Etat
import threading
import Case
from threading import Thread
manor_size = 5
gameIsRunning = True
clearConsole = lambda: os.system('cls' if os.name in ('nt', 'dos') else 'clear')
manoir = Manoir.Manoir(manor_size, manor_size)
caseRobot = Case.Case(1, 1)
agent = Aspirobot.Aspirobot(manoir, caseRobot)
manoir.draw()
"""
while (gameIsRunning):
clearConsole()
if (functions.shouldThereBeANewDirtySpace(dirtys_number)):
functions.generateDirt(manor_dirty)
dirtys_number += 1
if (functions.shouldThereBeANewLostJewel(jewels_number)):
functions.generateJewel(manor_jewel)
jewels_number += 1
functions.drawManor(manor_dirty, manor_jewel)
time.sleep(pause_length)
"""
for init in range(10):
manoir.initialisation()
init += 1
if __name__ == "__main__":
t1 = Thread(target = runAgent)
t2 = Thread(target = runManoir)
t1.setDaemon(True)
t2.setDaemon(True)
t1.start()
t2.start()
while True:
pass
| 21.827586 | 80 | 0.691153 |
599104a205da723279b528df24bd43e2dcb5bdbb | 1,169 | py | Python | docs/src/newsgroups_data.py | vishalbelsare/RLScore | 713f0a402f7a09e41a609f2ddcaf849b2021a0a7 | [
"MIT"
] | 61 | 2015-03-06T08:48:01.000Z | 2021-04-26T16:13:07.000Z | docs/src/newsgroups_data.py | andrecamara/RLScore | 713f0a402f7a09e41a609f2ddcaf849b2021a0a7 | [
"MIT"
] | 5 | 2016-09-08T15:47:00.000Z | 2019-02-25T17:44:55.000Z | docs/src/newsgroups_data.py | vishalbelsare/RLScore | 713f0a402f7a09e41a609f2ddcaf849b2021a0a7 | [
"MIT"
] | 31 | 2015-01-28T15:05:33.000Z | 2021-04-16T19:39:48.000Z | import numpy as np
from scipy import sparse as sp
from rlscore.utilities import multiclass
if __name__=="__main__":
print_stats()
| 30.763158 | 58 | 0.638152 |
59945bb43aee8c097a1605b49beb38bfd751d29b | 25 | py | Python | 1795.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
] | 6 | 2021-04-13T00:33:43.000Z | 2022-02-10T10:23:59.000Z | 1795.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
] | null | null | null | 1795.py | heltonricardo/URI | 160cca22d94aa667177c9ebf2a1c9864c5e55b41 | [
"MIT"
] | 3 | 2021-03-23T18:42:24.000Z | 2022-02-10T10:24:07.000Z | print(3 ** int(input()))
| 12.5 | 24 | 0.56 |
59962bcd6324fb181e2aeed2776a6d4ee13fa678 | 1,245 | py | Python | 5hours/14_dictionaries.py | matiasmasca/python | 7631583820d51e3132bdb793fed28cc83f4877a2 | [
"MIT"
] | null | null | null | 5hours/14_dictionaries.py | matiasmasca/python | 7631583820d51e3132bdb793fed28cc83f4877a2 | [
"MIT"
] | null | null | null | 5hours/14_dictionaries.py | matiasmasca/python | 7631583820d51e3132bdb793fed28cc83f4877a2 | [
"MIT"
] | null | null | null | # como los hash de ruby, guarda "clave" "valor"
# al igual que un diccionario, esta la Palabra, que es la clave y la defincin que seria el valor.
# las claves tienen que ser unicas
nombre_de_diccionario = {} #curly brackets.
monthConversions = {
"Jan": "January",
"Feb": "February",
"Mar": "March",
"Apr": "April",
"May": "May",
"Jun": "June",
"Jul": "July",
"Ago": "August",
"Sep": "September",
"Oct": "October",
"Nov": "November",
"Dic": "December",
}
# acceder a los valores del diccionario
# hay varias formas
# poner la clave entre brackets
print(monthConversions["Mar"])
# Get, permite definir que valor devuelve si no hay esa clave
print(monthConversions.get("Nov"))
print(monthConversions.get("Mat"))
print(monthConversions.get("Mat", "No es una clave valida"))
# Pueden ser claves pueden ser numericas, y los valores de diferentes tipos
monthConversions = {
1: ("January", "Enero", "Janeiro"), # un tupla
2: ["February", "Febrero", "Fevereiro"], #una lista
3: "March",
4: "April",
5: "May",
6: "June",
7: "July",
8: "August",
9: "September",
10: "October",
11: "November",
12: "December",
}
print(monthConversions[1])
print(monthConversions[1][1])
print(monthConversions[2][2])
| 23.055556 | 98 | 0.654618 |
599682564ad210bc55f3314403d4b2babc14038c | 578 | py | Python | tests/unit/test_runner.py | mariocj89/dothub | bcfdcc5a076e48a73c4e0827c56431522e4cc4ba | [
"MIT"
] | 12 | 2017-05-30T12:46:41.000Z | 2019-08-18T18:55:43.000Z | tests/unit/test_runner.py | mariocj89/dothub | bcfdcc5a076e48a73c4e0827c56431522e4cc4ba | [
"MIT"
] | 30 | 2017-07-10T19:28:35.000Z | 2021-11-22T11:09:25.000Z | tests/unit/test_runner.py | Mariocj89/dothub | bcfdcc5a076e48a73c4e0827c56431522e4cc4ba | [
"MIT"
] | 1 | 2017-08-02T21:04:43.000Z | 2017-08-02T21:04:43.000Z | from click.testing import CliRunner
from dothub.cli import dothub
base_args = ["--user=xxx", "--token=yyy"]
| 23.12 | 74 | 0.652249 |
5997a4ecb7f8086a5d0b295c0471521ff04b54f7 | 6,985 | py | Python | graph/__init__.py | worldwise001/stylometry | b5a4cc98fb8dfb6d1600d41bb15c96aeaf4ecb72 | [
"MIT"
] | 14 | 2015-02-24T16:14:07.000Z | 2022-02-19T21:49:55.000Z | graph/__init__.py | worldwise001/stylometry | b5a4cc98fb8dfb6d1600d41bb15c96aeaf4ecb72 | [
"MIT"
] | 1 | 2015-02-25T09:45:13.000Z | 2015-02-25T09:45:13.000Z | graph/__init__.py | worldwise001/stylometry | b5a4cc98fb8dfb6d1600d41bb15c96aeaf4ecb72 | [
"MIT"
] | 4 | 2015-11-20T10:47:11.000Z | 2021-03-30T13:14:20.000Z | import matplotlib
matplotlib.use('Agg')
import statsmodels.api as sm
import statsmodels.formula.api as smf
import numpy as np
from scipy.stats import linregress
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
| 30.502183 | 100 | 0.591553 |
59987eb32850dcd0908c67453364b8a38745fe6e | 68 | py | Python | tests/unit/test_thicket/test_finders.py | GabrielC101/filer | d506ed804d10891cea33c3884896b6f0dfa08b88 | [
"MIT"
] | null | null | null | tests/unit/test_thicket/test_finders.py | GabrielC101/filer | d506ed804d10891cea33c3884896b6f0dfa08b88 | [
"MIT"
] | 1 | 2017-12-19T19:38:22.000Z | 2017-12-19T19:38:22.000Z | tests/unit/test_thicket/test_finders.py | GabrielC101/filer | d506ed804d10891cea33c3884896b6f0dfa08b88 | [
"MIT"
] | null | null | null | from thicket import finders
| 11.333333 | 27 | 0.75 |
59995210d6ac282b5113ee3252c96de5a50256f9 | 2,251 | py | Python | test/test_component.py | gadalang/gada | 2dd4f4dfd5b7390c06307040cad23203a015f7a4 | [
"MIT"
] | null | null | null | test/test_component.py | gadalang/gada | 2dd4f4dfd5b7390c06307040cad23203a015f7a4 | [
"MIT"
] | null | null | null | test/test_component.py | gadalang/gada | 2dd4f4dfd5b7390c06307040cad23203a015f7a4 | [
"MIT"
] | 1 | 2021-06-15T13:52:33.000Z | 2021-06-15T13:52:33.000Z | __all__ = ["ComponentTestCase"]
import os
import sys
import yaml
import unittest
from gada import component
from test.utils import TestCaseBase
if __name__ == "__main__":
unittest.main()
| 32.157143 | 86 | 0.662372 |
599a3aac676f1bdb004c22bf7034b685260f3101 | 17,820 | py | Python | color pattern with threading.py | HashtagInnovator/Alpha-Star | f69a35b1924320dfec9610d6b61acae8d9de4afa | [
"Apache-2.0"
] | null | null | null | color pattern with threading.py | HashtagInnovator/Alpha-Star | f69a35b1924320dfec9610d6b61acae8d9de4afa | [
"Apache-2.0"
] | null | null | null | color pattern with threading.py | HashtagInnovator/Alpha-Star | f69a35b1924320dfec9610d6b61acae8d9de4afa | [
"Apache-2.0"
] | null | null | null | import time
import random
from multiprocessing import pool
from playsound import playsound
from threading import Thread
i = -1
l = 0
count = 0
print()
x = loops()
# DRIVER CODE
n = input("ENTER YOUR TEXT")
print("type any song name from here ...")
lis=["birth",'rider','standard','teri mitti me','chitrakaar']
print(lis)
#WE CAN ADD birthday and rider SONG HERE
thread=Thread(target=play)
thread.start()
time.sleep(7)
k = len(n)
aa,bb,cc,dd,ee,ff,gg,hh,ii,jj,kk,ll,mm,nn,oo,pp,qq,rr,ss,tt,uu,vv,ww,xx,yy,zz=0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0
s=0.5
list=[30,31,32,33,34,35,36,37]
color=0
# Render the banner five times.  Each letter keeps its own colour state:
# the first time a letter is seen it is assigned a random base ANSI colour
# code from `list` (the module-level list of codes, which shadows the
# builtin), and every occurrence afterwards bumps that code by one so
# repeated letters drift through neighbouring colours.
#
# This replaces the original 26-branch if/elif chain (one branch per
# letter, all structurally identical) with a single dict of colour state
# plus getattr() dispatch onto the per-letter glyph renderers x.A()..x.Z().
color_state = {}
for o in range(5):
    i = i + 1
    for ch in n:
        low = ch.lower()
        if low == " ":
            # A space advances past a glyph slot without drawing.
            x.loop()
            x.loop()
        elif "a" <= low <= "z":
            if low not in color_state:
                color_state[low] = random.choice(list)
            color_state[low] += 1
            # Switch the terminal foreground colour before drawing.
            print("\033[1;{}m".format(color_state[low]), end="")
            time.sleep(s)
            # Glyph renderers are named after the uppercase letter.
            getattr(x, low.upper())()
        # Any other character (digits, punctuation) is silently skipped,
        # matching the original chain which had no branch for them.
    print()
time.sleep(6)
print("\n"*8)
print('THANK YOU ', end='', flush=True)
for x in range(8):
for frame in r'-\|/-\|/':
print('\b', frame, sep='', end='', flush=True)
time.sleep(0.2)
print('\b ')
thread.join()
| 26.322009 | 129 | 0.306285 |
599abd70ab2405fa33e84f2920872f4103dff83c | 273 | py | Python | tests/conftest.py | eddyvdaker/FlaskSimpleStarter | 4992492ac1788d80e5914188f994b3e0ed1e75f4 | [
"MIT"
] | null | null | null | tests/conftest.py | eddyvdaker/FlaskSimpleStarter | 4992492ac1788d80e5914188f994b3e0ed1e75f4 | [
"MIT"
] | null | null | null | tests/conftest.py | eddyvdaker/FlaskSimpleStarter | 4992492ac1788d80e5914188f994b3e0ed1e75f4 | [
"MIT"
] | null | null | null | import pytest
from src.app import create_app
| 12.409091 | 32 | 0.6337 |
599c63fc42e3f63659183c30e8778ab397e4a872 | 2,533 | py | Python | amd64-linux/lib/pmon.py | qiyancos/Simics-3.0.31 | 9bd52d5abad023ee87a37306382a338abf7885f1 | [
"BSD-4-Clause",
"FSFAP"
] | 1 | 2020-06-15T10:41:18.000Z | 2020-06-15T10:41:18.000Z | amd64-linux/lib/pmon.py | qiyancos/Simics-3.0.31 | 9bd52d5abad023ee87a37306382a338abf7885f1 | [
"BSD-4-Clause",
"FSFAP"
] | null | null | null | amd64-linux/lib/pmon.py | qiyancos/Simics-3.0.31 | 9bd52d5abad023ee87a37306382a338abf7885f1 | [
"BSD-4-Clause",
"FSFAP"
] | 3 | 2020-08-10T10:25:02.000Z | 2021-09-12T01:12:09.000Z | # This file implements the PMON firmware's LEON2 boot setup. It does not
# implement the serial port boot loading, only the initial setup.
# The PMON firmware for the LEON2 comes with a number of preprocessor defines
# that the user typically changes to match the hardware configuration.
# The PMON emulation function takes all these parameters as function arguments,
# with the exception of the clock frequency, that is picked from the cpu.
import conf
from sim_core import *
| 51.693878 | 83 | 0.70075 |
599d3203f355bf0108b50dc6b8026b093b4736fc | 395 | py | Python | scripts/test_web3.py | AeneasHe/eth-brownie-enhance | e53995924ffb93239b9fab6c1c1a07e9166dd1c6 | [
"MIT"
] | 1 | 2021-10-04T23:34:14.000Z | 2021-10-04T23:34:14.000Z | scripts/test_web3.py | AeneasHe/eth-brownie-enhance | e53995924ffb93239b9fab6c1c1a07e9166dd1c6 | [
"MIT"
] | null | null | null | scripts/test_web3.py | AeneasHe/eth-brownie-enhance | e53995924ffb93239b9fab6c1c1a07e9166dd1c6 | [
"MIT"
] | null | null | null | import wpath
from web3 import Web3
from web3 import Web3, HTTPProvider, IPCProvider, WebsocketProvider
w3 = get_web3_by_http_rpc()
eth = w3.eth
r = eth.getBalance("0x3d32aA995FdD334c671C2d276345DE6fe2F46D88")
print(r)
| 18.809524 | 67 | 0.721519 |
599f0418376070df049179da7c8e1b8f17a142f2 | 834 | py | Python | models/sklearn_model.py | Ailln/stock-prediction | 9de77de5047446ffceeed83cb610c7edd2cb1ad3 | [
"MIT"
] | 11 | 2020-07-11T06:14:29.000Z | 2021-12-02T08:48:53.000Z | models/sklearn_model.py | HaveTwoBrush/stock-prediction | 9de77de5047446ffceeed83cb610c7edd2cb1ad3 | [
"MIT"
] | null | null | null | models/sklearn_model.py | HaveTwoBrush/stock-prediction | 9de77de5047446ffceeed83cb610c7edd2cb1ad3 | [
"MIT"
] | 8 | 2020-04-15T14:29:47.000Z | 2021-12-19T09:26:53.000Z | from sklearn import svm
from sklearn import ensemble
from sklearn import linear_model
| 37.909091 | 77 | 0.681055 |
59a09df4f04358386749f3598f84da0352793936 | 189 | py | Python | venv/Lib/site-packages/shiboken2/_config.py | gabistoian/Hide-Text-in-image | 88b5ef0bd2bcb0e222cfbc7abf6ac2b869f72ec5 | [
"X11"
] | null | null | null | venv/Lib/site-packages/shiboken2/_config.py | gabistoian/Hide-Text-in-image | 88b5ef0bd2bcb0e222cfbc7abf6ac2b869f72ec5 | [
"X11"
] | null | null | null | venv/Lib/site-packages/shiboken2/_config.py | gabistoian/Hide-Text-in-image | 88b5ef0bd2bcb0e222cfbc7abf6ac2b869f72ec5 | [
"X11"
] | null | null | null | shiboken_library_soversion = str(5.15)
version = "5.15.2.1"
version_info = (5, 15, 2.1, "", "")
__build_date__ = '2022-01-07T13:13:47+00:00'
__setup_py_package_version__ = '5.15.2.1'
| 15.75 | 44 | 0.671958 |
59a0a3b7aa59f29b5ba0e35ea23ff02112e179f9 | 1,023 | py | Python | 00Python/day05/basic02.py | HaoZhang95/PythonAndMachineLearning | b897224b8a0e6a5734f408df8c24846a98c553bf | [
"MIT"
] | 937 | 2019-05-08T08:46:25.000Z | 2022-03-31T12:56:07.000Z | 00Python/day05/basic02.py | Sakura-gh/Python24 | b97e18867264a0647d5645c7d757a0040e755577 | [
"MIT"
] | 47 | 2019-09-17T10:06:02.000Z | 2022-03-11T23:46:52.000Z | 00Python/day05/basic02.py | Sakura-gh/Python24 | b97e18867264a0647d5645c7d757a0040e755577 | [
"MIT"
] | 354 | 2019-05-10T02:15:26.000Z | 2022-03-30T05:52:57.000Z | """
list
sort()
reversed(list) list
"""
import random
a_list = []
for i in range(10):
a_list.append(random.randint(0, 200))
print(a_list)
a_list.sort()
print(a_list)
a_list.sort(reverse=True) #
print(a_list)
new_list = reversed(a_list) # [12,10,7,9] -> [9,7,10,12]
print(new_list)
"""
"""
school = [[], [], []]
teacher_list = list("ABCDEFGH")
for name in teacher_list:
index = random.randint(0,2)
school[index].append(name)
print(school)
"""
"", '', """"""
list[]
(),
tuple
"""
a_tuple = (1, 3.14, "Hello", True)
empty_tuple = ()
empty_tuple2 = tuple()
#
b_tuple = (1) # type = int
c_tuple = (1,) # type = tuple
"""
tuple
listcount index
"""
print(a_tuple[2])
# a_tuple[1] = "" tuple
print(a_tuple.count(1)) # 12 Ture1
print(a_tuple.index(3.14))
| 18.267857 | 60 | 0.641251 |
59a69dfbb3f7dfb97929bbbc436b9c105fe9fa48 | 1,643 | py | Python | ThreeBotPackages/unlock_service/scripts/restore.py | threefoldfoundation/tft-stellar | b36460e8dba547923778273b53fe4f0e06996db0 | [
"Apache-2.0"
] | 7 | 2020-02-05T16:10:46.000Z | 2021-04-28T10:39:20.000Z | ThreeBotPackages/unlock_service/scripts/restore.py | threefoldfoundation/tft-stellar | b36460e8dba547923778273b53fe4f0e06996db0 | [
"Apache-2.0"
] | 379 | 2020-01-13T10:22:21.000Z | 2022-03-23T08:59:57.000Z | ThreeBotPackages/unlock_service/scripts/restore.py | threefoldfoundation/tft-stellar | b36460e8dba547923778273b53fe4f0e06996db0 | [
"Apache-2.0"
] | 3 | 2020-01-24T09:56:44.000Z | 2020-08-03T21:02:38.000Z | #!/usr/bin/env python
# pylint: disable=no-value-for-parameter
import click
import os
import sys
import requests
import json
UNLOCK_SERVICE_DEFAULT_HOSTS = {"test": "https://testnet.threefold.io", "public": "https://tokenservices.threefold.io"}
if __name__ == "__main__":
import_unlockhash_transaction_data()
| 37.340909 | 120 | 0.684114 |
59a7951eb259bc0943a926370fa409960f8cba7c | 4,984 | py | Python | pgdiff/diff/PgDiffConstraints.py | Onapsis/pgdiff | ee9f618bc339cbfaf7967103e95f9650273550f8 | [
"MIT"
] | 2 | 2020-05-11T16:42:48.000Z | 2020-08-27T04:11:49.000Z | diff/PgDiffConstraints.py | Gesha3809/PgDiffPy | 00466429d0385eb999c32addcbe6e2746782cb5d | [
"MIT"
] | 1 | 2018-04-11T18:19:33.000Z | 2018-04-13T15:18:40.000Z | diff/PgDiffConstraints.py | Gesha3809/PgDiffPy | 00466429d0385eb999c32addcbe6e2746782cb5d | [
"MIT"
] | 1 | 2018-04-11T15:09:22.000Z | 2018-04-11T15:09:22.000Z | from PgDiffUtils import PgDiffUtils | 41.190083 | 99 | 0.58427 |
59a8688939bcf65bd9fa72756ce61831127d2530 | 7,715 | py | Python | experiments/old_code/result_scripts.py | hytsang/cs-ranking | 241626a6a100a27b96990b4f199087a6dc50dcc0 | [
"Apache-2.0"
] | null | null | null | experiments/old_code/result_scripts.py | hytsang/cs-ranking | 241626a6a100a27b96990b4f199087a6dc50dcc0 | [
"Apache-2.0"
] | null | null | null | experiments/old_code/result_scripts.py | hytsang/cs-ranking | 241626a6a100a27b96990b4f199087a6dc50dcc0 | [
"Apache-2.0"
] | 1 | 2018-10-30T08:57:14.000Z | 2018-10-30T08:57:14.000Z | import inspect
import logging
import os
from itertools import product
import numpy as np
import pandas as pd
from skopt import load, dump
from csrank.constants import OBJECT_RANKING
from csrank.util import files_with_same_name, create_dir_recursively, rename_file_if_exist
from experiments.util import dataset_options_dict, rankers_dict, lp_metric_dict
DIR_NAME = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
if __name__ == '__main__':
configure_logging()
dataset_options = dataset_options_dict[OBJECT_RANKING]
ranker_options = rankers_dict[OBJECT_RANKING]
metric_names = list(lp_metric_dict[OBJECT_RANKING].keys())
remove_redundant_results()
create_concise_results()
# create_concise_results(result_directory='logs_new_experiments', directory='logs_new_experiments')
| 43.835227 | 119 | 0.608425 |
59a98cedbef2ddabf9e787d32a317a09b1db8b5e | 13,108 | py | Python | notochord/features/BagOfWords.py | jroose/notochord | da9a6ff5d0fabbf0694d0bee1b81a240b66fa006 | [
"MIT"
] | null | null | null | notochord/features/BagOfWords.py | jroose/notochord | da9a6ff5d0fabbf0694d0bee1b81a240b66fa006 | [
"MIT"
] | null | null | null | notochord/features/BagOfWords.py | jroose/notochord | da9a6ff5d0fabbf0694d0bee1b81a240b66fa006 | [
"MIT"
] | null | null | null | from .. import schema, App, QueryCache, batcher, grouper, insert_ignore, export, lookup, persist, lookup_or_persist, ABCArgumentGroup, WorkOrderArgs, filter_widgets, temptable_scope, FeatureCache
from ..ObjectStore import ABCObjectStore
from sqlalchemy import Column, Integer, String, Float, ForeignKey, UnicodeText, Unicode, LargeBinary, Boolean, Index
import collections
import csv
import os
import re
import sqlalchemy
import sys
import tempfile
import time
import stat
from sklearn.feature_extraction.text import CountVectorizer
re_word = re.compile(r'[a-zA-Z]+')
__all__ = []
if __name__ == "__main__":
A = BagOfWords.from_args(sys.argv[1:])
A.run()
| 51.403922 | 251 | 0.580333 |
59ac1cf688342acfde23c07e10ca2e33caf1f078 | 450 | py | Python | trains/ATIO.py | Columbine21/TFR-Net | 1da01577542e7f477fdf7323ec0696aebc632357 | [
"MIT"
] | 7 | 2021-11-19T01:32:01.000Z | 2021-12-16T11:42:44.000Z | trains/ATIO.py | Columbine21/TFR-Net | 1da01577542e7f477fdf7323ec0696aebc632357 | [
"MIT"
] | 2 | 2021-11-25T08:28:08.000Z | 2021-12-29T08:42:55.000Z | trains/ATIO.py | Columbine21/TFR-Net | 1da01577542e7f477fdf7323ec0696aebc632357 | [
"MIT"
] | 1 | 2021-12-02T09:42:51.000Z | 2021-12-02T09:42:51.000Z | """
AIO -- All Trains in One
"""
from trains.baselines import *
from trains.missingTask import *
__all__ = ['ATIO']
| 19.565217 | 59 | 0.52 |
59ac4ecc150b88338555999e74b36af7366e76c2 | 271 | py | Python | method/boardInfo.py | gary920209/LightDance-RPi | 41d3ef536f3874fd5dbe092f5c9be42f7204427d | [
"MIT"
] | 2 | 2020-11-14T17:13:55.000Z | 2020-11-14T17:42:39.000Z | method/boardInfo.py | gary920209/LightDance-RPi | 41d3ef536f3874fd5dbe092f5c9be42f7204427d | [
"MIT"
] | null | null | null | method/boardInfo.py | gary920209/LightDance-RPi | 41d3ef536f3874fd5dbe092f5c9be42f7204427d | [
"MIT"
] | null | null | null | import os
from .baseMethod import BaseMethod
# BoardInfo
| 19.357143 | 78 | 0.553506 |
59ad06dd6ba9abadeea6a1f889a37f3edb2cafd7 | 4,928 | py | Python | split_data.py | Anchorboy/PR_FinalProject | e744723c9c9dd55e6995ae5929eb45f90c70819b | [
"MIT"
] | null | null | null | split_data.py | Anchorboy/PR_FinalProject | e744723c9c9dd55e6995ae5929eb45f90c70819b | [
"MIT"
] | null | null | null | split_data.py | Anchorboy/PR_FinalProject | e744723c9c9dd55e6995ae5929eb45f90c70819b | [
"MIT"
] | null | null | null | import os
import cv2
import random
import shutil
import numpy as np
if __name__ == "__main__":
current_base = os.path.abspath('.')
input_base = os.path.join(current_base, 'data')
split_img(input_base)
# train_all, test_all = read_data()
# print train_all | 36.503704 | 126 | 0.631494 |
59adc6e4725be00b3a4565680e9bf5a9aec1470e | 2,507 | py | Python | src/eval_command.py | luoyan407/n-reference | f486b639dc824d296fe0e5ab7a4959e2aef7504c | [
"MIT"
] | 7 | 2020-07-14T02:50:13.000Z | 2021-05-11T05:50:51.000Z | src/eval_command.py | luoyan407/n-reference | f486b639dc824d296fe0e5ab7a4959e2aef7504c | [
"MIT"
] | 1 | 2020-12-29T07:25:00.000Z | 2021-01-05T01:15:47.000Z | src/eval_command.py | luoyan407/n-reference | f486b639dc824d296fe0e5ab7a4959e2aef7504c | [
"MIT"
] | 3 | 2021-02-25T13:58:01.000Z | 2021-08-10T05:49:27.000Z | import os, sys
srcFolder = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'src')
sys.path.append(srcFolder)
from metrics import nss
from metrics import auc
from metrics import cc
from utils import *
import numpy as np
import argparse
parser = argparse.ArgumentParser(description='Evaluate predicted saliency map')
parser.add_argument('--output', type=str, default='')
parser.add_argument('--fixation_folder', type=str, default='')
parser.add_argument('--salmap_folder', type=str, default='')
parser.add_argument('--split_file', type=str, default='')
parser.add_argument('--fxt_loc_name', type=str, default='fixationPts')
parser.add_argument('--fxt_size', type=str, default='',
help='fixation resolution: (600, 800) | (480, 640) | (320, 640)')
parser.add_argument('--appendix', type=str, default='')
parser.add_argument('--file_extension', type=str, default='jpg')
args = parser.parse_args()
if args.fxt_size != '':
spl_tokens = args.fxt_size.split()
args.fxt_size = (int(spl_tokens[0]), int(spl_tokens[1]))
else:
args.fxt_size = (480, 640)
fixation_folder = args.fixation_folder
salmap_folder = args.salmap_folder
fxtimg_type = detect_images_type(fixation_folder)
split_file = args.split_file
if split_file != '' and os.path.isfile(split_file):
npzfile = np.load(split_file)
salmap_names = [os.path.join(salmap_folder, x) for x in npzfile['val_imgs']]
gtsal_names = [os.path.join(fixation_folder, x[:x.find('.')+1]+fxtimg_type) for x in npzfile['val_imgs']]
fxtpts_names = [os.path.join(fixation_folder, '{}mat'.format(x[:x.find('.')+1])) for x in npzfile['val_imgs']]
else:
salmap_names = load_allimages_list(salmap_folder)
gtsal_names = []
fxtpts_names = []
for sn in salmap_names:
file_name = sn.split('/')[-1]
gtsal_names.append(os.path.join(fixation_folder,'{}{}'.format(file_name[:file_name.find('.')+1], fxtimg_type)))
fxtpts_names.append(os.path.join(fixation_folder,'{}mat'.format(file_name[:file_name.find('.')+1])))
nss_score, _ = nss.compute_score(salmap_names, fxtpts_names, image_size=args.fxt_size, fxt_field_in_mat=args.fxt_loc_name)
cc_score, _ = cc.compute_score(salmap_names, gtsal_names, image_size=args.fxt_size)
auc_score, _ = auc.compute_score(salmap_names, fxtpts_names, image_size=args.fxt_size, fxt_field_in_mat=args.fxt_loc_name)
with open(args.output, 'a') as f:
f.write('{:0.4f}, {:0.4f}, {:0.4f}{}\n'.format(
nss_score, auc_score, cc_score, args.appendix)) | 45.581818 | 122 | 0.717591 |
59aea2d28a91ba70d32d02acede77adbfb29d245 | 482 | py | Python | hieroskopia/utils/evaluator.py | AlbCM/hieroskopia | 59ab7c9c4bb9315b84cd3b184dfc82c3d565e556 | [
"MIT"
] | null | null | null | hieroskopia/utils/evaluator.py | AlbCM/hieroskopia | 59ab7c9c4bb9315b84cd3b184dfc82c3d565e556 | [
"MIT"
] | null | null | null | hieroskopia/utils/evaluator.py | AlbCM/hieroskopia | 59ab7c9c4bb9315b84cd3b184dfc82c3d565e556 | [
"MIT"
] | null | null | null | from pandas import Series
| 30.125 | 90 | 0.690871 |
59af05716663597c09c673680d272fcbf76c4851 | 294 | py | Python | Graficos/grafico_barras.py | brendacgoncalves97/Graficos | 250715bf8a0be9b9d39116be396d84512c79d45f | [
"MIT"
] | 1 | 2021-07-14T13:33:02.000Z | 2021-07-14T13:33:02.000Z | Graficos/grafico_barras.py | brendacgoncalves97/Graficos | 250715bf8a0be9b9d39116be396d84512c79d45f | [
"MIT"
] | null | null | null | Graficos/grafico_barras.py | brendacgoncalves97/Graficos | 250715bf8a0be9b9d39116be396d84512c79d45f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Importao da biblioteca
import matplotlib.pyplot as plt
x = [1, 2, 3, 4, 5]
y = [2, 3, 7, 1, 0]
titulo = "Grfico de barras"
eixoX = "EixoX"
eixoY = "EixoY"
# Legendas
plt.title(titulo)
plt.xlabel(eixoX)
plt.ylabel(eixoY)
plt.bar(x, y)
plt.show() | 16.333333 | 32 | 0.602041 |
59afd173c9893de34534a54b0f3445d6fe88b945 | 7,189 | py | Python | fonts/Org_01.py | cnobile2012/Python-TFT | 812a87e6f694eae338c3d9579ea98eae636f8f99 | [
"MIT"
] | null | null | null | fonts/Org_01.py | cnobile2012/Python-TFT | 812a87e6f694eae338c3d9579ea98eae636f8f99 | [
"MIT"
] | null | null | null | fonts/Org_01.py | cnobile2012/Python-TFT | 812a87e6f694eae338c3d9579ea98eae636f8f99 | [
"MIT"
] | null | null | null | # Org_v01 by Orgdot (www.orgdot.com/aliasfonts). A tiny,
# stylized font with all characters within a 6 pixel height.
Org_01Bitmaps = [
0xE8, 0xA0, 0x57, 0xD5, 0xF5, 0x00, 0xFD, 0x3E, 0x5F, 0x80, 0x88, 0x88,
0x88, 0x80, 0xF4, 0xBF, 0x2E, 0x80, 0x80, 0x6A, 0x40, 0x95, 0x80, 0xAA,
0x80, 0x5D, 0x00, 0xC0, 0xF0, 0x80, 0x08, 0x88, 0x88, 0x00, 0xFC, 0x63,
0x1F, 0x80, 0xF8, 0xF8, 0x7F, 0x0F, 0x80, 0xF8, 0x7E, 0x1F, 0x80, 0x8C,
0x7E, 0x10, 0x80, 0xFC, 0x3E, 0x1F, 0x80, 0xFC, 0x3F, 0x1F, 0x80, 0xF8,
0x42, 0x10, 0x80, 0xFC, 0x7F, 0x1F, 0x80, 0xFC, 0x7E, 0x1F, 0x80, 0x90,
0xB0, 0x2A, 0x22, 0xF0, 0xF0, 0x88, 0xA8, 0xF8, 0x4E, 0x02, 0x00, 0xFD,
0x6F, 0x0F, 0x80, 0xFC, 0x7F, 0x18, 0x80, 0xF4, 0x7D, 0x1F, 0x00, 0xFC,
0x21, 0x0F, 0x80, 0xF4, 0x63, 0x1F, 0x00, 0xFC, 0x3F, 0x0F, 0x80, 0xFC,
0x3F, 0x08, 0x00, 0xFC, 0x2F, 0x1F, 0x80, 0x8C, 0x7F, 0x18, 0x80, 0xF9,
0x08, 0x4F, 0x80, 0x78, 0x85, 0x2F, 0x80, 0x8D, 0xB1, 0x68, 0x80, 0x84,
0x21, 0x0F, 0x80, 0xFD, 0x6B, 0x5A, 0x80, 0xFC, 0x63, 0x18, 0x80, 0xFC,
0x63, 0x1F, 0x80, 0xFC, 0x7F, 0x08, 0x00, 0xFC, 0x63, 0x3F, 0x80, 0xFC,
0x7F, 0x29, 0x00, 0xFC, 0x3E, 0x1F, 0x80, 0xF9, 0x08, 0x42, 0x00, 0x8C,
0x63, 0x1F, 0x80, 0x8C, 0x62, 0xA2, 0x00, 0xAD, 0x6B, 0x5F, 0x80, 0x8A,
0x88, 0xA8, 0x80, 0x8C, 0x54, 0x42, 0x00, 0xF8, 0x7F, 0x0F, 0x80, 0xEA,
0xC0, 0x82, 0x08, 0x20, 0x80, 0xD5, 0xC0, 0x54, 0xF8, 0x80, 0xF1, 0xFF,
0x8F, 0x99, 0xF0, 0xF8, 0x8F, 0x1F, 0x99, 0xF0, 0xFF, 0x8F, 0x6B, 0xA4,
0xF9, 0x9F, 0x10, 0x8F, 0x99, 0x90, 0xF0, 0x55, 0xC0, 0x8A, 0xF9, 0x90,
0xF8, 0xFD, 0x63, 0x10, 0xF9, 0x99, 0xF9, 0x9F, 0xF9, 0x9F, 0x80, 0xF9,
0x9F, 0x20, 0xF8, 0x88, 0x47, 0x1F, 0x27, 0xC8, 0x42, 0x00, 0x99, 0x9F,
0x99, 0x97, 0x8C, 0x6B, 0xF0, 0x96, 0x69, 0x99, 0x9F, 0x10, 0x2E, 0x8F,
0x2B, 0x22, 0xF8, 0x89, 0xA8, 0x0F, 0xE0 ]
Org_01Glyphs = [
[ 0, 0, 0, 6, 0, 1 ], # 0x20 ' '
[ 0, 1, 5, 2, 0, -4 ], # 0x21 '!'
[ 1, 3, 1, 4, 0, -4 ], # 0x22 '"'
[ 2, 5, 5, 6, 0, -4 ], # 0x23 '#'
[ 6, 5, 5, 6, 0, -4 ], # 0x24 '$'
[ 10, 5, 5, 6, 0, -4 ], # 0x25 '%'
[ 14, 5, 5, 6, 0, -4 ], # 0x26 '&'
[ 18, 1, 1, 2, 0, -4 ], # 0x27 '''
[ 19, 2, 5, 3, 0, -4 ], # 0x28 '('
[ 21, 2, 5, 3, 0, -4 ], # 0x29 ')'
[ 23, 3, 3, 4, 0, -3 ], # 0x2A '#'
[ 25, 3, 3, 4, 0, -3 ], # 0x2B '+'
[ 27, 1, 2, 2, 0, 0 ], # 0x2C ','
[ 28, 4, 1, 5, 0, -2 ], # 0x2D '-'
[ 29, 1, 1, 2, 0, 0 ], # 0x2E '.'
[ 30, 5, 5, 6, 0, -4 ], # 0x2F '/'
[ 34, 5, 5, 6, 0, -4 ], # 0x30 '0'
[ 38, 1, 5, 2, 0, -4 ], # 0x31 '1'
[ 39, 5, 5, 6, 0, -4 ], # 0x32 '2'
[ 43, 5, 5, 6, 0, -4 ], # 0x33 '3'
[ 47, 5, 5, 6, 0, -4 ], # 0x34 '4'
[ 51, 5, 5, 6, 0, -4 ], # 0x35 '5'
[ 55, 5, 5, 6, 0, -4 ], # 0x36 '6'
[ 59, 5, 5, 6, 0, -4 ], # 0x37 '7'
[ 63, 5, 5, 6, 0, -4 ], # 0x38 '8'
[ 67, 5, 5, 6, 0, -4 ], # 0x39 '9'
[ 71, 1, 4, 2, 0, -3 ], # 0x3A ':'
[ 72, 1, 4, 2, 0, -3 ], # 0x3B ''
[ 73, 3, 5, 4, 0, -4 ], # 0x3C '<'
[ 75, 4, 3, 5, 0, -3 ], # 0x3D '='
[ 77, 3, 5, 4, 0, -4 ], # 0x3E '>'
[ 79, 5, 5, 6, 0, -4 ], # 0x3F '?'
[ 83, 5, 5, 6, 0, -4 ], # 0x40 '@'
[ 87, 5, 5, 6, 0, -4 ], # 0x41 'A'
[ 91, 5, 5, 6, 0, -4 ], # 0x42 'B'
[ 95, 5, 5, 6, 0, -4 ], # 0x43 'C'
[ 99, 5, 5, 6, 0, -4 ], # 0x44 'D'
[ 103, 5, 5, 6, 0, -4 ], # 0x45 'E'
[ 107, 5, 5, 6, 0, -4 ], # 0x46 'F'
[ 111, 5, 5, 6, 0, -4 ], # 0x47 'G'
[ 115, 5, 5, 6, 0, -4 ], # 0x48 'H'
[ 119, 5, 5, 6, 0, -4 ], # 0x49 'I'
[ 123, 5, 5, 6, 0, -4 ], # 0x4A 'J'
[ 127, 5, 5, 6, 0, -4 ], # 0x4B 'K'
[ 131, 5, 5, 6, 0, -4 ], # 0x4C 'L'
[ 135, 5, 5, 6, 0, -4 ], # 0x4D 'M'
[ 139, 5, 5, 6, 0, -4 ], # 0x4E 'N'
[ 143, 5, 5, 6, 0, -4 ], # 0x4F 'O'
[ 147, 5, 5, 6, 0, -4 ], # 0x50 'P'
[ 151, 5, 5, 6, 0, -4 ], # 0x51 'Q'
[ 155, 5, 5, 6, 0, -4 ], # 0x52 'R'
[ 159, 5, 5, 6, 0, -4 ], # 0x53 'S'
[ 163, 5, 5, 6, 0, -4 ], # 0x54 'T'
[ 167, 5, 5, 6, 0, -4 ], # 0x55 'U'
[ 171, 5, 5, 6, 0, -4 ], # 0x56 'V'
[ 175, 5, 5, 6, 0, -4 ], # 0x57 'W'
[ 179, 5, 5, 6, 0, -4 ], # 0x58 'X'
[ 183, 5, 5, 6, 0, -4 ], # 0x59 'Y'
[ 187, 5, 5, 6, 0, -4 ], # 0x5A 'Z'
[ 191, 2, 5, 3, 0, -4 ], # 0x5B '['
[ 193, 5, 5, 6, 0, -4 ], # 0x5C '\'
[ 197, 2, 5, 3, 0, -4 ], # 0x5D ']'
[ 199, 3, 2, 4, 0, -4 ], # 0x5E '^'
[ 200, 5, 1, 6, 0, 1 ], # 0x5F '_'
[ 201, 1, 1, 2, 0, -4 ], # 0x60 '`'
[ 202, 4, 4, 5, 0, -3 ], # 0x61 'a'
[ 204, 4, 5, 5, 0, -4 ], # 0x62 'b'
[ 207, 4, 4, 5, 0, -3 ], # 0x63 'c'
[ 209, 4, 5, 5, 0, -4 ], # 0x64 'd'
[ 212, 4, 4, 5, 0, -3 ], # 0x65 'e'
[ 214, 3, 5, 4, 0, -4 ], # 0x66 'f'
[ 216, 4, 5, 5, 0, -3 ], # 0x67 'g'
[ 219, 4, 5, 5, 0, -4 ], # 0x68 'h'
[ 222, 1, 4, 2, 0, -3 ], # 0x69 'i'
[ 223, 2, 5, 3, 0, -3 ], # 0x6A 'j'
[ 225, 4, 5, 5, 0, -4 ], # 0x6B 'k'
[ 228, 1, 5, 2, 0, -4 ], # 0x6C 'l'
[ 229, 5, 4, 6, 0, -3 ], # 0x6D 'm'
[ 232, 4, 4, 5, 0, -3 ], # 0x6E 'n'
[ 234, 4, 4, 5, 0, -3 ], # 0x6F 'o'
[ 236, 4, 5, 5, 0, -3 ], # 0x70 'p'
[ 239, 4, 5, 5, 0, -3 ], # 0x71 'q'
[ 242, 4, 4, 5, 0, -3 ], # 0x72 'r'
[ 244, 4, 4, 5, 0, -3 ], # 0x73 's'
[ 246, 5, 5, 6, 0, -4 ], # 0x74 't'
[ 250, 4, 4, 5, 0, -3 ], # 0x75 'u'
[ 252, 4, 4, 5, 0, -3 ], # 0x76 'v'
[ 254, 5, 4, 6, 0, -3 ], # 0x77 'w'
[ 257, 4, 4, 5, 0, -3 ], # 0x78 'x'
[ 259, 4, 5, 5, 0, -3 ], # 0x79 'y'
[ 262, 4, 4, 5, 0, -3 ], # 0x7A 'z'
[ 264, 3, 5, 4, 0, -4 ], # 0x7B '['
[ 266, 1, 5, 2, 0, -4 ], # 0x7C '|'
[ 267, 3, 5, 4, 0, -4 ], # 0x7D ']'
[ 269, 5, 3, 6, 0, -3 ] ] # 0x7E '~'
Org_01 = [
Org_01Bitmaps,
Org_01Glyphs,
0x20, 0x7E, 7 ]
# Approx. 943 bytes
| 54.462121 | 75 | 0.335791 |
59b00b4f37a6f1b8e5b3f8e0512fea304aa3d6eb | 411 | py | Python | vaas-app/src/vaas/manager/migrations/0005_director_service_mesh_label.py | allegro/vaas | 3d2d1f1a9dae6ac69a13563a37f9bfdf4f986ae2 | [
"Apache-2.0"
] | 251 | 2015-09-02T10:50:51.000Z | 2022-03-16T08:00:35.000Z | vaas-app/src/vaas/manager/migrations/0005_director_service_mesh_label.py | allegro/vaas | 3d2d1f1a9dae6ac69a13563a37f9bfdf4f986ae2 | [
"Apache-2.0"
] | 154 | 2015-09-02T14:54:08.000Z | 2022-03-16T08:34:17.000Z | vaas-app/src/vaas/manager/migrations/0005_director_service_mesh_label.py | allegro/vaas | 3d2d1f1a9dae6ac69a13563a37f9bfdf4f986ae2 | [
"Apache-2.0"
] | 31 | 2015-09-03T07:51:05.000Z | 2020-09-24T09:02:40.000Z | # Generated by Django 3.1.8 on 2021-05-26 11:09
from django.db import migrations, models
| 21.631579 | 63 | 0.610706 |
59b0bbb7000e474ae515947910be3e63863f01d7 | 1,053 | py | Python | api/resources_portal/models/material_share_event.py | arielsvn/resources-portal | f5a25935e45ceb05e2f4738f567eec9ca8793441 | [
"BSD-3-Clause"
] | null | null | null | api/resources_portal/models/material_share_event.py | arielsvn/resources-portal | f5a25935e45ceb05e2f4738f567eec9ca8793441 | [
"BSD-3-Clause"
] | null | null | null | api/resources_portal/models/material_share_event.py | arielsvn/resources-portal | f5a25935e45ceb05e2f4738f567eec9ca8793441 | [
"BSD-3-Clause"
] | null | null | null | from django.db import models
from resources_portal.models.material import Material
from resources_portal.models.user import User
| 25.682927 | 96 | 0.687559 |
59b0fd13274223a0798e641585901c741c9e0720 | 1,941 | py | Python | datasets/nhse_stats/topics/archived_flu.py | nhsengland/publish-o-matic | dc8f16cb83a2360989afa44d887e63b5cde6af29 | [
"MIT"
] | null | null | null | datasets/nhse_stats/topics/archived_flu.py | nhsengland/publish-o-matic | dc8f16cb83a2360989afa44d887e63b5cde6af29 | [
"MIT"
] | 11 | 2015-03-02T16:30:20.000Z | 2016-11-29T12:16:15.000Z | datasets/nhse_stats/topics/archived_flu.py | nhsengland/publish-o-matic | dc8f16cb83a2360989afa44d887e63b5cde6af29 | [
"MIT"
] | 2 | 2020-12-25T20:38:31.000Z | 2021-04-11T07:35:01.000Z | """ Archived flu data
http://webarchive.nationalarchives.gov.uk/20130107105354/http://www.dh.gov.uk/en/Publicationsandstatistics/Statistics/Performancedataandstatistics/DailySituationReports/index.htm
"""
import collections
import calendar
import datetime
import re
import urllib
from lxml.html import fromstring, tostring
import requests
import slugify
from publish.lib.helpers import to_markdown, anchor_to_resource, get_dom, hd
ROOT = "http://webarchive.nationalarchives.gov.uk/20130107105354/http://www.dh.gov.uk/en/Publicationsandstatistics/Statistics/Performancedataandstatistics/DailySituationReports/index.htm"
DESCRIPTION = None | 37.326923 | 187 | 0.725399 |
59b1e67d3ab1f07d0144d6c862fa57ed097c01dd | 213 | py | Python | expenda_api/monthly_budgets/serializers.py | ihsaro/Expenda | 5eb9115da633b025bd7d2f294deaecdc20674281 | [
"Apache-2.0"
] | null | null | null | expenda_api/monthly_budgets/serializers.py | ihsaro/Expenda | 5eb9115da633b025bd7d2f294deaecdc20674281 | [
"Apache-2.0"
] | null | null | null | expenda_api/monthly_budgets/serializers.py | ihsaro/Expenda | 5eb9115da633b025bd7d2f294deaecdc20674281 | [
"Apache-2.0"
] | null | null | null | from rest_framework.serializers import ModelSerializer
from .models import MonthlyBudget
| 21.3 | 54 | 0.760563 |
59ba9203063b76fa754fc6f24d65541dacb224e0 | 2,786 | py | Python | features/steps/new-providers.py | lilydartdev/ppe-inventory | aaec9839fe324a3f96255756c15de45853bbb940 | [
"MIT"
] | 2 | 2020-10-06T11:33:02.000Z | 2021-10-10T13:10:12.000Z | features/steps/new-providers.py | foundry4/ppe-inventory | 1ee782aeec5bd3cd0140480f9bf58396eb11403b | [
"MIT"
] | 1 | 2020-04-23T22:19:17.000Z | 2020-04-23T22:19:17.000Z | features/steps/new-providers.py | foundry4/ppe-inventory | 1ee782aeec5bd3cd0140480f9bf58396eb11403b | [
"MIT"
] | 3 | 2020-05-26T11:41:40.000Z | 2020-06-29T08:53:34.000Z | from behave import *
from google.cloud import datastore
import os
import uuid
import pandas as pd
| 34.395062 | 117 | 0.693108 |
59bafbd060c805be29e0312f879c03efc18325bc | 2,137 | py | Python | params.py | adarshchbs/disentanglement | 77e74409cd0220dbfd9e2809688500dcb2ecf5a5 | [
"MIT"
] | null | null | null | params.py | adarshchbs/disentanglement | 77e74409cd0220dbfd9e2809688500dcb2ecf5a5 | [
"MIT"
] | null | null | null | params.py | adarshchbs/disentanglement | 77e74409cd0220dbfd9e2809688500dcb2ecf5a5 | [
"MIT"
] | null | null | null | import os
gpu_flag = False
gpu_name = 'cpu'
x_dim = 2048
num_class = 87
num_query = 5
batch_size = 84
eval_batch_size = 128
glove_dim = 200
pretrain_lr = 1e-4
num_epochs_pretrain = 20
eval_step_pre = 1
fusion_iter_len = 100000
# num_epochs_pretrain = 30
num_epochs_style = 30
num_epochs_fusion = 50
log_step_pre = 60
folder_path = os.path.dirname(os.path.realpath(__file__)) + '/'
path_class_list = folder_path + 'extra/common_class_list.txt'
dir_saved_model = folder_path + 'saved_model_qd/'
dir_saved_feature = folder_path + 'saved_features_qd/'
dir_dataset = folder_path + 'dataset/'
dir_extra = folder_path + 'extra/'
os.makedirs( dir_saved_model, exist_ok = True)
os.makedirs( dir_saved_feature, exist_ok = True )
os.makedirs( dir_extra, exist_ok = True )
path_model_image = dir_saved_model + 'resnet_50_image.pt'
path_model_sketchy = dir_saved_model + 'resnet_50_sketchy.pt'
path_z_encoder_sketchy = dir_saved_model + 'z_encoder_sketch.pt'
path_s_encoder_sketchy = dir_saved_model + 's_encoder_sketch.pt'
path_adv_model_sketchy = dir_saved_model + 'adv_sketch.pt'
path_recon_model_sketchy = dir_saved_model + 'reconstruck_sketch.pt'
path_z_encoder_image = dir_saved_model + 'z_encoder_image.pt'
path_s_encoder_image = dir_saved_model + 's_encoder_image.pt'
path_adv_model_image = dir_saved_model + 'adv_image.pt'
path_recon_model_image = dir_saved_model + 'reconstruck_image.pt'
path_fusion_model = dir_saved_model + 'fusion_model.pt'
path_image_dataset = dir_dataset + 'images/'
path_sketchy_dataset = dir_dataset + 'sketchy/'
path_quickdraw_dataset = dir_dataset + 'quick_draw/'
path_image_features = dir_saved_feature + 'image_features.p'
path_sketchy_features = dir_saved_feature + 'sketchy_features.p'
path_quickdraw_features = dir_saved_feature + 'quick_draw_features.p'
path_image_file_list = dir_extra + 'images_file_list.p'
path_sketchy_file_list = dir_extra + 'sketchy_file_list.p'
path_quickdraw_file_list = dir_extra + 'quick_draw_file_list.p'
path_model = folder_path + 'resnet_50_da.pt'
path_sketch_z_encoder = folder_path + 'sketch_encoder.pt'
path_glove_vector = folder_path + 'glove_vector'
| 29.273973 | 69 | 0.801591 |
59bb523ee1c7f0f47bac3fc8d75ede697eb27fb4 | 882 | py | Python | remodet_repository_wdh_part/Projects/Rtpose/solverParam.py | UrwLee/Remo_experience | a59d5b9d6d009524672e415c77d056bc9dd88c72 | [
"MIT"
] | null | null | null | remodet_repository_wdh_part/Projects/Rtpose/solverParam.py | UrwLee/Remo_experience | a59d5b9d6d009524672e415c77d056bc9dd88c72 | [
"MIT"
] | null | null | null | remodet_repository_wdh_part/Projects/Rtpose/solverParam.py | UrwLee/Remo_experience | a59d5b9d6d009524672e415c77d056bc9dd88c72 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import print_function
import caffe
from caffe import params as P
from google.protobuf import text_format
#import inputParam
import os
import sys
import math
sys.path.append('../')
from username import USERNAME
sys.dont_write_bytecode = True
# #################################################################################
caffe_root = "/home/{}/work/repository".format(USERNAME)
# Projects name
Project = "RtPose"
ProjectName = "Rtpose_COCO"
Results_dir = "/home/{}/Models/Results".format(USERNAME)
# Pretrained_Model = "/home/{}/Models/PoseModels/pose_iter_440000.caffemodel".format(USERNAME)
# Pretrained_Model = "/home/{}/Models/PoseModels/VGG19_3S_0_iter_20000.caffemodel".format(USERNAME)
Pretrained_Model = "/home/{}/Models/PoseModels/DarkNet_3S_0_iter_450000_merge.caffemodel".format(USERNAME)
gpus = "0"
solver_mode = P.Solver.GPU
| 36.75 | 106 | 0.709751 |