hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9bb08d27951cdcbd92a25a4408ad1a1b8fb55f34 | 1,345 | py | Python | test/test_CommandHead.py | jcandan/WonderPy | ee82322b082e94015258b34b27f23501f8130fa2 | [
"MIT"
] | 46 | 2018-07-31T20:30:41.000Z | 2022-03-23T17:14:51.000Z | test/test_CommandHead.py | jcandan/WonderPy | ee82322b082e94015258b34b27f23501f8130fa2 | [
"MIT"
] | 24 | 2018-08-01T09:59:29.000Z | 2022-02-26T20:57:51.000Z | test/test_CommandHead.py | jcandan/WonderPy | ee82322b082e94015258b34b27f23501f8130fa2 | [
"MIT"
] | 24 | 2018-08-01T19:14:31.000Z | 2021-02-18T13:26:40.000Z | import unittest
from mock import Mock
from test.robotTestUtil import RobotTestUtil
if __name__ == '__main__':
unittest.main()
| 40.757576 | 82 | 0.646097 |
9bb204788fee823d3cdd79e26af5c6bd4b825e8a | 3,866 | py | Python | feature_options.py | soarsmu/HERMES | 9b38eedd1f7fcc3321048cc25d15c38268e6fd0b | [
"MIT"
] | 2 | 2022-01-15T11:31:40.000Z | 2022-03-09T11:27:28.000Z | feature_options.py | soarsmu/HERMES | 9b38eedd1f7fcc3321048cc25d15c38268e6fd0b | [
"MIT"
] | null | null | null | feature_options.py | soarsmu/HERMES | 9b38eedd1f7fcc3321048cc25d15c38268e6fd0b | [
"MIT"
] | null | null | null | import click
| 51.546667 | 99 | 0.693999 |
9bb4070df0345465e234b3e6738bbb40c587c512 | 2,038 | py | Python | py_randomprime/__init__.py | UltiNaruto/py-randomprime | 597d3636c2e40e11ed92d4808200ded879ccb244 | [
"MIT"
] | null | null | null | py_randomprime/__init__.py | UltiNaruto/py-randomprime | 597d3636c2e40e11ed92d4808200ded879ccb244 | [
"MIT"
] | 2 | 2021-05-24T18:05:11.000Z | 2021-05-31T08:07:29.000Z | py_randomprime/__init__.py | henriquegemignani/py-randomprime | aac48b44761cbb8d857a4d72e06dfac17efc1fae | [
"MIT"
] | 2 | 2021-08-18T01:17:19.000Z | 2021-11-26T15:08:34.000Z | import copy
import os
import json
from pathlib import Path
from typing import Callable, Optional
from . import rust, version
def patch_iso_raw(config_str: str, notifier: BaseProgressNotifier):
    """Run the native ISO patcher on an already-serialized JSON config string.

    Raises ValueError when no progress notifier is supplied.
    """
    if notifier is not None:
        return rust.patch_iso(config_str, notifier)
    raise ValueError("notifier is None")
def patch_iso(input_iso: Path, output_iso: Path, config: dict, notifier: BaseProgressNotifier):
    """Patch *input_iso* into *output_iso*, injecting both paths into *config*.

    The caller's config dict is not mutated; a shallow copy is serialized
    and handed to the raw patcher.
    """
    merged = copy.copy(config)
    merged.update(inputIso=os.fspath(input_iso), outputIso=os.fspath(output_iso))
    return patch_iso_raw(json.dumps(merged), notifier)
def symbols_for_file(input_file: Path) -> Optional[dict]:
    """Return the MP1 symbol table for the game version of *input_file*.

    Returns None when the ISO's version cannot be identified.
    """
    version_id = rust.get_iso_mp1_version(os.fspath(input_file))
    if version_id is None:
        return None
    return rust.get_mp1_symbols(version_id)
__version__ = version.version
VERSION = version.version | 29.536232 | 111 | 0.723749 |
9bb514fb57dd5b2a6965770909c4eb7274835dca | 3,453 | py | Python | secistsploit/modules/auxiliary/whatweb.py | reneaicisneros/SecistSploit | b4e1bb0a213bee39c3bb79ab36e03e19122b80c0 | [
"MIT"
] | 15 | 2018-12-06T16:03:32.000Z | 2021-06-23T01:17:00.000Z | secistsploit/modules/auxiliary/whatweb.py | reneaicisneros/SecistSploit | b4e1bb0a213bee39c3bb79ab36e03e19122b80c0 | [
"MIT"
] | null | null | null | secistsploit/modules/auxiliary/whatweb.py | reneaicisneros/SecistSploit | b4e1bb0a213bee39c3bb79ab36e03e19122b80c0 | [
"MIT"
] | 6 | 2019-03-01T04:10:00.000Z | 2020-02-26T08:43:54.000Z | # -*- coding: UTF-8 -*-
import os
from secistsploit.core.exploit import *
from secistsploit.core.http.http_client import HTTPClient
| 34.188119 | 141 | 0.435274 |
9bb942cefeb3547baf593097bb2c4998d052f1b8 | 3,285 | py | Python | pygnss/__init__.py | nmerlene/pygnss | 9dc59e57cf5a4bdf0ca56c2b6a23d622ffda4c5a | [
"MIT"
] | null | null | null | pygnss/__init__.py | nmerlene/pygnss | 9dc59e57cf5a4bdf0ca56c2b6a23d622ffda4c5a | [
"MIT"
] | null | null | null | pygnss/__init__.py | nmerlene/pygnss | 9dc59e57cf5a4bdf0ca56c2b6a23d622ffda4c5a | [
"MIT"
] | null | null | null | from pathlib import Path
import logging
import xarray
from time import time
from typing import Union
#
from .io import opener
from .rinex2 import rinexnav2, _scan2
from .rinex3 import rinexnav3, _scan3
# for NetCDF compression. too high slows down with little space savings.
COMPLVL = 1
def readrinex(rinexfn: Path, outfn: Path=None, use: Union[str, list, tuple]=None, verbose: bool=True) -> tuple:
    """
    Reads OBS, NAV in RINEX 2,3. Plain ASCII text or GZIP .gz.

    Returns an (obs, nav) tuple; whichever dataset the file does not
    contain is None.  A previously converted NetCDF (.nc) file may yield
    both.  `outfn` optionally saves the parsed data to NetCDF; `use`
    restricts which observation types are parsed.
    """
    nav = None
    obs = None
    rinexfn = Path(rinexfn).expanduser()
    # %% detect type of Rinex file
    if rinexfn.suffix == '.gz':
        # strip the .gz so the inner RINEX file name can be examined
        fnl = rinexfn.stem.lower()
    else:
        fnl = rinexfn.name.lower()
    # RINEX 2 short names end in 'n'/'o'; RINEX 3 long names end in 'n.rnx'/'o.rnx'
    if fnl.endswith('n') or fnl.endswith('n.rnx'):
        nav = rinexnav(rinexfn, outfn)
    elif fnl.endswith('o') or fnl.endswith('o.rnx'):
        obs = rinexobs(rinexfn, outfn, use=use, verbose=verbose)
    elif rinexfn.suffix.endswith('.nc'):
        # converted file: both groups may be present, load whatever exists
        nav = rinexnav(rinexfn)
        obs = rinexobs(rinexfn)
    else:
        raise ValueError(f"I dont know what type of file you're trying to read: {rinexfn}")
    return obs, nav
def getRinexVersion(fn: Path) -> float:
    """Return the RINEX format version read from the file's first header line."""
    path = Path(fn).expanduser()
    with opener(path) as f:
        first_line = f.readline()
    # The version number occupies the first 9 columns of the header line.
    return float(first_line[:9])
# %% Navigation file
def rinexnav(fn: Path, ofn: Path=None, group: str='NAV') -> xarray.Dataset:
    """Read a RINEX 2/3 NAV file (ASCII or GZIP), or reload a NetCDF conversion.

    When `ofn` is given, the parsed dataset is also written to that NetCDF
    file under `group`.
    """
    fn = Path(fn).expanduser()

    if fn.suffix == '.nc':
        # previously converted file: just reopen the requested group
        try:
            return xarray.open_dataset(fn, group=group)
        except OSError:
            logging.error(f'Group {group} not found in {fn}')
            return

    ver = getRinexVersion(fn)
    reader = {2: rinexnav2, 3: rinexnav3}.get(int(ver))
    if reader is None:
        raise ValueError(f'unknown RINEX verion {ver} {fn}')
    nav = reader(fn)

    if ofn:
        ofn = Path(ofn).expanduser()
        print('saving NAV data to', ofn)
        # append when the target file already exists so groups accumulate
        nav.to_netcdf(ofn, group=group, mode='a' if ofn.is_file() else 'w')
    return nav
# %% Observation File
def rinexobs(fn: Path, ofn: Path=None, use: Union[str, list, tuple]=None,
             group: str='OBS', verbose: bool=False) -> xarray.Dataset:
    """
    Read RINEX 2,3 OBS files in ASCII or GZIP.

    `use` restricts which systems/observables are parsed (passed through to
    the version-specific scanner).  When `ofn` is given, the parsed dataset
    is also written to that NetCDF file under `group`, with compression.
    """
    fn = Path(fn).expanduser()
    if fn.suffix == '.nc':
        # previously converted file: just reopen the requested group
        try:
            logging.debug(f'loading {fn} with xarray')
            return xarray.open_dataset(fn, group=group)
        except OSError:
            logging.error(f'Group {group} not found in {fn}')
            return
    tic = time()
    ver = getRinexVersion(fn)
    if int(ver) == 2:
        obs = _scan2(fn, use, verbose)
    elif int(ver) == 3:
        obs = _scan3(fn, use, verbose)
    else:
        raise ValueError(f'unknown RINEX verion {ver} {fn}')
    print(f"finished in {time()-tic:.2f} seconds")
    if ofn:
        ofn = Path(ofn).expanduser()
        print('saving OBS data to', ofn)
        # append when the target file already exists so groups accumulate
        wmode = 'a' if ofn.is_file() else 'w'
        # modest zlib compression per data variable; see COMPLVL note above
        enc = {k: {'zlib': True, 'complevel': COMPLVL, 'fletcher32': True}
               for k in obs.data_vars}
        obs.to_netcdf(ofn, group=group, mode=wmode, encoding=enc)
    return obs
| 28.318966 | 120 | 0.595129 |
9bb96ea949af7533581d8e4cca76f381e779a9b0 | 5,201 | py | Python | classroom/pref_graph.py | norabelrose/whisper | 79642bab696f3e166b6af61a447602e8e5d58270 | [
"MIT"
] | null | null | null | classroom/pref_graph.py | norabelrose/whisper | 79642bab696f3e166b6af61a447602e8e5d58270 | [
"MIT"
] | null | null | null | classroom/pref_graph.py | norabelrose/whisper | 79642bab696f3e166b6af61a447602e8e5d58270 | [
"MIT"
] | null | null | null | from typing import TYPE_CHECKING
import networkx as nx
from .fas import eades_fas
if TYPE_CHECKING: # Prevent circular import
from .pref_dag import PrefDAG
def add_indiff(self, a: str, b: str, **attr):
    """Add the indifference relation `a ~ b`.

    Indifference edges must carry zero weight; a missing weight defaults
    to 0.0, and any nonzero weight raises CoherenceViolation.
    """
    weight = attr.setdefault('weight', 0.0)
    if weight != 0.0:
        raise CoherenceViolation("Indifferences cannot have nonzero weight")
    self.add_edge(a, b, **attr)
def add_edge(self, a: str, b: str, **attr):
    """Add an edge to the graph, and check for coherence violations. Usually you
    should use the `add_pref` or `add_indiff` wrapper methods instead of this method."""
    # A missing weight is treated as 1 (a strict preference); negative
    # weights are never coherent.
    if attr.get('weight', 1) < 0:
        raise CoherenceViolation("Preferences must have non-negative weight")
    super().add_edge(a, b, **attr)

# Alias: a strict preference is just a (positively) weighted edge.
add_pref = add_edge
def draw(self):
    """Displays a visualization of the graph using `matplotlib`. Strict preferences
    are shown as solid arrows, and indifferences are dashed lines."""
    strict_subgraph = self.strict_prefs
    # Lay out nodes using only the strict preferences, so indifference
    # edges do not distort the spring embedding.
    pos = nx.drawing.spring_layout(strict_subgraph)
    nx.draw_networkx_nodes(strict_subgraph, pos)
    nx.draw_networkx_edges(strict_subgraph, pos)
    # Indifferences are undirected ('-' arrowstyle) and drawn dashed.
    nx.draw_networkx_edges(self.indifferences, pos, arrowstyle='-', style='dashed')
    nx.draw_networkx_labels(strict_subgraph, pos)
def acyclic_subgraph(self) -> 'PrefDAG':
    """Return an acyclic subgraph of this graph as a `PrefDAG`. The algorithm will try
    to remove as few preferences as possible, but it is not guaranteed to be optimal.
    If the graph is already acyclic, the returned `PrefDAG` will be isomorphic to this graph."""
    # Local import avoids a circular dependency (see TYPE_CHECKING guard above).
    from .pref_dag import PrefDAG

    # Eades' heuristic yields a (small, not necessarily minimum) feedback
    # arc set over the strict preferences; dropping those edges is enough
    # to make the whole graph acyclic.
    fas = set(eades_fas(self.strict_prefs))
    return PrefDAG((
        (u, v, d) for u, v, d in self.edges(data=True)  # type: ignore
        if (u, v) not in fas
    ))
def is_quasi_transitive(self) -> bool:
    """Return whether the strict preferences are acyclic."""
    # Only the strict-preference subgraph is checked; indifference edges
    # are excluded from the cycle test.
    return nx.is_directed_acyclic_graph(self.strict_prefs)
def pref_prob(self, a: str, b: str, eps: float = 5e-324) -> float:
    """Return the probability that `a` is preferred to `b`.

    Computed as a smoothed ratio of the directed edge weights; the tiny
    `eps` makes the result 1/2 when neither direction carries any weight
    (i.e. no strict preference exists between `a` and `b`).
    """
    forward = self.pref_weight(a, b)
    backward = self.pref_weight(b, a)
    return (forward + eps) / (forward + backward + 2 * eps)
def pref_weight(self, a: str, b: str, default: float = 0.0) -> float:
    """Return the weight of the preference `a > b`, or `default` when absent.

    An existing edge with no explicit 'weight' attribute counts as 1.0.
    """
    attrs = self.edges.get((a, b))
    if attrs is None:
        return default
    return attrs.get('weight', 1.0)
def unlink(self, a: str, b: str):
    """Remove the preference relation between `a` and `b`, whichever way it points."""
    try:
        self.remove_edge(a, b)
        return
    except nx.NetworkXError:
        # No a -> b edge; fall through and try the opposite direction.
        pass
    try:
        self.remove_edge(b, a)
    except nx.NetworkXError:
        raise KeyError(f"No preference relation between {a} and {b}")
class CoherenceViolation(Exception):
    """Raised when an operation would violate the coherence of the graph,
    e.g. a negatively-weighted preference or a nonzero-weight indifference."""
    pass
| 42.284553 | 104 | 0.635455 |
9bbc0decb0390376acbaa65e5a7c58faddf9f153 | 516 | py | Python | scaffolder/templates/django/views.py | javidgon/wizard | a75a4c10f84c756c2466c9afaaadf3b2c0cf3a43 | [
"MIT"
] | null | null | null | scaffolder/templates/django/views.py | javidgon/wizard | a75a4c10f84c756c2466c9afaaadf3b2c0cf3a43 | [
"MIT"
] | null | null | null | scaffolder/templates/django/views.py | javidgon/wizard | a75a4c10f84c756c2466c9afaaadf3b2c0cf3a43 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django.views import generic
from .models import {% for model in app.models %}{{ model.name }}{% if not loop.last %}, {% endif %}{% endfor %}
{% for model in app.models %}class {{ model.name }}IndexView(generic.ListView):
model = {{ model.name }}
template_name = '{{ model.name | lower }}s/index.html'
class {{ model.name }}DetailView(generic.DetailView):
model = {{ model.name }}
template_name = '{{ model.name | lower }}s/detail.html'
{% endfor %}
| 30.352941 | 112 | 0.656977 |
9bbcdfbd01a5563f9c4786b31c8c24dcfa3b565b | 683 | py | Python | hisitter/reviews/permissions.py | babysitter-finder/backend | 5c37c6876ca13b5794ac44e0342b810426acbc76 | [
"MIT"
] | 1 | 2021-02-25T01:02:40.000Z | 2021-02-25T01:02:40.000Z | hisitter/reviews/permissions.py | babysitter-finder/backend | 5c37c6876ca13b5794ac44e0342b810426acbc76 | [
"MIT"
] | null | null | null | hisitter/reviews/permissions.py | babysitter-finder/backend | 5c37c6876ca13b5794ac44e0342b810426acbc76 | [
"MIT"
] | 1 | 2020-11-23T20:57:47.000Z | 2020-11-23T20:57:47.000Z | """ Reviews permissions."""
# Python
import logging
# Django Rest Framework
from rest_framework.permissions import BasePermission
| 28.458333 | 75 | 0.628111 |
9bbd9c4b8b498fde19563e3848c89d37d52b9838 | 1,678 | py | Python | pk.py | CnybTseng/SOSNet | 9f1e96380388dde75fe0737ec0b3516669054205 | [
"MIT"
] | null | null | null | pk.py | CnybTseng/SOSNet | 9f1e96380388dde75fe0737ec0b3516669054205 | [
"MIT"
] | null | null | null | pk.py | CnybTseng/SOSNet | 9f1e96380388dde75fe0737ec0b3516669054205 | [
"MIT"
] | null | null | null | import sys
import torch
import timeit
sys.path.append('../JDE')
from mot.models.backbones import ShuffleNetV2
from sosnet import SOSNet
if __name__ == '__main__':
    # Head-to-head ("PK") inference latency benchmark: SOSNet vs ShuffleNetV2.
    print('SOSNet PK ShuffleNetV2')
    # Baseline model: ShuffleNetV2 with its stage repeats and channel widths.
    model1 = ShuffleNetV2(
        stage_repeat={'stage2': 4, 'stage3': 8, 'stage4': 4},
        stage_out_channels={'conv1': 24, 'stage2': 48, 'stage3': 96,
        'stage4': 192, 'conv5': 1024}).cuda().eval()
    # SOSNet architecture spec: per-stage channel widths and repeat counts.
    # 'out': True presumably marks stages whose feature maps are emitted —
    # confirm against sosnet.py.
    arch={
        'conv1': {'out_channels': 64},
        'stage2': {'out_channels': 256, 'repeate': 2, 'out': True},
        'stage3': {'out_channels': 384, 'repeate': 2, 'out': True},
        'stage4': {'out_channels': 512, 'repeate': 2, 'out': True},
        'conv5': {'out_channels': 1024}}
    model2 = SOSNet(arch).cuda().eval()
    # Shared dummy input: one 224x224 RGB image on the GPU.
    x = torch.rand(1, 3, 224, 224).cuda()
    loops = 1000
    # --- ShuffleNetV2 timing ---
    with torch.no_grad():
        start = timeit.default_timer()
        for _ in range(loops):
            y = model1(x)
        # Wait for all queued GPU work before stopping the clock.
        torch.cuda.synchronize()
        end = timeit.default_timer()
    latency = (end - start) / loops
    print('ShuffleNetV2 latency: {} seconds.'.format(latency))
    for yi in y:
        print(yi.shape)
    # --- SOSNet timing (same input, same loop count) ---
    with torch.no_grad():
        start = timeit.default_timer()
        for _ in range(loops):
            y = model2(x)
        torch.cuda.synchronize()
        end = timeit.default_timer()
    latency = (end - start) / loops
    print('SOSNet latency: {} seconds.'.format(latency))
    for yi in y:
        print(yi.shape)
    # Per-operator breakdown of a single SOSNet forward pass.
    with torch.autograd.profiler.profile(use_cuda=True, record_shapes=True) as prof:
        model2(x)
print(prof.key_averages().table()) | 37.288889 | 85 | 0.567342 |
9bbda2f39a11084b661e8fe58491f418c2a36b6f | 2,255 | py | Python | test/generate_netmhcpan_functions.py | til-unc/mhcgnomes | 0bfbe193daeb7cd38d958222f6071dd657e9fb6e | [
"Apache-2.0"
] | 6 | 2020-10-27T15:31:32.000Z | 2020-11-29T03:26:06.000Z | test/generate_netmhcpan_functions.py | til-unc/mhcgnomes | 0bfbe193daeb7cd38d958222f6071dd657e9fb6e | [
"Apache-2.0"
] | 4 | 2020-10-27T14:57:16.000Z | 2020-11-04T21:56:39.000Z | test/generate_netmhcpan_functions.py | pirl-unc/mhcgnomes | 0bfbe193daeb7cd38d958222f6071dd657e9fb6e | [
"Apache-2.0"
] | null | null | null | import pandas as pd
NETMHCPAN_3_0_DEST = "test_netmhcpan_3_0_alleles.py"
NETMHCPAN_3_0_SOURCE = "netmhcpan_3_0_alleles.txt"
NETMHCPAN_4_0_DEST = "test_netmhcpan_4_0_alleles.py"
NETMHCPAN_4_0_SOURCE = "netmhcpan_4_0_alleles.txt"
# Characters handled specially when allele names become identifiers —
# presumably stripped/escaped by generate(); confirm against its implementation.
special_chars = " *:-,/."

# Generate the 3.0 test module first and keep its allele list, so the 4.0
# module presumably only covers alleles new in NetMHCpan 4.0 (via `exclude`).
netmhcpan_3_0_alleles = generate(
    src=NETMHCPAN_3_0_SOURCE,
    dst=NETMHCPAN_3_0_DEST)

generate(
    src=NETMHCPAN_4_0_SOURCE,
    dst=NETMHCPAN_4_0_DEST,
    exclude=netmhcpan_3_0_alleles)
| 35.234375 | 107 | 0.501552 |
9bbde6aa054a0343fb01e156fb53162fe6c254c5 | 96 | py | Python | python/tests/test_linked_list.py | Leenhazaimeh/data-structures-and-algorithms | d55d55bf8c98e768cb929326b5ec8c18fb5c8384 | [
"MIT"
] | null | null | null | python/tests/test_linked_list.py | Leenhazaimeh/data-structures-and-algorithms | d55d55bf8c98e768cb929326b5ec8c18fb5c8384 | [
"MIT"
] | 10 | 2021-07-29T18:56:48.000Z | 2021-09-11T19:11:00.000Z | python/tests/test_linked_list.py | Leenhazaimeh/data-structures-and-algorithms | d55d55bf8c98e768cb929326b5ec8c18fb5c8384 | [
"MIT"
] | 3 | 2021-08-16T06:16:37.000Z | 2021-12-05T14:29:51.000Z | # from linked_list.linked_list import LinkedList
# def test_import():
# assert LinkedList
| 16 | 48 | 0.75 |
9bbf5d23053e93f4be3618d38f8307dfe71dd5b9 | 2,156 | py | Python | 美团爬取商家信息/paquxinxi.py | 13060923171/Crawl-Project2 | effab1bf31979635756fc272a7bcc666bb499be2 | [
"MIT"
] | 14 | 2020-10-27T05:52:20.000Z | 2021-11-07T20:24:55.000Z | 美团爬取商家信息/paquxinxi.py | 13060923171/Crawl-Project2 | effab1bf31979635756fc272a7bcc666bb499be2 | [
"MIT"
] | 1 | 2021-09-17T07:40:00.000Z | 2021-09-17T07:40:00.000Z | 美团爬取商家信息/paquxinxi.py | 13060923171/Crawl-Project2 | effab1bf31979635756fc272a7bcc666bb499be2 | [
"MIT"
] | 8 | 2020-11-18T14:23:12.000Z | 2021-11-12T08:55:08.000Z | import requests
import re
import json
# Request headers impersonating a browser session against the Meituan mobile
# API; the Cookie/uuid values are session-specific and may stop working.
headers = {
    "Origin": "https://bj.meituan.com",
    "Host": "apimobile.meituan.com",
    "Referer": "https://bj.meituan.com/s/%E7%81%AB%E9%94%85/",
    "Cookie": "uuid=692a53319ce54d0c91f3.1597223761.1.0.0; ci=1; rvct=1; _lxsdk_cuid=173e1f47707c8-0dcd4ff30b4ae3-3323765-e1000-173e1f47707c8; _lxsdk_s=173e1f47708-21d-287-4d9%7C%7C35",
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36"
}
if __name__ == '__main__':
    # Page through POI search results: the API's page size is limit=32, so
    # range(0, 33, 32) requests the pages at offsets 0 and 32.  The `q`
    # parameter is the URL-encoded search keyword.
    for i in range(0,33,32):
        url = "https://apimobile.meituan.com/group/v4/poi/pcsearch/1?uuid=692a53319ce54d0c91f3.1597223761.1.0.0&userid=-1&limit=32&offset={}&cateId=-1&q=%E7%81%AB%E9%94%85".format(i)
get_parse(url) | 33.169231 | 185 | 0.590909 |
9bc11053555c82b404c0a0cf86d08e3626d9e05f | 4,071 | py | Python | entity_resolution/EntityClass.py | GeoJamesJones/ArcGIS-Senzing-Prototype | ebe7f1c3f516525f4bfbf5b4f1446e8c6612a67b | [
"MIT"
] | null | null | null | entity_resolution/EntityClass.py | GeoJamesJones/ArcGIS-Senzing-Prototype | ebe7f1c3f516525f4bfbf5b4f1446e8c6612a67b | [
"MIT"
] | null | null | null | entity_resolution/EntityClass.py | GeoJamesJones/ArcGIS-Senzing-Prototype | ebe7f1c3f516525f4bfbf5b4f1446e8c6612a67b | [
"MIT"
] | null | null | null | from __future__ import annotations
import json
from typing import List, Dict
from entity_resolution import EntityResolution
| 36.675676 | 121 | 0.561778 |
32ca34b8eacf24dc530fada37a04db8272ab0be6 | 523 | py | Python | langcreator/system.py | xzripper/LanguageCreator | 65421063161166d3e4f97e4b874909259b665fce | [
"MIT"
] | 2 | 2021-12-12T16:48:20.000Z | 2021-12-31T17:48:21.000Z | langcreator/system.py | xzripper/LanguageCreator | 65421063161166d3e4f97e4b874909259b665fce | [
"MIT"
] | null | null | null | langcreator/system.py | xzripper/LanguageCreator | 65421063161166d3e4f97e4b874909259b665fce | [
"MIT"
] | null | null | null | import subprocess
import sys
import os
subprocess = subprocess
sys = sys
os = os
def output(command: str, remlstc: bool) -> str:
    """
    Get the console output of a shell command.

    :param command: The shell command to run.
    :param remlstc: If True, strip the last character (usually the trailing
        newline) from the captured output.
    :return: The command's decoded output.
    """
    # NOTE(review): shell=True runs `command` through the shell — only pass
    # trusted command strings here.
    # Run the command once instead of duplicating the call in both branches.
    result = subprocess.check_output(command, shell=True, encoding='cp866')
    return result[:-1] if remlstc else result
| 27.526316 | 158 | 0.692161 |
32cada166139a42c2081b8a48a2bcd39a15cb5ab | 2,612 | py | Python | create_categories.py | Botomatik/JackBot | 58651d8b5a5bcead2a2eb79849019cb4f972b7cd | [
"MIT"
] | null | null | null | create_categories.py | Botomatik/JackBot | 58651d8b5a5bcead2a2eb79849019cb4f972b7cd | [
"MIT"
] | null | null | null | create_categories.py | Botomatik/JackBot | 58651d8b5a5bcead2a2eb79849019cb4f972b7cd | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Program to batch create categories.
The program expects a generator containing a list of page titles to be used as
base.
The following command line parameters are supported:
-always (not implemented yet) Don't ask, just do the edit.
-overwrite (not implemented yet).
-parent The name of the parent category.
-basename The base to be used for the new category names.
Example:
create_categories.py
-lang:commons
-family:commons
-links:User:Multichill/Wallonia
-parent:"Cultural heritage monuments in Wallonia"
-basename:"Cultural heritage monuments in"
"""
__version__ = '$Id$'
#
# (C) Multichill, 2011
# (C) xqt, 2011
#
# Distributed under the terms of the MIT license.
#
#
import os, sys, re, codecs
import urllib, httplib, urllib2
import catlib
import time
import socket
import StringIO
import wikipedia as pywikibot
import config
import pagegenerators
def main(args):
    '''
    Main loop. Parse command-line options, then create a category for every
    page produced by the generator (see module docstring for usage).
    '''
    generator = None
    parent = u''
    basename = u''
    always = False  # '-always' is accepted but not implemented yet (see docstring)

    genFactory = pagegenerators.GeneratorFactory()

    for arg in pywikibot.handleArgs():
        if arg == '-always':
            always = True
        elif arg.startswith('-parent:'):
            parent = arg[len('-parent:'):].strip()
        elif arg.startswith('-basename:'):
            # Bug fix: previously matched bare '-basename' (without ':') too,
            # which made the slice below cut into the wrong place; require
            # the colon, consistent with '-parent:'.
            basename = arg[len('-basename:'):].strip()
        else:
            # Anything else is assumed to be a page-generator option.
            genFactory.handleArg(arg)

    generator = genFactory.getCombinedGenerator()
    if generator:
        for page in generator:
            createCategory(page, parent, basename)
    else:
        pywikibot.output(u'No pages to work on')
    pywikibot.output(u'All done')
if __name__ == "__main__":
    try:
        main(sys.argv[1:])
    finally:
        # Always let pywikibot shut down cleanly, even if main() raises.
        pywikibot.stopme()
| 25.359223 | 78 | 0.630551 |
32cae26d8eb99a201dc12930e81a1edb58d4cace | 10,287 | py | Python | avod/core/losses.py | Zengyi-Qin/TLNet | 11fa48160158b550ad2dc810ed564eebe17e8f5e | [
"Apache-2.0"
] | 114 | 2019-03-13T01:42:22.000Z | 2022-03-31T07:56:04.000Z | avod/core/losses.py | Zengyi-Qin/TLNet | 11fa48160158b550ad2dc810ed564eebe17e8f5e | [
"Apache-2.0"
] | 12 | 2019-03-26T08:18:13.000Z | 2021-05-19T14:36:27.000Z | avod/core/losses.py | Zengyi-Qin/TLNet | 11fa48160158b550ad2dc810ed564eebe17e8f5e | [
"Apache-2.0"
] | 22 | 2019-03-22T10:44:49.000Z | 2021-04-01T00:11:07.000Z | """Classification and regression loss functions for object detection.
Localization losses:
* WeightedL2LocalizationLoss
* WeightedSmoothL1LocalizationLoss
Classification losses:
* WeightedSoftmaxClassificationLoss
* WeightedSigmoidClassificationLoss
"""
from abc import ABCMeta
from abc import abstractmethod
import tensorflow as tf
from avod.core import ops
| 44.150215 | 129 | 0.636726 |
32cd6811a8df581555a9e17bfebdb7625e6646ac | 19,282 | py | Python | routing/views.py | iqqmuT/tsari | 343ef5cf08ee24bdb710e94c0b6fb334264e5677 | [
"MIT"
] | null | null | null | routing/views.py | iqqmuT/tsari | 343ef5cf08ee24bdb710e94c0b6fb334264e5677 | [
"MIT"
] | 2 | 2020-02-11T22:09:10.000Z | 2020-06-05T18:02:28.000Z | routing/views.py | iqqmuT/tsari | 343ef5cf08ee24bdb710e94c0b6fb334264e5677 | [
"MIT"
] | null | null | null | import json
from datetime import datetime, timedelta
from dateutil import parser as dateparser
from django.contrib.auth.decorators import user_passes_test
from django.db.models import Q
from django.http import HttpResponseNotFound, JsonResponse
from django.shortcuts import render
from django.utils import timezone
from avdb.models import \
Convention, \
Equipment, \
EquipmentType, \
Location, \
LocationType, \
TransportOrder, \
TransportOrderLine
import logging
logger = logging.getLogger(__name__)
# TO data structure
# {
# 'disabled': False,
# 'equipment': 1,
# 'week': '2018-05-28T09:00:00+00:00',
# 'from': {
# 'location': 9,
# 'load_out': '2018-06-19T08:00:00+00:00'
# },
# 'to': {
# 'location': 4,
# 'convention': 7,
# 'load_in': '2018-06-19T09:00:00+00:00'
# },
# 'name': 'First TO',
# 'notes': 'Notes',
# 'unitNotes': 'Uniittinotes'
# }
# Save JSON request
def _save_to_data(to_data):
    """Save one TransportOrder data dict (structure documented above).

    Creates (or reuses) the TransportOrder described by ``to_data`` and
    attaches a new TransportOrderLine for the referenced equipment.
    Returns the created TransportOrderLine, or None when the
    TransportOrder could not be created.
    """
    to = _get_or_create_to(to_data)
    if to is None:
        # _get_or_create_to refuses to build a TO without both locations
        return None

    # Attach the equipment to the TO via a new line item.
    tol = TransportOrderLine(
        equipment=Equipment.objects.get(pk=to_data['equipment']),
        transport_order=to,
    )
    tol.save()
    return tol
def _disable_all_tos(year):
    """Disables all TransportOrders from given year.

    A TO belongs to the year if any of its four load times (location or
    convention, in or out) falls within that calendar year.
    """
    start = datetime(year, 1, 1)
    end = datetime(year, 12, 31, 23, 59, 59)
    # NOTE(review): these datetimes are naive; if Django's USE_TZ is enabled
    # the comparison against timezone-aware DB values may warn/shift — confirm.
    tos = TransportOrder.objects.filter(
        Q(from_loc_load_out__range=(start, end)) | Q(to_loc_load_in__range=(start, end)) | Q(from_convention__load_out__range=(start, end)) | Q(to_convention__load_in__range=(start, end))
    )
    for to in tos:
        to.disabled = True
        to.save()
def _get_or_create_to(to_data):
    """Get or create a TransportOrder matching the given TO data dict.

    Resolves the 'from' and 'to' endpoints (location, optional convention,
    optional explicit load time), then looks up or creates the
    TransportOrder and refreshes its editable fields.  Returns None when
    either endpoint location is missing, since a TO needs both locations.
    """
    def _parse_endpoint(side, time_key):
        """Resolve one endpoint dict into (location, convention, load_time)."""
        location = None
        convention = None
        load_time = None
        data = to_data.get(side)
        if data is not None:
            conv_pk = data.get('convention')
            if conv_pk is not None:
                convention = Convention.objects.get(pk=conv_pk)
            loc_pk = data.get('location')
            if loc_pk is not None:
                location = Location.objects.get(pk=loc_pk)
            # An explicit load time only applies when no convention fixes it.
            if convention is None and time_key in data and _is_valid_datetime(data[time_key]):
                load_time = dateparser.parse(data[time_key])
        return location, convention, load_time

    # Bug fix: the 'to' endpoint used to be guarded by `'from' in to_data`
    # (copy-paste), so a payload without 'from' skipped 'to' parsing and a
    # payload without 'to' raised KeyError.  Each side now checks its own key.
    from_location, from_convention, from_load_out = _parse_endpoint('from', 'load_out')
    to_location, to_convention, to_load_in = _parse_endpoint('to', 'load_in')

    if from_location is None or to_location is None:
        # can't create TransportOrder with empty Locations
        return None

    to, created = TransportOrder.objects.get_or_create(
        from_convention=from_convention,
        to_convention=to_convention,
        from_loc=from_location,
        to_loc=to_location,
        from_loc_load_out=from_load_out,
        to_loc_load_in=to_load_in,
    )
    # update other (non-identifying) fields and re-enable the TO
    if 'name' in to_data.keys():
        to.name = to_data['name']
    if 'notes' in to_data.keys():
        to.notes = to_data['notes']
    if 'unitNotes' in to_data.keys():
        to.unit_notes = to_data['unitNotes']
    to.disabled = False
    to.save()
    return to
def _get_previous_monday(d):
"""Returns previous monday from given datetime."""
monday = d - timedelta(days=d.weekday())
# set time to 00:00:00
return datetime(monday.year, monday.month, monday.day, 0, 0, 0)
convention_cache = {}
location_cache = {}
def _get_other_locations():
    """Return all locations except convention venues (memoized in location_cache)."""
    cached = location_cache.get('all')
    if cached is None:
        venue_type = LocationType.objects.get(name='Convention venue')
        cached = Location.objects.exclude(loc_type=venue_type)
        location_cache['all'] = cached
    return cached
def _find_tols(equipment_id, start, end):
    """Return existing TransportOrderLines for the given equipment whose
    destination load-in (location or convention) falls between start and end.

    Only the *load-in* side is matched; load-out times are ignored.  A
    broader match over all four time fields is kept commented out below
    for reference.
    """
    tols = TransportOrderLine.objects.filter(
        equipment__id=equipment_id).filter(
        Q(transport_order__to_loc_load_in__range=(start, end)) | Q(transport_order__to_convention__load_in__range=(start, end))
        #Q(transport_order__from_loc_load_out__range=(start, end)) | Q(transport_order__to_loc_load_in__range=(start, end)) | Q(transport_order__from_convention__load_out__range=(start, end)) | Q(transport_order__to_convention__load_in__range=(start, end))
    )
    return tols
def _remove_tols(equipment_id, year):
    """Remove all TransportOrderLines for the given equipment id whose
    TransportOrder has any load time (location or convention, in or out)
    within the given calendar year."""
    start = datetime(year, 1, 1)
    end = datetime(year, 12, 31, 23, 59, 59)
    # One OR'd query instead of four separate DELETEs, mirroring the range
    # logic used by _disable_all_tos; the matched set is the same union.
    TransportOrderLine.objects.filter(
        Q(transport_order__from_loc_load_out__range=(start, end)) |
        Q(transport_order__to_loc_load_in__range=(start, end)) |
        Q(transport_order__from_convention__load_out__range=(start, end)) |
        Q(transport_order__to_convention__load_in__range=(start, end)),
        equipment__id=equipment_id,
    ).delete()
| 36.041121 | 256 | 0.587283 |
32cf5c6af409ad539e05135e062b11460576c4f6 | 5,575 | py | Python | my_ner.py | shouxieai/nlp-bilstm_crf-ner | 907381325eeb0a2c29004e1c617bea7312579ba8 | [
"Apache-2.0"
] | 16 | 2021-12-14T10:51:25.000Z | 2022-03-30T10:10:09.000Z | my_ner.py | shouxieai/nlp-bilstm-ner | 907381325eeb0a2c29004e1c617bea7312579ba8 | [
"Apache-2.0"
] | 1 | 2022-03-23T04:28:50.000Z | 2022-03-23T04:28:50.000Z | my_ner.py | shouxieai/nlp-bilstm-ner | 907381325eeb0a2c29004e1c617bea7312579ba8 | [
"Apache-2.0"
] | 2 | 2021-12-08T02:48:01.000Z | 2021-12-13T13:03:25.000Z | import os
from torch.utils.data import Dataset,DataLoader
import torch
import torch.nn as nn
from sklearn.metrics import f1_score
def build_corpus(split, make_vocab=True, data_dir="data"):
    """Load the <split>.char.bmes file and return sentence/tag lists.

    Sentences are separated by blank lines; each non-blank line holds
    "<char> <tag>".  Sentences are returned sorted longest-first.  With
    make_vocab=True, word and tag vocabularies (token -> id, including
    the <UNK>/<PAD> specials) are built and returned as well.
    """
    assert split in ['train', 'dev', 'test']

    word_lists = []
    tag_lists = []
    path = os.path.join(data_dir, split + ".char.bmes")
    with open(path, 'r', encoding='utf-8') as f:
        words, tags = [], []
        for line in f:
            if line == '\n':
                # blank line terminates the current sentence
                word_lists.append(words)
                tag_lists.append(tags)
                words, tags = [], []
            else:
                word, tag = line.strip('\n').split()
                words.append(word)
                tags.append(tag)

    # Longest sentences first.  Both sorts are stable and keyed on the same
    # per-sentence lengths, so word/tag pairs stay aligned.
    word_lists = sorted(word_lists, key=len, reverse=True)
    tag_lists = sorted(tag_lists, key=len, reverse=True)

    if not make_vocab:
        return word_lists, tag_lists

    word2id = build_map(word_lists)
    tag2id = build_map(tag_lists)
    word2id['<UNK>'] = len(word2id)
    word2id['<PAD>'] = len(word2id)
    tag2id['<PAD>'] = len(tag2id)
    return word_lists, tag_lists, word2id, tag2id
if __name__ == "__main__":
    device = "cuda:0" if torch.cuda.is_available() else "cpu"

    # Load the corpus; the vocabularies come from the training split only.
    train_word_lists, train_tag_lists, word_2_index, tag_2_index = build_corpus("train")
    dev_word_lists, dev_tag_lists = build_corpus("dev", make_vocab=False)
    test_word_lists, test_tag_lists = build_corpus("test", make_vocab=False)

    corpus_num = len(word_2_index)
    class_num = len(tag_2_index)

    # Hyper-parameters.  The dev set is evaluated in a single batch.
    train_batch_size = 5
    dev_batch_size = len(dev_word_lists)
    epoch = 100
    lr = 0.001
    embedding_num = 128
    hidden_num = 129
    bi = True  # bidirectional LSTM

    train_dataset = MyDataset(train_word_lists,train_tag_lists,word_2_index, tag_2_index)
    train_dataloader = DataLoader(train_dataset,batch_size=train_batch_size,shuffle=False,collate_fn=train_dataset.batch_data_pro)

    dev_dataset = MyDataset(dev_word_lists, dev_tag_lists, word_2_index, tag_2_index)
    dev_dataloader = DataLoader(dev_dataset, batch_size=dev_batch_size, shuffle=False,collate_fn=dev_dataset.batch_data_pro)

    model = MyModel(embedding_num,hidden_num,corpus_num,bi,class_num,word_2_index["<PAD>"])
    model = model.to(device)
    opt = torch.optim.Adam(model.parameters(),lr = lr)

    for e in range(epoch):
        model.train()
        for data , tag, da_len in train_dataloader:
            loss = model.forward(data,da_len,tag)
            loss.backward()
            opt.step()
            opt.zero_grad()

        # Evaluate micro-F1 on the dev set after each epoch.
        model.eval()
        for dev_data , dev_tag, dev_da_len in dev_dataloader:
            test_loss = model.forward(dev_data,dev_da_len,dev_tag)
            # model.pre holds the predictions from the forward pass —
            # presumably set as a side effect; confirm in MyModel.
            score = f1_score(dev_tag.reshape(-1).cpu().numpy(),model.pre.cpu().numpy(),average="micro")
            print(score)
            # The whole dev set is one batch, so one iteration suffices.
            break
| 32.794118 | 130 | 0.63139 |
32cf7fd469a0aec109e44e66849bad3789086158 | 245 | py | Python | test.py | karttur/geoimagine03-support | 3971db215382bd16f207eca3ef1d9d81e4298b41 | [
"BSD-3-Clause"
] | null | null | null | test.py | karttur/geoimagine03-support | 3971db215382bd16f207eca3ef1d9d81e4298b41 | [
"BSD-3-Clause"
] | null | null | null | test.py | karttur/geoimagine03-support | 3971db215382bd16f207eca3ef1d9d81e4298b41 | [
"BSD-3-Clause"
] | null | null | null | '''
Created on 28 Jan 2021
@author: thomasgumbricht
'''
from string import whitespace
def CheckWhitespace(s):
    """Return True if *s* contains any ASCII whitespace character.

    Membership is tested against ``string.whitespace``
    (space, tab, newline, carriage return, vertical tab, form feed).

    Parameters
    ----------
    s : str
        The string to inspect.

    Returns
    -------
    bool
        True if at least one whitespace character occurs in ``s``.
    """
    # any() short-circuits on the first hit, unlike the original
    # ``True in [c in s for c in whitespace]`` which always built
    # the full membership list first.
    return any(c in s for c in whitespace)
s = 'dumsnut'
print (CheckWhitespace(s)) | 13.611111 | 51 | 0.591837 |
32cfbeee160a6e50ceb471701c99ace872cbfe2b | 362 | py | Python | leetcode/409.py | windniw/just-for-fun | 54e5c2be145f3848811bfd127f6a89545e921570 | [
"Apache-2.0"
] | 1 | 2019-08-28T23:15:25.000Z | 2019-08-28T23:15:25.000Z | leetcode/409.py | windniw/just-for-fun | 54e5c2be145f3848811bfd127f6a89545e921570 | [
"Apache-2.0"
] | null | null | null | leetcode/409.py | windniw/just-for-fun | 54e5c2be145f3848811bfd127f6a89545e921570 | [
"Apache-2.0"
] | null | null | null |
"""
link: https://leetcode.com/problems/longest-palindrome
problem: s
solution: map
"""
| 18.1 | 54 | 0.558011 |
32cfc631e8d4a50ff93f3a9a349602c8342fb97a | 847 | py | Python | nickenbot/config.py | brlafreniere/nickenbot | f13ec78057ec25823eb16df6ffab3a32eddfd3ca | [
"MIT"
] | 1 | 2016-08-10T12:20:58.000Z | 2016-08-10T12:20:58.000Z | nickenbot/config.py | brlafreniere/nickenbot | f13ec78057ec25823eb16df6ffab3a32eddfd3ca | [
"MIT"
] | null | null | null | nickenbot/config.py | brlafreniere/nickenbot | f13ec78057ec25823eb16df6ffab3a32eddfd3ca | [
"MIT"
] | null | null | null | import yaml
import os
import sys
current_dir = os.path.dirname(os.path.realpath(__file__))
project_dir = os.path.realpath(os.path.join(current_dir, ".."))
| 27.322581 | 95 | 0.615112 |
32d046c8c2ed3ece0b08aa280a40083f8b7d16ab | 2,277 | py | Python | qna/views.py | channprj/KU-PL | 7fc3719b612a819ed1bd443695d7f13f509ee596 | [
"MIT"
] | null | null | null | qna/views.py | channprj/KU-PL | 7fc3719b612a819ed1bd443695d7f13f509ee596 | [
"MIT"
] | null | null | null | qna/views.py | channprj/KU-PL | 7fc3719b612a819ed1bd443695d7f13f509ee596 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.shortcuts import redirect
from django.shortcuts import get_object_or_404
from django.utils import timezone
from .forms import QuestionForm
from .forms import AnswerForm
from .models import Question
from .models import Answer
| 35.030769 | 101 | 0.665349 |
32d33f3c862ddf8043ee8ce09e1a526264e7c51a | 1,648 | py | Python | python/tests/test_oci.py | miku/labe | 2d784f418e24ab6fef9f76791c9fdd02dd505657 | [
"MIT"
] | null | null | null | python/tests/test_oci.py | miku/labe | 2d784f418e24ab6fef9f76791c9fdd02dd505657 | [
"MIT"
] | null | null | null | python/tests/test_oci.py | miku/labe | 2d784f418e24ab6fef9f76791c9fdd02dd505657 | [
"MIT"
] | 1 | 2021-09-16T10:51:00.000Z | 2021-09-16T10:51:00.000Z | """
Unit tests for labe. Most not mocked yet, hence slow.
"""
import collections
import socket
import pytest
import requests
from labe.oci import get_figshare_download_link, get_terminal_url
def no_internet(host="8.8.8.8", port=53, timeout=3):
    """Return True when no internet connection is available.

    Probes a TCP connection to ``host``:``port`` and reports the inverse
    of reachability.

    Host: 8.8.8.8 (google-public-dns-a.google.com)
    OpenPort: 53/tcp
    Service: domain (DNS/TCP)

    Parameters
    ----------
    host : str
        Host to probe (default: Google public DNS).
    port : int
        TCP port to probe (default: 53).
    timeout : float
        Connection timeout in seconds.

    Returns
    -------
    bool
        True if the connection attempt failed.
    """
    try:
        # Use an explicit per-connection timeout instead of mutating the
        # process-wide default (socket.setdefaulttimeout), and close the
        # socket deterministically via the context manager — the original
        # leaked the socket object.
        with socket.create_connection((host, port), timeout=timeout):
            return False
    except socket.error:
        return True
| 30.518519 | 88 | 0.662621 |
32d559b8ce0d7d1c7f26302620ef00f9255a82dc | 26,404 | py | Python | pyNastran/bdf/cards/test/test_dynamic.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | [
"BSD-3-Clause"
] | 293 | 2015-03-22T20:22:01.000Z | 2022-03-14T20:28:24.000Z | pyNastran/bdf/cards/test/test_dynamic.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | [
"BSD-3-Clause"
] | 512 | 2015-03-14T18:39:27.000Z | 2022-03-31T16:15:43.000Z | pyNastran/bdf/cards/test/test_dynamic.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | [
"BSD-3-Clause"
] | 136 | 2015-03-19T03:26:06.000Z | 2022-03-25T22:14:54.000Z | """tests dynamic cards and dynamic load cards"""
import unittest
from io import StringIO
import numpy as np
import pyNastran
from pyNastran.bdf.bdf import BDF, read_bdf, CrossReferenceError
from pyNastran.bdf.cards.test.utils import save_load_deck
#ROOT_PATH = pyNastran.__path__[0]
if __name__ == '__main__': # pragma: no cover
unittest.main()
| 34.069677 | 97 | 0.549879 |
32d6f22794e1af28d1b004461271504fb7680002 | 4,691 | py | Python | src/kv/benchmark/runbench.py | showapicxt/iowow | a29ac5b28f1b6c2817061c2a43b7222176458876 | [
"MIT"
] | 242 | 2015-08-13T06:38:10.000Z | 2022-03-17T13:49:56.000Z | src/kv/benchmark/runbench.py | showapicxt/iowow | a29ac5b28f1b6c2817061c2a43b7222176458876 | [
"MIT"
] | 44 | 2018-04-08T07:12:02.000Z | 2022-03-04T06:15:01.000Z | src/kv/benchmark/runbench.py | showapicxt/iowow | a29ac5b28f1b6c2817061c2a43b7222176458876 | [
"MIT"
] | 18 | 2016-01-14T09:50:34.000Z | 2022-01-26T23:07:40.000Z | import subprocess
import argparse
import os
import random
from collections import OrderedDict
from parse import parse
from bokeh.io import export_png
from bokeh.plotting import figure, output_file, show, save
from bokeh.models import ColumnDataSource, FactorRange
from bokeh.transform import factor_cmap
from bokeh.layouts import gridplot
from bokeh.embed import components
parser = argparse.ArgumentParser(description='IWKV Benchmarks')
parser.add_argument(
'-b', '--basedir', help='Base directory with benchmark executables', default='.', nargs='?')
args = parser.parse_args()
basedir = os.path.abspath(args.basedir)
print('Base directory:', basedir)
benchmarks = [
'iwkv',
'lmdb',
'bdb',
'wiredtiger',
'kyc',
'tc'
#'leveldb'
]
runs = []
runs += [{'b': 'fillrandom2', 'n': n, 'vz': vz, 'rs': 2853624176, 'sizestats': True}
for n in (int(1e6),)
for vz in (1000,)]
runs += [{'b': 'fillrandom2,readrandom,deleterandom', 'n': n, 'vz': vz, 'kz': kz, 'rs': 2105940112}
for n in (int(2e6),)
for vz in (40, 400,)
for kz in (16, 1024,)]
runs += [{'b': 'fillseq,overwrite,deleteseq', 'n': n, 'kz': kz, 'rs': 570078848}
for n in (int(2e6),)
for vz in (400,)
for kz in (16, 1024,)]
runs += [{'b': 'fillrandom2,readrandom,readseq,readreverse', 'n': n, 'vz': vz, 'rs': 1513135152}
for n in (int(10e6),)
for vz in (200,)]
runs += [{'b': 'fillrandom2', 'n': n, 'vz': vz, 'rs': 3434783568}
for n in (int(10e3),)
for vz in ((200 * 1024),)]
results = OrderedDict()
if __name__ == '__main__':
main()
| 31.273333 | 99 | 0.568322 |
32d7c7852b8b937ddf9034af3749422522ced7eb | 2,792 | py | Python | tests/utils/test_parser.py | ccechatelier/bcdi | cbe3b7960414b03f8e98336c3fcd7b367de441ca | [
"CECILL-B"
] | 18 | 2020-04-30T08:48:39.000Z | 2022-03-30T14:42:01.000Z | tests/utils/test_parser.py | ccechatelier/bcdi | cbe3b7960414b03f8e98336c3fcd7b367de441ca | [
"CECILL-B"
] | 78 | 2019-06-30T03:45:58.000Z | 2022-03-23T15:04:44.000Z | tests/utils/test_parser.py | ccechatelier/bcdi | cbe3b7960414b03f8e98336c3fcd7b367de441ca | [
"CECILL-B"
] | 16 | 2019-07-03T17:18:53.000Z | 2022-01-12T15:54:56.000Z | # -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-05/2021 : DESY PHOTON SCIENCE
# authors:
# Jerome Carnis, carnis_jerome@yahoo.fr
from pathlib import Path
import unittest
from bcdi.utils.parser import ConfigParser
here = Path(__file__).parent
CONFIG = str(here.parents[1] / "conf/config_postprocessing.yml")
if __name__ == "__main__":
run_tests(TestConfigParser)
| 34.469136 | 87 | 0.690544 |
32d936fc21c284d747f6a37882f102cf2a32a1e5 | 567 | py | Python | src/directory-starter/README_text.py | hannahweber244/directory-starter | 0cb12b6e9dfe9c3a6eb5029d7d0b6cb5da52b44b | [
"MIT"
] | null | null | null | src/directory-starter/README_text.py | hannahweber244/directory-starter | 0cb12b6e9dfe9c3a6eb5029d7d0b6cb5da52b44b | [
"MIT"
] | null | null | null | src/directory-starter/README_text.py | hannahweber244/directory-starter | 0cb12b6e9dfe9c3a6eb5029d7d0b6cb5da52b44b | [
"MIT"
] | null | null | null | """
# [REPO NAME]
## Table of contents
[Here you can use a table of contents to keep your README structured.]
## Overview
[Here you give a short overview over the motivation behind your project and what problem it solves.]
## How to use it
[Here you can explain how your tool/project is usable.]
### Requirements and dependencies
[If there are any requirements or dependencies to use what you developed, you can put those here.]
## Additional information
[Here you can include an overview over the structure of your code, additional information, tests etc.]
""" | 31.5 | 102 | 0.75485 |
32da7030ea8ed7c10970c252248ba50cc03bff1f | 152 | py | Python | kfdda/models/__init__.py | ll1l11/pymysql-test | de5747366bbf23ecb0b1f01059b3a69c8ac4936d | [
"MIT"
] | null | null | null | kfdda/models/__init__.py | ll1l11/pymysql-test | de5747366bbf23ecb0b1f01059b3a69c8ac4936d | [
"MIT"
] | null | null | null | kfdda/models/__init__.py | ll1l11/pymysql-test | de5747366bbf23ecb0b1f01059b3a69c8ac4936d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from ..core import db
from ..helpers import JSONSerializer
| 19 | 42 | 0.710526 |
32db89f97cc25f33ad056f8860c98d1fafd8baab | 2,652 | py | Python | chapt05/triangle.py | ohlogic/PythonOpenGLSuperBible4Glut | a0d01caaeb811002c191c28210268b5fcbb8b379 | [
"MIT"
] | null | null | null | chapt05/triangle.py | ohlogic/PythonOpenGLSuperBible4Glut | a0d01caaeb811002c191c28210268b5fcbb8b379 | [
"MIT"
] | null | null | null | chapt05/triangle.py | ohlogic/PythonOpenGLSuperBible4Glut | a0d01caaeb811002c191c28210268b5fcbb8b379 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# Demonstrates OpenGL color triangle
# Ben Smith
# benjamin.coder.smith@gmail.com
#
# based heavily on ccube.cpp
# OpenGL SuperBible
# Program by Richard S. Wright Jr.
import math
from OpenGL.GL import *
from OpenGL.GLUT import *
from OpenGL.GLU import *
ESCAPE = b'\033'
xRot = 0.0
yRot = 0.0
# Called to draw scene
# Main program entry point
if __name__ == '__main__':
glutInit()
glutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH)
glutInitWindowSize(640, 480)
glutInitWindowPosition(0, 0)
window = glutCreateWindow("RGB Triangle")
glutDisplayFunc(DrawGLScene)
# Uncomment this line to get full screen.
#glutFullScreen()
#glutIdleFunc(DrawGLScene)
#glutTimerFunc( int(1.0/60.0), update, 0)
glutReshapeFunc(ReSizeGLScene)
glutKeyboardFunc(keyPressed)
#glutSpecialFunc (specialkeyPressed);
# Initialize our window.
InitGL(640, 480)
# Start Event Processing Engine
glutMainLoop()
| 21.737705 | 83 | 0.562217 |
32e013bad1fb65c5a409199a8b804f1d0f72e07c | 1,379 | py | Python | sdpremote/utils/user.py | gudn/sdpremote | 431234420ea1e0c752432eac6000c11a75851375 | [
"MIT"
] | null | null | null | sdpremote/utils/user.py | gudn/sdpremote | 431234420ea1e0c752432eac6000c11a75851375 | [
"MIT"
] | null | null | null | sdpremote/utils/user.py | gudn/sdpremote | 431234420ea1e0c752432eac6000c11a75851375 | [
"MIT"
] | null | null | null | import binascii
from base64 import b64decode
from typing import Optional
from fastapi import Depends, Header, status
from fastapi.exceptions import HTTPException
| 26.018868 | 74 | 0.649021 |
32e2062c20d3f7d54552e963b99e3b7f219ffa2e | 19,175 | py | Python | ScreenTrainer.py | ZihaoChen0319/CMB-Segmentation | 99c5788baacc280ca5dbe02f3e18403e399fb238 | [
"Apache-2.0"
] | null | null | null | ScreenTrainer.py | ZihaoChen0319/CMB-Segmentation | 99c5788baacc280ca5dbe02f3e18403e399fb238 | [
"Apache-2.0"
] | null | null | null | ScreenTrainer.py | ZihaoChen0319/CMB-Segmentation | 99c5788baacc280ca5dbe02f3e18403e399fb238 | [
"Apache-2.0"
] | null | null | null | import torch.nn as nn
import os
import torch.optim as optim
from tqdm import tqdm
import numpy as np
import torch
import torch.nn.functional as nnf
import SimpleITK as sitk
import json
from scipy import ndimage
import medpy.io as mio
from Utils import find_binary_object
from MyDataloader import get_train_cases, get_cmbdataloader
from MyNetwork import ScreenNet
from MyLoss import FocalLoss
from PairwiseMeasures_modified import PairwiseMeasures
| 50.460526 | 140 | 0.557445 |
32e36a60281e09d72c79ad1807ea74035aa73e60 | 534 | py | Python | examples/earthquakes/main.py | admariner/beneath | a6aa2c220e4a646be792379528ae673f4bef440b | [
"MIT"
] | 65 | 2021-04-27T13:13:09.000Z | 2022-01-24T00:26:06.000Z | examples/earthquakes/main.py | admariner/beneath | a6aa2c220e4a646be792379528ae673f4bef440b | [
"MIT"
] | 22 | 2021-10-06T10:30:40.000Z | 2021-12-10T11:36:55.000Z | examples/earthquakes/main.py | admariner/beneath | a6aa2c220e4a646be792379528ae673f4bef440b | [
"MIT"
] | 4 | 2021-04-24T15:29:51.000Z | 2022-03-30T16:20:12.000Z | import beneath
from generators import earthquakes
with open("schemas/earthquake.graphql", "r") as file:
EARTHQUAKES_SCHEMA = file.read()
if __name__ == "__main__":
p = beneath.Pipeline(parse_args=True)
p.description = "Continually pings the USGS earthquake API"
earthquakes = p.generate(earthquakes.generate_earthquakes)
p.write_table(
earthquakes,
"earthquakes",
schema=EARTHQUAKES_SCHEMA,
description="Earthquakes fetched from https://earthquake.usgs.gov/",
)
p.main()
| 28.105263 | 76 | 0.700375 |
32e3ce811bff9ec736c02ce8188ebe9e69d6a483 | 5,073 | py | Python | examples/tf_vision/tensorflow_saved_model_service.py | siddharthgee/multi-model-server | bd795b402330b491edd5d2a235b8b8c2ef9fcb58 | [
"Apache-2.0"
] | null | null | null | examples/tf_vision/tensorflow_saved_model_service.py | siddharthgee/multi-model-server | bd795b402330b491edd5d2a235b8b8c2ef9fcb58 | [
"Apache-2.0"
] | null | null | null | examples/tf_vision/tensorflow_saved_model_service.py | siddharthgee/multi-model-server | bd795b402330b491edd5d2a235b8b8c2ef9fcb58 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
# http://www.apache.org/licenses/LICENSE-2.0
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""
TensorflowSavedModelService defines an API for running a tensorflow saved model
"""
import json
import os
import tensorflow as tf
from model_handler import ModelHandler
def check_input_shape(inputs, signature):
    """
    Check input data shape consistency with signature.

    Parameters
    ----------
    inputs : List of dicts
        Input data in this format [{input_name: input_tensor, input2_name: input2_tensor}, {...}]
        Each tensor must expose a ``.shape`` attribute (e.g. a numpy array).
    signature : dict
        Dictionary containing model signature; ``signature["inputs"]`` is a
        list of ``{"data_name": ..., "data_shape": ...}`` entries.

    Raises
    ------
    AssertionError
        If the number of inputs, the tensor rank, or any fixed dimension
        disagrees with the signature.
    """
    assert isinstance(inputs, list), 'Input data must be a list.'
    for input_dict in inputs:
        assert isinstance(input_dict, dict), 'Each request must be dict of input_name: input_tensor.'
        # Each request must provide exactly as many tensors as the signature declares.
        assert len(input_dict) == len(signature["inputs"]), \
            "Input number mismatches with " \
            "signature. %d expected but got %d." \
            % (len(signature['inputs']), len(input_dict))
        # NOTE(review): this pairs the dict's iteration order with the
        # signature order — it assumes callers build input_dict in signature
        # order; confirm against the calling handler.
        for tensor_name, sig_input in zip(input_dict, signature["inputs"]):
            # Rank (number of dimensions) must match the signature.
            assert len(input_dict[tensor_name].shape) == len(sig_input["data_shape"]), \
                'Shape dimension of input %s mismatches with ' \
                'signature. %d expected but got %d.' \
                % (sig_input['data_name'],
                   len(sig_input['data_shape']),
                   len(input_dict[tensor_name].shape))
            for idx in range(len(input_dict[tensor_name].shape)):
                # idx 0 (batch dimension) is never checked, and a 0 in the
                # signature marks a variable-size dimension — both are skipped.
                if idx != 0 and sig_input['data_shape'][idx] != 0:
                    assert sig_input['data_shape'][idx] == input_dict[tensor_name].shape[idx], \
                        'Input %s has different shape with ' \
                        'signature. %s expected but got %s.' \
                        % (sig_input['data_name'], sig_input['data_shape'],
                           input_dict[tensor_name].shape)
| 38.431818 | 118 | 0.640647 |
32e4f05624819cc83857abc3b6af4086f2c2a88e | 167 | py | Python | setup.py | kimballa/arduino-dbg | 639d73b6d96996218cf9aafde52f3683c9d93775 | [
"BSD-3-Clause"
] | null | null | null | setup.py | kimballa/arduino-dbg | 639d73b6d96996218cf9aafde52f3683c9d93775 | [
"BSD-3-Clause"
] | null | null | null | setup.py | kimballa/arduino-dbg | 639d73b6d96996218cf9aafde52f3683c9d93775 | [
"BSD-3-Clause"
] | null | null | null | # Minimal setup.py
#
# Enables installing requirements as declared in setup.cfg.
# From this directory, run:
# pip install .
from setuptools import setup
setup()
| 18.555556 | 59 | 0.736527 |
32e63e4af47da5e138ff28bb64adb55087df265e | 7,113 | py | Python | apps/etl/models.py | diudiu/featurefactory | ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e | [
"MIT"
] | null | null | null | apps/etl/models.py | diudiu/featurefactory | ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e | [
"MIT"
] | null | null | null | apps/etl/models.py | diudiu/featurefactory | ee02ad9e3ea66e2eeafe6e11859801f0420c7d9e | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
"""
common models
"""
from django.db import models
from apps.common.models import BaseModel
from apps.datasource.models import DsInterfaceInfo
| 39.082418 | 111 | 0.707578 |
32e861d95e4d1e621303b5ebac3624de50614805 | 4,007 | py | Python | mazegen/solver.py | alekratz/mazegen | 2799a5cf790cec4bab94a147315cc8541c5efec7 | [
"MIT"
] | null | null | null | mazegen/solver.py | alekratz/mazegen | 2799a5cf790cec4bab94a147315cc8541c5efec7 | [
"MIT"
] | null | null | null | mazegen/solver.py | alekratz/mazegen | 2799a5cf790cec4bab94a147315cc8541c5efec7 | [
"MIT"
] | null | null | null | import random
from typing import Optional
from .grid import *
| 32.056 | 96 | 0.520839 |
32e9f9206385a627a8ad3b33526b3f3d199fd0d3 | 78 | py | Python | practice.py | dajimmy1120/AvatarGAN | be264914223490ee9c23e59ad5a414da1aef4824 | [
"Apache-2.0"
] | null | null | null | practice.py | dajimmy1120/AvatarGAN | be264914223490ee9c23e59ad5a414da1aef4824 | [
"Apache-2.0"
] | null | null | null | practice.py | dajimmy1120/AvatarGAN | be264914223490ee9c23e59ad5a414da1aef4824 | [
"Apache-2.0"
] | null | null | null | from keras_segmentation.pretrained import pspnet_101_voc12
pspnet_101_voc12() | 26 | 58 | 0.897436 |
32ea368fa5ba2732d1c51618d8edfc516b6eb773 | 1,224 | py | Python | example/RunModel/Abaqus_Model_Example/process_odb.py | volpatto/UQpy | acbe1d6e655e98917f56b324f019881ea9ccca82 | [
"MIT"
] | null | null | null | example/RunModel/Abaqus_Model_Example/process_odb.py | volpatto/UQpy | acbe1d6e655e98917f56b324f019881ea9ccca82 | [
"MIT"
] | null | null | null | example/RunModel/Abaqus_Model_Example/process_odb.py | volpatto/UQpy | acbe1d6e655e98917f56b324f019881ea9ccca82 | [
"MIT"
] | null | null | null | from odbAccess import *
from abaqusConstants import *
from textRepr import *
import timeit
import numpy as np
import os
import sys
import errno  # BUG FIX: errno was referenced below but never imported

# Post-process an Abaqus ODB: extract the LE22/S22 history at the first
# integration point of a single-element simulation and dump it to CSV.
# (openOdb comes from the odbAccess star-import at the top of the file.)

start_time = timeit.default_timer()

# The job index arrives as the last command-line argument.
index = int(sys.argv[-1])

odbFile = os.path.join(os.getcwd(), "single_element_simulation_" + str(index) + ".odb")
odb = openOdb(path=odbFile)
step1 = odb.steps.values()[0]

# History output at the element's first integration point.
his_key = 'Element PART-1-1.1 Int Point 1 Section Point 1'
region = step1.historyRegions[his_key]
LE22 = region.historyOutputs['LE22'].data  # logarithmic strain history
S22 = region.historyOutputs['S22'].data    # stress history

# Each history entry is (time, value); keep only the values.
x = np.array(LE22)[:, 1]
y = np.array(S22)[:, 1]

fnm = os.path.join(os.getcwd(), 'Output', 'output_element_{0}.csv'.format(index))
if not os.path.exists(os.path.dirname(fnm)):
    try:
        os.makedirs(os.path.dirname(fnm))
    except OSError as exc:  # Guard against race condition
        if exc.errno != errno.EEXIST:
            raise

# Ensure the file is closed even if a write fails.
output_file = open(fnm, 'wb')
try:
    for k in range(len(x)):
        output_file.write('%13.6e, %13.6e\n' % (x[k], y[k]))
finally:
    output_file.close()

elapsed = timeit.default_timer() - start_time
print('Finished running odb_process_script. It took ' + str(elapsed) + ' s to run.')
32eaa0a294af2308ff208fed9c050fd370b31fec | 8,526 | py | Python | analysis_methods/shuff_time.py | gbrookshire/simulated_rhythmic_sampling | 5c9ed507847a75dbe38d10d78b54441ae83f5831 | [
"MIT"
] | null | null | null | analysis_methods/shuff_time.py | gbrookshire/simulated_rhythmic_sampling | 5c9ed507847a75dbe38d10d78b54441ae83f5831 | [
"MIT"
] | null | null | null | analysis_methods/shuff_time.py | gbrookshire/simulated_rhythmic_sampling | 5c9ed507847a75dbe38d10d78b54441ae83f5831 | [
"MIT"
] | null | null | null | """
Tools to perform analyses by shuffling in time, as in Landau & Fries (2012) and
Fiebelkorn et al. (2013).
"""
import os
import yaml
import numpy as np
import statsmodels.api as sm
from statsmodels.stats.multitest import multipletests
from .utils import avg_repeated_timepoints, dft
# Load the details of the behavioral studies
_pathname = os.path.dirname(os.path.abspath(__file__))
_behav_fname = os.path.join(_pathname, '../behav_details.yaml')
behav_details = yaml.safe_load(open(_behav_fname))
def landau(x, t, fs, k_perm):
    """Run the Landau & Fries (2012) analysis on trial-wise accuracy.

    Parameters
    ----------
    x : np.ndarray
        Hit (1) or Miss (0) for each trial.
    t : np.ndarray
        Time-stamp (SOA) of each trial.
    fs : float
        Sampling rate of the aggregated accuracy time-series.
    k_perm : int
        Number of random shuffles for the permutation test.

    Returns
    -------
    res : dict
        Output of `time_shuffled_perm`, plus these items:
        t : np.ndarray
            The time-stamps of the individual trials.
        t_agg : np.ndarray
            The time-steps for the aggregated accuracy time-series.
        x_agg : np.ndarray
            The aggregated accuracy time-series.
        p_corr : np.ndarray
            P-values Bonferroni-corrected for multiple comparisons.
    """
    def spectrum_of_trials(trial_vals):
        # Aggregate repeated SOAs, then take the Landau-style spectrum.
        _, acc = avg_repeated_timepoints(t, trial_vals)
        return landau_spectrum(acc, fs)

    results = time_shuffled_perm(spectrum_of_trials, x, k_perm)
    results['t'] = t
    results['t_agg'], results['x_agg'] = avg_repeated_timepoints(t, x)
    # Bonferroni correction across frequencies
    results['p_corr'] = multipletests(results['p'], method='bonferroni')[1]
    return results
def landau_spectrum(x, fs, detrend_ord=1):
    """Amplitude spectrum of behavioral data as in Landau & Fries (2012).

    The series is polynomial-detrended (linear by default — per A.L., the
    authors used linear detrending; without it a peak at freq=0 would
    appear from mean accuracy being above 0), Hann-windowed, and padded to
    the study's NFFT before the DFT, matching the frequency resolution of
    the paper's plots.

    Parameters
    ----------
    x : np.ndarray
        The data time-series.
    fs : float
        Sampling rate of ``x``.
    detrend_ord : int
        Order of the polynomial detrend (1 = linear).

    Returns
    -------
    f : np.ndarray
        The frequencies of the amplitude spectrum.
    y : np.ndarray
        The amplitude spectrum.
    """
    nfft = behav_details['landau']['nfft']
    detrended = sm.tsa.tsatools.detrend(x, order=detrend_ord)
    tapered = window(detrended, np.hanning(len(detrended)))
    return dft(tapered, fs, nfft)
def fiebelkorn(x, t, k_perm):
    """Search for significant behavioral oscillations (Fiebelkorn et al. 2013).

    Parameters
    ----------
    x : np.ndarray
        Accuracy (Hit: 1, Miss: 0) for each trial.
    t : np.ndarray
        The time-stamp of each trial.
    k_perm : int
        Number of random shuffles for the permuted surrogate distribution.

    Returns
    -------
    res : dict
        Output of `time_shuffled_perm`, plus these items:
        t : np.ndarray
            The original time-stamps of the raw data.
        p_corr : np.ndarray
            P-values per frequency, FDR-corrected for multiple comparisons.
    """
    def spectrum_fnc(acc):
        return fiebelkorn_spectrum(acc, t)

    results = time_shuffled_perm(spectrum_fnc, x, k_perm)
    results['t'] = t
    # FDR (Benjamini-Hochberg) correction across frequencies
    results['p_corr'] = multipletests(results['p'], method='fdr_bh')[1]
    return results
def fiebelkorn_binning(x_trial, t_trial):
    """Time-smoothed average accuracy, as in Fiebelkorn et al. (2013).

    Averages accuracy inside a sliding window centered on each time bin.

    Parameters
    ----------
    x_trial : np.ndarray
        Accuracy (Hit: 1, Miss: 0) of each trial.
    t_trial : np.ndarray
        The time-stamp of each trial.

    Returns
    -------
    x_bin : np.ndarray
        The average accuracy within each time bin.
    t_bin : np.ndarray
        The centers of each time bin.
    """
    details = behav_details['fiebelkorn']
    half_width = details['bin_width'] / 2
    # Bin centers; the small epsilon keeps the last bin inside the range.
    t_bin = np.arange(details['t_start'],
                      details['t_end'] + 1e-10,
                      details['bin_step'])
    # Mean accuracy over the trials whose time-stamps fall inside each
    # (inclusive) window around the bin center.
    x_bin = np.array([
        np.mean(x_trial[(center - half_width <= t_trial)
                        & (t_trial <= center + half_width)])
        for center in t_bin
    ])
    return x_bin, t_bin
def fiebelkorn_spectrum(x, t):
    """Spectrum of accuracy data as in Fiebelkorn et al. (2013).

    Pipeline: sliding-window binning -> 2nd-order polynomial detrend ->
    Hann window -> DFT, keeping only the frequencies reported in the paper.

    Parameters
    ----------
    x : np.ndarray
        The data for each trial.
    t : np.ndarray
        The time-stamp for each trial.

    Returns
    -------
    f : np.ndarray
        The frequencies of the resulting spectrum.
    y : np.ndarray
        The amplitude spectrum.
    """
    details = behav_details['fiebelkorn']
    # Moving average of accuracy over the paper's time bins.
    binned, _ = fiebelkorn_binning(x, t)
    binned = sm.tsa.tsatools.detrend(binned, order=2)
    binned = window(binned, np.hanning(len(binned)))
    f, y = dft(binned, 1 / details['bin_step'], details['nfft'])
    # Restrict to the frequency range reported in the paper.
    keep = f <= details['f_max']
    return f[keep], y[keep]
def time_shuffled_perm(analysis_fnc, x, k_perm):
    """Permutation test that shuffles the time-stamps of individual trials.

    Parameters
    ----------
    analysis_fnc : callable
        Function used to generate the spectrum; returns ``(f, y)``.
    x : np.ndarray
        The data time-series.
    k_perm : int
        How many permutations to run.

    Returns
    -------
    res : dict
        x : np.ndarray
            The raw data.
        x_perm : np.ndarray
            A few of the shuffled series (up to 10), for illustration.
        f : np.ndarray
            The frequencies of the resulting spectrum.
        y_emp : np.ndarray
            The spectrum of the empirical (unshuffled) data.
        y_perm : np.ndarray
            The spectra of the shuffled permutations.
        y_avg : np.ndarray
            Mean of the permuted spectra.
        y_cis : np.ndarray
            Percentiles of the spectra at 2.5, 95, and 97.5.
        p : np.ndarray
            P-values (uncorrected) for each frequency.
    """
    freqs, emp_spectrum = analysis_fnc(x)

    # Surrogate distribution: repeatedly shuffle a copy of the data in
    # time and recompute the spectrum. The shuffles accumulate in place.
    kept_shuffles = []   # a handful of examples, for illustration
    perm_spectra = []
    shuffled = x.copy()
    for i_perm in range(k_perm):
        np.random.shuffle(shuffled)
        perm_spectra.append(analysis_fnc(shuffled)[1])
        if i_perm < 10:
            kept_shuffles.append(shuffled.copy())

    # Proportion of (permuted + empirical) spectra exceeding the empirical
    # spectrum. p can be 0 when no permutation is larger; in that case a
    # Bonferroni correction leaves the p-values unchanged.
    p = np.mean(np.vstack([perm_spectra, emp_spectrum]) > emp_spectrum, axis=0)

    # NOTE(review): mean/percentile are taken over axis 1 (across
    # frequencies within each permutation), matching the original code —
    # confirm the intended axis against the plotting code.
    return {
        'x': x,
        'x_perm': np.array(kept_shuffles),
        'f': freqs,
        'y_emp': emp_spectrum,
        'y_perm': np.array(perm_spectra),
        'y_avg': np.mean(perm_spectra, 1),
        'y_cis': np.percentile(perm_spectra, [2.5, 95, 97.5], 1),
        'p': p,
    }
def window(x, win):
    """Apply a taper window to a segment of data.

    Multiplies along the first axis of ``x``, so a 1-D window can taper a
    plain 1-D series or each column of a 2-D array.

    Parameters
    ----------
    x : np.ndarray
        The data.
    win : np.ndarray
        The window; its length matches the first axis of ``x``.

    Returns
    -------
    x : np.ndarray
        The windowed data.
    """
    return (x.T * win).T
| 29 | 79 | 0.62327 |
32eb29b8500dc60a31bfc242ef317ed9ccbd65b5 | 1,411 | py | Python | configs/common/ARM_A7.py | baz21/g5 | e81b0df094c5ff80fbbcc37618e81e206a3c9de9 | [
"BSD-3-Clause"
] | null | null | null | configs/common/ARM_A7.py | baz21/g5 | e81b0df094c5ff80fbbcc37618e81e206a3c9de9 | [
"BSD-3-Clause"
] | null | null | null | configs/common/ARM_A7.py | baz21/g5 | e81b0df094c5ff80fbbcc37618e81e206a3c9de9 | [
"BSD-3-Clause"
] | null | null | null |
from m5.objects import *
# https://en.wikipedia.org/wiki/Raspberry_Pi
# https://en.wikipedia.org/wiki/ARM_Cortex-A7
# Instruction Cache
# Data Cache
# L2 Cache
# L3 Cache, NONE
# TLB Cache, NONE
# end
| 20.75 | 57 | 0.649894 |
32ebbb19735d64f55f4b8caaf8724aa49e1ddf29 | 172 | py | Python | webapp/models/__init__.py | xaldey/otus_blog | 32600506d447c0b76c7e0323389d17428d197181 | [
"Apache-2.0"
] | null | null | null | webapp/models/__init__.py | xaldey/otus_blog | 32600506d447c0b76c7e0323389d17428d197181 | [
"Apache-2.0"
] | null | null | null | webapp/models/__init__.py | xaldey/otus_blog | 32600506d447c0b76c7e0323389d17428d197181 | [
"Apache-2.0"
] | null | null | null | from .create_db import Session, engine, Base
from .models import User, Post, Tag
__all__ = [
"Session",
"engine",
"Base",
"User",
"Post",
"Tag",
]
| 14.333333 | 44 | 0.569767 |
32ef88405f3f3c3db42531c5dfa16c38dbb4d202 | 1,405 | py | Python | Easy/112.PathSum.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | [
"MIT"
] | 39 | 2020-07-04T11:15:13.000Z | 2022-02-04T22:33:42.000Z | Easy/112.PathSum.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | [
"MIT"
] | 1 | 2020-07-15T11:53:37.000Z | 2020-07-15T11:53:37.000Z | Easy/112.PathSum.py | YuriSpiridonov/LeetCode | 2dfcc9c71466ffa2ebc1c89e461ddfca92e2e781 | [
"MIT"
] | 20 | 2020-07-14T19:12:53.000Z | 2022-03-02T06:28:17.000Z | """
Given a binary tree and a sum, determine if the tree has a root-to-leaf path
such that adding up all the values along the path equals the given sum.
Note: A leaf is a node with no children.
Example:
Given the below binary tree and sum = 22,
5
/ \
4 8
/ / \
11 13 4
/ \ \
7 2 1
return true, as there exists a root-to-leaf path 5->4->11->2 whose sum is 22.
"""
#Difficulty: Easy
#114 / 114 test cases passed.
#Runtime: 44 ms
#Memory Usage: 15.6 MB
#Runtime: 44 ms, faster than 72.99% of Python3 online submissions for Path Sum.
#Memory Usage: 15.6 MB, less than 43.57% of Python3 online submissions for Path Sum.
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
| 28.673469 | 84 | 0.577936 |
32f125ad1d76b4e0fde9ddfeb972aeb7353e40c7 | 42 | py | Python | downloads.py | Jamal135/fine-grained-sentiment-app | 4754cefd77ccfa99b15a7721c3471aeacec650c9 | [
"MIT"
] | null | null | null | downloads.py | Jamal135/fine-grained-sentiment-app | 4754cefd77ccfa99b15a7721c3471aeacec650c9 | [
"MIT"
] | null | null | null | downloads.py | Jamal135/fine-grained-sentiment-app | 4754cefd77ccfa99b15a7721c3471aeacec650c9 | [
"MIT"
] | null | null | null | import nltk
nltk.download('vader_lexicon') | 21 | 30 | 0.833333 |
32f16560a7eafdb17a4951c61d182a0eaa97e4e4 | 880 | py | Python | src/Tools/Button.py | hieuhdh/Multi-tasking-program | 2f064a554f647247c84979b7a27f0797d1e1b5af | [
"MIT"
] | null | null | null | src/Tools/Button.py | hieuhdh/Multi-tasking-program | 2f064a554f647247c84979b7a27f0797d1e1b5af | [
"MIT"
] | null | null | null | src/Tools/Button.py | hieuhdh/Multi-tasking-program | 2f064a554f647247c84979b7a27f0797d1e1b5af | [
"MIT"
] | null | null | null | from tkinter import*
from tkinter import Button, font
from tkinter.font import BOLD
import tkinter.ttk as ttk
from tkhtmlview import HTMLLabel
from tkhtmlview import HTMLText
| 40 | 303 | 0.659091 |
32f6cfa5b601a97d41e10a68ea610b54a023b9f0 | 864 | py | Python | src/test.py | ayieko168/Arduino-Oscilloscope | 5a0634437010f4303c86aef141f33cc6a628b3dc | [
"MIT"
] | null | null | null | src/test.py | ayieko168/Arduino-Oscilloscope | 5a0634437010f4303c86aef141f33cc6a628b3dc | [
"MIT"
] | null | null | null | src/test.py | ayieko168/Arduino-Oscilloscope | 5a0634437010f4303c86aef141f33cc6a628b3dc | [
"MIT"
] | null | null | null | import pyqtgraph as pg
import pyqtgraph.exporters
import numpy as np
import math
from time import sleep
f = 10
t = 0
Samples = 1000
# while True:
# y2 = np.sin( 2* np.pi * f * t)
# print(y)
# t+=0.01
# sleep(0.25)
# define the data
theTitle = "pyqtgraph plot"
y2 = []
# create plot
plt = pg.plot()
plt.showGrid(x=True,y=True)
dat2 = []
c2 = pg.PlotCurveItem(dat2)
plt.addItem(c2)
timer = pg.QtCore.QTimer ()
timer.timeout.connect(update)
timer.start(0.1)
## Start Qt event loop.
if __name__ == '__main__':
import sys
if sys.flags.interactive != 1 or not hasattr(pg.QtCore, 'PYQT_VERSION'):
pg.QtGui.QApplication.exec_() | 16.941176 | 76 | 0.618056 |
32f73e3a96427c84bfa7bd842e7e9ab6eeb893b6 | 931 | py | Python | Aula07/Exercicio2.py | PabloSchumacher/TrabalhosPython | 828edd35eb40442629211bc9f1477f75fb025d74 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | Aula07/Exercicio2.py | PabloSchumacher/TrabalhosPython | 828edd35eb40442629211bc9f1477f75fb025d74 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | Aula07/Exercicio2.py | PabloSchumacher/TrabalhosPython | 828edd35eb40442629211bc9f1477f75fb025d74 | [
"bzip2-1.0.6",
"MIT"
] | null | null | null | #--- Exercicio 2 - Dicionrios
#--- Escreva um programa que leia os dados de 11 jogadores
#--- Jogador: Nome, Posicao, Numero, PernaBoa
#--- Crie um dicionario para armazenar os dados
#--- Imprima todos os jogadores e seus dados
lista_jogador = []
for i in range(0,11):
dicionario_jogador = {'Nome':'', 'Posicao':'', 'Numero':'','Pernaboa':''}
dicionario_jogador['Nome'] = input(f'Digite o nome do {i+1} jogador: ')
dicionario_jogador['Posicao'] = input(f'Digite a posio do {i+1} jogador: ')
dicionario_jogador['Numero'] = int(input(f'Digite o nmero do {i+1} jogador: '))
dicionario_jogador['Pernaboa'] = input(f'Digite o perna boa do {i+1} jogador: ')
lista_jogador.append(dicionario_jogador)
for j in lista_jogador:
print(f"Nome: {dicionario_jogador['Nome']} - Posio: {dicionario_jogador['Posicao']} - Numero: {dicionario_jogador['Numero']} - Pernaboa: {dicionario_jogador['Pernaboa']}")
| 46.55 | 177 | 0.688507 |
32f8e7bf61b54b514d134bdb102d258bdc2af2ce | 669 | py | Python | Tiny ImageNet Challenge/train_data.py | Vishal-V/Mastering-TensorFlow-2.x | 83e18cf84dc5c391c5f902978ee5a80e1be4a31d | [
"MIT"
] | 3 | 2020-05-15T16:57:39.000Z | 2020-09-16T20:53:58.000Z | Tiny ImageNet Challenge/train_data.py | Vishal-V/Mastering-Tensorflow | 83e18cf84dc5c391c5f902978ee5a80e1be4a31d | [
"MIT"
] | null | null | null | Tiny ImageNet Challenge/train_data.py | Vishal-V/Mastering-Tensorflow | 83e18cf84dc5c391c5f902978ee5a80e1be4a31d | [
"MIT"
] | 4 | 2020-03-30T16:11:41.000Z | 2020-09-15T20:28:27.000Z | # Iterate over epochs.
for epoch in range(3):
print(f'Epoch {epoch+1}')
# Iterate over the batches of the dataset.
for step, x_batch_train in enumerate(train_data):
with tf.GradientTape() as tape:
reconstructed = autoencoder(x_batch_train)
# Compute reconstruction loss
loss = mse_loss(x_batch_train, reconstructed)
#loss += sum(autoencoder.losses) # Add KLD regularization loss
grads = tape.gradient(loss, autoencoder.trainable_variables)
optimizer.apply_gradients(zip(grads, autoencoder.trainable_variables))
loss_metric(loss)
if step % 100 == 0:
print(f'Step {step}: mean loss = {loss_metric.result()}') | 35.210526 | 74 | 0.707025 |
32f92084cffe12b7f31fc3604eb9852e4502b8d7 | 1,422 | py | Python | utils/generate_topics.py | ahoho/scholar | fe1b7ba590563e245e7765d100cfff091ba20c54 | [
"Apache-2.0"
] | null | null | null | utils/generate_topics.py | ahoho/scholar | fe1b7ba590563e245e7765d100cfff091ba20c54 | [
"Apache-2.0"
] | null | null | null | utils/generate_topics.py | ahoho/scholar | fe1b7ba590563e245e7765d100cfff091ba20c54 | [
"Apache-2.0"
] | null | null | null | ################################################################
# Generate top-N words for topics, one per line, to stdout
################################################################
import os
import sys
import argparse
import numpy as np
import file_handling as fh
if __name__ == "__main__":
main()
| 25.392857 | 70 | 0.563291 |
32fc43425ea47a93c10fa87eeeea81ca0922ca0c | 918 | py | Python | AutomateboringStuff/3. Functions/try_nd_except.py | gabriel-marchetti/Exercicios-Python | 0f1eac7eee48081cf899d25bed0ec5dbc70a3542 | [
"MIT"
] | 2 | 2021-12-21T23:28:02.000Z | 2021-12-21T23:28:03.000Z | AutomateboringStuff/3. Functions/try_nd_except.py | gabriel-marchetti/Exercicios-Python | 0f1eac7eee48081cf899d25bed0ec5dbc70a3542 | [
"MIT"
] | 1 | 2021-12-22T12:05:11.000Z | 2021-12-22T13:02:52.000Z | AutomateboringStuff/3. Functions/try_nd_except.py | gabriel-marchetti/Exercicios-Python | 0f1eac7eee48081cf899d25bed0ec5dbc70a3542 | [
"MIT"
] | null | null | null | # Quando tivermos um programa onde claramente temos um caso
# indesejvel, ento podemos usar a funo do python dita
# try_and_except.
# Vamos supor que desejamos fazer uma funo que faa uma
# diviso, ento podemos fazer a seguinte estrutura de
# cdigo
# veja que nesse caso, se dermos o argumento zero, ento
# iremos ganhar um erro no terminal
# por conta disso existem dois mtodos que podemos usar
# para resolver esse caso
print(spam(2))
print(spam(12))
print(spam(0))
print(spam(1))
# Outro modo de escrevermos atravs de:
try:
print(spam(2))
print(spam(12))
print(spam(0))
print(spam(1))
except ZeroDivisionError:
print('Error: Invalid argument.')
| 20.863636 | 59 | 0.713508 |
32fcb908b2dfd2baf6aec8baabfb5d1f269220d0 | 1,577 | py | Python | src/plyer_lach/platforms/android/email.py | locksmith47/turing-sim-kivy | f57de9d52494245c56f67dd7e63121434bb0553f | [
"MIT"
] | null | null | null | src/plyer_lach/platforms/android/email.py | locksmith47/turing-sim-kivy | f57de9d52494245c56f67dd7e63121434bb0553f | [
"MIT"
] | null | null | null | src/plyer_lach/platforms/android/email.py | locksmith47/turing-sim-kivy | f57de9d52494245c56f67dd7e63121434bb0553f | [
"MIT"
] | null | null | null | from jnius import autoclass, cast
from kivy.logger import Logger
from plyer_lach.facades import Email
from plyer_lach.platforms.android import activity
Intent = autoclass('android.content.Intent')
AndroidString = autoclass('java.lang.String')
URI = autoclass('android.net.Uri')
| 36.674419 | 89 | 0.606848 |
32ff2b91e7cdacd12f1c52a76ec14a6214fafa45 | 452 | py | Python | main.py | rishi-chauhan/sudoku | 2b07954b2f3ab5146ab0f96eb4d0509a3ea45eb2 | [
"MIT"
] | null | null | null | main.py | rishi-chauhan/sudoku | 2b07954b2f3ab5146ab0f96eb4d0509a3ea45eb2 | [
"MIT"
] | null | null | null | main.py | rishi-chauhan/sudoku | 2b07954b2f3ab5146ab0f96eb4d0509a3ea45eb2 | [
"MIT"
] | null | null | null | """Main class for sudoku game. Run this to solve the game."""
from board import Board
# ENTRIES contains the values of each cell
ENTRIES = [0, 0, 0, 2, 6, 0, 7, 0, 1, 6, 8, 0, 0, 7, 0, 0, 9, 0, 1,
9, 0, 0, 0, 4, 5, 0, 0, 8, 2, 0, 1, 0, 0, 0, 4, 0, 0,
0, 4, 6, 0, 2, 9, 0, 0, 0, 5, 0, 0, 0, 3, 0, 2, 8, 0,
0, 9, 3, 0, 0, 0, 7, 4, 0, 4, 0, 0, 5, 0, 0, 3, 6, 7,
0, 3, 0, 1, 8, 0, 0, 0]
board = Board(ENTRIES)
| 37.666667 | 67 | 0.446903 |
fd00768ed39187f9b978abbf6c4d123c662329a9 | 121 | py | Python | fuzzer/fuzzing_strategies/base_strategy/base_strategy.py | Dyfox100/Libstemmer_Fuzzer | 263d6e64e007116a348d994851aa05e4c0c35358 | [
"MIT"
] | null | null | null | fuzzer/fuzzing_strategies/base_strategy/base_strategy.py | Dyfox100/Libstemmer_Fuzzer | 263d6e64e007116a348d994851aa05e4c0c35358 | [
"MIT"
] | null | null | null | fuzzer/fuzzing_strategies/base_strategy/base_strategy.py | Dyfox100/Libstemmer_Fuzzer | 263d6e64e007116a348d994851aa05e4c0c35358 | [
"MIT"
] | null | null | null | import abc
| 17.285714 | 47 | 0.710744 |
fd009473c74aa4ae5995e6b6bc84914f1edd33ca | 2,215 | py | Python | netbox/dcim/migrations/0100_application.py | fireman0865/PingBox | 0f00eaf88b88e9441fffd5173a1501e56c13db03 | [
"Apache-2.0"
] | 1 | 2021-09-23T00:06:51.000Z | 2021-09-23T00:06:51.000Z | netbox/dcim/migrations/0100_application.py | fireman0865/PingBox | 0f00eaf88b88e9441fffd5173a1501e56c13db03 | [
"Apache-2.0"
] | 2 | 2021-06-08T21:05:10.000Z | 2021-09-08T01:46:58.000Z | netbox/dcim/migrations/0100_application.py | fireman0865/PingBox | 0f00eaf88b88e9441fffd5173a1501e56c13db03 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.2.10 on 2020-03-04 09:21
from django.db import migrations, models
import django.db.models.deletion
| 55.375 | 219 | 0.628442 |
fd00db8ee275e84aadc9a08c115a590eab1c8a65 | 1,934 | py | Python | pam_notify.py | aNNufriy/pamNotifier | 088ec0cb87c026a0fbc8e6275fc891bf653af645 | [
"MIT"
] | 1 | 2020-03-21T21:37:57.000Z | 2020-03-21T21:37:57.000Z | pam_notify.py | aNNufriy/pamNotifier | 088ec0cb87c026a0fbc8e6275fc891bf653af645 | [
"MIT"
] | null | null | null | pam_notify.py | aNNufriy/pamNotifier | 088ec0cb87c026a0fbc8e6275fc891bf653af645 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import smtplib
import time
import syslog
import telegram
import yaml
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
# Author:: Alexander Schedrov (schedrow@gmail.com)
# Copyright:: Copyright (c) 2019 Alexander Schedrov
# License:: MIT
| 33.344828 | 97 | 0.635471 |
fd032c799cd2f082ede61113614415437237b7bc | 40,263 | py | Python | src/eventail/async_service/pika/base.py | allo-media/eventail | aed718d733709f1a522fbfec7083ddd8ed7b5039 | [
"MIT"
] | 2 | 2019-12-12T15:08:25.000Z | 2020-05-19T08:52:06.000Z | src/eventail/async_service/pika/base.py | allo-media/eventail | aed718d733709f1a522fbfec7083ddd8ed7b5039 | [
"MIT"
] | 10 | 2021-01-19T15:03:51.000Z | 2022-03-08T15:48:22.000Z | src/eventail/async_service/pika/base.py | allo-media/eventail | aed718d733709f1a522fbfec7083ddd8ed7b5039 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# MIT License
#
# Copyright (c) 2018-2019 Groupe Allo-Media
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
"""
A base class implementing AM service architecture and its requirements.
Inspired from pika complete examples.
"""
import functools
import json
import logging
import os
import signal
import socket
import traceback
from contextlib import contextmanager
from typing import Any, Callable, Dict, Generator, List, Optional, Sequence, Tuple
import cbor
import pika
from eventail.gelf import GELF
from eventail.log_criticity import ALERT, EMERGENCY, ERROR, WARNING
LOGGER = logging.getLogger("async_service")
JSON_MODEL = Dict[str, Any]
HEADER = Dict[str, str]
| 39.014535 | 108 | 0.634304 |
fd0394b6bd7363e7ed4aa89ca0603954bd731b42 | 889 | py | Python | CLI/mainmenue.py | MeatBoyed/PasswordBank2 | f4367b22902ce1282772b184899e3d6e899c1cca | [
"MIT"
] | 1 | 2021-02-08T17:45:28.000Z | 2021-02-08T17:45:28.000Z | CLI/mainmenue.py | MeatBoyed/PasswordBank2 | f4367b22902ce1282772b184899e3d6e899c1cca | [
"MIT"
] | null | null | null | CLI/mainmenue.py | MeatBoyed/PasswordBank2 | f4367b22902ce1282772b184899e3d6e899c1cca | [
"MIT"
] | null | null | null | from .mock_api.utils import GetSelection
from .viewAccounts import ViewAccounts
from .addAccount import AddAccount
| 26.939394 | 137 | 0.418448 |
fd03c109230a47c1540cdcf65dcdedac9302a120 | 7,342 | py | Python | dataset.py | Intelligent-Computing-Lab-Yale/Energy-Separation-Training | 9336862a10c915a482d427e8a36367f648e7dd40 | [
"MIT"
] | 2 | 2022-03-31T02:36:52.000Z | 2022-03-31T06:13:25.000Z | dataset.py | Intelligent-Computing-Lab-Yale/Energy-Separation-Training | 9336862a10c915a482d427e8a36367f648e7dd40 | [
"MIT"
] | null | null | null | dataset.py | Intelligent-Computing-Lab-Yale/Energy-Separation-Training | 9336862a10c915a482d427e8a36367f648e7dd40 | [
"MIT"
] | null | null | null | import torch
import torchvision
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
import os
| 40.120219 | 96 | 0.578725 |
fd04dad88b99035b710b66d225ec5a6739f0249b | 25,604 | py | Python | tests/st/ops/cpu/test_scatter_arithmetic_op.py | PowerOlive/mindspore | bda20724a94113cedd12c3ed9083141012da1f15 | [
"Apache-2.0"
] | 3,200 | 2020-02-17T12:45:41.000Z | 2022-03-31T20:21:16.000Z | tests/st/ops/cpu/test_scatter_arithmetic_op.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 176 | 2020-02-12T02:52:11.000Z | 2022-03-28T22:15:55.000Z | tests/st/ops/cpu/test_scatter_arithmetic_op.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | 621 | 2020-03-09T01:31:41.000Z | 2022-03-30T03:43:19.000Z | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore.ops import operations as P
context.set_context(mode=context.GRAPH_MODE, device_target="CPU")
class TestScatterSubNet(nn.Cell):
def __init__(self, lock, inputx, indices, updates):
super(TestScatterSubNet, self).__init__()
self.scatter_sub = P.ScatterSub(use_locking=lock)
self.inputx = Parameter(inputx, name="inputx")
self.indices = Parameter(indices, name="indices")
self.updates = Parameter(updates, name="updates")
def construct(self):
out = self.scatter_sub(self.inputx, self.indices, self.updates)
return out
def scatter_sub_net(inputx, indices, updates):
lock = True
net = TestScatterSubNet(lock, inputx, indices, updates)
return net()
def scatter_sub_use_locking_false_net(inputx, indices, updates):
lock = False
net = TestScatterSubNet(lock, inputx, indices, updates)
return net()
class TestScatterMulNet(nn.Cell):
def scatter_mul_net(inputx, indices, updates):
lock = True
net = TestScatterMulNet(lock, inputx, indices, updates)
return net()
def scatter_mul_use_locking_false_net(inputx, indices, updates):
lock = False
net = TestScatterMulNet(lock, inputx, indices, updates)
return net()
class TestScatterDivNet(nn.Cell):
def scatter_div_net(inputx, indices, updates):
lock = True
net = TestScatterDivNet(lock, inputx, indices, updates)
return net()
def scatter_div_use_locking_false_net(inputx, indices, updates):
lock = False
net = TestScatterDivNet(lock, inputx, indices, updates)
return net()
class TestScatterMaxNet(nn.Cell):
def scatter_max_net(inputx, indices, updates):
lock = True
net = TestScatterMaxNet(lock, inputx, indices, updates)
return net()
def scatter_max_use_locking_false_net(inputx, indices, updates):
lock = False
net = TestScatterMaxNet(lock, inputx, indices, updates)
return net()
class TestScatterMinNet(nn.Cell):
def scatter_min_net(inputx, indices, updates):
lock = True
net = TestScatterMinNet(lock, inputx, indices, updates)
return net()
def scatter_min_use_locking_false_net(inputx, indices, updates):
lock = False
net = TestScatterMinNet(lock, inputx, indices, updates)
return net()
class TestScatterUpdateNet(nn.Cell):
def scatter_update_net(inputx, indices, updates):
lock = True
net = TestScatterUpdateNet(lock, inputx, indices, updates)
return net()
def scatter_update_use_locking_false_net(inputx, indices, updates):
lock = False
net = TestScatterUpdateNet(lock, inputx, indices, updates)
return net()
| 39.757764 | 82 | 0.594712 |
fd06722fb8cfe07ace7e4c46b654df0346766b26 | 4,181 | py | Python | nn_similarity_index/cwt_kernel_mat.py | forgi86/xfer | 56d98a66d6adb2466d1a73b52f3b27193930a008 | [
"Apache-2.0"
] | 244 | 2018-08-31T18:35:29.000Z | 2022-03-20T01:12:50.000Z | nn_similarity_index/cwt_kernel_mat.py | forgi86/xfer | 56d98a66d6adb2466d1a73b52f3b27193930a008 | [
"Apache-2.0"
] | 26 | 2018-08-29T15:31:21.000Z | 2021-06-24T08:05:53.000Z | nn_similarity_index/cwt_kernel_mat.py | forgi86/xfer | 56d98a66d6adb2466d1a73b52f3b27193930a008 | [
"Apache-2.0"
] | 57 | 2018-09-11T13:40:35.000Z | 2022-02-22T14:43:34.000Z | # Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# ==============================================================================
import os
os.environ["OMP_NUM_THREADS"] = "1"
import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torchvision
import torchvision.transforms as transforms
import torchvision.models as models
import numpy as np
from abc import ABC
import os
import argparse
from sketched_kernels import SketchedKernels
from utils import *
if __name__ == "__main__":
# Get arguments from the command line
parser = argparse.ArgumentParser(description='PyTorch CWT sketching kernel matrices')
parser.add_argument('--datapath', type=str,
help='absolute path to the dataset')
parser.add_argument('--modelname', type=str,
help='model name')
parser.add_argument('--pretrained', action='store_true',
help='whether to load a pretrained ImageNet model')
parser.add_argument('--seed', default=0, type=int,
help='random seed for sketching')
parser.add_argument('--task', default='cifar10', type=str, choices=['cifar10', 'cifar100', 'svhn', 'stl10'],
help='the name of the dataset, cifar10 or cifar100 or svhn or stl10')
parser.add_argument('--split', default='train', type=str,
help='split of the dataset, train or test')
parser.add_argument('--bsize', default=512, type=int,
help='batch size for computing the kernel')
parser.add_argument('--M', '--num-buckets-sketching', default=512, type=int,
help='number of buckets in Sketching')
parser.add_argument('--T', '--num-buckets-per-sample', default=1, type=int,
help='number of buckets each data sample is sketched to')
parser.add_argument('--freq_print', default=10, type=int,
help='frequency for printing the progress')
args = parser.parse_args()
# Set the backend and the random seed for running our code
device = 'cuda' if torch.cuda.is_available() else 'cpu'
torch.manual_seed(args.seed)
if device == 'cuda':
cudnn.benchmark = True
torch.cuda.manual_seed(args.seed)
# The size of images for training and testing ImageNet models
imgsize = 224
# Generate a dataloader that iteratively reads data
# Load a model, either pretrained or not
loader = load_dataset(args.task, args.split, args.bsize, args.datapath, imgsize)
net = load_model(device, args.modelname, pretrained=True)
# Set the model to be in the evaluation mode. VERY IMPORTANT!
# This step to fix the running statistics in batchnorm layers,
# and disable dropout layers
net.eval()
csm = SketchedKernels(net, loader, imgsize, device, args.M, args.T, args.freq_print)
csm.compute_sketched_kernels()
# Compute sketched kernel matrices for each layer
for layer_id in range(len(csm.kernel_matrices)):
nkme = (csm.kernel_matrices[layer_id].sum() ** 0.5) / csm.n_samples
print("The norm of the kernel mean embedding of layer {:d} is {:.4f}".format(layer_id, nkme))
del net, loader
torch.cuda.empty_cache()
# Save the sketched kernel matrices
savepath = 'sketched_kernel_mat/'
if not os.path.isdir(savepath):
os.mkdir(savepath)
save_filename = '{}_{}_{}_{}.npy'.format(args.modelname, args.split, args.task, args.seed)
np.save(savepath + save_filename, csm.kernel_matrices)
| 40.201923 | 112 | 0.648649 |
fd067b6667868f936c5b7ba2c71c491e3eeb9190 | 844 | py | Python | venv/Lib/site-packages/traits/observation/events.py | richung99/digitizePlots | 6b408c820660a415a289726e3223e8f558d3e18b | [
"MIT"
] | 1 | 2022-01-18T17:56:51.000Z | 2022-01-18T17:56:51.000Z | venv/Lib/site-packages/traits/observation/events.py | richung99/digitizePlots | 6b408c820660a415a289726e3223e8f558d3e18b | [
"MIT"
] | null | null | null | venv/Lib/site-packages/traits/observation/events.py | richung99/digitizePlots | 6b408c820660a415a289726e3223e8f558d3e18b | [
"MIT"
] | null | null | null | # (C) Copyright 2005-2021 Enthought, Inc., Austin, TX
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in LICENSE.txt and may be redistributed only under
# the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
""" Event objects received by change handlers added using observe.
"""
from traits.observation._dict_change_event import ( # noqa: F401
DictChangeEvent,
)
from traits.observation._list_change_event import ( # noqa: F401
ListChangeEvent,
)
from traits.observation._set_change_event import ( # noqa: F401
SetChangeEvent,
)
from traits.observation._trait_change_event import ( # noqa: F401
TraitChangeEvent,
)
| 29.103448 | 71 | 0.760664 |
fd077dfb9ba449d6f886f45f49324f828fa9d71b | 827 | py | Python | src/run_hid_4_network2.py | Naresh1318/Effect_of_injected_noise_in_deep_NN | 0d001ea2c4d33011204247cb4c066b0da6632c04 | [
"Unlicense"
] | 2 | 2016-09-11T08:47:29.000Z | 2016-11-19T10:29:47.000Z | src/run_hid_4_network2.py | Naresh1318/Effect_of_injected_noise_in_deep_NN | 0d001ea2c4d33011204247cb4c066b0da6632c04 | [
"Unlicense"
] | null | null | null | src/run_hid_4_network2.py | Naresh1318/Effect_of_injected_noise_in_deep_NN | 0d001ea2c4d33011204247cb4c066b0da6632c04 | [
"Unlicense"
] | null | null | null | import mnist_loader
import network2
import numpy as np
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
eta = 0.9
m_b_s = 10
epochs = 30
trials = 10
trial_ev = []
for t in xrange(trials):
net = network2.Network([784, 50, 50, 50, 50, 10], cost=network2.CrossEntropyCost)
net.default_weight_initializer()
_,ev,_,_ = net.SGD(training_data[:1000], epochs, m_b_s, eta, evaluation_data=test_data[:1000],monitor_evaluation_accuracy=True)
print "Trial {} Complete".format(t + 1)
print "Maximum Evaluation Accuracy : {}".format(np.amax(ev))
trial_ev.append(np.amax(ev))
Avg_ev = np.mean(trial_ev)
Max_ev = np.amax(trial_ev)
print "Average Evaluation Accuracy for {} trials is {}".format(trials,Avg_ev)
print "Maximum Evaluation Accuracy for {} trials is {}".format(trials,Max_ev) | 33.08 | 131 | 0.732769 |
fd08718d6dac06e0024584cff9f9907168ac0518 | 1,918 | py | Python | wsm/backend/asyncwhois/base.py | Rayologist/windows-sshd-manager | 4f78a0cdaa12fe3c2a785aca31066c3be886878b | [
"Apache-2.0"
] | 9 | 2022-02-09T09:09:43.000Z | 2022-02-09T09:10:06.000Z | wsm/backend/asyncwhois/base.py | Rayologist/windows-sshd-manager | 4f78a0cdaa12fe3c2a785aca31066c3be886878b | [
"Apache-2.0"
] | null | null | null | wsm/backend/asyncwhois/base.py | Rayologist/windows-sshd-manager | 4f78a0cdaa12fe3c2a785aca31066c3be886878b | [
"Apache-2.0"
] | null | null | null | from abc import ABC, abstractmethod
from typing import List, Any
from ipaddress import IPv4Address
from dataclasses import dataclass, FrozenInstanceError
from types import SimpleNamespace
from enum import Enum, auto
| 22.302326 | 77 | 0.688738 |
fd0c1d5bae5b02c0610c8254bb0ed033a6e6d1e5 | 1,079 | py | Python | optaux/helper_functions/check_nonvalidated_auxs.py | coltonlloyd/OptAux | 3ee1f8cdfa32f1a732ad41d5f854659159694160 | [
"MIT"
] | 1 | 2019-06-05T10:41:06.000Z | 2019-06-05T10:41:06.000Z | optaux/helper_functions/check_nonvalidated_auxs.py | coltonlloyd/OptAux | 3ee1f8cdfa32f1a732ad41d5f854659159694160 | [
"MIT"
] | null | null | null | optaux/helper_functions/check_nonvalidated_auxs.py | coltonlloyd/OptAux | 3ee1f8cdfa32f1a732ad41d5f854659159694160 | [
"MIT"
] | null | null | null | import cobra
from optaux import resources
resource_dir = resources.__path__[0]
met_to_rs = {'EX_pydam_e': ['PDX5PS', 'PYDXK', 'PYDXNK'],
'EX_orot_e': ['DHORTS', 'UPPRT', 'URIK2'],
'EX_thr__L_e': ['PTHRpp', 'THRS'],
'EX_pro__L_e': ['AMPTASEPG', 'P5CR'],
'EX_skm_e': ['DHQTi'],
'EX_cys__L_e': ['AMPTASECG', 'CYSS']}
for m, rs in met_to_rs.items():
ijo = cobra.io.load_json_model('%s/iJO1366.json' % resource_dir)
ijo.reactions.EX_o2_e.lower_bound = -20
biomass_reaction = list(ijo.objective.keys())[0]
biomass_reaction.lower_bound = .1
biomass_reaction.upper_bound = .1
for r in rs:
for g in [i.id for i in ijo.reactions.get_by_id(r).genes]:
print(ijo.genes.get_by_id(g).name,
[i.id for i in ijo.genes.get_by_id(g).reactions])
ijo.genes.get_by_id(g).remove_from_model()
ijo.objective = m
ijo.reactions.get_by_id(m).lower_bound = -10
ijo.optimize()
print(m, ijo.solution.f)
ijo.reactions.get_by_id(m).lower_bound = 0 | 33.71875 | 68 | 0.615385 |
fd0f8e0645346f82a2ff9bdf244ca7d9bf72405b | 186 | py | Python | xauto/common/futils.py | sababa11/xauto | 107e59344b4624941387a4dff0d439719075ebf4 | [
"Apache-2.0"
] | null | null | null | xauto/common/futils.py | sababa11/xauto | 107e59344b4624941387a4dff0d439719075ebf4 | [
"Apache-2.0"
] | null | null | null | xauto/common/futils.py | sababa11/xauto | 107e59344b4624941387a4dff0d439719075ebf4 | [
"Apache-2.0"
] | null | null | null | import os
import sys
def get_workdir():
"""
get_workdir() -> workdir: [str]
Returns the current workdir.
"""
return os.path.realpath(os.path.dirname(sys.argv[0]))
| 15.5 | 57 | 0.629032 |
fd105e9dfaa8a1cb5dda8aab7e3ed98167bf73e4 | 10,430 | py | Python | csv-to-mysql.py | LongPhan1912/Youtube-Playlist-Extractor | 80b10e0b459c2cb264113cfaff644f5f28650813 | [
"CC0-1.0"
] | null | null | null | csv-to-mysql.py | LongPhan1912/Youtube-Playlist-Extractor | 80b10e0b459c2cb264113cfaff644f5f28650813 | [
"CC0-1.0"
] | null | null | null | csv-to-mysql.py | LongPhan1912/Youtube-Playlist-Extractor | 80b10e0b459c2cb264113cfaff644f5f28650813 | [
"CC0-1.0"
] | null | null | null | import csv
import MySQLdb
# installing MySQL: https://dev.mysql.com/doc/refman/8.0/en/osx-installation-pkg.html
# how to start, watch: https://www.youtube.com/watch?v=3vsC05rxZ8c
# or read this (absolutely helpful) guide: https://www.datacamp.com/community/tutorials/mysql-python
# this is mainly created to get a database of all the songs in my Favorites playlist
# if you wish to change the topic to 'FILM', 'SPORTS', or 'POLITICS'
# 1/ initially, set up the MySQL connection and craft a cursor
mydb = MySQLdb.connect(host='localhost', user='root', passwd='yourPasswordHere')
cursor = mydb.cursor()
# 2/ create a database:
cursor.execute("CREATE DATABASE mydb")
mydb.commit()
# 3/ after database is created, comment out steps 1/ and 2/ and uncomment step 3/
# mydb = MySQLdb.connect(host='localhost', user='root', passwd='', database="mydb")
# cursor = mydb.cursor()
# from here on out, whenever you call `cursor.execute()`, call `mydb.commit()` right afterwards
# 4/ create a table -- three options available to you
# the table's hardcoded right now so if the columns here are changed then other
# the main music table helps extract info to create sub tables for a specific music category
# def create_custom_table(table_name):
# cursor.execute("CREATE TABLE " + table_name
# + " (tableID INTEGER PRIMARY KEY AUTO_INCREMENT, \
# videoTitle VARCHAR(150) NOT NULL, \
# author VARCHAR(100) NOT NULL, \
# category VARCHAR(100) NOT NULL, \
# videoLink VARCHAR(100) NOT NULL, \
# viewCount BIGINT NOT NULL, \
# likeToDislikeRatio decimal(5, 4) NOT NULL)")
# mydb.commit()
# 5/ from a list of wanted fields, the function searches for the index corresponding to each field on the list
# and stores the index inside a dict (easy to look up and flexible if the order of the columns in the csv file is changed)
wanted_items = ['song_name', 'artist', 'topics', 'video_link', 'view_count', 'like_to_dislike_ratio']
# 6/ fill up our main table with the relevant data
# 7/ fill up our custom table using data from the main music table
# -------------------------------------------------------------------
# -------------------SUPPLEMENTARY FUNCTIONS START-------------------
# -------------------------------------------------------------------
# add a field after table is created (new field placed after a specific column of a table)
# change data type for any given field
# delete all the data from a specified table
# make a table disappear from existence :)
# print out all the songs in the playlist
# 'DESC' means descending order (most popular song on top) and 'ASC' is the opposite
# show the name of all the tables present in the database
# check if a table already exists
# optional / not required function: should you wish to look up the different video topics
# if you want to search for all topics, leave `selected_topic` as an empty string
# ------------------------------------------------------------------
# -------------------SUPPLEMENTARY FUNCTIONS ENDS-------------------
# ------------------------------------------------------------------
# 8/ Create main music table
# 9/ Build a new music table based on the genre you love
if __name__ == "__main__":
main()
| 48.287037 | 136 | 0.661266 |
fd107c2da7b904339edb0406a2c83b2ca10efada | 8,850 | py | Python | coadd_mdetsims/tests/test_shear_bias_meas.py | beckermr/metadetect-coadding-sims | 15ccaec353aa61c69ac9d78d1dfca8ce25bca3cf | [
"BSD-3-Clause"
] | null | null | null | coadd_mdetsims/tests/test_shear_bias_meas.py | beckermr/metadetect-coadding-sims | 15ccaec353aa61c69ac9d78d1dfca8ce25bca3cf | [
"BSD-3-Clause"
] | null | null | null | coadd_mdetsims/tests/test_shear_bias_meas.py | beckermr/metadetect-coadding-sims | 15ccaec353aa61c69ac9d78d1dfca8ce25bca3cf | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import pytest
from ..shear_bias_meas import (
measure_shear_metadetect, estimate_m_and_c,
estimate_m_and_c_patch_avg)
| 34.570313 | 77 | 0.490508 |
fd1270311d2747042f749172a656ddde2d001d75 | 1,221 | py | Python | src/topologies/simple.py | sevenEng/Resolving-Consensus | a508701e19bd4ec0df735f5b094487983272dbb6 | [
"MIT"
] | null | null | null | src/topologies/simple.py | sevenEng/Resolving-Consensus | a508701e19bd4ec0df735f5b094487983272dbb6 | [
"MIT"
] | null | null | null | src/topologies/simple.py | sevenEng/Resolving-Consensus | a508701e19bd4ec0df735f5b094487983272dbb6 | [
"MIT"
] | null | null | null | from mininet.net import Mininet
from mininet.node import Controller, UserSwitch, IVSSwitch, OVSSwitch
from mininet.log import info, setLogLevel
setLogLevel("info")
import importlib
switch_num = 1
host_num = 1
client_num = 1
| 18.784615 | 69 | 0.633907 |
fd1396e2ed5013e365c0832fe7ee283e5e1bda20 | 856 | py | Python | lunchapi/permissions.py | pesusieni999/lunchapplication | 2aa2a4320a2ad85b39b74c5dcc3d960a46cdb6ef | [
"MIT"
] | null | null | null | lunchapi/permissions.py | pesusieni999/lunchapplication | 2aa2a4320a2ad85b39b74c5dcc3d960a46cdb6ef | [
"MIT"
] | null | null | null | lunchapi/permissions.py | pesusieni999/lunchapplication | 2aa2a4320a2ad85b39b74c5dcc3d960a46cdb6ef | [
"MIT"
] | null | null | null | from rest_framework import permissions
__author__ = "Ville Myllynen"
__copyright__ = "Copyright 2017, Ohsiha Project"
__credits__ = ["Ville Myllynen"]
__license__ = "MIT"
__version__ = "1.0"
__maintainer__ = "Ville Myllynen"
__email__ = "ville.myllynen@student.tut.fi"
__status__ = "Development"
| 31.703704 | 73 | 0.712617 |
fd139df441393f7003990ec4900a0d7e845e7545 | 3,994 | py | Python | MsSampleFmpDevicePkg/Tools/ConvertCerToH.py | kuqin12/mu_plus | f78c66d0508a7b884b3f73d7c86648656bf07fbb | [
"BSD-2-Clause"
] | null | null | null | MsSampleFmpDevicePkg/Tools/ConvertCerToH.py | kuqin12/mu_plus | f78c66d0508a7b884b3f73d7c86648656bf07fbb | [
"BSD-2-Clause"
] | null | null | null | MsSampleFmpDevicePkg/Tools/ConvertCerToH.py | kuqin12/mu_plus | f78c66d0508a7b884b3f73d7c86648656bf07fbb | [
"BSD-2-Clause"
] | null | null | null | ##
# Copyright (c) 2016, Microsoft Corporation
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##
##
# This is a sample tool and should not be used in production environments.
#
# This tool takes in sample certificates (.cer files) and outputs a .h file containing the
# certificates.
###
import re
import sys
print "This is a sample tool and should not be used in production environments\n"
raw_input('Press any key to continue . . .\n')
### Parse input parameters ###
if len(sys.argv) == 1 or sys.argv[1] == "-h" or sys.argv[1] == "-H" or sys.argv[1] == "-?":
print "This tool creates Certs.h with one or more certificates\n"
print "usage: ConvertCerToH.py <CertFiles...>"
print "example: ConvertCerToH.py SAMPLE_DEVELOPMENT.cer SAMPLE_PRODUCTION.cer"
print "example: ConvertCerToH.py SAMPLE_DEVELOPMENT1.cer SAMPLE_DEVELOPMENT2.cer SAMPLE_PRODUCTION.cer"
print "example: ConvertCerToH.py SAMPLE_PRODUCTION.cer"
sys.exit(-1)
if len(sys.argv) > 11:
print "Error: Currently limiting number of certificates to 10"
print "usage: ConvertCerToH.py <CertFiles...>"
sys.exit(-1)
### Process Certificates ###
Certs = []
sys.argv.remove(sys.argv[0])
for fileName in sys.argv:
print "Processing", fileName
# Open cert file
file = open(fileName, "rb")
# Read binary file
Cert = file.read()
# Close cert file
file.close()
CertHex = map(hex,map(ord,Cert))
Cert = re.sub(r'\'|\[|\]', "", str(CertHex))
Certs.append(Cert)
### Write certificates to Certs.h ###
# Open header file
HeaderFile = open("Certs.h", "w")
HeaderFile.write("//\n")
HeaderFile.write("// Certs.h\n")
HeaderFile.write("//\n\n")
HeaderFile.write("//\n")
HeaderFile.write("// These are the binary DER encoded Product Key certificates \n")
HeaderFile.write("// used to sign the UEFI capsule payload.\n")
HeaderFile.write("//\n\n")
index = 1
for Cert in Certs:
HeaderFile.write("CONST UINT8 CapsulePublicKeyCert"+str(index)+"[] =\n")
HeaderFile.write("{\n")
HeaderFile.write(Cert)
HeaderFile.write("\n};\n\n")
index = index + 1
HeaderFile.write("CONST CAPSULE_VERIFICATION_CERTIFICATE CapsuleVerifyCertificates[] = {\n")
index = 1
for Cert in Certs:
HeaderFile.write(" {CapsulePublicKeyCert"+str(index)+", sizeof(CapsulePublicKeyCert"+str(index)+")},\n")
index = index + 1
HeaderFile.write("};\n\n")
HeaderFile.write("CONST CAPSULE_VERIFICATION_CERTIFICATE_LIST CapsuleVerifyCertificateList = {\n")
HeaderFile.write(" sizeof(CapsuleVerifyCertificates)/sizeof(CAPSULE_VERIFICATION_CERTIFICATE),\n")
HeaderFile.write(" CapsuleVerifyCertificates\n")
HeaderFile.write("};\n\n")
# Close header file
HeaderFile.close()
print "\nCopy the output file Certs.h to folder MsSampleFmpDevicePkg\Library\CapsuleKeyBaseLib"
| 33.847458 | 107 | 0.73986 |
fd16505a157e42c0095152f4d85cf3c6e225daa0 | 2,899 | py | Python | webtraversallibrary/app_info.py | redfungus/webtraversallibrary | d5013edc061deba40e859dcfcda314c7055ce82a | [
"Apache-2.0"
] | null | null | null | webtraversallibrary/app_info.py | redfungus/webtraversallibrary | d5013edc061deba40e859dcfcda314c7055ce82a | [
"Apache-2.0"
] | null | null | null | webtraversallibrary/app_info.py | redfungus/webtraversallibrary | d5013edc061deba40e859dcfcda314c7055ce82a | [
"Apache-2.0"
] | null | null | null | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Extract version information from several possible sources in a pre-assigned priority:
* environment
* git context
* .gitinfo file
* default value
"""
import os
import subprocess
def get_commit_hash(short: bool = False) -> str:
"""
Extract commit hash from either the environment, or the ``.gitinfo`` file.
If nothing works, returns "unknown".
"""
git_command = ["git", "rev-parse", "HEAD"]
if short:
git_command.insert(2, "--short")
try:
return subprocess.check_output(git_command, universal_newlines=True, stderr=subprocess.DEVNULL).strip()
except subprocess.CalledProcessError:
pass
# This may be a source copy without .git information. Look for .gitinfo file
try:
with open(".gitinfo") as f:
return f.readline().strip()
except OSError:
pass
# As a last resort, return a special value
return "unknown"
def get_branch_name() -> str:
"""
Extract branch name from either the environment, or the ``.gitinfo`` file.
Returns "unknown" if it couldn't be found.
"""
# If explicitly set, take the environment variable
if "BRANCH_NAME" in os.environ:
return os.environ["BRANCH_NAME"]
# Otherwise, try to get the context from current directory git metadata
try:
branch_name = subprocess.check_output(
["bash", "-c", 'git branch --list | grep -e "*" | cut -c 3-'],
universal_newlines=True,
stderr=subprocess.DEVNULL,
).strip()
if branch_name:
# The piped command chain will "succeed" even if "git branch --list" returns nothing
# In that case, branch_name will be empty and should not be returned
return branch_name
except subprocess.CalledProcessError:
pass
# If that's not available either, look for the .gitinfo file that gets added to Docker containers
try:
with open(".gitinfo") as f:
return f.readlines()[-1].strip()
except OSError:
pass
# As a last resort, return a special value
return "unknown"
| 32.573034 | 111 | 0.67506 |
fd166522613f70d68340ce87a9e8c0bff5f78c6b | 4,863 | py | Python | aws_reporting.py | jeberhar/DevOpsLearner | f9dce9cf2dc6e75494c1372a339e9f13e836102d | [
"MIT"
] | null | null | null | aws_reporting.py | jeberhar/DevOpsLearner | f9dce9cf2dc6e75494c1372a339e9f13e836102d | [
"MIT"
] | null | null | null | aws_reporting.py | jeberhar/DevOpsLearner | f9dce9cf2dc6e75494c1372a339e9f13e836102d | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import boto
import boto.ec2
import sys
from boto.ec2.connection import EC2Connection
import pprint
account_string = "YOUR_ACCOUNT_STRING" #change this for each AWS account
####main program execution####
regions = sys.argv[1:]
volume_info = ""
if len(regions) == 0:
regions=['us-east-1']
if len(regions) == 1 and regions[0] == "all":
working_regions = boto.ec2.regions()
#print working_regions #DEBUG: uncomment to view all the regions that will be searched for "all"
else:
working_regions = [ boto.ec2.get_region(x) for x in regions ]
for current_working_region in working_regions:
print "\n================"
print current_working_region.name
print "================"
print "Account Name,Instance Name,Instance ID,Instance Type,Availability Zone,Instance State,Public DNS,Public IP,Private DNS,Key Name,Monitoring,Launch Time,Security Groups,Attached Volume ID,Attached Volume Instance ID,Mounted Device Name,Attached Volume Size"
try:
conn = boto.connect_ec2(region = current_working_region)
#conn = EC2Connection() #same as boto.connect_ec2()
reservations = conn.get_all_instances()
volumes = conn.get_all_volumes()
#print "Volumes array has length of: " + str(len(volumes))
instances = [i for r in reservations for i in r.instances]
#pp = pprint.PrettyPrinter(indent=4)
for r in reservations:
for i in r.instances:
#pp.pprint(i.__dict__)
print_instance(i)
#print_ebs_info(i)
except boto.exception.EC2ResponseError:
print "ERROR -- Could not connect to " + current_working_region.name
pass
| 37.122137 | 267 | 0.584002 |
fd1683f2abe7dd47f1cee08ed7abab26a488dbc0 | 1,832 | py | Python | code/functions/one_hop_majority_vote.py | YatongChen/decoupled_smoothing_on_graph | b5110db92841c00193577adb0f5d3daa70f46845 | [
"MIT"
] | 5 | 2019-02-25T20:05:47.000Z | 2021-06-23T21:38:52.000Z | code/functions/one_hop_majority_vote.py | YatongChen/decoupled_smoothing_on_graphs | b5110db92841c00193577adb0f5d3daa70f46845 | [
"MIT"
] | null | null | null | code/functions/one_hop_majority_vote.py | YatongChen/decoupled_smoothing_on_graphs | b5110db92841c00193577adb0f5d3daa70f46845 | [
"MIT"
] | null | null | null | # one hop helper function | 50.888889 | 231 | 0.69869 |
fd16885dbeb4939e362807cdc853aa44683b010f | 18,284 | py | Python | alphago/alphago.py | noahwaterfieldprice/alphago | 4a7bba6d9758ccf1d2f2d7ae964b5d5d48021ee8 | [
"MIT"
] | 4 | 2018-02-12T09:11:26.000Z | 2022-01-24T20:46:15.000Z | alphago/alphago.py | noahwaterfieldprice/alphago | 4a7bba6d9758ccf1d2f2d7ae964b5d5d48021ee8 | [
"MIT"
] | null | null | null | alphago/alphago.py | noahwaterfieldprice/alphago | 4a7bba6d9758ccf1d2f2d7ae964b5d5d48021ee8 | [
"MIT"
] | 3 | 2018-08-23T15:08:54.000Z | 2020-03-13T14:21:08.000Z | from collections import OrderedDict
import numpy as np
from tqdm import tqdm
import tensorflow as tf
from .player import MCTSPlayer, RandomPlayer, OptimalPlayer
from .evaluator import evaluate
from .mcts_tree import MCTSNode, mcts
from .utilities import sample_distribution
__all__ = ["train_alphago", "self_play", "process_self_play_data",
"process_training_data"]
def train_alphago(game, create_estimator, self_play_iters, training_iters,
checkpoint_path, summary_path, alphago_steps=100,
evaluate_every=1, batch_size=32, mcts_iters=100, c_puct=1.0,
replay_length=100000, num_evaluate_games=500,
win_rate=0.55, verbose=True, restore_step=None,
self_play_file_path=None):
"""Trains AlphaGo on the game.
Parameters
----------
game: object
An object that has the attributes a game needs.
create_estimator: func
Creates a trainable estimator for the game. The estimator should
have a train function.
self_play_iters: int
Number of self-play games to play each self-play step.
training_iters: int
Number of training iters to use for each training step.
checkpoint_path: str
Where to save the checkpoints to.
summary_path: str
Where to save the summaries (tensorboard) to.
alphago_steps: int
Number of steps to run the alphago loop for.
evaluate_every: int
Evaluate the network every evaluate_every steps.
batch_size: int
Batch size to train with.
mcts_iters: int
Number of iterations to run MCTS for.
c_puct: float
Parameter for MCTS. See AlphaGo paper.
replay_length: int
The amount of training data to use. Only train on the most recent
training data.
num_evaluate_games: int
Number of games to evaluate the players for.
win_rate: float
Number between 0 and 1. Only update self-play player when training
player beats self-play player by at least this rate.
verbose: bool
Whether or not to output progress.
restore_step: int or None
If given, restore the network from the checkpoint at this step.
self_play_file_path: str or None
Where to load self play data from, if given.
"""
# TODO: Do self-play, training and evaluating in parallel.
# We use a fixed estimator (the best one that's been trained) to
# generate self-play training data. We then train the training estimator
# on that data. We produce a checkpoint every 1000 training steps. This
# checkpoint is then evaluated against the current best neural network.
# If it beats the current best network by at least 55% then it becomes
# the new best network.
# 1 is the fixed player, and 2 is the training player.
self_play_estimator = create_estimator()
training_estimator = create_estimator()
graph = tf.Graph()
sess = tf.Session(graph=graph)
with graph.as_default():
tf_success_rate = tf.placeholder(
tf.float32, name='success_rate_summary')
success_rate_summary = tf.summary.scalar(
'success_rate_summary', tf_success_rate)
tf_success_rate_random = tf.placeholder(
tf.float32, name='success_rate_random')
success_rate_random_summary = tf.summary.scalar(
'success_rate_random', tf_success_rate_random)
#tf_success_rate_optimal = tf.placeholder(
# tf.float32, name='success_rate_optimal')
#success_rate_optimal_summary = tf.summary.scalar(
# 'success_rate_optimal', tf_success_rate_optimal)
#merged_summary = tf.summary.merge([success_rate_summary,
# success_rate_random_summary,
# success_rate_optimal_summary])
merged_summary = tf.summary.merge([success_rate_summary,
success_rate_random_summary])
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter(summary_path)
if restore_step:
restore_path = compute_checkpoint_name(restore_step, checkpoint_path)
self_play_estimator.restore(restore_path)
training_estimator.restore(restore_path)
all_losses = []
self_play_data = None
initial_step = restore_step + 1 if restore_step else 0
for alphago_step in range(initial_step, initial_step + alphago_steps):
self_play_data = generate_self_play_data(
game, self_play_estimator, mcts_iters, c_puct, self_play_iters,
verbose=verbose, data=self_play_data)
training_data = process_training_data(self_play_data, replay_length)
if len(training_data) < 100:
continue
optimise_estimator(training_estimator, training_data, batch_size,
training_iters, writer=writer, verbose=verbose)
# Evaluate the players and choose the best.
if alphago_step % evaluate_every == 0:
success_rate, success_rate_random = \
evaluate_model(game, self_play_estimator,
training_estimator, mcts_iters, c_puct,
num_evaluate_games, verbose=verbose)
summary = sess.run(merged_summary,
feed_dict=
{tf_success_rate: success_rate,
tf_success_rate_random: success_rate_random})
writer.add_summary(summary, training_estimator.global_step)
checkpoint_model(training_estimator, alphago_step, checkpoint_path)
# If training player beats self-play player by a large enough
# margin, then it becomes the new best estimator.
if success_rate > win_rate:
# Create a new self player, with the weights of the most
# recent training_estimator.
if verbose:
print("Updating self-play player.")
print("Restoring from step: {}".format(alphago_step))
self_play_estimator = create_estimator()
restore_path = compute_checkpoint_name(alphago_step,
checkpoint_path)
self_play_estimator.restore(restore_path)
return all_losses
def checkpoint_model(player, step, path):
"""Checkpoint the training player.
"""
checkpoint_name = compute_checkpoint_name(step, path)
player.save(checkpoint_name)
def generate_self_play_data(game, estimator, mcts_iters, c_puct, num_iters,
data=None, verbose=True):
"""Generates self play data for a number of iterations for a given
estimator. Saves to save_file_path, if given.
"""
# if save_file_path is not None:
# with open(save_file_path, 'r') as f:
# data = json.load(save_file_path)
# index = max(data.keys()) + 1
if data is not None:
index = max(data.keys()) + 1
else:
data = OrderedDict()
index = 0
# Collect self-play training data using the best estimator.
disable_tqdm = False if verbose else True
for _ in tqdm(range(num_iters), disable=disable_tqdm):
data[index] = self_play(
game, estimator.create_estimate_fn(), mcts_iters, c_puct)
index += 1
# if save_file_path is not None:
# with open(save_file_path, 'w') as f:
# json.dump(data, f)
return data
def self_play(game, estimator, mcts_iters, c_puct):
"""Plays a single game using MCTS to choose actions for both players.
Parameters
----------
game: Game
An object representing the game to be played.
estimator: func
An estimate function.
mcts_iters: int
Number of iterations to run MCTS for.
c_puct: float
Parameter for MCTS.
Returns
-------
game_state_list: list
A list of game states encountered in the self-play game. Starts
with the initial state and ends with a terminal state.
action_probs_list: list
A list of action probability dictionaries, as returned by MCTS
each time the algorithm has to take an action. The ith action
probabilities dictionary corresponds to the ith game_state, and
action_probs_list has length one less than game_state_list,
since we don't have to move in a terminal state.
"""
node = MCTSNode(game.initial_state, game.current_player(game.initial_state))
game_state_list = [node.game_state]
action_probs_list = []
action_list = []
move_count = 0
while not node.is_terminal:
# TODO: Choose this better.
tau = 1
if move_count >= 10:
tau = 1 / (move_count - 10 + 1)
# First run MCTS to compute action probabilities.
action_probs = mcts(node, game, estimator, mcts_iters, c_puct, tau=tau)
# Choose the action according to the action probabilities.
action = sample_distribution(action_probs)
action_list.append(action)
# Play the action
node = node.children[action]
# Add the action probabilities and game state to the list.
action_probs_list.append(action_probs)
game_state_list.append(node.game_state)
move_count += 1
data = process_self_play_data(game_state_list, action_list,
action_probs_list, game, game.action_indices)
return data
def process_training_data(self_play_data, replay_length=None):
"""Takes self play data and returns a list of tuples (state,
action_probs, utility) suitable for training an estimator.
Parameters
----------
self_play_data: dict
Dictionary with keys given by an index (int) and values given by a
log of the game. This is a list of tuples as in generate self play
data.
replay_length: int or None
If given, only return the last replay_length (state, probs, utility)
tuples.
"""
training_data = []
for index, game_log in self_play_data.items():
for (state, action, probs_vector, z) in game_log:
training_data.append((state, probs_vector, z))
print("Training data length: {}".format(len(training_data)))
print("Self play data length: {}".format(len(self_play_data)))
if replay_length is not None:
training_data = training_data[-replay_length:]
return training_data
def process_self_play_data(states_, actions_, action_probs_, game,
action_indices):
"""Takes a list of states and action probabilities, as returned by
play, and creates training data from this. We build up a list
consisting of (state, probs, z) tuples, where player is the player
in state 'state', and 'z' is the utility to 'player' in 'last_state'.
We omit the terminal state from the list as there are no probabilities to
train. TODO: Potentially include the terminal state in order to train the
value. # TODO: why the underscores in the parameter names?
Parameters
----------
states_: list
A list of n states, with the last being terminal.
actions_: list
A list of n-1 actions, being the action taken in the corresponding
state.
action_probs_: list
A list of n-1 dictionaries containing action probabilities. The ith
dictionary applies to the ith state, representing the probabilities
returned by play of taking each available action in the state.
game: Game
An object representing the game to be played.
action_indices: dict
A dictionary mapping actions (in the form of the legal_actions
function) to action indices (to be used for training the neural
network).
Returns
-------
training_data: list
A list consisting of (state, action, probs, z) tuples, where player
is the player in state 'state', and 'z' is the utility to 'player' in
'last_state'.
"""
# Get the outcome for the game. This should be the last state in states_.
last_state = states_.pop()
outcome = game.utility(last_state)
# Now action_probs_ and states_ are the same length.
training_data = []
for state, action, probs in zip(states_, actions_, action_probs_):
# Get the player in the state, and the value to this player of the
# terminal state.
player = game.current_player(state)
z = outcome[player]
# Convert the probs dictionary to a numpy array using action_indices.
probs_vector = np.zeros(len(action_indices))
for a, prob in probs.items():
probs_vector[action_indices[a]] = prob
non_nan_state = np.nan_to_num(state)
training_data.append((non_nan_state, action, probs_vector, z))
return training_data
| 39.152034 | 80 | 0.644553 |
fd16ab23714a63a2de7b1edeefc67fc983b65d54 | 835 | py | Python | SRCTF/SRCTF/django_reuse/reuse/migrations/0002_auto_20160829_1452.py | yinyueacm/work-uga | d8fd104b8c5600e1715491fc5eeffaf5c0b5896c | [
"MIT"
] | 1 | 2019-01-11T03:20:34.000Z | 2019-01-11T03:20:34.000Z | SRCTF/SRCTF/django_reuse/reuse/migrations/0002_auto_20160829_1452.py | yinyueacm/yinyue-thesis | d8fd104b8c5600e1715491fc5eeffaf5c0b5896c | [
"MIT"
] | null | null | null | SRCTF/SRCTF/django_reuse/reuse/migrations/0002_auto_20160829_1452.py | yinyueacm/yinyue-thesis | d8fd104b8c5600e1715491fc5eeffaf5c0b5896c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-08-29 14:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
| 28.793103 | 118 | 0.602395 |
fd17609bb6160f46ba3a4f3809db927e0e55536c | 5,976 | py | Python | complexity_science/ca/ca/ca1d.py | KristerJazz/complexity-science | 2340b7467d50c45d06fa93db7603f7f2778d3c4c | [
"BSD-3-Clause"
] | null | null | null | complexity_science/ca/ca/ca1d.py | KristerJazz/complexity-science | 2340b7467d50c45d06fa93db7603f7f2778d3c4c | [
"BSD-3-Clause"
] | 2 | 2021-03-28T16:23:00.000Z | 2021-04-05T08:10:35.000Z | complexity_science/ca/ca/ca1d.py | KristerJazz/complexity-science | 2340b7467d50c45d06fa93db7603f7f2778d3c4c | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from .rule_manager import RuleManager
| 29.15122 | 138 | 0.54836 |
fd1902e85156fc45744e6e48892733db33d5f755 | 4,373 | py | Python | extract_skip_thought.py | youngfly11/ReferCOCO-Pretraining-Detectron2 | 8c8536a4d822b3cf9140380442a440d42e948c38 | [
"Apache-2.0"
] | 2 | 2020-08-14T08:00:53.000Z | 2020-11-21T11:01:55.000Z | extract_skip_thought.py | youngfly11/ReferCOCO-Pretraining-Detectron2 | 8c8536a4d822b3cf9140380442a440d42e948c38 | [
"Apache-2.0"
] | null | null | null | extract_skip_thought.py | youngfly11/ReferCOCO-Pretraining-Detectron2 | 8c8536a4d822b3cf9140380442a440d42e948c38 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3.6
# -*- coding: utf-8 -*-
# @Time : 2020/6/25 22:41
# @Author : Yongfei Liu
# @Email : liuyf3@shanghaitech.edu.cn
import numpy as np
import os.path as osp
import os
import pickle
from collections import OrderedDict
import torch
import json
from detectron2.data.datasets.builtin_meta import COCO_CATEGORIES
if __name__ == '__main__':
extract_embedding()
| 30.58042 | 104 | 0.607363 |
fd1912c311e861ca371e1043073ef9f199c996c4 | 4,909 | py | Python | pyutil_mongo/cfg.py | chhsiao1981/pyutil_mongo | facea2376b48dd7157d4633ab8128c8daf7e59ef | [
"MIT"
] | null | null | null | pyutil_mongo/cfg.py | chhsiao1981/pyutil_mongo | facea2376b48dd7157d4633ab8128c8daf7e59ef | [
"MIT"
] | null | null | null | pyutil_mongo/cfg.py | chhsiao1981/pyutil_mongo | facea2376b48dd7157d4633ab8128c8daf7e59ef | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Attributes:
config (dict): Description
logger (logging.Logger): Description
"""
import logging
import pymongo
logger = None
config = {}
def init(the_logger: logging.Logger, mongo_maps: list):
"""init
Args:
the_logger (logging.Logger): Description
mongo_maps (list): list of MongoDB info
Returns:
TYPE: Description
"""
global logger
logger = the_logger
return restart_mongo(mongo_maps=mongo_maps)
def restart_mongo(collection_name="", db_name="", mongo_maps=None):
"""restarting mongo
Args:
collection_name (str, optional): collection-name
db_name (str, optional): db-name
mongo_maps (None, optional): mongo-maps
Returns:
TYPE: Description
"""
'''
initialize mongo
'''
global config
if mongo_maps is None:
mongo_maps = [each['mongo_map'] for each in config.values()]
if len(mongo_maps) == 0:
return
errs = []
for idx, mongo_map in enumerate(mongo_maps):
each_err = _init_mongo_map_core(mongo_map, collection_name=collection_name, db_name=db_name)
if each_err:
errs.append(each_err)
logger.error('(%s/%s): e: %s', idx, len(mongo_maps), each_err)
if not errs:
return None
err_str = ','.join(['%s' % (each) for each in errs])
return Exception(err_str)
def _init_mongo_map_core(mongo_map: MongoMap, collection_name="", db_name=""):
"""Summary
Args:
mongo_map (MongoMap): Description
collection_name (str, optional): Description
db_name (str, optional): Description
Returns:
TYPE: Description
"""
global config
global logger
mongo_map_db_name, hostname, mongo_db_name, collection_map, ensure_index, ensure_unique_index = mongo_map.db_name, mongo_map.hostname, mongo_map.mongo_db_name, mongo_map.collection_map, mongo_map.ensure_index, mongo_map.ensure_unique_index
if db_name != '' and mongo_map_db_name != db_name:
return
if collection_name != '' and collection_name not in collection_map:
return
if collection_name == '' and mongo_map_db_name in config:
return Exception('db already in config: db_name: %s config: %s', mongo_map_db_name, config[mongo_map_db_name])
if ensure_index is None:
ensure_index = {}
if ensure_unique_index is None:
ensure_unique_index = {}
# mongo_server_url
mongo_server_url = 'mongodb://%s/%s' % (hostname, mongo_db_name)
# mongo-server-client
mongo_kwargs = {}
if mongo_map.ssl:
mongo_kwargs.update({
'ssl': True,
'authSource': '$external',
'authMechanism': 'MONGODB-X509',
'ssl_certfile': mongo_map.cert,
'ssl_ca_certs': mongo_map.ca,
})
mongo_server_client = pymongo.MongoClient(
mongo_server_url,
**mongo_kwargs,
)[mongo_db_name]
# config-by-db-name
config_by_db_name = {'mongo_map': mongo_map, 'db': {}, 'url': mongo_server_url}
# collection
for (key, val) in collection_map.items():
logger.info('mongo: %s => %s', key, val)
config_by_db_name['db'][key] = mongo_server_client[val]
# enure index
for key, val in ensure_index.items():
logger.info('to ensure_index: key: %s', key)
config_by_db_name['db'][key].create_index(val, background=True)
# enure unique index
for key, val in ensure_unique_index.items():
logger.info('to ensure_unique_index: key: %s', key)
config_by_db_name['db'][key].create_index(val, background=True, unique=True)
config[mongo_map_db_name] = config_by_db_name
def clean():
"""Reset config
"""
global config
config = {}
| 28.375723 | 243 | 0.646771 |
fd196b9ee70ef729b16290f14477ca8c3d79df73 | 172 | py | Python | exhaustive_search/05_multiple-arrays/05-01-01.py | fumiyanll23/algo-method | d86ea1d399cbc5a1db0ae49d0c82e41042f661ab | [
"MIT"
] | null | null | null | exhaustive_search/05_multiple-arrays/05-01-01.py | fumiyanll23/algo-method | d86ea1d399cbc5a1db0ae49d0c82e41042f661ab | [
"MIT"
] | null | null | null | exhaustive_search/05_multiple-arrays/05-01-01.py | fumiyanll23/algo-method | d86ea1d399cbc5a1db0ae49d0c82e41042f661ab | [
"MIT"
] | null | null | null | # input
N, M = map(int, input().split())
As = [*map(int, input().split())]
Bs = [*map(int, input().split())]
# compute
# output
print(sum(A > B for B in Bs for A in As))
| 17.2 | 41 | 0.569767 |
fd19ca0b5b114583ad7ed023250d660c51840010 | 141 | py | Python | solutions/1639.py | nxexox/acm.timus | 9548d2a0b54fdd99bd60071f3be2fb7f897a7303 | [
"MIT"
] | null | null | null | solutions/1639.py | nxexox/acm.timus | 9548d2a0b54fdd99bd60071f3be2fb7f897a7303 | [
"MIT"
] | null | null | null | solutions/1639.py | nxexox/acm.timus | 9548d2a0b54fdd99bd60071f3be2fb7f897a7303 | [
"MIT"
] | null | null | null | #!/usr/bin/python
a, b = [int(i) for i in input().split()]
c = a * b
if c % 2 == 0:
print('[:=[first]')
else:
print('[second]=:]')
| 14.1 | 40 | 0.475177 |
fd19e6853abb785c9505d8a5f4aaf9326f1ad438 | 374 | py | Python | lstmcpipe/config/__init__.py | cta-observatory/lst-i-rf | 7a634e0b3b07dda2b20df47875d97616eab65821 | [
"MIT"
] | 2 | 2021-02-01T17:30:46.000Z | 2021-02-22T13:59:49.000Z | lstmcpipe/config/__init__.py | cta-observatory/lst-i-rf | 7a634e0b3b07dda2b20df47875d97616eab65821 | [
"MIT"
] | 106 | 2021-04-16T21:15:20.000Z | 2022-03-31T23:02:50.000Z | lstmcpipe/config/__init__.py | cta-observatory/lstmcpipe | 7a634e0b3b07dda2b20df47875d97616eab65821 | [
"MIT"
] | 3 | 2022-03-02T09:23:09.000Z | 2022-03-03T16:00:25.000Z | import os
from shutil import which
from .pipeline_config import load_config
__all__ = ["load_config"]
| 28.769231 | 87 | 0.671123 |
fd1a5012b7966cdeb8f03d71591cc6d8a74c6420 | 2,337 | py | Python | tests/stakkr_compose_test.py | dwade75/stakkr | ae77607e84b5b305ae8f5a14eb8f22237d943a29 | [
"Apache-2.0"
] | null | null | null | tests/stakkr_compose_test.py | dwade75/stakkr | ae77607e84b5b305ae8f5a14eb8f22237d943a29 | [
"Apache-2.0"
] | null | null | null | tests/stakkr_compose_test.py | dwade75/stakkr | ae77607e84b5b305ae8f5a14eb8f22237d943a29 | [
"Apache-2.0"
] | null | null | null | import os
import sys
import stakkr.stakkr_compose as sc
import subprocess
import unittest
base_dir = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, base_dir + '/../')
# https://docs.python.org/3/library/unittest.html#assert-methods
if __name__ == "__main__":
unittest.main()
| 31.16 | 86 | 0.670946 |
fd1dc3801dfc8c9bd5d0968ee738990d98e2881b | 2,792 | py | Python | DQIC/backtesting/run.py | bladezzw/DeepQuantInChina | ce74a9bf8db91e3545ccc3e7af81f80796a536fa | [
"MIT"
] | 8 | 2019-04-14T03:05:19.000Z | 2020-02-13T18:35:41.000Z | DQIC/backtesting/run.py | bladezzw/DeepQuantInChina | ce74a9bf8db91e3545ccc3e7af81f80796a536fa | [
"MIT"
] | null | null | null | DQIC/backtesting/run.py | bladezzw/DeepQuantInChina | ce74a9bf8db91e3545ccc3e7af81f80796a536fa | [
"MIT"
] | 2 | 2019-05-08T08:23:50.000Z | 2020-01-23T03:54:41.000Z | import os,sys
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
sys.path.append(BASE_DIR)
import datetime
import time
from backtesting.backtest import Backtest
from backtesting.data import HistoricCSVDataHandler
from backtesting.execution import SimulatedExecutionHandler
from backtesting.portfolio import Portfolio,Portfolio_For_futures
from Strategy.strategy import MovingAverageCrossStrategy
def exc_time(func, *args, **kwargs):
    """Run ``func`` with the given arguments and report its execution time.

    :param func: the callable to execute
    :param args: positional arguments forwarded to ``func``
    :param kwargs: keyword arguments forwarded to ``func``
    :return: whatever ``wrapper`` returns -- presumably the time spent
        running ``func``; confirm against the definition of ``wrapper``
    """
    # NOTE(review): ``wrapper`` is not defined in this view; it is assumed to
    # be a timing helper defined elsewhere in this module -- verify it exists.
    return wrapper(func, *args, **kwargs)
if __name__ == "__main__":
csv_dir = r"~/data"
symbol_list = ['000001.SH']
initial_capital = 5000.0
start_date = datetime.datetime(2012,1,1,0,0,0) # .
heartbeat = 0.0
#default: MACS = MovingAverageCrossStrategy(short_window=10,long_window=30)
MACS = MovingAverageCrossStrategy
commission = 5
exchangeID = ''
lever = 10 # ,(,,:rb,110)
backtest_type = 'futures'
if backtest_type == 'stock':
backtest = Backtest(csv_dir=csv_dir,
symbol_list=symbol_list,
initial_capital=initial_capital,
heartbeat=heartbeat,
start_date=start_date,
data_handler=HistoricCSVDataHandler,
execution_handler=SimulatedExecutionHandler,
portfolio=Portfolio,
strategy=MACS,
commission=commission,
exchangeID=None,
lever=1
)
elif backtest_type == 'futures':
backtest = Backtest(csv_dir=csv_dir,
symbol_list=symbol_list,
initial_capital=initial_capital,
heartbeat=heartbeat,
start_date=start_date,
data_handler=HistoricCSVDataHandler,
execution_handler=SimulatedExecutionHandler,
portfolio=Portfolio_For_futures,
strategy=MACS,
commission=commission,
exchangeID=exchangeID,
lever=lever
)
exc_time(backtest.simulate_trading) # run time is 1.2880792617797852
| 33.638554 | 78 | 0.567335 |
fd1e95f3c8250711415f3acb25bf5e3b26c63f39 | 3,306 | py | Python | Emergency/DB/DRAW3D.py | LeeDaeil/CNS_Autonomous | 2ae3688cfd654b9669893e3cdf4cdf1ac0748b9f | [
"Apache-2.0"
] | 2 | 2020-03-22T14:35:00.000Z | 2020-05-26T05:06:41.000Z | Emergency/DB/DRAW3D.py | LeeDaeil/CNS_Autonomous | 2ae3688cfd654b9669893e3cdf4cdf1ac0748b9f | [
"Apache-2.0"
] | null | null | null | Emergency/DB/DRAW3D.py | LeeDaeil/CNS_Autonomous | 2ae3688cfd654b9669893e3cdf4cdf1ac0748b9f | [
"Apache-2.0"
] | null | null | null | import matplotlib.pylab as plt
import numpy as np
import pandas as pd
from COMMONTOOL import PTCureve
DB = pd.read_csv('0_228.txt')
# DB = pd.read_csv('../3 /322.txt')
# target_time = 100
# for i in range(0, len(DB)):
# if DB['KCNTOMS'].loc[i] != target_time:
# DB.drop([i], inplace=True)
# else:
# target_time += 100
x, y, z, zero = [], [], [], []
PTY, PTX, BotZ, UpZ = [], [], [], []
RateX, RateY, RateZ = [], [], []
SaveTIMETEMP = {'Temp':0, 'Time':0}
for temp, t, pres, co1, co2 in zip(DB['UAVLEG2'].tolist(), DB['KCNTOMS'].tolist(),
DB['ZINST65'].tolist(), DB['KLAMPO6'].tolist(),
DB['KLAMPO9'].tolist()):
x.append(temp)
y.append(-t)
z.append(pres)
if co1 == 0 and co2 == 1 and t > 1500:
if SaveTIMETEMP['Time'] == 0:
SaveTIMETEMP['Time'] = t
SaveTIMETEMP['Temp'] = temp
rate = -55 / (60 * 60 * 5)
get_temp = rate * (t - SaveTIMETEMP['Time']) + SaveTIMETEMP['Temp']
RateX.append(get_temp)
RateY.append(-t)
RateZ.append(0)
zero.append(0)
Temp = []
UpPres = []
BotPres = []
for _ in range(0, 350):
uppres, botpres = PTCureve()._get_pres(_)
Temp.append([_])
UpPres.append([uppres])
BotPres.append([botpres])
PTX = np.array(Temp)
BotZ = np.array(BotPres)
UpZ = np.array(UpPres)
PTY = np.array([[0] for _ in range(0, 350)])
PTX = np.hstack([PTX[:, 0:1], Temp])
BotZ = np.hstack([BotZ[:, 0:1], BotPres])
UpZ = np.hstack([UpZ[:, 0:1], UpPres])
PTY = np.hstack([PTY[:, 0:1], np.array([[-t] for _ in range(0, 350)])])
print(np.shape(PTX))
# Render the 3-D view: trajectory (temperature, -time, pressure), the
# reference cool-down line, the P-T limit surfaces, and 2-D projections.
fig = plt.figure()
ax1 = plt.axes(projection='3d')
# Reference cool-down rate line on the z=0 plane.
ax1.plot3D(RateX, RateY, RateZ, color='orange', lw=1.5, ls='--')
# Dashed guide box between pressures 17 and 29.5 over the full time span.
ax1.plot3D([170, 0, 0, 170, 170],
           [y[-1], y[-1], 0, 0, y[-1]],
           [29.5, 29.5, 29.5, 29.5, 29.5], color='black', lw=0.5, ls='--')
ax1.plot3D([170, 0, 0, 170, 170],
           [y[-1], y[-1], 0, 0, y[-1]],
           [17, 17, 17, 17, 17], color='black', lw=0.5, ls='--')
ax1.plot3D([170, 170], [y[-1], y[-1]],
           [17, 29.5], color='black', lw=0.5, ls='--')
ax1.plot3D([170, 170], [0, 0], [17, 29.5], color='black', lw=0.5, ls='--')
ax1.plot3D([0, 0], [y[-1], y[-1]], [17, 29.5], color='black', lw=0.5, ls='--')
ax1.plot3D([0, 0], [0, 0], [17, 29.5], color='black', lw=0.5, ls='--')
# Upper and lower P-T limit surfaces (semi-transparent red).
ax1.plot_surface(PTX, PTY, UpZ, rstride=8, cstride=8, alpha=0.15, color='r')
ax1.plot_surface(PTX, PTY, BotZ, rstride=8, cstride=8, alpha=0.15, color='r')
# ax1.scatter(PTX, PTY, BotZ, marker='*')
# The actual plant trajectory.
ax1.plot3D(x, y, z, color='blue', lw=1.5)
# linewidth or lw: float
# Drop lines from the trajectory end point onto each axis plane.
ax1.plot3D([x[-1], x[-1]], [y[-1], y[-1]], [0, z[-1]], color='blue', lw=0.5, ls='--')
ax1.plot3D([0, x[-1]], [y[-1], y[-1]], [z[-1], z[-1]], color='blue', lw=0.5, ls='--')
ax1.plot3D([x[-1], x[-1]], [0, y[-1]], [z[-1], z[-1]], color='blue', lw=0.5, ls='--')
# Projections of the trajectory onto each coordinate plane.
ax1.plot3D(x, y, zero, color='black', lw=1, ls='--')  # temp
ax1.plot3D(zero, y, z, color='black', lw=1, ls='--')  # pres
ax1.plot3D(x, zero, z, color='black', lw=1, ls='--')  # PT
# Show elapsed time as positive tick labels even though y stores -t.
ax1.set_yticklabels([int(_) for _ in abs(ax1.get_yticks())])
ax1.set_xlabel('Temperature')
ax1.set_ylabel('Time [Tick]')
ax1.set_zlabel('Pressure')
ax1.set_xlim(0, 350)
ax1.set_zlim(0, 200)
plt.show()
fd1f6bede9086f03b21d05de1c334c5625e057f0 | 658 | py | Python | aeroplast/images.py | kk6/aeroplast | 8347bf071f43a560b865a7f37b76fb05f5cea57d | [
"MIT"
] | 1 | 2019-11-12T07:02:20.000Z | 2019-11-12T07:02:20.000Z | aeroplast/images.py | kk6/aeroplast | 8347bf071f43a560b865a7f37b76fb05f5cea57d | [
"MIT"
] | 14 | 2018-11-13T09:57:09.000Z | 2019-04-05T20:02:46.000Z | aeroplast/images.py | kk6/aeroplast | 8347bf071f43a560b865a7f37b76fb05f5cea57d | [
"MIT"
] | 1 | 2018-12-20T07:52:59.000Z | 2018-12-20T07:52:59.000Z | # -*- coding: utf-8 -*-
"""
Transparent PNG conversion
~~~~~~~~~~~~~~~~~~~~~~~~~~
"""
from PIL import Image
def get_new_size(original_size, padding=2):
    """
    Return the size enlarged by ``padding`` pixels in every dimension.

    By default each dimension grows by 2px (one pixel of border on each
    side), matching the historical behaviour of this helper.

    :param original_size: Original image's size, e.g. ``(width, height)``.
    :param padding: Pixels added to every dimension (default ``2``).
    :return: Width / height after calculation.
    :rtype: tuple
    """
    return tuple(x + padding for x in original_size)
def resize_image(image, size):
    """
    Shrink *image* in place so that it fits within *size*.

    Delegates to ``Image.thumbnail``, which keeps the aspect ratio and never
    enlarges the image.

    :param image: Original image.
    :param size: Tuple of (width, height).
    :return: The same image object, resized.
    :rtype: :py:class: `~PIL.Image.Image`
    """
    resized = image
    resized.thumbnail(size)
    return resized
| 18.8 | 50 | 0.617021 |
fd22786701bf4e42b8d3932674e46c80a650982c | 678 | py | Python | common/common/management/commands/makemessages.py | FSTUM/rallyetool-v2 | 2f3e2b5cb8655abe023ed1215b7182430b75bb23 | [
"MIT"
] | 1 | 2021-10-30T09:31:02.000Z | 2021-10-30T09:31:02.000Z | common/common/management/commands/makemessages.py | FSTUM/rallyetool-v2 | 2f3e2b5cb8655abe023ed1215b7182430b75bb23 | [
"MIT"
] | 9 | 2021-11-23T10:13:43.000Z | 2022-03-01T15:04:15.000Z | common/common/management/commands/makemessages.py | CommanderStorm/rallyetool-v2 | 721413d6df8afc9347dac7ee83deb3a0ad4c01bc | [
"MIT"
] | 1 | 2021-10-16T09:07:47.000Z | 2021-10-16T09:07:47.000Z | from django.core.management.commands import makemessages
| 27.12 | 64 | 0.610619 |
fd2333a5ba2bad8fcd4a158981a7c15852072e07 | 6,529 | py | Python | app/api/v2/users/views_update.py | Raywire/iReporter | ac58414b84b9c96f0be5e0d477355d0811d8b9c5 | [
"MIT"
] | 3 | 2019-01-09T15:17:28.000Z | 2019-12-01T18:40:50.000Z | app/api/v2/users/views_update.py | Raywire/iReporter | ac58414b84b9c96f0be5e0d477355d0811d8b9c5 | [
"MIT"
] | 13 | 2018-11-30T05:33:13.000Z | 2021-04-30T20:46:41.000Z | app/api/v2/users/views_update.py | Raywire/iReporter | ac58414b84b9c96f0be5e0d477355d0811d8b9c5 | [
"MIT"
] | 3 | 2018-12-02T16:10:12.000Z | 2019-01-04T14:51:04.000Z | """Views for users"""
from flask_restful import Resource
from flask import jsonify, request
from app.api.v2.users.models import UserModel
from app.api.v2.decorator import token_required, get_token
from app.api.v2.send_email import send
| 33.482051 | 135 | 0.551386 |
fd247bba2b56b1306086374a12025c1833517c10 | 7,357 | py | Python | LoadDataAndPrepare/Make_Dictionaries/4_make_reviews_all_words_vocab_dictionary.py | ngrover2/Automatic_Lexicon_Induction | b58a1d55f294293161dc23ab2e6d669c1c5e90d8 | [
"MIT"
] | null | null | null | LoadDataAndPrepare/Make_Dictionaries/4_make_reviews_all_words_vocab_dictionary.py | ngrover2/Automatic_Lexicon_Induction | b58a1d55f294293161dc23ab2e6d669c1c5e90d8 | [
"MIT"
] | null | null | null | LoadDataAndPrepare/Make_Dictionaries/4_make_reviews_all_words_vocab_dictionary.py | ngrover2/Automatic_Lexicon_Induction | b58a1d55f294293161dc23ab2e6d669c1c5e90d8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import sys
import os
import traceback
import ujson
from pprint import pprint
from textblob import TextBlob as tb
from textblob import Word as wd
import shutil
from collections import defaultdict
from gensim.corpora import Dictionary
from socialconfig import config
if __name__ == "__main__":
try:
mk = make_dictionaries()
mk.create_dictionary()
except:
raise
| 42.773256 | 156 | 0.740791 |
fd25176d16569d58b76384f5892d5e731b3f5603 | 434 | py | Python | tests/test_demo.py | PrabhuLoganathan/Pytest-Selenium-POM--Google | d0b50faf28a0c3797d1b0bc838d77f82dd0be81c | [
"MIT"
] | null | null | null | tests/test_demo.py | PrabhuLoganathan/Pytest-Selenium-POM--Google | d0b50faf28a0c3797d1b0bc838d77f82dd0be81c | [
"MIT"
] | 1 | 2021-04-22T12:02:19.000Z | 2021-04-22T12:02:19.000Z | tests/test_demo.py | wadle/test_demo_pytest_selenium | bc9f2b4a9da895568da2cc3f0bc904f78775884c | [
"MIT"
] | 1 | 2020-11-26T20:03:07.000Z | 2020-11-26T20:03:07.000Z | from pages.demo_page import DemoPage
| 16.074074 | 46 | 0.532258 |
fd266f079f9daa527e01e31d5df3c4df79e8150b | 1,126 | py | Python | day 03/day03_part1.py | MischaDy/PyAdventOfCode2020 | 3e0a1a61ac930d7e30a0104ac617008297508fcb | [
"CC0-1.0"
] | 2 | 2020-12-17T18:49:20.000Z | 2021-02-20T16:48:14.000Z | day 03/day03_part1.py | MischaDy/PyAdventOfCode2020 | 3e0a1a61ac930d7e30a0104ac617008297508fcb | [
"CC0-1.0"
] | null | null | null | day 03/day03_part1.py | MischaDy/PyAdventOfCode2020 | 3e0a1a61ac930d7e30a0104ac617008297508fcb | [
"CC0-1.0"
] | 3 | 2020-12-20T19:08:32.000Z | 2020-12-26T22:11:15.000Z | from helpers.cyclic_list import CyclicList
from helpers.coordinates2d import Coordinates2D
RUN_TEST = False
TEST_SOLUTION = 7
TEST_INPUT_FILE = 'test_input_day_03.txt'
INPUT_FILE = 'input_day_03.txt'
START = Coordinates2D((0, 0)) # top left corner
TRAJECTORY = Coordinates2D((3, 1)) # right 3, down 1
ARGS = [START, TRAJECTORY]
if __name__ == '__main__':
if RUN_TEST:
solution = main_part1(TEST_INPUT_FILE, *ARGS)
print(solution)
assert (TEST_SOLUTION == solution)
else:
solution = main_part1(INPUT_FILE, *ARGS)
print(solution)
| 26.186047 | 86 | 0.688277 |
fd279c40ef3cc1786017d66b7c1e1b885d2e67e1 | 807 | py | Python | binary_tree/e_path_sum.py | dhrubach/python-code-recipes | 14356c6adb1946417482eaaf6f42dde4b8351d2f | [
"MIT"
] | null | null | null | binary_tree/e_path_sum.py | dhrubach/python-code-recipes | 14356c6adb1946417482eaaf6f42dde4b8351d2f | [
"MIT"
] | null | null | null | binary_tree/e_path_sum.py | dhrubach/python-code-recipes | 14356c6adb1946417482eaaf6f42dde4b8351d2f | [
"MIT"
] | null | null | null | ###############################################
# LeetCode Problem Number : 112
# Difficulty Level : Easy
# URL : https://leetcode.com/problems/path-sum/
###############################################
from binary_search_tree.tree_node import TreeNode
| 25.21875 | 59 | 0.484511 |
fd2a11e31304af5fc6efc0099f574785c36f1cf8 | 127 | py | Python | High School/9th Grade APCSP (Python)/Unit 7/07.02.03.py | SomewhereOutInSpace/Computer-Science-Class | f5d21850236a7a18dc53b4a650ecbe9a11781f1d | [
"Unlicense"
] | null | null | null | High School/9th Grade APCSP (Python)/Unit 7/07.02.03.py | SomewhereOutInSpace/Computer-Science-Class | f5d21850236a7a18dc53b4a650ecbe9a11781f1d | [
"Unlicense"
] | null | null | null | High School/9th Grade APCSP (Python)/Unit 7/07.02.03.py | SomewhereOutInSpace/Computer-Science-Class | f5d21850236a7a18dc53b4a650ecbe9a11781f1d | [
"Unlicense"
] | null | null | null | st1 = input()
st2 = input()
st3 = input()
listy = [st1, st2, st3]
listy.sort()
print(listy[0])
print(listy[1])
print(listy[2])
| 14.111111 | 23 | 0.629921 |
fd2a6655ca5c2bb5ada546fc62616fc063ba84a1 | 3,900 | py | Python | tests/unit/guests/linux/storage/disk.py | tessia-project/tessia-baselib | 07004b7f6462f081a6f7e810954fd7e0d2cdcf6b | [
"Apache-2.0"
] | 1 | 2022-01-27T01:32:14.000Z | 2022-01-27T01:32:14.000Z | tests/unit/guests/linux/storage/disk.py | tessia-project/tessia-baselib | 07004b7f6462f081a6f7e810954fd7e0d2cdcf6b | [
"Apache-2.0"
] | null | null | null | tests/unit/guests/linux/storage/disk.py | tessia-project/tessia-baselib | 07004b7f6462f081a6f7e810954fd7e0d2cdcf6b | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test module for disk module
"""
#
# IMPORTS
#
from tessia.baselib.common.ssh.client import SshClient
from tessia.baselib.common.ssh.shell import SshShell
from tessia.baselib.guests.linux.storage import disk as disk_module
from unittest import mock
from unittest import TestCase
#
# CONSTANTS AND DEFINITIONS
#
PARAMS_WITH_SYS_ATTRS = {
"system_attributes": {
"libvirt": "somexml"
},
"volume_id": "some_disk_id"
}
PARAMS_WITHOUT_SYS_ATTRS = {
"volume_id": "some_disk_id"
}
#
# CODE
#
# TestBaseDisk
| 30 | 78 | 0.646923 |
fd2a6afac5326ae943f8dfecdd01a780b28ea0e3 | 5,888 | py | Python | scripts/mv_drawables.py | euitam/MoveDrawable | 33923276d856de8effff9e958ba7a33767739137 | [
"Apache-2.0"
] | null | null | null | scripts/mv_drawables.py | euitam/MoveDrawable | 33923276d856de8effff9e958ba7a33767739137 | [
"Apache-2.0"
] | null | null | null | scripts/mv_drawables.py | euitam/MoveDrawable | 33923276d856de8effff9e958ba7a33767739137 | [
"Apache-2.0"
] | null | null | null | import sys, os, errno, shutil, signal, subprocess
from glob import glob
signal.signal(signal.SIGINT, sigint_handler)
# BEGIN SCRIPT
printTitle()
print bcolors.FAIL
print "Exit script anytime with CTRL+C"
print bcolors.ENDC
# Check argument number
if len(sys.argv) < 3:
print "Usage: {} SRC_DIRECTORY ANDROID_RESOURCES_DIRECTORY".format(sys.argv[0])
sys.exit()
srcDir = sys.argv[1]
destDir = sys.argv[2]
TMP_DIR = srcDir + '/tmp'
MDPI = '/drawable-mdpi/'
HDPI = '/drawable-hdpi/'
XHDPI = '/drawable-xhdpi/'
XXHDPI = '/drawable-xxhdpi/'
XXXHDPI = '/drawable-xxxhdpi/'
MDPI_DIR = TMP_DIR + MDPI
HDPI_DIR = TMP_DIR + HDPI
XHDPI_DIR = TMP_DIR + XHDPI
XXHDPI_DIR = TMP_DIR + XXHDPI
XXXHDPI_DIR = TMP_DIR + XXXHDPI
RESULT_DIRS = [MDPI, HDPI, XHDPI, XXHDPI, XXXHDPI]
cleanup()
# Check source directory
if not os.path.isdir(srcDir):
print "{} should be a directory".format(srcDir)
exit()
# Check destination directory
if not os.path.isdir(destDir):
print "{} should be a directory".format(destDir)
exit()
if not os.path.isdir(destDir+'/values'):
print "{} is not an Android resources directory".format(destDir)
exit()
if not os.path.isdir(destDir+'/layout'):
print "{} is not an Android resources directory".format(destDir)
exit()
images = [y for x in os.walk(srcDir) for y in glob(os.path.join(x[0], '*.png'))]
if len(images) == 0:
print bcolors.FAIL+"No files to process"+bcolors.ENDC
exit()
# CONSTANTS
# Create density directories
createDirIfNotExists(MDPI_DIR)
createDirIfNotExists(HDPI_DIR)
createDirIfNotExists(XHDPI_DIR)
createDirIfNotExists(XXHDPI_DIR)
createDirIfNotExists(XXXHDPI_DIR)
for file in images:
# print "- {}".format(file)
moveFileToTmp(file)
# build distinct names
newImages = [y for x in os.walk(TMP_DIR) for y in glob(os.path.join(x[0], '*.png'))]
distinctNames = []
print bcolors.BOLD + 'Drawable files found:' + bcolors.ENDC
for file in newImages:
name = os.path.basename(file)
if not name in distinctNames:
print '- {}'.format(name)
distinctNames.append(name)
# Ask for renaming
print ""
if len(distinctNames):
print bcolors.HEADER + "Any existing file will be overwritten by the renaming" + bcolors.ENDC
for name in distinctNames:
newName = raw_input('Rename '+bcolors.OKBLUE+name.replace('.png', '')+bcolors.ENDC+' to ('+bcolors.UNDERLINE+'leave blank to skip renaming'+bcolors.ENDC+'): ')
newName = "{}.png".format(newName).strip()
if len(newName) > 0:
renameFile(name, newName)
# Ask for WebP compression
cwebp = getWebpConverter()
compress = False
if len(cwebp) > 0:
compressResponse = raw_input('Compress files to WebP format? [y] or [n] ')
compressResponse = compressResponse.strip()
if len(compressResponse) > 0 and (compressResponse == 'y' or compressResponse == 'Y'):
compress = True
# Move to destination folder
if compress:
moveToDest(cwebp)
else:
moveToDest("")
print ""
print bcolors.OKGREEN + '{} resource files moved to workspace'.format(len(newImages)) + bcolors.ENDC
cleanup()
| 28.038095 | 160 | 0.667969 |
fd2ae8dc293d1b7377165b6678e015927d2d75d1 | 5,479 | py | Python | kornia/x/trainer.py | AK391/kornia | a2535eb7593ee2fed94d23cc720804a16f9f0e7e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kornia/x/trainer.py | AK391/kornia | a2535eb7593ee2fed94d23cc720804a16f9f0e7e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | kornia/x/trainer.py | AK391/kornia | a2535eb7593ee2fed94d23cc720804a16f9f0e7e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | import logging
from typing import Callable, Dict
import torch
import torch.nn as nn
from torch.utils.data import DataLoader
# the accelerator library is a requirement for the Trainer
# but it is optional for grousnd base user of kornia.
try:
from accelerate import Accelerator
except ImportError:
Accelerator = None
from .metrics import AverageMeter
from .utils import Configuration, TrainerState
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.DEBUG)
callbacks_whitelist = [
"preprocess", "augmentations", "evaluate", "fit", "checkpoint", "terminate"
]
| 34.459119 | 95 | 0.632962 |
fd2c1f1342cad7325a43f5762d5c2d1d94cfe573 | 2,795 | py | Python | datasets/transformations/jpeg_compress.py | bytedance/Hammer | 388ed20b3d9b34f33f5357d75f8fe5d726782ec8 | [
"MIT"
] | 97 | 2022-02-08T09:00:57.000Z | 2022-03-23T05:33:35.000Z | datasets/transformations/jpeg_compress.py | bytedance/Hammer | 388ed20b3d9b34f33f5357d75f8fe5d726782ec8 | [
"MIT"
] | null | null | null | datasets/transformations/jpeg_compress.py | bytedance/Hammer | 388ed20b3d9b34f33f5357d75f8fe5d726782ec8 | [
"MIT"
] | 7 | 2022-02-08T15:13:02.000Z | 2022-03-19T19:11:13.000Z | # python3.7
"""Implements JPEG compression on images."""
import cv2
import numpy as np
try:
import nvidia.dali.fn as fn
import nvidia.dali.types as types
except ImportError:
fn = None
from utils.formatting_utils import format_range
from .base_transformation import BaseTransformation
__all__ = ['JpegCompress']
| 35.379747 | 80 | 0.65975 |
fd2ca1a6e56d2464e000ae2d9a68e5afd6f6c238 | 2,046 | py | Python | venv/VFR/flask_app_3d.py | flhataf/Virtual-Fitting-Room | e5b41849df963cebd3b7deb7e87d643ece5b6d18 | [
"MIT"
] | null | null | null | venv/VFR/flask_app_3d.py | flhataf/Virtual-Fitting-Room | e5b41849df963cebd3b7deb7e87d643ece5b6d18 | [
"MIT"
] | null | null | null | venv/VFR/flask_app_3d.py | flhataf/Virtual-Fitting-Room | e5b41849df963cebd3b7deb7e87d643ece5b6d18 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 15 00:31:35 2021
@author: RayaBit
"""
from flask import Flask, render_template, Response
from imutils.video import VideoStream
from skeletonDetector import skeleton
import cv2
from skeleton3DDetector import Skeleton3dDetector
from visualization import Visualizer
import time
app = Flask(__name__)
if __name__ == '__main__':
app.run(host="127.0.0.1", port=8087) | 26.571429 | 115 | 0.616813 |
fd2dbbfa0aac3167c6b35b08529f51283edf8826 | 8,144 | py | Python | schedsi/threads/thread.py | z33ky/schedsi | 3affe28a3e1d2001c639d7c0423cb105d1991590 | [
"CC0-1.0"
] | 1 | 2017-08-03T12:58:53.000Z | 2017-08-03T12:58:53.000Z | schedsi/threads/thread.py | z33ky/schedsi | 3affe28a3e1d2001c639d7c0423cb105d1991590 | [
"CC0-1.0"
] | null | null | null | schedsi/threads/thread.py | z33ky/schedsi | 3affe28a3e1d2001c639d7c0423cb105d1991590 | [
"CC0-1.0"
] | null | null | null | """Define the :class:`Thread`."""
import threading
from schedsi.cpu import request as cpurequest
from schedsi.cpu.time import Time
#: Whether to log individual times, or only the sum
LOG_INDIVIDUAL = True
| 32.706827 | 94 | 0.608915 |
fd3187f8e540b93ec7789114fa6cc6e3969608ec | 1,335 | py | Python | pyBRML/pyBRML/core.py | anich003/brml_toolkit | de8218bdf333902431d4c0055fcf5cb3dc47d0c1 | [
"MIT"
] | null | null | null | pyBRML/pyBRML/core.py | anich003/brml_toolkit | de8218bdf333902431d4c0055fcf5cb3dc47d0c1 | [
"MIT"
] | null | null | null | pyBRML/pyBRML/core.py | anich003/brml_toolkit | de8218bdf333902431d4c0055fcf5cb3dc47d0c1 | [
"MIT"
] | null | null | null | import copy
from pyBRML import utils
from pyBRML import Array
def multiply_potentials(list_of_potentials):
    """
    Return the product of each potential in ``list_of_potentials``; useful for
    calculating joint probabilities.

    For example, if the joint probability of a system is defined as
        p(A,B,C) = p(C|A,B) p(A) p(B)
    then ``list_of_potentials`` should contain 3 potentials, one per factor.

    Since potentials can be defined over variables in an arbitrary order, each
    potential is meant to be reshaped/cast (using numpy broadcasting) before
    the tables are multiplied element-wise.
    """
    # Collect the set of variables from each pot. Used to reshape each potential.table
    variable_set = set(var for potential in list_of_potentials for var in potential.variables)
    variable_set = list(variable_set)
    # Copy potentials to avoid mutating original objects
    potentials = copy.deepcopy(list_of_potentials)
    # Reshape each potential prior to taking their product
    for pot in potentials:
        # NOTE(review): this only rebinds the loop variable -- the value
        # returned by utils.format_table() is discarded and ``potentials`` is
        # left unchanged.  Presumably the formatted table should be stored
        # back on the potential; confirm against utils.format_table().
        pot = utils.format_table(pot.variables, pot.table, variable_set)
    # Multiply potentials element-wise and return the accumulated product
    new_potential = potentials[0]
    for pot in potentials[1:]:
        new_potential.table = new_potential.table * pot.table
    return new_potential
| 35.131579 | 94 | 0.743071 |
fd3394f2b7968055dc2a5d2b8bdde46ae4644c49 | 2,098 | py | Python | lib/akf_known_uncategories.py | UB-Mannheim/docxstruct | dd6d99b6fd6f5660fdc61a14b60e70a54ac9be85 | [
"Apache-2.0"
] | 1 | 2019-03-06T14:59:44.000Z | 2019-03-06T14:59:44.000Z | lib/akf_known_uncategories.py | UB-Mannheim/docxstruct | dd6d99b6fd6f5660fdc61a14b60e70a54ac9be85 | [
"Apache-2.0"
] | null | null | null | lib/akf_known_uncategories.py | UB-Mannheim/docxstruct | dd6d99b6fd6f5660fdc61a14b60e70a54ac9be85 | [
"Apache-2.0"
] | null | null | null | import re | 31.313433 | 108 | 0.515253 |
fd33bdf592a5bbf5b20d72627b7e89fa294ef5bf | 1,640 | py | Python | maps/templatetags/mapas_tags.py | lsalta/mapground | d927d283dab6f756574bd88b3251b9e68f000ca7 | [
"MIT"
] | null | null | null | maps/templatetags/mapas_tags.py | lsalta/mapground | d927d283dab6f756574bd88b3251b9e68f000ca7 | [
"MIT"
] | 3 | 2020-02-11T23:04:56.000Z | 2021-06-10T18:07:53.000Z | maps/templatetags/mapas_tags.py | lsalta/mapground | d927d283dab6f756574bd88b3251b9e68f000ca7 | [
"MIT"
] | 1 | 2021-08-20T14:49:09.000Z | 2021-08-20T14:49:09.000Z | from django import template
register = template.Library()
register.inclusion_tag('mapas/mapa.html', takes_context=True)(mostrar_resumen_mapa)
register.filter('quitar_char',quitar_char)
register.filter('replace_text',replace_text)
register.filter('truncar_string',truncar_string)
def get_range(value):
    """
    Filter - return a ``range`` made from the given value, or ``None`` when
    the value cannot be used as a range length.

    Usage (in template):

        <ul>{% for i in 3|get_range %}
          <li>{{ i }}. Do something</li>
        {% endfor %}</ul>

    Results with the HTML:

        <ul>
          <li>0. Do something</li>
          <li>1. Do something</li>
          <li>2. Do something</li>
        </ul>

    Instead of 3 one may use a variable set in the view.
    """
    try:
        return range(value)
    except TypeError:
        # Non-integer input (e.g. a string that reached the template):
        # keep the old behaviour of returning None, but no longer swallow
        # every other exception with a bare ``except``.
        return None
register.filter('get_range',get_range)
register.filter('sort_by',sort_by)
| 22.777778 | 83 | 0.668902 |