hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 11
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 251 | max_stars_repo_name stringlengths 4 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 251 | max_issues_repo_name stringlengths 4 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 251 | max_forks_repo_name stringlengths 4 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.05M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.04M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6c1bc99ed022294f204e51cb23b911f2274cbb0b | 525 | py | Python | examples/cuda/bfs/py/vcache.py | bespoke-silicon-group/bsg_replicant | cadd8dcb3fb1382adf39479cdd9bc7463f269fa0 | [
"BSD-3-Clause"
] | 12 | 2020-03-27T13:15:54.000Z | 2022-03-25T14:22:26.000Z | examples/cuda/bfs/py/vcache.py | bespoke-silicon-group/bsg_f1 | 08b7be7162719b92b4796f18b0caad263f90ea2f | [
"BSD-3-Clause"
] | 255 | 2019-05-10T01:08:51.000Z | 2020-01-29T18:45:32.000Z | examples/cuda/bfs/py/vcache.py | bespoke-silicon-group/bsg_replicant | cadd8dcb3fb1382adf39479cdd9bc7463f269fa0 | [
"BSD-3-Clause"
] | 8 | 2020-02-21T18:28:34.000Z | 2021-07-24T00:22:29.000Z | from vcache_utils import VCacheStats
from bfs_common import BFSParameters
import sys
import pandas as pd
data = pd.DataFrame()
for filename in sys.argv[1:]:
data = data.append(BFSVCacheStats(filename).diffed_data)
data.to_csv("vcache.summary.csv")
| 29.166667 | 60 | 0.76381 |
6c1bdfa67d70b8200b8f300c42147c7b6f88c84a | 14,511 | py | Python | fgread/read.py | FASTGenomics/jupyter-fgread-py | 400eb54e2376a8a3afaa674397617fa64c33a280 | [
"MIT"
] | 1 | 2019-12-09T17:41:09.000Z | 2019-12-09T17:41:09.000Z | fgread/read.py | FASTGenomics/jupyter-fgread-py | 400eb54e2376a8a3afaa674397617fa64c33a280 | [
"MIT"
] | 2 | 2019-09-26T13:49:56.000Z | 2020-08-06T15:10:17.000Z | fgread/read.py | FASTGenomics/jupyter-fgread-py | 400eb54e2376a8a3afaa674397617fa64c33a280 | [
"MIT"
] | null | null | null | import json
import logging
import re
from pathlib import Path
from typing import Optional, Union
import pandas as pd
from . import DOCSURL, DS_URL_PREFIX, readers
# configure logging
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.INFO)
logger.addHandler(ch)
DEFAULT_READERS = {
"loom": readers.read_loom_to_anndata,
"rds": readers.read_seurat_to_anndata,
"h5ad": readers.read_anndata_to_anndata,
"hdf5": readers.read_10xhdf5_to_anndata,
"h5": readers.read_10xhdf5_to_anndata,
"tsv": readers.read_densetsv_to_anndata,
"csv": readers.read_densecsv_to_anndata,
}
DATA_DIR = Path("/fastgenomics/data")
DF_SORT_ORDER = [
"title",
"id",
"organism",
"tissue",
"numberOfCells",
"numberOfGenes",
"path",
"numberOfExpressionDataFiles",
"expressionDataFileNames",
"numberOfMetaDataFiles",
"metaDataFileNames",
"expressionDataFileInfos",
"metaDataFileInfos",
]
def get_datasets_df(data_dir: Path = DATA_DIR) -> pd.DataFrame:
"""Constructs a :py:func:`pandas.DataFrame` from all available datasets.
Parameters
----------
data_dir : Path, optional
Directory containing the datasets, e.g. ``fastgenomics/data``, by default DATA_DIR
Returns
-------
pd.DataFrame
A pandas DataFrame containing all available datasets
"""
ds_paths = get_ds_paths(data_dir=data_dir)
ds_df = pd.DataFrame()
for ds_path in ds_paths:
with open(ds_path / "dataset_info.json") as f:
info_df = json.load(f)
info_df["path"] = str(ds_path)
info_df["numberOfExpressionDataFiles"] = len(
info_df["expressionDataFileInfos"]
)
info_df["numberOfMetaDataFiles"] = len(info_df["metaDataFileInfos"])
_ = info_df.pop("schemaVersion", None)
ds_df = ds_df.append(info_df, ignore_index=True)
# sort colnames
col_names = ds_df.columns.values.tolist()
col_names_sorted = [name for name in DF_SORT_ORDER if name in col_names]
[col_names.remove(name) for name in DF_SORT_ORDER if name in col_names]
col_names_sorted.extend(col_names)
ds_df = ds_df[col_names_sorted]
# Format types
ds_df = ds_df.astype(
{
"numberOfCells": "int32",
"numberOfGenes": "int32",
"numberOfExpressionDataFiles": "int32",
"numberOfMetaDataFiles": "int32",
}
)
return ds_df
def ds_info(
ds: Optional[str] = None,
pretty: bool = None,
output: bool = None,
data_dir: Path = DATA_DIR,
) -> pd.DataFrame:
"""Get information on all available datasets in this analysis.
Parameters
----------
ds : Optional[str], optional
A single dataset ID or dataset title. If set, only this dataset will be displayed. Recommended to use with ``pretty``, by default None
pretty : bool, optional
Whether to display some nicely formatted output, by default True
output : bool, optional
Whether to return a DataFrame or not, by default True
data_dir : Path, optional
Directory containing the datasets, e.g. ``fastgenomics/data``, by default DATA_DIR
Returns
-------
pd.DataFrame
A pandas DataFrame containing all, or a single dataset (depends on ``ds``)
"""
if pretty is None:
pretty = ds is not None
if output is None:
output = ds is None
if not pretty and not output:
logger.warning(
'You have set "pretty" and "output" to false. Hence, this function will do/return nothing.'
)
return
try:
ds_df = get_datasets_df(data_dir=data_dir)
except NoDatasetsError as err:
logger.warning(err)
return pd.DataFrame()
if ds:
single_ds_df = select_ds_id(ds, df=ds_df)
single_ds_df["expressionDataFileNames"] = ", ".join(
[
expr["name"]
for expr in single_ds_df.loc[0, "expressionDataFileInfos"]
]
)
single_ds_df["metaDataFileNames"] = ", ".join(
[expr["name"] for expr in single_ds_df.loc[0, "metaDataFileInfos"]]
)
# Sort columns
single_col_names = single_ds_df.columns.values.tolist()
single_col_names_sorted = [
name for name in DF_SORT_ORDER if name in single_col_names
]
[
single_col_names.remove(name)
for name in DF_SORT_ORDER
if name in single_col_names
]
single_col_names_sorted.extend(single_col_names)
single_ds_df = single_ds_df[single_col_names_sorted]
if pretty:
pretty_df = single_ds_df
pretty_df["expressionDataFileNames"] = "<br>".join(
[
expr["name"]
for expr in pretty_df.loc[0, "expressionDataFileInfos"]
]
)
pretty_df["metaDataFileNames"] = ", ".join(
[expr["name"] for expr in pretty_df.loc[0, "metaDataFileInfos"]]
)
empty_cols = [
col for col in pretty_df.columns if pretty_df.loc[0, col] == ""
]
pretty_df = pretty_df.drop(
labels=["expressionDataFileInfos", "metaDataFileInfos"]
+ empty_cols,
axis=1,
errors="ignore",
)
pretty_df.loc[0, "title"] = pretty_df.apply(
lambda x: add_url(x.title, x.id), axis=1
).squeeze()
disp_pretty_df(pretty_df.T, header=False)
if output:
return single_ds_df
else:
if pretty:
pretty_df = ds_df.drop(
labels=[
"description",
"license",
"preprocessing",
"citation",
"webLink",
"file",
"expressionDataFileInfos",
"metaDataFileInfos",
],
axis=1,
errors="ignore",
)
pretty_df["title"] = pretty_df.apply(
lambda x: add_url(x.title, x.id), axis=1
)
disp_pretty_df(pretty_df)
if output:
return ds_df
def load_data(
ds: Optional[str] = None,
data_dir: Path = DATA_DIR,
additional_readers: dict = {},
expression_file: Optional[str] = None,
as_format: Optional[str] = None,
):
"""This function loads a single dataset into an AnnData object.
If there are multiple datasets available you need to specify one by setting
``ds`` to a dataset `id` or dataset `title`.
To get an overview of availabe dataset use :py:func:`ds_info`
Parameters
----------
ds : str, optional
A single dataset ID or dataset title to select a dataset to be loaded.
If only one dataset is available you do not need to set this parameter, by default None
data_dir : Path, optional
Directory containing the datasets, e.g. ``fastgenomics/data``, by default DATA_DIR
additional_readers : dict, optional
Used to specify your own readers for the specific data set format.
Dict key needs to be file extension (e.g., h5ad), dict value a function.
Still experimental, by default {}
expression_file: str, Optional
The name of the expression file to load.
Only needed when there are multiple expression files in a dataset.
as_format: str, optional
Specifies which reader should be uses for this dataset. Overwrites the auto-detection
of the format. Possible parameters are the file extensions of our supported data
formats: ``h5ad``, ``h5``, ``hdf5``, ``loom``, ``rds``, ``csv``, ``tsv``.
Returns
-------
AnnData Object
A single AnnData object with dataset id in `obs` and all dataset metadata in `uns`
Examples
--------
To use a custom reader for files with the extension ".fg", you have to define a function first:
>>> def my_loader(file):
... anndata = magic_file_loading(file)
... return anndata
You can then use this reader like this:
>>> fgread.load_data("my_dataset", additional_readers={"fg": my_loader})
"""
readers = {**DEFAULT_READERS, **additional_readers}
if ds:
single_df = select_ds_id(ds, df=get_datasets_df(data_dir=data_dir))
else:
single_df = get_datasets_df(data_dir=data_dir)
if len(single_df) > 1:
raise RuntimeError(
"There is more than one dataset available in this analysis. "
"Please select one by its ID or title. "
'You can list available datasets by using "fgread.ds_info()".'
)
exp_count = single_df.loc[0, "numberOfExpressionDataFiles"]
meta_count = single_df.loc[0, "numberOfMetaDataFiles"]
if exp_count == 0:
raise TypeError(
f"There is no expression data available in this data set.\n"
f"Metadata files: {meta_count}."
)
exp_files = [
exp["name"] for exp in single_df.loc[0, "expressionDataFileInfos"]
]
if expression_file:
if expression_file in exp_files:
file = expression_file
else:
raise KeyError(
f'Expression file "{expression_file}" not found in dataset. '
f"Available expression files are: {exp_files}."
)
else:
if exp_count == 1:
file = single_df.loc[0, "expressionDataFileInfos"][0]["name"]
else:
raise TypeError(
f"There are {exp_count} expression data files in this dataset. "
'Please specify which one you want to load using the parameter "expression_file". '
f"Available expression files are: {exp_files}."
)
title = single_df.loc[0, "title"]
ds_id = single_df.loc[0, "id"]
path = single_df.loc[0, "path"]
metadata_dict = single_df.loc[0].to_dict()
if as_format:
format = as_format.lower()
else:
try:
format = file.rsplit(".", 1)[1].lower()
logger.info(f'Expression file "{file}" with format "{format}".')
except ValueError as e:
raise ValueError(
f'The expression file "{file}" has no valid file suffix.'
).with_traceback(e.__traceback__)
if format in readers:
if meta_count != 0:
logger.info(
f"There are {meta_count} metadata files in this dataset. "
"This data will not be integrated into the anndata object."
)
logger.info(
f'Loading file "{file}" from dataset "{title}" in format "{format}" from directory "{path}"...\n'
)
adata = readers[format](Path(path) / file)
adata.uns["ds_metadata"] = {ds_id: {"title": title}}
adata.uns["ds_metadata_raw"] = {ds_id: str(metadata_dict)}
adata.obs["fg_id"] = ds_id
n_genes = adata.shape[1]
n_cells = adata.shape[0]
logger.info(
f'Loaded dataset "{title}" with {n_cells} cells and {n_genes} genes.\n'
f"==================================================================\n"
)
return adata
else:
raise KeyError(
f'Unsupported file format "{format}", use one of {list(readers)}. '
f'You can force the usage of a specific reader by setting "as_format" to a supported format. '
f"In addition, you can also implement your own reading function. See {DOCSURL} for more information."
)
def select_ds_id(ds: str, df: pd.DataFrame = None) -> pd.DataFrame:
"""Select a single dataset from a pandas DataFrame by its ID or title
Parameters
----------
ds : str
A single dataset ID or dataset title for selection
df : pd.DataFrame, optional
A pandas DataFrame from which a single entry is selected, by default None
Returns
-------
pd.DataFrame
A pandas DataFrame with only the selected dataset.
"""
single_df = df.loc[(df["id"] == ds) | (df["title"] == ds)].reset_index(
drop=True
)
len_df = len(single_df)
if len_df == 1:
return single_df.copy()
elif len_df == 0:
add_err = ""
if not ds.startswith("dataset-"):
add_err = " Please note that dataset titles can be changed by the owner. To be safe, you might want to consider dataset IDs instead."
raise KeyError("Your selection matches no datasets." + add_err)
else:
display(single_df)
raise KeyError(
f"Your selection matches {len_df} datasets. Please make sure to select exactly one."
)
def get_ds_paths(data_dir: Union[str, Path] = DATA_DIR) -> list:
"""Gets available datasets for this analysis from path.
Parameters
----------
data_dir : Union[str,Path], optional
Directory containing the datasets, e.g. "fastgenomics/data", by default DATA_DIR
Returns
-------
list
A list of dataset paths
"""
data_dir = Path(data_dir)
if not data_dir.exists():
raise NoDatasetsError(
f'There are no datasets attached to this analysis. Path "{data_dir}" does not exist.'
)
paths = [
Path(subdir)
for subdir in sorted(data_dir.iterdir())
if subdir.is_dir() and re.match(r"^dataset_\d{4}$", subdir.name)
]
if not paths:
raise NoDatasetsError(
f'There are no datasets attached to this analysis. Path "{data_dir}" is empty.'
)
return paths
| 32.175166 | 145 | 0.591551 |
6c1c01b8e3b61397e1a5842a61751e502b73a43e | 487 | py | Python | final.py | Georgejj123/Final | a5beeb873321446e53a1c0859c0b10bf240a0dcd | [
"MIT"
] | null | null | null | final.py | Georgejj123/Final | a5beeb873321446e53a1c0859c0b10bf240a0dcd | [
"MIT"
] | null | null | null | final.py | Georgejj123/Final | a5beeb873321446e53a1c0859c0b10bf240a0dcd | [
"MIT"
] | null | null | null | # This program rolls two dices and prints what you got
# We set two variables (min and max) , lowest and highest number of the dice.
import random
min = 1
max = 6
roll_again = "yes"
# We then use a while loop, so that the user can roll the dice again.
while roll_again == "yes" or roll_again == "y":
print "Rolling the dices:"
print "The values are:"
print random.randint(min, max)
print random.randint(min, max)
roll_again = raw_input("Roll the dices again?")
| 27.055556 | 78 | 0.689938 |
6c1cbdba3744243c1b45fcbc2d0b6115c7a3d106 | 1,584 | py | Python | Script's/04 - Generadores/Generadores Basicos.py | CamiloBallen24/Python-PildorasInformaticas | a734ac064e34b01a2f64080d5391625a5de77f54 | [
"Apache-2.0"
] | null | null | null | Script's/04 - Generadores/Generadores Basicos.py | CamiloBallen24/Python-PildorasInformaticas | a734ac064e34b01a2f64080d5391625a5de77f54 | [
"Apache-2.0"
] | null | null | null | Script's/04 - Generadores/Generadores Basicos.py | CamiloBallen24/Python-PildorasInformaticas | a734ac064e34b01a2f64080d5391625a5de77f54 | [
"Apache-2.0"
] | 1 | 2019-06-04T19:51:05.000Z | 2019-06-04T19:51:05.000Z | #TEMA:GENERADORES
######################################################################
#Funcion que me regresa un numero determinado de numeros pares
######################################################################
######################################################################
print("Ejemplo #1")
print(generaParesFuncion(10))
print()
print()
print()
print()
######################################################################
######################################################################
#Generador que hace lo mismo que el ejemplo anterior
######################################################################
######################################################################
print("Ejemplo #2")
miGenerador = generaPares(10) #Creo mi generdor
for i in miGenerador:
print(i)
print()
print()
print()
print()
print()
######################################################################
######################################################################
#Otro Uso del generador
print("Ejemplo #3")
miGenerador02= generaPares(12)
print("Primera llamada")
print(next(miGenerador02))
print()
print("Segunda llamada")
print(next(miGenerador02))
print()
print("Tercera llamada")
print(next(miGenerador02))
print()
print()
print()
print()
print()
###################################################################### | 21.12 | 71 | 0.395202 |
6c1cda3f913ecea499264b75e17f95a35ff6a498 | 443 | py | Python | lantz/qt/widgets/__init__.py | mtsolmn/lantz-qt | 72cb16bd3aafe33caa1a822ac2ba98b3425d4420 | [
"BSD-3-Clause"
] | 1 | 2020-05-13T08:29:16.000Z | 2020-05-13T08:29:16.000Z | lantz/qt/widgets/__init__.py | mtsolmn/lantz-qt | 72cb16bd3aafe33caa1a822ac2ba98b3425d4420 | [
"BSD-3-Clause"
] | null | null | null | lantz/qt/widgets/__init__.py | mtsolmn/lantz-qt | 72cb16bd3aafe33caa1a822ac2ba98b3425d4420 | [
"BSD-3-Clause"
] | 3 | 2019-09-24T16:49:10.000Z | 2020-09-23T17:53:20.000Z | # -*- coding: utf-8 -*-
"""
lantz.qt.widgets
~~~~~~~~~~~~~~~~
PyQt widgets wrapped to work with lantz.
:copyright: 2018 by Lantz Authors, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from . import feat, nonnumeric, numeric
from .common import WidgetMixin, ChildrenWidgets
from .initialize import InitializeWindow, InitializeDialog
from .testgui import DriverTestWidget, SetupTestWidget | 27.6875 | 68 | 0.711061 |
6c1dc30f32a47cfe9ea5fa235e76eff1529c75dd | 4,368 | py | Python | iotapy/storage/providers/types/transaction_metadata.py | aliciawyy/iota-python | b8d421acf94ccd9e7374f799fbe496f6d23e3cf3 | [
"MIT"
] | 34 | 2017-10-24T15:04:02.000Z | 2021-09-05T17:46:43.000Z | iotapy/storage/providers/types/transaction_metadata.py | aliciawyy/iota-python | b8d421acf94ccd9e7374f799fbe496f6d23e3cf3 | [
"MIT"
] | 8 | 2017-12-18T21:53:08.000Z | 2021-06-01T21:24:31.000Z | iotapy/storage/providers/types/transaction_metadata.py | aliciawyy/iota-python | b8d421acf94ccd9e7374f799fbe496f6d23e3cf3 | [
"MIT"
] | 11 | 2017-12-18T22:02:29.000Z | 2020-11-10T17:58:22.000Z | # -*- coding: utf-8 -*-
import struct
import iota
from iotapy.storage import converter as conv
TRANSACTION_METADATA_TRITS_LENGTH = 1604
HASH_BYTES_LENGTH = 49
HASH_TRITS_LENGTH = 243
| 35.225806 | 123 | 0.66163 |
6c1f737d01ed462f9d3028ca12a6e4e32ea970ac | 22,271 | py | Python | OC/network.py | Xin-Ye-1/HIEM | 6764f579eef6ec92dd85a005af27419f630df7da | [
"Apache-2.0"
] | 2 | 2021-04-12T02:41:00.000Z | 2021-05-15T02:18:15.000Z | OC/network.py | Xin-Ye-1/HIEM | 6764f579eef6ec92dd85a005af27419f630df7da | [
"Apache-2.0"
] | null | null | null | OC/network.py | Xin-Ye-1/HIEM | 6764f579eef6ec92dd85a005af27419f630df7da | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/env python
import tensorflow as tf
import tensorflow.contrib.slim as slim
seed = 0
| 55.6775 | 140 | 0.504917 |
6c1f784f7fc92dd4f1d6302efb41edae068a6f5e | 5,980 | py | Python | Student Database/last.py | manas1410/Miscellaneous-Development | 8ffd2b586cb05b12ed0855d97c3015c8bb2a6c01 | [
"MIT"
] | null | null | null | Student Database/last.py | manas1410/Miscellaneous-Development | 8ffd2b586cb05b12ed0855d97c3015c8bb2a6c01 | [
"MIT"
] | null | null | null | Student Database/last.py | manas1410/Miscellaneous-Development | 8ffd2b586cb05b12ed0855d97c3015c8bb2a6c01 | [
"MIT"
] | null | null | null | from tkinter import*
import website
import tkinter.font as font
from PIL import ImageTk,Image
import os
import sqlite3
import webbrowser
if __name__=='__main__':
main()
| 30.824742 | 133 | 0.533779 |
6c1ff08cd085c626e8f3f1328f189116ff83820d | 4,650 | py | Python | app/main.py | govdirectory/health-check-service | c32e1055e1c755fdb03e2786dc0a157697250421 | [
"CC0-1.0"
] | 1 | 2021-09-28T00:09:18.000Z | 2021-09-28T00:09:18.000Z | app/main.py | govdirectory/health-check-service | c32e1055e1c755fdb03e2786dc0a157697250421 | [
"CC0-1.0"
] | null | null | null | app/main.py | govdirectory/health-check-service | c32e1055e1c755fdb03e2786dc0a157697250421 | [
"CC0-1.0"
] | null | null | null | import requests
from typing import List
from fastapi import FastAPI, Path
from pydantic import BaseModel, HttpUrl
from fastapi.middleware.cors import CORSMiddleware
cors_origins = [
'https://www.govdirectory.org',
'https://www.wikidata.org',
]
user_agent_external = 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:90.0) Gecko/20100101 Firefox/90.0 Govdirectory.org account existence checker'
user_agent_wikimedia = 'Wikidata:WikiProject Govdirectory (health check service)'
url_properties = [
{
'name': 'official website',
'prop': 'P856',
},
{
'name': 'URL for citizen\'s initiatives',
'prop': 'P9732',
},
]
platform_properties = [
{
'name': 'Twitter username',
'prop': 'P2002',
'formatter_url': 'https://twitter.com/$1',
},
{
'name': 'YouTube channel ID',
'prop': 'P2397',
'formatter_url': 'https://www.youtube.com/channel/$1',
},
{
'name': 'Facebook ID',
'prop': 'P2013',
'formatter_url': 'https://www.facebook.com/$1',
},
{
'name': 'Instagram username',
'prop': 'P2003',
'formatter_url': 'https://www.instagram.com/$1/',
},
{
'name': 'GitHub username',
'prop': 'P2037',
'formatter_url': 'https://github.com/$1',
},
{
'name': 'Vimeo identifier',
'prop': 'P4015',
'formatter_url': 'https://vimeo.com/$1',
},
{
'name': 'Flickr user ID',
'prop': 'P3267',
'formatter_url': 'https://www.flickr.com/people/$1',
},
{
'name': 'Pinterest username',
'prop': 'P3836',
'formatter_url': 'https://www.pinterest.com/$1/',
},
{
'name': 'Dailymotion channel ID',
'prop': 'P2942',
'formatter_url': 'https://www.dailymotion.com/$1',
},
{
'name': 'TikTok username',
'prop': 'P7085',
'formatter_url': 'https://www.tiktok.com/@$1',
},
{
'name': 'SlideShare username',
'prop': 'P4016',
'formatter_url': 'https://www.slideshare.net/$1',
},
]
app = FastAPI(
title='Govdirectory Health Check Service',
description='Microservice that validates various external identifiers and URLs associated with a given Wikidata identifier.',
version='0.1.0',
docs_url='/',
)
app.add_middleware(
CORSMiddleware,
allow_origins=cors_origins,
allow_credentials=True,
allow_methods=['GET'],
allow_headers=['*'],
)
| 29.807692 | 170 | 0.543871 |
6c2088e0cf4acb484abbd57109fedff77aded588 | 569 | py | Python | lie2me/fields/password.py | hugollm/lie2me | c3b47e88264a32c10c893368987c4d8a3df054d9 | [
"MIT"
] | null | null | null | lie2me/fields/password.py | hugollm/lie2me | c3b47e88264a32c10c893368987c4d8a3df054d9 | [
"MIT"
] | null | null | null | lie2me/fields/password.py | hugollm/lie2me | c3b47e88264a32c10c893368987c4d8a3df054d9 | [
"MIT"
] | null | null | null | from ..field import Field
| 23.708333 | 58 | 0.567663 |
6c21574006d86d8b50934be4fe7a2c2e0d87d074 | 5,933 | py | Python | src/szz/manipulate_sql_tables/add_depth_to_BTE_table.py | dSar-UVA/repoMiner | 8f75074e388ff13419a0a37b4337c0cdcb459f74 | [
"BSD-3-Clause"
] | 9 | 2017-10-21T13:29:46.000Z | 2022-01-10T23:49:54.000Z | src/szz/manipulate_sql_tables/add_depth_to_BTE_table.py | dSar-UVA/repoMiner | 8f75074e388ff13419a0a37b4337c0cdcb459f74 | [
"BSD-3-Clause"
] | 3 | 2018-01-09T11:28:55.000Z | 2019-01-20T08:45:18.000Z | src/szz/manipulate_sql_tables/add_depth_to_BTE_table.py | dSar-UVA/repoMiner | 8f75074e388ff13419a0a37b4337c0cdcb459f74 | [
"BSD-3-Clause"
] | 1 | 2020-12-29T05:10:31.000Z | 2020-12-29T05:10:31.000Z | """
This script adds a specific column to the `bug_type_entropy_projectname_old` tables. The added column contains the nesting depth (>=0) of each line.
"""
import os, sys, psycopg2, ntpath, traceback, subprocess
from pprint import pprint
#--------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------------------------------
if __name__ == "__main__":
if len(sys.argv) != 2:
print("\nUsage: python add_depth_to_BTE_table.py <project_name>")
print("\nSample usage: python add_depth_to_BTE_table.py libgit2")
raise ValueError("Incorrect input arguments. Aborting...")
project_name = sys.argv[1]
# depth_dict = get_depth_data(project_name)
# if not depth_dict:
# raise ValueError("`get_depth_data` returned an empty `depth_dict` dictionary. Aborting...")
print("\nNow fetching BTE_old_data...")
# BTE_data is a list of lists; each element list = [file_name, sha, line_num, parents_all]
BTE_data = get_BTE_data(project_name)
if not BTE_data:
raise ValueError("`get_BTE_data` returned an empty `BTE_data` list. Aborting...")
print("\nNow creating BTE_prime_data, i.e., table with `depth` appended to BTE_old_data...")
# We will add `depth` attribute to each row in BTE_data
error_count = 0
for index, BTE_tuple in enumerate(BTE_data):
# `depth` = number of parents as given in `parents_all` column of BTE table
depth = BTE_tuple[3].count('-') + 1
if BTE_tuple[3] == '':
BTE_data[index].append(0)
else:
BTE_data[index].append(depth)
print("\nNow dumping the temporary table BTE_prime. This may take approx. 3-4 min per million LOC...")
dump_BTE_prime_table(BTE_data, project_name)
print("\nNow joining BTE_old and BTE_prime to get desired table. This takes about 2 min per million LOC...")
join_BTE_old_and_BTE_prime(project_name)
#--------------------------------------------------------------------------------------------------------------------------
| 40.917241 | 148 | 0.539019 |
6c21d95c779b91777f1c1e4c2f9a294fa6bd8d6e | 10,862 | py | Python | tools/ProjectionTools/Lidar2RGB/lib/utils.py | ladt/SeeingThroughFog | c714a4c3e8f8e604494b1db6e9eef529b0326405 | [
"MIT"
] | null | null | null | tools/ProjectionTools/Lidar2RGB/lib/utils.py | ladt/SeeingThroughFog | c714a4c3e8f8e604494b1db6e9eef529b0326405 | [
"MIT"
] | null | null | null | tools/ProjectionTools/Lidar2RGB/lib/utils.py | ladt/SeeingThroughFog | c714a4c3e8f8e604494b1db6e9eef529b0326405 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import numpy as np
import cv2
import scipy.spatial
from sklearn.linear_model import RANSACRegressor
import os
import sys
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0, parentdir)
import lib.settings
def dense_map(Pts, n, m, grid):
'''
interpolate lidar depth
:param Pts: num observations of (W, H, D) lidar coordinates (D - depth corrsponding to (W,H) image positions), Pts.shape==(3, num)
:param n: image width
:param m: image height
:param grid: (grid*2+1) is neighborhood size
:return:
'''
ng = 2 * grid + 1
mX = np.zeros((m, n)) + np.float("inf")
mY = np.zeros((m, n)) + np.float("inf")
mD = np.zeros((m, n))
mX[np.int32(Pts[1]), np.int32(Pts[0])] = Pts[0] - np.round(Pts[0])
mY[np.int32(Pts[1]), np.int32(Pts[0])] = Pts[1] - np.round(Pts[1])
mD[np.int32(Pts[1]), np.int32(Pts[0])] = Pts[2]
KmX = np.zeros((ng, ng, m - ng, n - ng))
KmY = np.zeros((ng, ng, m - ng, n - ng))
KmD = np.zeros((ng, ng, m - ng, n - ng))
for i in range(ng):
for j in range(ng):
KmX[i, j] = mX[i: (m - ng + i), j: (n - ng + j)] - grid - 1 + i
KmY[i, j] = mY[i: (m - ng + i), j: (n - ng + j)] - grid - 1 + i
KmD[i, j] = mD[i: (m - ng + i), j: (n - ng + j)]
S = np.zeros_like(KmD[0, 0])
Y = np.zeros_like(KmD[0, 0])
for i in range(ng):
for j in range(ng):
s = 1 / np.sqrt(KmX[i, j] * KmX[i, j] + KmY[i, j] * KmY[i, j])
Y = Y + s * KmD[i, j]
S = S + s
S[S == 0] = 1
out = np.zeros((m, n))
out[grid + 1: -grid, grid + 1: -grid] = Y / S
return out
def transform_coordinates(xyz):
"""
Takes as input a Pointcloud with xyz coordinates and appends spherical coordinates as columns
:param xyz:
:return: Pointcloud with following columns, r, phi, theta, ring, intensity, x, y, z, intensity, ring
"""
ptsnew = np.hstack((np.zeros_like(xyz), xyz))
r_phi = xyz[:, 0] ** 2 + xyz[:, 1] ** 2
ptsnew[:, 0] = np.sqrt(r_phi + xyz[:, 2] ** 2)
ptsnew[:, 2] = np.pi / 2 - np.arctan2(np.sqrt(r_phi), xyz[:, 2]) # for elevation angle defined from Z-axis down
ptsnew[:, 1] = np.arctan2(xyz[:, 1], xyz[:, 0])
ptsnew[:, 3] = xyz[:, 4]
ptsnew[:, 4] = xyz[:, 3]
return ptsnew
def find_closest_neighbors(x, reference):
"""
This function allows you to match strongest and last echos and reason about scattering distributions.
:param x: Pointcloud which should be matched
:param reference: Reference Pointcloud
:return: returns valid matching indexes
"""
tree = scipy.spatial.KDTree(reference[:, 1:4])
distances, indexes = tree.query(x[:, 1:4], p=2)
print('indexes', indexes)
print('found matches', len(indexes), len(set(indexes)))
# return 0
valid = []
# not matching contains all not explainable scattered mismatching particles
not_matching = []
for idx, i in enumerate(indexes):
delta = reference[i, :] - x[idx, :]
# Laser Ring has to match
if delta[-1] == 0:
# Follows assumption that strongest echo has higher intensity than last and that the range is more distant
# for the last return. The sensor can report 2 strongest echo if strongest and last echo are matching.
# Here those points are not being matched.
if delta[-2] < 0 and delta[0] > 0:
valid.append((i, idx))
else:
not_matching.append((i, idx))
else:
not_matching.append((i, idx))
return valid
def filter(lidar_data, distance):
"""
Takes lidar Pointcloud as ibnput and filters point below distance threshold
:param lidar_data: Input Pointcloud
:param distance: Minimum distance for filtering
:return: Filtered Pointcloud
"""
r = np.sqrt(lidar_data[:, 0] ** 2 + lidar_data[:, 1] ** 2 + lidar_data[:, 2] ** 2)
true_idx = np.where(r > distance)
lidar_data = lidar_data[true_idx, :]
return lidar_data[0]
| 39.498182 | 147 | 0.612042 |
6c221044ede7be29a963c10b013a344a5da5c962 | 2,221 | py | Python | menu.py | RandelSouza/Space_Invaders | d266662a84b2324fd398fbb31b3f0a2004b40c99 | [
"MIT"
] | null | null | null | menu.py | RandelSouza/Space_Invaders | d266662a84b2324fd398fbb31b3f0a2004b40c99 | [
"MIT"
] | null | null | null | menu.py | RandelSouza/Space_Invaders | d266662a84b2324fd398fbb31b3f0a2004b40c99 | [
"MIT"
] | null | null | null | from setup import *
import pygame, sys, os
from pygame.locals import *
| 34.169231 | 112 | 0.547951 |
6c22c119dc085d095980f04411cdc49210dadf86 | 459 | py | Python | investing_algorithm_framework/app/stateless/action_handlers/check_online_handler.py | investing-algorithms/investing-algorithm-framework | d579e142a3857e2e2dfb59b7d6e54202f7df5466 | [
"Apache-2.0"
] | 1 | 2019-12-23T21:23:45.000Z | 2019-12-23T21:23:45.000Z | investing_algorithm_framework/app/stateless/action_handlers/check_online_handler.py | investing-algorithms/investing-algorithm-framework | d579e142a3857e2e2dfb59b7d6e54202f7df5466 | [
"Apache-2.0"
] | null | null | null | investing_algorithm_framework/app/stateless/action_handlers/check_online_handler.py | investing-algorithms/investing-algorithm-framework | d579e142a3857e2e2dfb59b7d6e54202f7df5466 | [
"Apache-2.0"
] | 1 | 2019-12-23T21:23:50.000Z | 2019-12-23T21:23:50.000Z | import json
from investing_algorithm_framework.app.stateless.action_handlers \
.action_handler_strategy import ActionHandlerStrategy
| 30.6 | 66 | 0.690632 |
6c239288cab4c6c0e0a6b09c9cc9d94248d11f8f | 20,757 | py | Python | main.py | AniruddhaGawali/Hand_Cricket | 71fcf5ffa49dbfcfdfceba9784d88c5adfd0fccb | [
"Apache-2.0"
] | 1 | 2020-11-08T07:38:58.000Z | 2020-11-08T07:38:58.000Z | main.py | AniruddhaGawali/Hand_Cricket | 71fcf5ffa49dbfcfdfceba9784d88c5adfd0fccb | [
"Apache-2.0"
] | null | null | null | main.py | AniruddhaGawali/Hand_Cricket | 71fcf5ffa49dbfcfdfceba9784d88c5adfd0fccb | [
"Apache-2.0"
] | null | null | null | # ---------------------------------------------------------------ALL REQUIRD FILES-------------------------------------------------------------
from tkinter import *
import tkinter.ttk as ttk
import tkinter.messagebox as msg
import tkinter.filedialog as tf
from ttkthemes import ThemedStyle
from PIL import Image, ImageTk
import random,pickle,os,playsound,datetime
root = Tk()
style = ThemedStyle(root)
root.wm_iconbitmap("data/img/ico/icon.ico")
root.title('Hand Cricket')
if os.path.isfile('data/files/app_data.p'):
f1 = open('data/files/app_data.p','rb')
theme = pickle.load(f1)
else:
theme=2
if theme ==2:
bg_color='gray10'
fg_color='dodgerblue'
root.config(bg='gray10')
label_bg_color = 'gray20'
label_fg_color = 'dodgerblue'
elif theme ==1:
bg_color='white'
fg_color='dodgerblue'
root.config(bg='white')
label_bg_color = 'dodgerblue'
label_fg_color = 'white'
style.set_theme("vista")
root.geometry('300x520')
root.maxsize(300,518)
# --------------------------------------------------------------------VARIBILES-----------------------------------------------------------------
# n=0
player_run=0
comp_run=0
Total_runs=0
comp_Total_runs=0
player_wicket=0
comp_wicket=0
players_balls=0
comp_balls=0
target=0
Total_overs = 0
Total_wicket =0
who_win = ''
player_bat_choice={}
comp_bat_choice={}
# -------------------------------------------------------------------FUNCTIONS------------------------------------------------------------------
# ------------------------------------------------------------FRAMES AND MAIN PROGRAM-----------------------------------------------------------
# ----------------------------------------------------------------------FRAME1------------------------------------------------------------------
root_frame1=Frame(root,bg=bg_color)
root_frame2=Frame(root,bg=bg_color)
root_frame3=Frame(root,bg=bg_color)
for frame in (root_frame1,root_frame2,root_frame3):
frame.grid(row=0,column=0,sticky='news')
raise_frame(root_frame1)
root_frame1_label_style=ttk.Style()
root_frame1_label_style.configure('TLabel',background=bg_color,foreground=fg_color)
over_select_label=ttk.Label(root_frame1,text='Select No. of Overs',font="Helvetica 15 bold",style='TLabel')
over_select_label.config(anchor=CENTER)
over_select_label.pack(padx=(23,0),pady=(20,0))
over=StringVar()
over.set('0')
over_select= ttk.Spinbox(root_frame1,from_=1,to=50,font='Helvetica 15 bold',textvariable=over)
over_select.pack(pady=8,padx=(23,0))
wicket=StringVar()
wicket.set('0')
player_select_label=ttk.Label(root_frame1,text='Select No. of Players',font="Helvetica 15 bold",style='TLabel')
player_select_label.config(anchor=CENTER)
player_select_label.pack(padx=(23,0))
no_of_players=ttk.Spinbox(root_frame1,from_=1,to=50,font='Helvetica 15 bold',textvariable=wicket)
no_of_players.pack(pady=8,padx=(23,0))
style_checkbutton=ttk.Style()
style_checkbutton.configure('TCheckbutton',width=10,hight=100,background=bg_color,foreground=fg_color,font='Helvetica 15 bold')
toss_label=ttk.Label(root_frame1,text='Select the Face',font='Helvetica 15 bold',style='TLabel')
toss_label.pack(pady=(10,5))
toss=StringVar()
head=ttk.Checkbutton(root_frame1,text='HEADS',variable=toss,onvalue='h',style='TCheckbutton')
tails=ttk.Checkbutton(root_frame1,text='TAILS',variable=toss,onvalue='t',style='TCheckbutton')
head.pack()
tails.pack()
over_selected=ttk.Button(root_frame1,text='Toss',command=lambda : coin_toss(toss.get()))
over_selected.pack(pady=15,padx=(23,0))
# ----------------------------------------------------------------------FRAME3------------------------------------------------------------------
First_to=StringVar()
label1=Label(root_frame3,text='YOU WIN THE TOSS',background=bg_color,foreground=fg_color,font='Helvetica 15 bold')
label1.pack(padx=(20,0))
bat=ttk.Checkbutton(root_frame3,text='BAT',variable=First_to,onvalue='b',style='TCheckbutton')
ball=ttk.Checkbutton(root_frame3,text='BALL',variable=First_to,onvalue='ba',style='TCheckbutton')
bat.pack(pady=5,padx=(52,0))
ball.pack(pady=5,padx=(52,0))
buttton_of_match=ttk.Button(root_frame3,text="Start",command=lambda : raise_frame(root_frame2))
buttton_of_match.pack(pady=10)
# ----------------------------------------------------------------------FRAME2------------------------------------------------------------------
selected_no_frame=Frame(root_frame2,bg=bg_color)
selected_no_frame.pack()
player_select_no_label=ttk.Label(selected_no_frame,text=' You Select ',font="none 10 bold",style='TLabel')
player_select_no_label.grid(row=0,column=0,padx=(15,5),pady=5)
comp_select_no_label=ttk.Label(selected_no_frame,text='Comp Select',font="none 10 bold",style='TLabel')
comp_select_no_label.grid(row=0,column=1,padx=(40,0),pady=5)
player_select_no=ttk.Label(selected_no_frame,text='-',font='Helvetica 30 bold',style='TLabel')
comp_select_no=ttk.Label(selected_no_frame,text='-',font='Helvetica 30 bold',style='TLabel')
player_select_no.grid(row=1,column=0,padx=(15,5),pady=(5,2))
comp_select_no.grid(row=1,column=1,padx=(40,0),pady=(5,2))
conc_frame=Frame(root_frame2,bg=bg_color, relief=SUNKEN)
conc_frame.pack()
conc_style=ttk.Style()
conc_style.configure('conc.TLabel',background=bg_color,foreground='white')
concustion_label=ttk.Label(conc_frame,text='-',font='Helvetica 15 bold',style='conc.TLabel')
concustion_label.pack(padx=(31,10),pady=(0,15))
button_frame=Frame(root_frame2,bg=bg_color)
button_frame.pack(pady=20)
for i in range(0,7):
if i==5:
continue
else:
globals()['img%s'%i]= ImageTk.PhotoImage(Image.open(f"data/img/hand_numbers/img{i}.png"))
but0=Button(button_frame,text=i,image=img0,borderwidth=2,command= lambda : add_runs(0) )
but1=Button(button_frame,text=i,image=img1,borderwidth=2,command= lambda : add_runs(1) )
but2=Button(button_frame,text=i,image=img2,borderwidth=2,command= lambda : add_runs(2) )
but3=Button(button_frame,text=i,image=img3,borderwidth=2,command= lambda : add_runs(3) )
but4=Button(button_frame,text=i,image=img4,borderwidth=2,command= lambda : add_runs(4) )
but6=Button(button_frame,text=i,image=img6,borderwidth=2,command= lambda : add_runs(6) )
but0.grid(row=0,column=0,padx=(25,6),pady=5)
but1.grid(row=0,column=1,padx=(4,0),pady=5)
but2.grid(row=0,column=2,padx=(10,0),pady=5)
but3.grid(row=1,column=0,padx=(25,6),pady=5)
but4.grid(row=1,column=1,padx=(4,0),pady=5)
but6.grid(row=1,column=2,padx=(10,0),pady=5)
scrore_frame=Frame(root_frame2,bg=bg_color)
scrore_frame.pack(pady=10)
score_name_label=ttk.Label(scrore_frame,text='Your Score : ',font='Helvetica 20 bold')
score_name_label.grid(row=2,column=0,sticky=W,pady=(3,0),padx=(8,0))
score=ttk.Label(scrore_frame,text=f'{Total_runs}/{player_wicket}',font='Helvetica 20 bold')
score.grid(row=2,column=1,sticky=W,pady=(3,0))
comp_score_name_label=ttk.Label(scrore_frame,text='Comp Score : ',font='Helvetica 20 bold')
comp_score_name_label.grid(row=3,column=0,sticky=W,pady=(3,0),padx=(8,0))
comp_score=ttk.Label(scrore_frame,text=f'{comp_Total_runs}/{comp_wicket}',font='Helvetica 20 bold')
comp_score.grid(row=3,column=1,sticky=W,pady=(3,0))
over_count=ttk.Label(scrore_frame,text='Over : 3',font='Helvetica 13 bold')
over_count.grid(row=4,column=0,sticky=W,padx=9)
balls_remain=ttk.Label(scrore_frame,text='Balls : 0',font='Helvetica 13 bold')
balls_remain.grid(row=4,column=1,sticky=W,padx=0)
target_label=ttk.Label(scrore_frame,text=f'Target : {target}',font='Helvetica 13 bold')
target_label.grid(row=5,column=0,sticky=W,padx=8)
who_bat=ttk.Label(scrore_frame,text='Batting : -',font='Helvetica 10 ')
who_ball=ttk.Label(scrore_frame,text='Bowling : -' ,font='Helvetica 10 ')
who_bat.grid(row=6,column=0,sticky=W,padx=(10,0))
who_ball.grid(row=7,column=0,sticky=W,padx=(10,0))
# --------------------------------------------------------------------MENU----------------------------------------------------------------------
mainmenu = Menu(root, activebackground=label_bg_color)
root.config(menu=mainmenu)
m1 = Menu(mainmenu, tearoff=0, bg=bg_color, fg=fg_color,activebackground=label_bg_color, activeforeground=label_fg_color)
m1.add_command(label='New Game',command=newgame)
m1.add_command(label='Save Game',command=save_game)
m1.add_separator()
m1.add_command(label='Exit',command=quitapp)
mainmenu.add_cascade(label='Menu', menu=m1)
m2 = Menu(mainmenu, tearoff=0, bg=bg_color, fg=fg_color,activebackground=label_bg_color, activeforeground=label_fg_color)
m2_sub = Menu(m2,tearoff=0, bg=bg_color, fg=fg_color,activebackground=label_bg_color, activeforeground=label_fg_color)
m2_sub.add_command(label='Dark', command=temp_dark)
m2_sub.add_command(label='Light', command=temp_light)
m2.add_cascade(label='Theme',menu=m2_sub)
m2.add_command(label='Help', command=lambda: msg.showinfo('Help', 'We will help you soon'))
m2.add_command(label='More About', command=lambda: msg.showinfo('About', 'This GUI is created by AKG007\n Made in India'))
mainmenu.add_cascade(label='Settings', menu=m2)
root.mainloop()
f1= open('data/files/app_data.p','wb')
pickle.dump(theme,f1)
f1.close() | 36.803191 | 158 | 0.620321 |
6c24371f8f3543e387962e5276370824b85090e5 | 108 | py | Python | model/__init__.py | SunnyMarkLiu/Kaggle_Invasive_Species_Monitoring | a103d4d4811c7b359f07e34aeb69fa7637adae35 | [
"MIT"
] | null | null | null | model/__init__.py | SunnyMarkLiu/Kaggle_Invasive_Species_Monitoring | a103d4d4811c7b359f07e34aeb69fa7637adae35 | [
"MIT"
] | 1 | 2017-08-29T13:08:18.000Z | 2017-08-29T13:08:18.000Z | model/__init__.py | SunnyMarkLiu/Kaggle_Invasive_Species_Monitoring | a103d4d4811c7b359f07e34aeb69fa7637adae35 | [
"MIT"
] | null | null | null | #!/usr/local/miniconda2/bin/python
# _*_ coding: utf-8 _*_
"""
@author: MarkLiu
@time : 17-6-19 8:44
""" | 15.428571 | 34 | 0.62963 |
6c26670f1bac191aee07007494c6fa726372a36b | 11,451 | py | Python | Game/game_functions.py | Gabriel-limadev/Alien-Invasion | 1b8b1ad7dfe9cf5cd99ff0595eedf3eb78953eaf | [
"MIT"
] | 3 | 2021-09-11T16:35:20.000Z | 2021-09-25T02:42:04.000Z | Game/game_functions.py | Gabriel-limadev/Invasao-Alienigena | 1b8b1ad7dfe9cf5cd99ff0595eedf3eb78953eaf | [
"MIT"
] | null | null | null | Game/game_functions.py | Gabriel-limadev/Invasao-Alienigena | 1b8b1ad7dfe9cf5cd99ff0595eedf3eb78953eaf | [
"MIT"
] | 1 | 2022-02-28T01:06:15.000Z | 2022-02-28T01:06:15.000Z | import sys #Fornece funes e variveis para manipular partes do ambiente de tempo de execuo do Python
from time import sleep
import pygame
from settings import Settings
from game_stats import GameStats
from bullet import Bullet
from alien import Alien
def check_keydown_events(event, ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Handle key-press events: movement flags, firing, quitting, starting."""
    key = event.key
    if key in (pygame.K_RIGHT, pygame.K_d):
        # Begin moving the ship to the right.
        ship.moving_right = True
    elif key in (pygame.K_LEFT, pygame.K_a):
        ship.moving_left = True
    elif key in (pygame.K_UP, pygame.K_w):
        ship.moving_top = True
    elif key in (pygame.K_DOWN, pygame.K_s):
        ship.moving_bottom = True
    elif key == pygame.K_SPACE:
        # Fire one bullet, subject to the live-bullet limit.
        fire_bullet(ai_settings, screen, ship, bullets)
    elif key == pygame.K_ESCAPE:
        # Quit the game immediately.
        sys.exit()
    elif key == pygame.K_p and (stats.game_start or stats.game_over):
        # 'p' launches a fresh game from the start or game-over screens.
        start_game(ai_settings, screen, stats, sb, ship, aliens, bullets)
        ai_settings.initialize_dynamic_settings()
def check_keyup_events(event, ship):
    """Handle key-release events by clearing the matching movement flag."""
    key = event.key
    if key in (pygame.K_RIGHT, pygame.K_d):
        ship.moving_right = False
    elif key in (pygame.K_LEFT, pygame.K_a):
        ship.moving_left = False
    elif key in (pygame.K_UP, pygame.K_w):
        ship.moving_top = False
    elif key in (pygame.K_DOWN, pygame.K_s):
        ship.moving_bottom = False
def fire_bullet(ai_settings, screen, ship, bullets):
    """Fire one bullet (with a laser sound) unless the live-bullet limit is reached."""
    if len(bullets) >= ai_settings.bullets_allowed:
        return
    # Create the projectile and register it with the bullet group.
    bullets.add(Bullet(ai_settings, screen, ship))
    # Play the laser sound at low volume.
    shoot_sound = pygame.mixer.Sound('Sounds/shoot.wav')
    pygame.mixer.Sound.set_volume(shoot_sound, 0.1)
    shoot_sound.play()
def check_events(ai_settings, screen, stats, sb, play_button, ship, aliens, bullets):
    """Dispatch pending pygame keyboard and window events."""
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            # Window close button was pressed.
            sys.exit()
        if event.type == pygame.KEYDOWN:
            check_keydown_events(event, ai_settings, screen, stats, sb, ship, aliens, bullets)
        elif event.type == pygame.KEYUP:
            check_keyup_events(event, ship)
        # Mouse handling for the Play button is currently disabled; the game
        # is started with the 'p' key instead (see check_keydown_events).
def start_game(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Reset statistics, scoreboard and sprites, then begin a new round."""
    # Reset the game statistics and switch to the active-game state.
    stats.reset_stats()
    stats.game_active = True
    stats.game_start = False

    # Rebuild every scoreboard image from the freshly reset statistics.
    sb.prep_score()
    sb.prep_high_score()
    sb.prep_level()
    sb.prep_ships()

    # Drop any leftover aliens and bullets from the previous round.
    aliens.empty()
    bullets.empty()

    # Build a fresh fleet and re-centre the player's ship.
    create_fleet(ai_settings, screen, ship, aliens)
    ship.center_ship()
# def check_play_button(ai_settings, screen, stats, play_button, ship, aliens, bullets, mouse_x, mouse_y):
# """Inicia um novo jogo quando o jogador clicar em play"""
# button_clicked = play_button.rect.collidepoint(mouse_x, mouse_y)
# if button_clicked and not stats.game_active:
# start_game(ai_settings, screen, stats, ship, aliens, bullets)
def update_start(screen, background_start):
    """Draw the welcome screen and flip the display."""
    screen.blit(background_start, (0, 0))
    pygame.display.flip()
def update_menu(screen, background_menu, play_button):
    """After a short transition pause, draw the menu screen with its Play button."""
    sleep(3)  # screen-transition delay (blocks until the menu appears)
    screen.blit(background_menu, (0, 0))
    play_button.draw_button()
    pygame.display.flip()
def update_game_over(screen, background_game_over, play_button):
    """After a short transition pause, draw the game-over screen with its Play button."""
    sleep(3)  # screen-transition delay
    screen.blit(background_game_over, (0, 0))
    play_button.draw_button()
    pygame.display.flip()
def update_screen(ai_settings, screen, stats, sb, ship, aliens, bullets, play_button, background):
    """Redraw the whole frame (background, scoreboard, sprites) and flip the display."""
    screen.fill(ai_settings.bg_color)
    screen.blit(background, (0, 0))

    # Scoreboard first, then the moving sprites on top of it.
    sb.show_score()
    for bullet in bullets.sprites():
        bullet.draw_bullet()
    ship.blitme()
    aliens.draw(screen)

    pygame.display.flip()
def update_bullets(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Advance every bullet, discard off-screen ones, then resolve alien hits."""
    bullets.update()

    # Drop bullets that have left the top of the screen.
    # Iterate over a copy because the group is mutated inside the loop.
    for bullet in bullets.copy():
        if bullet.rect.bottom <= 0:
            bullets.remove(bullet)

    check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets)
def check_bullet_alien_collisions(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Respond to bullet/alien collisions and advance the level once the fleet is gone.

    Removes every bullet and alien that collided, credits the score for each
    alien destroyed, and - when the fleet group is empty - speeds the game up,
    rebuilds the fleet, re-centres the ship and bumps the level.
    """
    # Remove any bullets and aliens that have collided
    # (maps each bullet to the list of aliens it hit).
    collisions = pygame.sprite.groupcollide(bullets, aliens, True, True)
    if collisions:
        # BUG FIX: the loop variable used to be named ``aliens``, shadowing the
        # fleet-group parameter; the ``len(aliens) == 0`` check below then
        # tested the last hit-list instead of the fleet, so a new fleet was
        # never created after the final alien was destroyed.
        for hit_aliens in collisions.values():
            stats.score += ai_settings.alien_points * len(hit_aliens)
            sb.prep_score()
        check_high_score(stats, sb)

    if len(aliens) == 0:
        # Fleet destroyed: clear leftover bullets, speed the game up,
        # create a new fleet and re-centre the ship.
        bullets.empty()
        ai_settings.increase_speed()
        create_fleet(ai_settings, screen, ship, aliens)
        ship.center_ship()

        # Play the level-up sound.
        next_level = pygame.mixer.Sound('Sounds/ufo_lowpitch.wav')
        pygame.mixer.Sound.set_volume(next_level, 0.3)
        next_level.play()

        # Advance to the next level.
        stats.level += 1
        sb.prep_level()
def get_number_aliens_x(ai_settings, alien_width):
    """Return how many aliens fit across one row of the screen.

    One alien-width of margin is reserved on each side, and each alien
    occupies 2 * alien_width of horizontal space.
    """
    available_space_x = ai_settings.screen_width - 2 * alien_width
    return int(available_space_x / (2 * alien_width))
def get_number_rows(ai_settings, ship_height, alien_height):
    """Return how many rows of aliens fit between the top margin and the ship.

    Three alien-heights of clearance plus the ship height are subtracted
    from the screen; each row occupies 2 * alien_height.
    """
    available_space_y = (ai_settings.screen_height - (3 * alien_height) - ship_height)
    return int(available_space_y / (2 * alien_height))
def create_aliens(ai_settings, screen, aliens, alien_number, row_number):
    """Create one alien and place it at grid slot (alien_number, row_number)."""
    alien = Alien(ai_settings, screen)
    # position_aliens is defined elsewhere in this module; presumably it sets
    # the alien's rect from its grid slot and adds it to ``aliens`` -- confirm
    # against its definition.
    position_aliens(alien, aliens, alien_number, row_number)
def create_fleet(ai_settings, screen, ship, aliens):
    """Populate ``aliens`` with a full grid of alien sprites."""
    # A sample alien supplies the sprite dimensions used for the layout;
    # horizontal spacing between aliens equals one alien width.
    sample = Alien(ai_settings, screen)
    number_aliens_x = get_number_aliens_x(ai_settings, sample.rect.width)
    number_rows = get_number_rows(ai_settings, ship.rect.height, sample.rect.height)

    # Build the grid row by row.
    for row_number in range(number_rows):
        for alien_number in range(number_aliens_x):
            create_aliens(ai_settings, screen, aliens, alien_number, row_number)
def check_fleet_edges(ai_settings, aliens):
    """Reverse the fleet's direction as soon as any alien touches a screen edge."""
    # any() short-circuits on the first alien at an edge, matching the
    # original loop-and-break behaviour.
    if any(alien.check_edges() for alien in aliens.sprites()):
        change_fleet_direction(ai_settings, aliens)
def change_fleet_direction(ai_settings, aliens):
    """Drop the whole fleet one step and reverse its horizontal sweep."""
    drop = ai_settings.fleet_drop_speed
    for alien in aliens.sprites():
        alien.rect.y += drop
    # Negating fleet_direction reverses the horizontal sweep.
    ai_settings.fleet_direction *= -1
def update_aliens(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Move the fleet, then handle ship collisions and aliens reaching the bottom."""
    check_fleet_edges(ai_settings, aliens)
    aliens.update()

    # Any contact between the ship and an alien is handled by ship_hit.
    if pygame.sprite.spritecollideany(ship, aliens):
        ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)

    # Aliens that slip past the ship to the bottom edge count as a hit too.
    check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets)
def ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Handle the player's ship being hit by an alien."""
    if stats.ships_left > 1:
        # Spend one of the remaining ships and refresh the ship display.
        stats.ships_left -= 1
        sb.prep_ships()

        # Score penalty that grows with the number of aliens already shot
        # down this wave (36 is presumably the full fleet size -- TODO confirm).
        stats.score -= ai_settings.alien_points * (36 - len(aliens))
        sb.prep_score()

        # Clear the board and set up a fresh fleet around a centred ship.
        aliens.empty()
        bullets.empty()
        create_fleet(ai_settings, screen, ship, aliens)
        ship.center_ship()

        # Explosion sound, then a short pause before play resumes.
        explosion = pygame.mixer.Sound('Sounds/explosion.wav')
        pygame.mixer.Sound.set_volume(explosion, 0.1)
        explosion.play()
        sleep(0.5)
    else:
        # No ships left: end the round and show the game-over state.
        stats.game_active = False
        stats.game_over = True
def check_aliens_bottom(ai_settings, screen, stats, sb, ship, aliens, bullets):
    """Treat any alien reaching the bottom edge the same as a ship hit."""
    bottom = screen.get_rect().bottom
    for alien in aliens.sprites():
        if alien.rect.bottom >= bottom:
            ship_hit(ai_settings, screen, stats, sb, ship, aliens, bullets)
            break
def check_high_score(stats, sb):
    """Promote the current score to high score whenever it beats the record."""
    if stats.score <= stats.high_score:
        return
    stats.high_score = stats.score
    sb.prep_high_score()
| 36.352381 | 117 | 0.687189 |
6c27c7aa14b6a3a742020fc655ba28804b70f883 | 98 | py | Python | rover-stub/accelsensor.py | GamesCreatorsClub/GCC-Rover | 25a69f62a1bb01fc421924ec39f180f50d6a640b | [
"MIT"
] | 3 | 2018-02-13T21:39:55.000Z | 2018-04-26T18:17:39.000Z | rover-stub/accelsensor.py | GamesCreatorsClub/GCC-Rover | 25a69f62a1bb01fc421924ec39f180f50d6a640b | [
"MIT"
] | null | null | null | rover-stub/accelsensor.py | GamesCreatorsClub/GCC-Rover | 25a69f62a1bb01fc421924ec39f180f50d6a640b | [
"MIT"
] | null | null | null |
#
# Copyright 2016-2017 Games Creators Club
#
# MIT License
#
from sonarsensor_service import *
| 10.888889 | 41 | 0.744898 |
6c2a16cd533af1320b7486ce971ab489987fbb0b | 806 | py | Python | CelestialMechanics/mu.py | Camiloasc1/AstronomyUNAL | 0d533c1737e5328605c70f614024e1759e8d0962 | [
"MIT"
] | null | null | null | CelestialMechanics/mu.py | Camiloasc1/AstronomyUNAL | 0d533c1737e5328605c70f614024e1759e8d0962 | [
"MIT"
] | null | null | null | CelestialMechanics/mu.py | Camiloasc1/AstronomyUNAL | 0d533c1737e5328605c70f614024e1759e8d0962 | [
"MIT"
] | null | null | null | import numpy as np
from CelestialMechanics.kepler.constants import K
def mu_sun(m2_over_m1: float) -> float:
    """Gravitational parameter for a body orbiting the Sun.

    Computes k * sqrt(1 + m2/m1) and returns its square, i.e.
    mu = k^2 * (1 + m2/m1), where k is the constant ``K`` imported from the
    Kepler constants module (presumably the Gaussian gravitational constant).

    :param m2_over_m1: mass ratio of the orbiting body to the Sun
    :type m2_over_m1: float
    :return: mu = k^2 (1 + m2/m1)
    :rtype: float
    """
    root = K * np.sqrt(1. + m2_over_m1)
    return root * root
def mu_na(n: float, a: float) -> float:
    """Gravitational parameter from mean motion and semi-major axis.

    Kepler's third law rearranged: mu = n^2 * a^3.

    :param n: mean motion in degrees
    :type n: float
    :param a: semi-major axis
    :type a: float
    :return: mu = n^2 a^3
    :rtype: float
    """
    n_squared = n * n
    return n_squared * a * a * a
def mu_gm1m2(m1: float, m2: float) -> float:
    """Gravitational parameter of a two-body system: mu = G (m1 + m2).

    :param m1: mass 1
    :type m1: float
    :param m2: mass 2
    :type m2: float
    :return: mu = G (m1 + m2)
    :rtype: float

    NOTE(review): ``G`` comes from astropy.constants and carries units, so the
    product is likely an astropy Quantity rather than a plain float, despite
    the annotation -- confirm against callers.
    """
    from astropy.constants import G
    total_mass = m1 + m2
    return G * total_mass
| 16.791667 | 49 | 0.545906 |
6c2ea613a50a1e1e9624048d804bb8ed4e0017dd | 3,276 | py | Python | herbstluftwm/hl_panel_content.py | FAUSheppy/config | 998e13f71a4b48c60f645470631cf937586be2fd | [
"Unlicense"
] | 1 | 2019-05-07T13:03:10.000Z | 2019-05-07T13:03:10.000Z | herbstluftwm/hl_panel_content.py | FAUSheppy/config | 998e13f71a4b48c60f645470631cf937586be2fd | [
"Unlicense"
] | null | null | null | herbstluftwm/hl_panel_content.py | FAUSheppy/config | 998e13f71a4b48c60f645470631cf937586be2fd | [
"Unlicense"
] | null | null | null | #!/usr/bin/python3
import hl_utils
from hl_constants import *
import string
import re
from datetime import datetime
if __name__ == "__main__":
print(logins(),ip(),vpn(),guthaben(),battery(),date(),sep='',end='')
| 31.2 | 100 | 0.501832 |
6c2eba18adaa6d56cede8b191f75d6ce31f0cf4f | 1,368 | py | Python | views.py | vigilcommunity/mega-project | 09a44c76170c71ee4c1d206fb0942b72c65ff45f | [
"MIT"
] | null | null | null | views.py | vigilcommunity/mega-project | 09a44c76170c71ee4c1d206fb0942b72c65ff45f | [
"MIT"
] | null | null | null | views.py | vigilcommunity/mega-project | 09a44c76170c71ee4c1d206fb0942b72c65ff45f | [
"MIT"
] | null | null | null | import datetime
from django.contrib import messages
from django.contrib.auth import authenticate, login, logout
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.urls import reverse
from vigil_ctf_app.EmailBackEnd import EmailBackEnd
#Authentication views ONLY ONLY
| 29.106383 | 120 | 0.710526 |
6c2f5071a275540f672417776634a7f3cf12f94d | 19,223 | py | Python | graph-measures/features_algorithms/vertices/motifs.py | Unknown-Data/QGCN | e074ada31c13b6de6eabba2b2ebce90e88fdfdbf | [
"MIT"
] | 3 | 2021-04-21T16:06:51.000Z | 2022-03-31T12:09:01.000Z | graph-measures/features_algorithms/vertices/motifs.py | Unknown-Data/QGCN | e074ada31c13b6de6eabba2b2ebce90e88fdfdbf | [
"MIT"
] | 1 | 2021-02-04T07:48:16.000Z | 2021-02-24T23:01:41.000Z | graph-measures/features_algorithms/vertices/motifs.py | Unknown-Data/QGCN | e074ada31c13b6de6eabba2b2ebce90e88fdfdbf | [
"MIT"
] | null | null | null | import os
import pickle
from functools import partial
from itertools import permutations, combinations
import networkx as nx
import numpy as np
from bitstring import BitArray
from collections import Counter
try:
from graph_measures.features_infra.feature_calculators import NodeFeatureCalculator, FeatureMeta
except ModuleNotFoundError as e:
from features_infra.feature_calculators import NodeFeatureCalculator, FeatureMeta
# Absolute path of this module file and its grandparent directory
# (used elsewhere to locate package resources).
CUR_PATH = os.path.realpath(__file__)
BASE_PATH = os.path.dirname(os.path.dirname(CUR_PATH))

# Module-level debugging/behaviour flags.
VERBOSE = False
DEBUG =False
SAVE_COUNTED_MOTIFS = False

# Motif node-groups of special interest; each group is kept sorted.
interesting_groups = [
    sorted([0, 1, 8, 27])
]
# consider ignoring node's data
def nth_nodes_motif(motif_level):
    """Return a node-motif calculator factory bound to the given motif level."""
    return partial(MotifsNodeCalculator, level=motif_level)
def nth_edges_motif(motif_level):
    """Return a motif calculator factory bound to the given motif level."""
    # NOTE(review): this body is identical to nth_nodes_motif -- it also builds
    # a MotifsNodeCalculator, even though the name and the "me3"/"me4" feature
    # tags suggest an edge-level calculator was intended. Confirm whether a
    # MotifsEdgeCalculator should be used here instead.
    return partial(MotifsNodeCalculator, level=motif_level)
# Registry of node-level motif features: name -> FeatureMeta wrapping a
# calculator factory for 3- and 4-node motifs.
feature_node_entry = {
    "motif3": FeatureMeta(nth_nodes_motif(3), {"m3"}),
    "motif4": FeatureMeta(nth_nodes_motif(4), {"m4"}),
}

# Registry of edge-level motif features (same structure, "me*" tags).
feature_edge_entry = {
    "motif3_edge": FeatureMeta(nth_edges_motif(3), {"me3"}),
    "motif4_edge": FeatureMeta(nth_edges_motif(4), {"me4"}),
}
if __name__ == "__main__":
    # Ad-hoc smoke test: run the level-4 edge-motif feature on the maximal
    # connected component. The level-3 variant is kept commented out below.
    from measure_tests.specific_feature_test import test_specific_feature
    # Previous version contained a bug while counting twice sub-groups with double edges
    # test_specific_feature(nth_edges_motif(3), is_max_connected=True)
    test_specific_feature(nth_edges_motif(4), is_max_connected=True)
# def _calculate_motif_dictionaries(self):
# motifs_edges_dict = {}
# motifs_vertices_dict = {}
# motif_edges = list(permutations(range(self._level), 2))
#
# motif_file = pandas.read_csv(self._motif_path(), delimiter="\t")
# if not self._gnx.is_directed():
# motifs_vertices_dict = {BitArray(length=3, int=int(y)).bin: int(x) for i, (x, y) in motif_file.iterrows()}
# else:
# num_edges = len(motif_edges)
# for _, (x, y) in motif_file.iterrows():
# bin_repr = BitArray(length=num_edges, int=int(y))
# motifs_vertices_dict[bin_repr.bin] = int(x)
# motifs_edges_dict[bin_repr.bin] = [edge_type for bit, edge_type in zip(bin_repr, motif_edges) if bit]
#
# return {'v': motifs_vertices_dict, 'e': motifs_edges_dict}
###########################################################################################
# def _calculate(self, include=None):
# all_motifs = set(self._node_variations.values())
# undirected_gnx = self._gnx.to_undirected()
# for node in self._order_by_degree():
# history = set()
# self._features[node] = {motif_number: 0 for motif_number in all_motifs}
# neighbors_gnx = self._gnx.subgraph(self._get_neighborhood(node, self._level, gnx=undirected_gnx))
# for group in self._get_subgroups(node, self._level, gnx=neighbors_gnx):
# group = sorted(group)
# if group in history:
# continue
# history.add(group)
# motif_number = self._get_motif_number(group)
# self._features[node][motif_number] += 1
# self._gnx.remove_node(node)
#
# def _subgroups(self, node, level, gnx=None):
# if gnx is None:
# gnx = self._gnx
# if level == 1:
# return node
#
# def _calculate1(self):
# for node in self._order_by_degree():
# history = {}
# for sub_group in self._subgroups(node, self._level):
# if sub_group in history:
# continue
#
# # this might be more efficient than dijkstra (with cutoff) - a simple BFS
# def _get_neighborhood(self, node, dist, gnx=None):
# dist -= 1
# if gnx is None:
# gnx = self._gnx
# neighborhood = set()
# queue = [(node, 0)]
# while queue:
# cur_node, node_dist = queue.pop(0)
# neighborhood.add(cur_node)
# neighbors = set(nx.all_neighbors(gnx, cur_node)).difference(neighborhood)
# if node_dist >= dist - 1:
# neighborhood.update(neighbors)
# else: # node_dist is lower than (dist - 1)
# queue.extend((n, node_dist + 1) for n in neighbors)
# return neighborhood
#
# # seems more simple - but it's more costly
# def _get_neighborhood_dijkstra(self, node, dist, gnx=None):
# if gnx is None:
# gnx = self._gnx
# return set(nx.single_source_dijkstra_path_length(gnx, node, cutoff=dist))
#
# def _calculate2(self):
# self._undirected_gnx = self._gnx.to_undirected()
# for node in self._order_by_degree(self._undirected_gnx):
# # calculating the nth neighborhood of the node - is working on the neighborhood graph more efficient?
# neighbors_gnx = self._gnx.subgraph(self._get_neighborhood(node, self._level))
# history = {}
# for sub_group in self._subgroups(node, self._level, gnx=neighbors_gnx):
# if sub_group in history:
# continue
# self._gnx.remove_node(node)
# TODO: consider removing
# def _initialize_motif_hist(self):
# length = max(self._node_variations.values()) + 1
# return {n: [0] * length for n in self._gnx}
#
# def _initialize_motif_hist(self):
# node_hist = super(MotifsEdgeCalculator, self)._initialize_motif_hist()
#
# length = max(self._edge_variations.values()) + 1
# edge_hist = {e: [0] * length for e in self._gnx.edges()}
# return {'v': node_hist, 'e': edge_hist}
| 41.78913 | 121 | 0.572179 |
6c313c1861eaf709490f1bb0d7760b28295d8922 | 2,885 | py | Python | src/PostProcessing.py | PedroCardouzo/XML_Analyzer | aa4d9069f65aed927d7ecdc59ad36f823abc4c0c | [
"MIT"
] | null | null | null | src/PostProcessing.py | PedroCardouzo/XML_Analyzer | aa4d9069f65aed927d7ecdc59ad36f823abc4c0c | [
"MIT"
] | null | null | null | src/PostProcessing.py | PedroCardouzo/XML_Analyzer | aa4d9069f65aed927d7ecdc59ad36f823abc4c0c | [
"MIT"
] | null | null | null | from src import XMLAnalyzerException
import lxml.etree as ET
from src.Structures import *
from src import XMLFilter
from src import XMLUtil
import constants
import re
from src.xml_decoder import html_entitize
# split at every space, except if it is inside "$param('<here>') statement
# todo: move to tests file
# string_splitter("Employee EmployeeIDExternal == $param('please provide the EmployeeIDExternal you want to keep: ')")
# string_splitter("Employee $param('test param with spaces') == $param('please want to keep: ')")
# both passed
| 32.784091 | 121 | 0.609359 |
6c31ccf0bc17c144c7bcad4490fb8229ffccbad2 | 4,954 | py | Python | sigpy/mri/rf/sim.py | jonbmartin/sigpy-rf-staging | 1be409a1ce0799574f1a979044b02fe21a19bf5d | [
"BSD-3-Clause"
] | null | null | null | sigpy/mri/rf/sim.py | jonbmartin/sigpy-rf-staging | 1be409a1ce0799574f1a979044b02fe21a19bf5d | [
"BSD-3-Clause"
] | null | null | null | sigpy/mri/rf/sim.py | jonbmartin/sigpy-rf-staging | 1be409a1ce0799574f1a979044b02fe21a19bf5d | [
"BSD-3-Clause"
] | null | null | null | """RF Pulse Simulation Functions.
"""
from sigpy import backend
__all__ = ['abrm', 'abrm_nd', 'abrm_hp']
def abrm(rf, x, balanced=False):
    r"""1D RF pulse simulation, with simultaneous RF + gradient rotations.

    Applies the hard-pulse approximation sample by sample, accumulating the
    Cayley-Klein (alpha, beta) rotation parameters at every spatial location.

    Args:
        rf (array): rf waveform input.
        x (array): spatial locations.
        balanced (bool): toggles application of rewinder.

    Returns:
        2-element tuple containing

        - **a** (*array*): SLR alpha parameter.
        - **b** (*array*): SLR beta parameter.

    References:
        Pauly, J., Le Roux, Patrick., Nishimura, D., and Macovski, A.(1991).
        'Parameter Relations for the Shinnar-LeRoux Selective Excitation
        Pulse Design Algorithm'.
        IEEE Transactions on Medical Imaging, Vol 10, No 1, 53-65.
    """
    device = backend.get_device(rf)
    xp = device.xp  # array module (numpy or cupy) for rf's device
    with device:
        eps = 1e-16  # guards divisions when both rf sample and om are zero

        # Implicit constant gradient spreading 2*pi of phase over the pulse.
        g = xp.ones(xp.size(rf)) * 2 * xp.pi / xp.size(rf)

        # Cayley-Klein parameters start at identity: a = 1, b = 0.
        a = xp.ones(xp.size(x), dtype=complex)
        b = xp.zeros(xp.size(x), dtype=complex)
        for mm in range(xp.size(rf)):
            # Off-resonance (gradient) phase at each location for this sample.
            om = x * g[mm]
            # Total rotation angle of this hard-pulse segment.
            phi = xp.sqrt(xp.abs(rf[mm]) ** 2 + om ** 2) + eps

            # Unit rotation axis: (Re rf, Im rf, om) normalized by phi.
            n = xp.column_stack((xp.real(rf[mm]) / phi,
                                 xp.imag(rf[mm]) / phi,
                                 om / phi))

            # Cayley-Klein parameters of this single rotation.
            av = xp.cos(phi / 2) - 1j * n[:, 2] * xp.sin(phi / 2)
            bv = -1j * (n[:, 0] + 1j * n[:, 1]) * xp.sin(phi / 2)

            # Compose this rotation with the accumulated one.
            at = av * a - xp.conj(bv) * b
            bt = bv * a + xp.conj(av) * b

            a = at
            b = bt

        if balanced:  # apply a rewinder
            # Pure z-rotation: a takes the phase, b the conjugate phase.
            g = -2 * xp.pi / 2
            om = x * g
            phi = xp.abs(om) + eps
            nz = om / phi

            av = xp.cos(phi / 2) - 1j * nz * xp.sin(phi / 2)

            a = av * a
            b = xp.conj(av) * b

    return a, b
def abrm_nd(rf, x, g):
r"""N-dim RF pulse simulation
Assumes that x has inverse spatial units of g, and g has gamma*dt applied.
Assumes dimensions x = [...,Ndim], g = [Ndim,Nt].
Args:
rf (array): rf waveform input.
x (array): spatial locations.
g (array): gradient array.
Returns:
2-element tuple containing
- **a** (*array*): SLR alpha parameter.
- **b** (*array*): SLR beta parameter.
References:
Pauly, J., Le Roux, Patrick., Nishimura, D., and Macovski, A.(1991).
'Parameter Relations for the Shinnar-LeRoux Selective Excitation
Pulse Design Algorithm'.
IEEE Transactions on Medical Imaging, Vol 10, No 1, 53-65.
"""
device = backend.get_device(rf)
xp = device.xp
with device:
eps = 1e-16
a = xp.ones(xp.shape(x)[0], dtype=complex)
b = xp.zeros(xp.shape(x)[0], dtype=complex)
for mm in range(xp.size(rf)):
om = x @ g[mm, :]
phi = xp.sqrt(xp.abs(rf[mm]) ** 2 + om ** 2)
n = xp.column_stack((xp.real(rf[mm]) / (phi + eps),
xp.imag(rf[mm]) / (phi + eps),
om / (phi + eps)))
av = xp.cos(phi / 2) - 1j * n[:, 2] * xp.sin(phi / 2)
bv = -1j * (n[:, 0] + 1j * n[:, 1]) * xp.sin(phi / 2)
at = av * a - xp.conj(bv) * b
bt = bv * a + xp.conj(av) * b
a = at
b = bt
return a, b
def abrm_hp(rf, gamgdt, xx, dom0dt=0):
r"""1D RF pulse simulation, with non-simultaneous RF + gradient rotations.
Args:
rf (array): rf pulse samples in radians.
gamdt (array): gradient samples in radians/(units of xx).
xx (array): spatial locations.
dom0dt (float): off-resonance phase in radians.
Returns:
2-element tuple containing
- **a** (*array*): SLR alpha parameter.
- **b** (*array*): SLR beta parameter.
References:
Pauly, J., Le Roux, Patrick., Nishimura, D., and Macovski, A.(1991).
'Parameter Relations for the Shinnar-LeRoux Selective Excitation
Pulse Design Algorithm'.
IEEE Transactions on Medical Imaging, Vol 10, No 1, 53-65.
"""
device = backend.get_device(rf)
xp = device.xp
with device:
Ns = xp.shape(xx)
Ns = Ns[0] # Ns: # of spatial locs
Nt = xp.shape(gamgdt)
Nt = Nt[0] # Nt: # time points
a = xp.ones((Ns,))
b = xp.zeros((Ns,))
for ii in xp.arange(Nt):
# apply phase accural
z = xp.exp(-1j * (xx * gamgdt[ii, ] + dom0dt))
b = b * z
# apply rf
C = xp.cos(xp.abs(rf[ii]) / 2)
S = 1j * xp.exp(1j * xp.angle(rf[ii])) * xp.sin(xp.abs(rf[ii]) / 2)
at = a * C - b * xp.conj(S)
bt = a * S + b * C
a = at
b = bt
z = xp.exp(1j / 2 * (xx * xp.sum(gamgdt, axis=0) + Nt * dom0dt))
a = a * z
b = b * z
return a, b
| 30.207317 | 79 | 0.489705 |
6c31d41587961f18210049838ec530657f3a5618 | 821 | py | Python | lint.py | fmacpro/apache-virtualhost-configuration-examples | 4c341ccb96c2e17cb9063bf1cae342ad4ee72e26 | [
"MIT"
] | null | null | null | lint.py | fmacpro/apache-virtualhost-configuration-examples | 4c341ccb96c2e17cb9063bf1cae342ad4ee72e26 | [
"MIT"
] | null | null | null | lint.py | fmacpro/apache-virtualhost-configuration-examples | 4c341ccb96c2e17cb9063bf1cae342ad4ee72e26 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# apachelint - simple tool to cleanup apache conf files
# USAGE: apachelint [conffile]
# -*-Python-*-
import sys
import re
filename = sys.argv[1]
indentlevel = 0
indentstep = 4
prevlineblank = False
with open(filename) as f:
for line in f:
# strip leading & trailing space / line ending
line = re.sub('\s+$', '', line)
line = re.sub('^\s+', '', line)
# compress blank lines
if line == "":
if prevlineblank:
next
else:
prevlineblank = True
else:
prevlineblank = False
if re.search('</', line):
indentlevel -= 1
indent = ' ' * indentlevel * indentstep
print indent + line
if re.search('<(?!/)', line):
indentlevel += 1 | 22.805556 | 55 | 0.528624 |
6c3476c6da1a1f1dd98c6128ba3f0c07c7db39ea | 931 | py | Python | spefit/common/basic.py | watsonjj/spefit | 7a3931c5a59e43a73e2e48fe3cd352e20a706460 | [
"BSD-3-Clause"
] | null | null | null | spefit/common/basic.py | watsonjj/spefit | 7a3931c5a59e43a73e2e48fe3cd352e20a706460 | [
"BSD-3-Clause"
] | null | null | null | spefit/common/basic.py | watsonjj/spefit | 7a3931c5a59e43a73e2e48fe3cd352e20a706460 | [
"BSD-3-Clause"
] | null | null | null | """Vectorized math formulae"""
from numba import vectorize, int64, float64
from math import lgamma, exp, isnan, log
__all__ = ["binom", "xlogy"]
| 29.09375 | 81 | 0.657358 |
6c35a72377a5c784fe27610ba9ce572e4beeb277 | 1,171 | py | Python | main.py | Yash7689/copy-data-from-1-txt-to-another | 1ab88c89209088b04c2105c5db4342029a079219 | [
"Apache-2.0"
] | null | null | null | main.py | Yash7689/copy-data-from-1-txt-to-another | 1ab88c89209088b04c2105c5db4342029a079219 | [
"Apache-2.0"
] | null | null | null | main.py | Yash7689/copy-data-from-1-txt-to-another | 1ab88c89209088b04c2105c5db4342029a079219 | [
"Apache-2.0"
] | null | null | null | # @Copyright [2021] [Yash Bajaj]
import fileinput as fi
# This module replaces the word <|SPACE|> with a new line (code line 18)
# This is a input function whatever you will write that will come in input.txt
# This is a function to copy data from input.txt and paste it in copyied.txt
# This function replaces <|SPACE|> with new line this will also create one backup file with extention .bak
if __name__ == '__main__':
writer()
copy()
editer()
# This will run the code
| 37.774194 | 123 | 0.681469 |
6c36540f75ff0aa4e3d1fa481631b799e5a9132c | 1,041 | py | Python | portfolio_pj/portfolio_app/views.py | duynb92/portfolio_site | f6898e8d1c3a67aa8dc6eafc7e4804e81dc46063 | [
"MIT"
] | null | null | null | portfolio_pj/portfolio_app/views.py | duynb92/portfolio_site | f6898e8d1c3a67aa8dc6eafc7e4804e81dc46063 | [
"MIT"
] | null | null | null | portfolio_pj/portfolio_app/views.py | duynb92/portfolio_site | f6898e8d1c3a67aa8dc6eafc7e4804e81dc46063 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from models import *
# Create your views here. | 33.580645 | 80 | 0.727185 |
6c3666e9b94187f8c2b912f96ab0492447c6ab94 | 16,981 | py | Python | torchfurnace/engine.py | tianyu-su/torchfurnace | 2f4a9a0655a8d3c3e231c86611085f834e03c2f8 | [
"MIT"
] | 8 | 2020-03-20T13:49:30.000Z | 2021-12-04T07:41:27.000Z | torchfurnace/engine.py | tianyu-su/torchfurnace | 2f4a9a0655a8d3c3e231c86611085f834e03c2f8 | [
"MIT"
] | null | null | null | torchfurnace/engine.py | tianyu-su/torchfurnace | 2f4a9a0655a8d3c3e231c86611085f834e03c2f8 | [
"MIT"
] | 1 | 2020-04-01T11:01:09.000Z | 2020-04-01T11:01:09.000Z | # -*- coding: utf-8 -*-
# Date: 2020/3/17 12:16
"""
an engine for deep learning task
"""
__author__ = 'tianyu'
import abc
import random
import time
import warnings
import numpy as np
import torch.backends.cudnn
import torch.nn.functional as F
import torch.utils.data
from torch.optim.lr_scheduler import StepLR
from .options import Parser
from .tracer import Tracer
from .utils import tracer_component as tc
from .utils.function import *
def _before_evaluate(self, model):
"""
load checkpoint
"""
for pth, m in zip(self._args.evaluate, [model] if not isinstance(model, list) else model):
if os.path.isfile(pth):
log("=> loading checkpoint '{}'".format(pth))
checkpoint = torch.load(pth, map_location='cpu')
m.load_state_dict(checkpoint['state_dict'])
log("=> loaded checkpoint '{}' (epoch {} Acc@1 {})"
.format(pth, checkpoint['epoch'], checkpoint['best_acc1']))
else:
assert False, "=> no checkpoint found at '{}'".format(pth)
def _after_evaluate(self):
"""
execute something after evaluation
"""
pass
def _on_end_epoch(self, model, optimizer, is_best):
"""save more than one model and optimizer, for example GAN"""
postfix = f'_{self._args.extension}'
if self._args.extension == '': postfix = ''
for m, optim in zip([model] if not isinstance(model, list) else model,
[optimizer] if not isinstance(optimizer, list) else optimizer):
self._tracer.store(tc.Model(
f"{model.__class__.__name__}{postfix}.pth.tar",
{
'epoch': self._state['epoch'] + 1,
'arch': str(m),
'state_dict': m.state_dict(),
'best_acc1': self._state['best_acc1'],
'optimizer': optim.state_dict(),
}, is_best))
def _on_start_batch(self, data):
"""override to adapt yourself dataset __getitem__"""
inp, target = data
if self._args.gpu is not None:
return inp.cuda(self._args.gpu), target.cuda(self._args.gpu), target.size(0)
else:
return inp, target, target.size(0)
def _add_on_end_batch_log(self, training):
""" user can add some log information with _on_start_epoch using all kinds of meters in _on_end_batch"""
if training:
pass
else:
pass
return ""
def _add_on_end_batch_tb(self, training):
""" user can add some tensorboard operations with _on_start_epoch using all kinds of meters"""
if training:
pass
else:
pass
def _on_end_batch(self, data_loader, optimizer=None):
""" print log and visualization"""
training_iterations = self._state['training_iterations']
if self._switch_training:
if self._state['iteration'] != 0 and self._state['iteration'] % self._args.print_freq == 0:
print_process_bar = {'p_bar': self._args.p_bar, 'current_batch': self._state['iteration'], 'total_batch': len(data_loader)}
if self._args.p_bar:
prefix_info = "Epoch:[{0}] "
else:
prefix_info = 'Epoch: [{0}][{1}/{2}]\t'
fix_log = prefix_info + 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t' \
'Data {data_time.val:.3f} ({data_time.avg:.3f})\tLoss {loss.val:.4f} ({loss.avg:.4f})\t' \
'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t' \
'Acc@5 {top5.val:.3f} ({top5.avg:.3f})\t'
fix_log = fix_log.format(
self._state['epoch'], self._state['iteration'], len(data_loader), batch_time=self._meters.batch_time,
data_time=self._meters.data_time, loss=self._meters.losses,
top1=self._meters.top1, top5=self._meters.top5)
log(fix_log + self._add_on_end_batch_log(True), **print_process_bar)
if self._args.no_tb:
self._tracer.tb.add_scalars('data/loss', {
'training': self._meters.losses.avg,
}, training_iterations)
self._tracer.tb.add_scalar('data/epochs', self._state['epoch'], training_iterations)
for oi, optim in enumerate([optimizer] if not isinstance(optimizer, list) else optimizer):
self._tracer.tb.add_scalars(f'data/learning_rate', {f'lr_optim_{oi + 1}': optim.param_groups[-1]['lr']}, training_iterations)
self._tracer.tb.add_scalars('data/precision/top1', {
'training': self._meters.top1.avg,
}, training_iterations)
self._tracer.tb.add_scalars('data/precision/top5', {
'training': self._meters.top5.avg
}, training_iterations)
self._tracer.tb.add_scalars('data/runtime', {
'batch_time': self._meters.batch_time.avg,
'data_time': self._meters.data_time.avg
}, training_iterations)
self._add_on_end_batch_tb(True)
elif not self._args.evaluate:
fix_log = ('Testing: Epoch [{0}] Acc@1 {top1.avg:.3f}\tAcc@5 {top5.avg:.3f}\tLoss {loss.avg:.4f}\t[best:{best_acc}]\t'
.format(self._state['epoch'], top1=self._meters.top1, top5=self._meters.top5,
loss=self._meters.losses, best_acc=self._state['best_acc1']))
log(fix_log + self._add_on_end_batch_log(False), color="green")
if self._args.no_tb:
self._tracer.tb.add_scalars('data/loss', {
'validation': self._meters.losses.avg,
}, training_iterations)
self._tracer.tb.add_scalars('data/precision/top1', {
'validation': self._meters.top1.avg,
}, training_iterations)
self._tracer.tb.add_scalars('data/precision/top5', {
'validation': self._meters.top5.avg
}, training_iterations)
self._add_on_end_batch_tb(False)
def learning(self, model, optimizer, train_dataset, val_dataset):
"""
Core function of engine to organize training process
:param val_dataset: training dataset
:param train_dataset: validation dataset
:param model: one or list
:param optimizer: one or list
"""
# save config
cfg = {f"optimizer{i + 1}": optim for i, optim in enumerate([optimizer] if not isinstance(optimizer, list) else optimizer)}
self._tracer.store(tc.Config({**cfg, **vars(self._args)}))
train_loader = self._warp_loader(True, train_dataset)
val_loader = self._warp_loader(False, val_dataset)
log('==> Start ...', color="red")
if self._args.resume:
self._resume(model, optimizer)
# cuda setup
if self._args.gpu is not None:
[m.cuda(self._args.gpu) for m in (model if isinstance(model, list) else [model])]
if self._args.evaluate:
self._before_evaluate(model)
self._validate(model, val_loader)
self._after_evaluate()
else:
ajlr = None
if self._args.adjust_lr:
ajlr = self._get_lr_scheduler(optimizer)
for epoch in range(self._args.start_epoch, self._args.epochs):
# train for one epoch
self._train(model, train_loader, optimizer, epoch)
# evaluate on validation set
acc1 = self._validate(model, val_loader)
# remember best acc@1 and save checkpoint
is_best = acc1 > self._state['best_acc1']
self._state['best_acc1'] = max(acc1, self._state['best_acc1'])
self._on_end_epoch(model, optimizer, is_best)
if self._args.adjust_lr:
[lr.step() for lr in ajlr]
print(f"Best Acc1:{self._state['best_acc1']}")
self._close()
return self._state['best_acc1']
| 39.955294 | 149 | 0.572876 |
6c36c7337778993804185f55e34f582ccb3e038c | 3,736 | py | Python | tests/test_ninjadog.py | knowsuchagency/ninjadog | 54f0c98da1006d97b6e39d39d0e4e056288f52d0 | [
"MIT"
] | 26 | 2017-06-23T02:18:54.000Z | 2022-02-19T08:45:11.000Z | tests/test_ninjadog.py | knowsuchagency/ninjadog | 54f0c98da1006d97b6e39d39d0e4e056288f52d0 | [
"MIT"
] | 21 | 2017-06-22T07:30:20.000Z | 2022-03-26T02:23:24.000Z | tests/test_ninjadog.py | knowsuchagency/ninjadog | 54f0c98da1006d97b6e39d39d0e4e056288f52d0 | [
"MIT"
] | 2 | 2018-06-20T01:16:27.000Z | 2020-07-14T19:55:27.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tests for `ninjadog` package."""
# TODO: test raises ValueError when pug cli can't be found and not passed explicitly to renderer
| 27.470588 | 112 | 0.635974 |
6c37074352737689850fbeed83a2fff6562b2609 | 1,610 | py | Python | core/views.py | Hassan-gholipoor/Todo_App_API | 19f9c141868fa0b01a11ed2a20f665d97b877340 | [
"MIT"
] | null | null | null | core/views.py | Hassan-gholipoor/Todo_App_API | 19f9c141868fa0b01a11ed2a20f665d97b877340 | [
"MIT"
] | null | null | null | core/views.py | Hassan-gholipoor/Todo_App_API | 19f9c141868fa0b01a11ed2a20f665d97b877340 | [
"MIT"
] | null | null | null | from rest_framework import viewsets, permissions
from rest_framework_simplejwt.authentication import JWTAuthentication
from rest_framework.response import Response
from rest_framework import status
from core.serializers import TodoSerializer, TodoDetailSerializer
from core.models import Todo
| 36.590909 | 81 | 0.709317 |
6c3ba5d9b3babe444d2c4d3c2e6c46f0cd91ef11 | 27 | py | Python | ep_ws/devel/lib/python3/dist-packages/realsense2_camera/srv/__init__.py | fsrlab/FSR_ROS_SIM | f22dfbd19ca1f2f1c7456fc51fb382509f9d7c62 | [
"MIT"
] | null | null | null | ep_ws/devel/lib/python3/dist-packages/realsense2_camera/srv/__init__.py | fsrlab/FSR_ROS_SIM | f22dfbd19ca1f2f1c7456fc51fb382509f9d7c62 | [
"MIT"
] | null | null | null | ep_ws/devel/lib/python3/dist-packages/realsense2_camera/srv/__init__.py | fsrlab/FSR_ROS_SIM | f22dfbd19ca1f2f1c7456fc51fb382509f9d7c62 | [
"MIT"
] | null | null | null | from ._DeviceInfo import *
| 13.5 | 26 | 0.777778 |
6c3c2ae1bdf2d29f699c3d8948c8a02e1af7dcc8 | 788 | py | Python | users/forms.py | yeezy-na-izi/YlDjango | 6fd0763183d76e4f7ca4a9686170d0665d7c04e9 | [
"MIT"
] | 6 | 2022-03-06T10:43:06.000Z | 2022-03-24T13:00:12.000Z | users/forms.py | yeezy-na-izi/YlDjango | 6fd0763183d76e4f7ca4a9686170d0665d7c04e9 | [
"MIT"
] | 6 | 2022-03-09T13:22:41.000Z | 2022-03-25T09:21:37.000Z | users/forms.py | yeezy-na-izi/YlDjango | 6fd0763183d76e4f7ca4a9686170d0665d7c04e9 | [
"MIT"
] | null | null | null | from django.contrib.auth.forms import UserCreationForm
from users.models import User, Profile
from django import forms
| 24.625 | 63 | 0.623096 |
6c3c5ab25d2cf06474ae606ac7def120213405ed | 2,513 | py | Python | kbqa/create_question_data.py | terrifyzhao/neo4j_graph | 71f8ad1530805d0cca7ae2131f81a96a6b519d02 | [
"Apache-2.0"
] | 3 | 2020-06-01T01:45:44.000Z | 2021-05-10T06:05:18.000Z | kbqa/create_question_data.py | terrifyzhao/neo4j_graph | 71f8ad1530805d0cca7ae2131f81a96a6b519d02 | [
"Apache-2.0"
] | null | null | null | kbqa/create_question_data.py | terrifyzhao/neo4j_graph | 71f8ad1530805d0cca7ae2131f81a96a6b519d02 | [
"Apache-2.0"
] | 2 | 2021-04-05T03:09:09.000Z | 2021-09-19T11:29:38.000Z | from py2neo import Graph
import numpy as np
import pandas as pd
graph = Graph("http://192.168.50.179:7474", auth=("neo4j", "qwer"))
q1 = create_entity_question()
q2 = create_attribute_question()
q3 = create_relation_question()
df = pd.DataFrame()
df['question'] = q1 + q2 + q3
df['label'] = [0] * len(q1) + [1] * len(q2) + [2] * len(q3)
df.to_csv('question_classification.csv', encoding='utf_8_sig', index=False)
| 29.22093 | 96 | 0.54994 |
6c3ca74700c452639c1abd59ef05386a970cf094 | 1,095 | py | Python | src/detect_utils.py | iglaweb/HippoYD | da2c40be8017c43a7b7b6c029e2df30cf7d54932 | [
"Apache-2.0"
] | 7 | 2021-07-02T03:57:20.000Z | 2022-03-20T13:23:32.000Z | src/detect_utils.py | filipul1s/HippoYD | da2c40be8017c43a7b7b6c029e2df30cf7d54932 | [
"Apache-2.0"
] | null | null | null | src/detect_utils.py | filipul1s/HippoYD | da2c40be8017c43a7b7b6c029e2df30cf7d54932 | [
"Apache-2.0"
] | 3 | 2021-07-02T16:07:28.000Z | 2022-03-20T13:23:33.000Z | import cv2
from scipy.spatial import distance as dist
| 36.5 | 117 | 0.675799 |
6c3cce245cb8dd51640bae04fe6b64d1a7249903 | 3,626 | py | Python | rna_format.py | thedinak/Genetics-to-Therapuetics | f38cc76ceb8b9217b3f4b19f985a255c1c1dd98d | [
"MIT"
] | null | null | null | rna_format.py | thedinak/Genetics-to-Therapuetics | f38cc76ceb8b9217b3f4b19f985a255c1c1dd98d | [
"MIT"
] | null | null | null | rna_format.py | thedinak/Genetics-to-Therapuetics | f38cc76ceb8b9217b3f4b19f985a255c1c1dd98d | [
"MIT"
] | null | null | null | import pandas as pd
import os
import tarfile
import glob
import json
def unzip_rna_seq_data(file_name, desired_folder_name):
''' Downloaded RNA files are tarfiles, this unzips them'''
if 'tar' in file_name:
open_tar = tarfile.open(file_name)
open_tar.extractall(f'{desired_folder_name}')
open_tar.close()
else:
print('Not a tarfile')
def unzip_individual_rna_seq_files(root_dir):
''' Tarfile unzip results in gz files, which need to be further unzipped'''
files_to_unpack = []
dfs = []
meta_data_file = ''.join(glob.glob('**/**metadata.cart**', recursive=True))
with open(meta_data_file, 'r') as f:
meta_data = json.load(f)
convert_filename_caseuuid = {meta_data[i]['file_id']:
meta_data[i]['associated_entities'][0]
['case_id'] for i in range(0, len(meta_data))}
# dictionary of file_id:case_id
for directory in os.listdir(root_dir):
try:
for filename in os.listdir(os.path.join(root_dir, directory)):
if ".gz" in filename:
files_to_unpack.append(os.path.join(root_dir,
directory, filename))
except NotADirectoryError:
continue
for file in files_to_unpack:
dfs.append(pd.read_csv
(file, compression='gzip', sep="\t", names=['gene',
convert_filename_caseuuid[os.path.split(os.path.dirname
(file))[1]]],
index_col='gene'))
# these dfs already have the correct case id name
return files_to_unpack, dfs, convert_filename_caseuuid
def concat_all_rna_seq(dfs):
''' Takes each individual rna seq file and concatenates them into one '''
rna_seq_data = pd.concat(dfs, join="outer", axis=1).T
if type(rna_seq_data.index[0]) == str:
rna_seq_data.reset_index(inplace=True)
return rna_seq_data
def convert_ensg_to_gene_name(dataframe_with_genes):
'''TCGA data is listed with ensemble names, this converts to gene
names for greater readability '''
change_name_file = 'mart_export.txt'
gene_names = {}
with open(change_name_file) as fh:
for line in fh:
ensg, gene_name = line.split(',', 1)
gene_names[gene_name.split('.')[0]] = ensg
dataframe = (dataframe_with_genes.rename
(columns=lambda x: x.split('.')[0]).rename(
columns=gene_names))
genes = dataframe.columns[1:-1].tolist()
return dataframe, genes, gene_names
def concat_rna_to_clinical_data(clinical_dataframe, rna_dataframe):
''' Combines clinical data and the rna seq data. Clinical dataframe should
have bcr_patient_uuid as the index. '''
full_data = pd.merge(rna_dataframe, clinical_dataframe,
how='right', left_on=['index'],
right_on=['bcr_patient_uuid'])
return full_data
def limit_full_data_for_pca(full_data, genes):
''' Removes rna seq files where there is no drug name available and limits
columns to rna seq data, drug name and vital status '''
limit_full_data = (full_data.loc[(full_data.standard_drugs != '')
& (full_data.standard_drugs != '[not available]')
& (full_data.standard_drugs != '[unknown]')].copy())
limit_full_data.dropna(subset=['index'], inplace=True)
columns_needed = genes+['standard_drugs', 'vital_status']
return limit_full_data.loc[:, columns_needed]
| 39.413043 | 79 | 0.619967 |
6c3cdcc2642ae1e7ae2f269889189d138f16d4af | 7,268 | py | Python | fasturl/fasturl.py | evite/django-fasturls | 52e397c5f4b4b2b7d6c5cd2bf9cc8cac1b4efa9b | [
"MIT"
] | null | null | null | fasturl/fasturl.py | evite/django-fasturls | 52e397c5f4b4b2b7d6c5cd2bf9cc8cac1b4efa9b | [
"MIT"
] | null | null | null | fasturl/fasturl.py | evite/django-fasturls | 52e397c5f4b4b2b7d6c5cd2bf9cc8cac1b4efa9b | [
"MIT"
] | null | null | null | import re
from collections import OrderedDict
from django.conf.urls import url as django_url, include
from django.core.urlresolvers import RegexURLResolver, RegexURLPattern
from django.utils.encoding import force_text
import logging
# Using FastUrl has a couple of caveats:
# 1. FastUrl tries to keep the resolution order the same as declared, but we cannot guarantee that the order will
# be exactly the same which could cause the wrong view to be returned if you have urlpatterns that overlap.
# 2. Detection of regexes within urlpatterns is very ad-hock, it would be easy to deliberately cause it to fail, but
# in practice it should cover most cases. Any errors should occur during url building rather than at resolution time
# Usage:
# Build your urlpatterns using 'FastUrl' instead of 'url' and then rebuild your urlpatterns with
# urlpatterns = render_fast_urls(urlpatterns)
def _is_django_regex(ob):
if isinstance(ob, RegexURLPattern) or isinstance(ob, RegexURLResolver):
return True
return False
def _add_url_to_tree(tree, url):
if isinstance(url, FastUrl):
url.add_to_tree(tree)
if _is_django_regex(url):
tree[('djangourl', _add_url_to_tree.django_urls)] = url
_add_url_to_tree.django_urls += 1
_add_url_to_tree.django_urls = 0 # counter for django only urls
merged_count = 0
_merge_single_children.count = 0
| 35.627451 | 128 | 0.589158 |
6c3d59a46c15d1afca1d52fd4d95d34b6fd700b1 | 6,679 | py | Python | experiments/2_training.py | helenacuesta/multif0-estimation-polyvocals | 4960f5415f8a170f2ff8d5b776bfd4cb5576d3ba | [
"MIT"
] | 36 | 2020-09-13T12:30:41.000Z | 2022-02-15T08:52:58.000Z | experiments/2_training.py | helenacuesta/multif0-estimation-polyvocals | 4960f5415f8a170f2ff8d5b776bfd4cb5576d3ba | [
"MIT"
] | 6 | 2020-09-04T11:14:14.000Z | 2022-02-09T23:49:59.000Z | experiments/2_training.py | helenacuesta/multif0-estimation-polyvocals | 4960f5415f8a170f2ff8d5b776bfd4cb5576d3ba | [
"MIT"
] | null | null | null | import os
import json
import keras
import numpy as np
import csv
from experiments import config
import utils
import utils_train
import models
import argparse
def experiment(save_key, model, data_splits_file, batch_size, active_str, muxrate):
"""
This should be common code for all experiments
"""
exper_dir = config.exper_output
(save_path, _, plot_save_path,
model_scores_path, _, _
) = utils_train.get_paths(exper_dir, save_key)
model_save_path = '/scratch/hc2945/data/models/'
if not os.path.exists(model_save_path):
os.mkdir(model_save_path)
model_save_path = os.path.join(model_save_path, "{}.pkl".format(save_key))
'''
# create data splits file if it doesnt exist
if not os.path.exists(
os.path.join(exper_dir, 'data_splits.json')):
create_data_splits(path_to_metadata_file='./mtracks_info.json', exper_dir=exper_dir)
'''
model, history, dat = train(model, model_save_path, data_splits_file,
batch_size, active_str, muxrate)
run_evaluation(exper_dir, save_key, history, dat, model)
print("Done! Results saved to {}".format(save_path))
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="Train specified model with training set.")
parser.add_argument("--model",
dest='model_name',
type=str,
help="Name of the model you want to train.")
parser.add_argument("--save_key",
dest='save_key',
type=str,
help="String to save model-related data.")
parser.add_argument("--data_splits_file",
dest='data_splits_file',
type=str,
help="Filename of the data splits file to use in the experiment.")
main(parser.parse_args())
| 28.421277 | 104 | 0.658332 |
6c3eef3ce318f9f2ea78b8b3df0a26bfa302ee81 | 106 | py | Python | src/pythonFEA/defaults.py | honzatomek/pythonFEA | c851c20800a06cc2084ef53dfd2ab67e7dfbc3b7 | [
"MIT"
] | null | null | null | src/pythonFEA/defaults.py | honzatomek/pythonFEA | c851c20800a06cc2084ef53dfd2ab67e7dfbc3b7 | [
"MIT"
] | null | null | null | src/pythonFEA/defaults.py | honzatomek/pythonFEA | c851c20800a06cc2084ef53dfd2ab67e7dfbc3b7 | [
"MIT"
] | null | null | null | # DEFUALT SETUP FOR NUMBERS
DEFAULT_FLOAT = float
# DEFAULT SETUP FOR STRINGS
DEFAULT_LABEL_LENGTH = 120
| 17.666667 | 27 | 0.801887 |
6c3f1a1b4560f11557e8a7fa31b050b56c6becc0 | 6,666 | py | Python | backend/validators/models.py | Cryptorubic/rubic-validator | 88fd90d15da1fad538667c375189e2625d045ab0 | [
"MIT"
] | null | null | null | backend/validators/models.py | Cryptorubic/rubic-validator | 88fd90d15da1fad538667c375189e2625d045ab0 | [
"MIT"
] | null | null | null | backend/validators/models.py | Cryptorubic/rubic-validator | 88fd90d15da1fad538667c375189e2625d045ab0 | [
"MIT"
] | null | null | null | from logging import exception, info
from requests import post as request_post
from requests.exceptions import RequestException
from typing import Union
from uuid import UUID
from django.conf import settings
from django.db.models import (
CASCADE,
CharField,
ForeignKey,
OneToOneField,
)
from web3.types import HexBytes
from base.models import AbstractBaseModel
from base.support_functions.base import bytes_to_base58
from contracts.models import Contract
from backend.consts import DEFAULT_CRYPTO_ADDRESS, NETWORK_NAMES
from networks.models import (
Transaction,
CustomRpcProvider,
NearRpcProvider,
)
from networks.types import HASH_LIKE
| 29.495575 | 79 | 0.611461 |
6c40a91da29b8a959cf350b71661cacacc596d6d | 494 | py | Python | practise/remove_zero.py | mengyangbai/leetcode | e7a6906ecc5bce665dec5d0f057b302a64d50f40 | [
"MIT"
] | null | null | null | practise/remove_zero.py | mengyangbai/leetcode | e7a6906ecc5bce665dec5d0f057b302a64d50f40 | [
"MIT"
] | null | null | null | practise/remove_zero.py | mengyangbai/leetcode | e7a6906ecc5bce665dec5d0f057b302a64d50f40 | [
"MIT"
] | null | null | null |
if __name__ == "__main__":
a = Solution()
nums = [0]
a.moveZeroes(nums) | 27.444444 | 74 | 0.465587 |
6c42601ba0916dd0c025e30a21fda4322eb4b154 | 2,838 | py | Python | scripts/train_agent.py | weepingwillowben/reward-surfaces | f27211faf3784df3305972b7cad65002fd57d7bf | [
"MIT"
] | null | null | null | scripts/train_agent.py | weepingwillowben/reward-surfaces | f27211faf3784df3305972b7cad65002fd57d7bf | [
"MIT"
] | null | null | null | scripts/train_agent.py | weepingwillowben/reward-surfaces | f27211faf3784df3305972b7cad65002fd57d7bf | [
"MIT"
] | 2 | 2021-10-03T14:51:38.000Z | 2021-11-10T02:54:26.000Z | import argparse
from reward_surfaces.agents.make_agent import make_agent
import torch
import json
import os
from glob import glob
if __name__ == "__main__":
main()
| 41.130435 | 174 | 0.65821 |
6c43b369587320014577c2dea259fb1b216358eb | 103 | py | Python | tests/test_ladder.py | devonwa/ladder2x | a8604fb61eaa193d9a6e0239474a6c0af1bc2b49 | [
"Unlicense"
] | null | null | null | tests/test_ladder.py | devonwa/ladder2x | a8604fb61eaa193d9a6e0239474a6c0af1bc2b49 | [
"Unlicense"
] | null | null | null | tests/test_ladder.py | devonwa/ladder2x | a8604fb61eaa193d9a6e0239474a6c0af1bc2b49 | [
"Unlicense"
] | null | null | null | """Tests on the base ladder structure."""
import pytest
if __name__ == "__main__":
pytest.main()
| 14.714286 | 41 | 0.669903 |
6c441485e7e7ad06c0126fe73345924ccb66fe07 | 390 | py | Python | courses/urls.py | office-for-students/wagtail-CMS | 98789c279edf48f2bbedb5415437da3317f0e12b | [
"MIT"
] | 4 | 2019-06-04T07:18:44.000Z | 2020-06-15T22:27:36.000Z | courses/urls.py | office-for-students/wagtail-CMS | 98789c279edf48f2bbedb5415437da3317f0e12b | [
"MIT"
] | 38 | 2019-05-09T13:14:56.000Z | 2022-03-12T00:54:57.000Z | courses/urls.py | office-for-students/wagtail-CMS | 98789c279edf48f2bbedb5415437da3317f0e12b | [
"MIT"
] | 3 | 2019-09-26T14:32:36.000Z | 2021-05-06T15:48:01.000Z | from django.conf.urls import url
from django.urls import path
from courses.views import courses_detail
from courses.views.translate import get_translations
urlpatterns = [
url(r'(?P<institution_id>[\w\-]+?)/(?P<course_id>[\w\-\~\$()]+?)/(?P<kis_mode>[\w\-]+?)/', courses_detail,
name='courses_detail'),
path('translations/', get_translations, name='course_translation')
]
| 32.5 | 110 | 0.697436 |
6c44a6f087fd346f5832a3d385363862360f4ae8 | 447 | py | Python | opencypher/tests/ast/test_ordering.py | globality-corp/opencypher | b60bf526fb6d5ea6c731aab867f714f3e10f629b | [
"Apache-2.0"
] | 6 | 2019-01-31T18:55:46.000Z | 2020-12-02T14:53:45.000Z | opencypher/tests/ast/test_ordering.py | globality-corp/opencypher | b60bf526fb6d5ea6c731aab867f714f3e10f629b | [
"Apache-2.0"
] | 1 | 2020-12-04T00:18:20.000Z | 2020-12-04T00:18:20.000Z | opencypher/tests/ast/test_ordering.py | globality-corp/opencypher | b60bf526fb6d5ea6c731aab867f714f3e10f629b | [
"Apache-2.0"
] | 1 | 2019-03-17T03:46:26.000Z | 2019-03-17T03:46:26.000Z | from hamcrest import assert_that, equal_to, is_
from opencypher.ast import Expression, NonEmptySequence, Order, SortItem, SortOrder
| 23.526316 | 83 | 0.590604 |
6c46983292689e2b0a8072d0c4aba99c6bfefd5f | 444 | py | Python | TD3/test.py | chenoly/DRL-MindSpore | 7e3434f2ca326a76d150903fd2ed8e8a32de5cea | [
"MIT"
] | null | null | null | TD3/test.py | chenoly/DRL-MindSpore | 7e3434f2ca326a76d150903fd2ed8e8a32de5cea | [
"MIT"
] | null | null | null | TD3/test.py | chenoly/DRL-MindSpore | 7e3434f2ca326a76d150903fd2ed8e8a32de5cea | [
"MIT"
] | null | null | null | from Model import Critic
from mindspore import Tensor
from mindspore import load_param_into_net
import copy
C1 = Critic(state_dim=2, action_dim=1)
C2 = Critic(state_dim=2, action_dim=1)
# C1.load_parameter_slice(C2.parameters_dict())
# load_param_into_net(C1, C2.parameters_dict())
c1_ = C1.parameters_dict()
c2_ = C2.parameters_dict()
for p, p1 in zip(c1_, c2_):
print(Tensor(c1_[p]))
print(Tensor(c2_[p1]))
print(c2_[p1].clone())
| 29.6 | 47 | 0.747748 |
6c46b6f196085ed15758fd855c1d14b7c05e52f5 | 351 | py | Python | reflectivipy/wrappers/expr_flatwrapper.py | StevenCostiou/reflectivipy | 750ed93cfb463304958e590d895c76169caa4b98 | [
"MIT"
] | 10 | 2019-01-18T17:45:18.000Z | 2019-10-05T08:58:17.000Z | reflectivipy/wrappers/expr_flatwrapper.py | StevenCostiou/reflectivipy | 750ed93cfb463304958e590d895c76169caa4b98 | [
"MIT"
] | null | null | null | reflectivipy/wrappers/expr_flatwrapper.py | StevenCostiou/reflectivipy | 750ed93cfb463304958e590d895c76169caa4b98 | [
"MIT"
] | null | null | null | from .flatwrapper import FlatWrapper
| 29.25 | 74 | 0.68661 |
6c489fd8b4623ac06e1c59f92467d3fce08e9f03 | 1,742 | py | Python | cricdb_data.py | ravi2013167/coursera-site | e78f10c9fa941a834f83853479ea3ee67eeacc64 | [
"MIT"
] | null | null | null | cricdb_data.py | ravi2013167/coursera-site | e78f10c9fa941a834f83853479ea3ee67eeacc64 | [
"MIT"
] | null | null | null | cricdb_data.py | ravi2013167/coursera-site | e78f10c9fa941a834f83853479ea3ee67eeacc64 | [
"MIT"
] | null | null | null | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from cricdb_setup import Team, Player, Base, Batsman, Bowler, Fielder, PlayerStrength, PlayerWeakness, PlayerMoment, Video
# Connect to the SQLite database whose schema is declared in cricdb_setup.
engine = create_engine('sqlite:///cricdb.db')
# Bind the engine to the metadata of the Base class so that the
# declaratives can be accessed through a DBSession instance
Base.metadata.bind = engine
DBSession = sessionmaker(bind=engine)
# A DBSession() instance establishes all conversations with the database
# and represents a "staging zone" for all the objects loaded into the
# database session object. Any change made against the objects in the
# session won't be persisted into the database until you call
# session.commit(). If you're not happy about the changes, you can
# revert all of them back to the last commit by calling
# session.rollback()
session = DBSession()
# Seed a team record.
Team1 = Team(id = 1, name='India')
session.add(Team1)
session.commit()
# Create dummy player
Player1 = Player(id = 1, team_id = 1, name="Virat Kohli", country="India", info='Born Nov 05, 1988 (28 years) Birth Place Delhi Nickname Kohli Height 5 ft 9 in (175 cm) Role Batsman Batting Style Right Handed Bat Bowling Style Right-arm medium', career='blank', batting_style='blank', bowling_style='blank',
picture='vk.jpg')
session.add(Player1)
session.commit()
# Seed a batsman technique profile and a linked coaching video.
Batsman1 = Batsman(id=1, stance_type="front on", foot_position="front foot", shot="straight drive")
session.add(Batsman1)
session.commit()
Video1 = Video(id=1, video_type='batsman', video_name='front on front foot straight drive', video_url='google.com')
session.add(Video1)
session.commit()
print ("added menu items!")
| 40.511628 | 308 | 0.74225 |
6c4921ee958b3c93f23ee76186c1ec8331428083 | 1,006 | py | Python | catalog/bindings/gmd/dq_evaluation_method_type_code_property_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/gmd/dq_evaluation_method_type_code_property_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/gmd/dq_evaluation_method_type_code_property_type.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass, field
from typing import Optional, Union
from bindings.gmd.dq_evaluation_method_type_code import DqEvaluationMethodTypeCode
from bindings.gmd.nil_reason_enumeration_value import NilReasonEnumerationValue
__NAMESPACE__ = "http://www.isotc211.org/2005/gmd"
| 32.451613 | 82 | 0.667992 |
6c4c00831838cc942a656d3b8ca70c1fdf886a13 | 3,964 | py | Python | spark/ReqTwisted.py | wensheng/spark | ab47107d000f0670f4cfe131637f72471a04cfb2 | [
"MIT"
] | null | null | null | spark/ReqTwisted.py | wensheng/spark | ab47107d000f0670f4cfe131637f72471a04cfb2 | [
"MIT"
] | null | null | null | spark/ReqTwisted.py | wensheng/spark | ab47107d000f0670f4cfe131637f72471a04cfb2 | [
"MIT"
] | null | null | null | #import time
from spark.ReqBase import ReqBase
| 29.362963 | 89 | 0.688951 |
6c4cbca2cb07bcccddf7a558df7b93567d90c79c | 11,093 | py | Python | alpha_transform/AlphaTransformUtility.py | michaelriedl/alpha-transform | add5818b168551cb0c2138c65101c9cdac2bf3d9 | [
"MIT"
] | 13 | 2016-12-21T03:25:57.000Z | 2022-03-15T03:25:04.000Z | alpha_transform/AlphaTransformUtility.py | michaelriedl/alpha-transform | add5818b168551cb0c2138c65101c9cdac2bf3d9 | [
"MIT"
] | 4 | 2020-07-11T09:49:51.000Z | 2021-12-03T07:07:34.000Z | alpha_transform/AlphaTransformUtility.py | michaelriedl/alpha-transform | add5818b168551cb0c2138c65101c9cdac2bf3d9 | [
"MIT"
] | 7 | 2018-09-23T10:58:24.000Z | 2021-09-05T01:13:57.000Z | r"""
This module contains several utility functions which can be used e.g.
for thresholding the alpha-shearlet coefficients or for using the
alpha-shearlet transform for denoising.
Finally, it also contains the functions :func:`my_ravel` and :func:`my_unravel`
which can be used to convert the alpha-shearlet coefficients into a
1-dimensional vector and back. This is in particular convenient for the
subsampled transform, where this conversion is not entirely trivial, since the
different "coefficient images" have varying dimensions.
"""
import os.path
import math
import numpy as np
import numexpr as ne
import scipy.ndimage
def find_free_file(file_template):
    r"""
    Return the first filename produced by *file_template* that does not
    exist on disk yet.

    The template is filled with the integers 0, 1, 2, ... in turn
    (``file_template.format(0)``, ``file_template.format(1)``, ...) and
    the first resulting path that is not an existing file is returned.

    **Required Parameter**

    :param string file_template:
        A string whose ``format()`` method accepts a single integer
        argument, e.g. ``'/home/test_{0:0>2d}.txt'``, which would make
        this function check `/home/test_00.txt`, `/home/test_01.txt`, ...

    **Return value**

    :return:
        ``file_template.format(i)`` for the smallest ``i`` whose file
        does not yet exist.
    """
    index = 0
    candidate = file_template.format(index)
    while os.path.isfile(candidate):
        index += 1
        candidate = file_template.format(index)
    return candidate
def denoise(img, trafo, noise_lvl, multipliers=None):
    r"""
    Given a noisy image :math:`\tilde f`, this function performs a denoising
    procedure based on shearlet thresholding. More precisely:

    #. A scale dependent threshold parameter :math:`c=(c_j)_j` is calculated
       according to :math:`c_j=m_j\cdot \lambda / \sqrt{N_1\cdot N_2}`, where
       :math:`m_j` is a multiplier for the jth scale, :math:`\lambda` is the
       noise level present in the image :math:`\tilde f` and
       :math:`N_1\times N_2` are its dimensions.
    #. The alpha-shearlet transform of :math:`\tilde f` is calculated
       using ``trafo``.
    #. Hard thresholding with threshold parameter (cutoff) :math:`c` is
       performed on alpha-shearlet coefficients, i.e., for each scale ``j``,
       each of the coefficients belonging to the jth scale is set to zero if
       its absolute value is smaller than :math:`c_j` and otherwise it is
       left unchanged.
    #. The (pseudo)-inverse of the alpha-shearlet transform is applied to the
       thresholded coefficients and this reconstruction is the return value
       of the function.

    **Required parameters**

    :param numpy.ndarray img:
        The image (2 dimensional array) that should be denoised.

    :param trafo:
        An object of class :class:`AlphaTransform.AlphaShearletTransform`.
        This object is used to calculate the (inverse) alpha-shearlet
        transform during the denoising procedure.
        The dimension of the transform and of ``img`` need to coincide.

    :param float noise_lvl:
        The (presumed) noise level present in ``img``.
        If ``img = img_clean + noise``, then ``noise_lvl`` should be
        approximately equal to the :math:`\ell^2` norm of ``noise``.
        In particular, if ``im`` is obtained by adding Gaussian noise with
        standard deviation :math:`\sigma` (in each entry) to a noise free
        image :math:`f`, then the noise level :math:`\lambda` is given by
        :math:`\lambda= \sigma\cdot \sqrt{N_1\cdot N_2}`; see also
        :func:`AdaptiveAlpha.optimize_denoising`.

    **Keyword parameter**

    :param list multipliers:
        A list of multipliers (floats) for each scale. ``multipliers[j]``
        determines the value of :math:`m_j` and thus of the cutoff
        :math:`c_j = m_j \cdot \lambda / \sqrt{N_1 \cdot N_2}` for scale
        ``j``. In particular, ``len(multipliers)`` needs to be equal to the
        number of the scales of ``trafo``.

    **Return value**

    :return:
        The denoised image, i.e., the result of the denoising procedure
        described above.
    """
    # Lazily compute the normalized alpha-shearlet coefficients of the image.
    coeff_gen = trafo.transform_generator(img, do_norm=True)

    if multipliers is None:
        # multipliers = [1] + ([2.5] * (trafo.num_scales - 1)) + [5]
        # Default: multiplier 3 for every scale plus a trailing entry of 4.
        # The extra entry presumably covers the finest/high-pass part --
        # TODO confirm against scale_gen's indexing.
        multipliers = [3] * trafo.num_scales + [4]

    width = trafo.width
    height = trafo.height

    # Scale-dependent cutoffs: c_j = m_j * lambda / sqrt(N1 * N2).
    thresh_lvls = [multi * noise_lvl / math.sqrt(width * height)
                   for multi in multipliers]

    # Hard thresholding: zero every coefficient below its scale's cutoff.
    # NOTE(review): `scale_gen` is defined elsewhere in this module; the
    # `scale + 1` offset suggests it yields -1 for the low-pass part --
    # verify before relying on it.
    thresh_coeff = (coeff * (np.abs(coeff) >= thresh_lvls[scale + 1])
                    for (coeff, scale) in zip(coeff_gen, scale_gen(trafo)))

    # Reconstruct a real-valued image from the thresholded coefficients.
    recon = trafo.inverse_transform(thresh_coeff, real=True, do_norm=True)

    return recon
def image_load(path):
    r"""
    Given a '.npy' or '.png' file, this function loads the file and returns
    its content as a two-dimensional :class:`numpy.ndarray` of :class:`float`
    values.

    For '.png' images, the pixel values are normalized to be between 0 and 1
    (instead of between 0 and 255) and color images are converted to
    grey-scale.

    **Required parameter**

    :param string path:
        Path to the image to be converted, either of a '.png' or '.npy' file.

    **Return value**

    :return:
        The loaded image as a two-dimensional :class:`numpy.ndarray`.

    :raises ValueError:
        If ``path`` does not end in '.npy' or '.png'.
    """
    # os.path.splitext correctly yields an empty extension for dot-less
    # paths, unlike the previous `path[path.rfind('.'):]` slicing which
    # produced the last character of the path when no dot was present.
    _, image_extension = os.path.splitext(path)
    if image_extension == '.npy':
        return np.array(np.load(path), dtype='float64')
    elif image_extension == '.png':
        # NOTE(review): scipy.ndimage.imread was deprecated in SciPy 1.0 and
        # removed in SciPy 1.2, so this branch fails on modern SciPy.
        # Consider imageio.imread (with explicit grey-scale conversion) as a
        # replacement.
        return np.array(scipy.ndimage.imread(path, flatten=True) / 255.0,
                        dtype='float64')
    else:
        raise ValueError("This function can only load .png or .npy files.")
def _print_listlist(listlist):
for front, back, l in zip(['['] + ([' '] * (len(listlist) - 1)),
([''] * (len(listlist) - 1)) + [']'],
listlist):
print(front + str(l) + back)
def my_ravel(coeff):
    r"""
    Flatten a collection of coefficient arrays into one 1-d numpy array.

    The subsampled alpha-shearlet transform yields a list of *differently
    sized* two-dimensional "coefficient images", while the fully sampled
    transform yields a single three-dimensional array; either (also as a
    generator) can be passed here.  Each piece is flattened and all pieces
    are concatenated in order.

    .. note::

        Use the companion function :func:`my_unravel` to invert this
        conversion: ``my_unravel(my_trafo, my_ravel(coeff))`` reproduces
        ``coeff`` (up to list-versus-generator differences), whenever
        ``coeff`` came from ``my_trafo.transform(im)``.

    **Required parameter**

    :param list coeff:
        A list (or a generator) containing/producing two-dimensional
        numpy arrays.

    **Return value**

    :return:
        A one-dimensional :class:`numpy.ndarray` from which **coeff** can
        be reconstructed.
    """
    flat_pieces = [np.ravel(piece) for piece in coeff]
    return np.concatenate(flat_pieces)
def my_unravel(trafo, coeff):
    r"""
    Invert :func:`my_ravel`, restoring one array per spectrogram.

    See the documentation of :func:`my_ravel` for more details.

    **Required parameters**

    :param trafo:
        An object of class :class:`AlphaTransform.AlphaShearletTransform`;
        its ``spectrograms`` determine the shape of each restored piece.

    :param numpy.ndarray coeff:
        A one-dimensional numpy array obtained via ``my_ravel(coeff_unrav)``,
        where ``coeff_unrav`` has the same dimensions as the output of
        ``trafo.transform(im)`` for some image ``im``.

    **Return value**

    :return:
        A generator producing the same values as ``coeff_unrav``, i.e.,
        an "unravelled" version of ``coeff``.
    """
    specs = trafo.spectrograms
    shapes = [s.shape for s in specs]
    boundaries = np.cumsum([s.size for s in specs])
    # np.split leaves a trailing empty chunk; zip() against `shapes`
    # silently drops it.
    pieces = np.split(coeff, boundaries)
    return (piece.reshape(shape) for shape, piece in zip(shapes, pieces))
| 34.557632 | 79 | 0.63166 |
6c50ce676f3a6dc75c4d1900f6d996ce7fd69ed7 | 2,692 | py | Python | tests/provider/dwd/radar/test_api_latest.py | waltherg/wetterdienst | 3c5c63b5b8d3e19511ad789bb499bdaa9b1976d9 | [
"MIT"
] | 1 | 2021-09-01T12:53:09.000Z | 2021-09-01T12:53:09.000Z | tests/provider/dwd/radar/test_api_latest.py | waltherg/wetterdienst | 3c5c63b5b8d3e19511ad789bb499bdaa9b1976d9 | [
"MIT"
] | null | null | null | tests/provider/dwd/radar/test_api_latest.py | waltherg/wetterdienst | 3c5c63b5b8d3e19511ad789bb499bdaa9b1976d9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2018-2021, earthobservations developers.
# Distributed under the MIT License. See LICENSE for more info.
import re
from datetime import datetime
import pytest
from tests.provider.dwd.radar import station_reference_pattern_unsorted
from wetterdienst.provider.dwd.radar import DwdRadarValues
from wetterdienst.provider.dwd.radar.metadata import DwdRadarDate, DwdRadarParameter
from wetterdienst.provider.dwd.radar.sites import DwdRadarSite
from wetterdienst.util.datetime import round_minutes
| 30.247191 | 108 | 0.69688 |
6c5274b4da8bf2db8410e4efcd81dcd874ad4000 | 710 | py | Python | tests/conftest.py | transferwise/cloudflare-exporter | d5efd4e9068bf9896a16ec6913d3345e3754d7c8 | [
"Apache-2.0"
] | 1 | 2021-08-06T15:09:26.000Z | 2021-08-06T15:09:26.000Z | tests/conftest.py | transferwise/cloudflare-exporter | d5efd4e9068bf9896a16ec6913d3345e3754d7c8 | [
"Apache-2.0"
] | 16 | 2021-09-20T04:10:29.000Z | 2022-03-14T04:26:01.000Z | tests/conftest.py | transferwise/cloudflare-exporter | d5efd4e9068bf9896a16ec6913d3345e3754d7c8 | [
"Apache-2.0"
] | 2 | 2021-08-21T18:48:15.000Z | 2021-11-19T16:52:25.000Z | # -*- coding: utf-8 -*-
import pytest
import json
from pathlib import Path
| 26.296296 | 87 | 0.712676 |
6c5353e05ae0337f97754129d22ee251e890227f | 4,529 | py | Python | scripts/delay_analysis.py | welvin21/pysimt | 6250b33dc518b3195da4fc9cc8d32ba7ada958c0 | [
"MIT"
] | 34 | 2020-09-21T10:49:57.000Z | 2022-01-08T04:50:42.000Z | scripts/delay_analysis.py | welvin21/pysimt | 6250b33dc518b3195da4fc9cc8d32ba7ada958c0 | [
"MIT"
] | 2 | 2021-01-08T03:52:51.000Z | 2021-09-10T07:45:05.000Z | scripts/delay_analysis.py | welvin21/pysimt | 6250b33dc518b3195da4fc9cc8d32ba7ada958c0 | [
"MIT"
] | 5 | 2021-04-23T09:30:51.000Z | 2022-01-09T08:40:45.000Z | #!/usr/bin/env python
import os
import sys
import glob
import argparse
from pathlib import Path
from collections import defaultdict
from hashlib import sha1
import numpy as np
import sacrebleu
import tabulate
from pysimt.metrics.simnmt import AVPScorer, AVLScorer, CWMScorer, CWXScorer
"""This script should be run from within the parent folder where each pysimt
experiment resides."""
if __name__ == '__main__':
parser = argparse.ArgumentParser(
prog='delay-analysis',
formatter_class=argparse.RawDescriptionHelpFormatter,
description="Compute delay metrics for multiple runs",
argument_default=argparse.SUPPRESS)
parser.add_argument('-r', '--ref-file', required=True, type=str,
help='The reference file for BLEU evaluation.')
parser.add_argument('act_files', nargs='+',
help='List of action files')
args = parser.parse_args()
refs = [read_lines_from_file(args.ref_file)]
test_set = Path(args.ref_file).name.split('.')[0]
results = {}
# Automatically fetch .acts files
acts = [Path(p) for p in args.act_files]
# unique experiments i.e. nmt and mmt for example
exps = set([p.parent for p in acts])
scorers = [
AVPScorer(add_trg_eos=False),
AVLScorer(add_trg_eos=False),
#CWMScorer(add_trg_eos=False),
#CWXScorer(add_trg_eos=False),
]
for exp in exps:
# get actions for this experiment
exp_acts = [p for p in acts if p.parent == exp]
parts = [p.name.split('.') for p in exp_acts]
# different run prefixes
runs = list(set([p[0] for p in parts]))
# type of decodings i.e. wait if diff, waitk, etc.
types = list(set([p[2] for p in parts]))
# Evaluate baseline consecutive systems as well
baseline_bleus = []
for run in runs:
hyp_fname = f'{exp}/{run}.{test_set}.gs'
if os.path.exists(hyp_fname):
bleu = compute_bleu(Path(hyp_fname), refs)
baseline_bleus.append(bleu)
else:
baseline_bleus.append(-1)
results[exp.name] = {m.name: '0' for m in scorers}
results[exp.name]['Q2AVP'] = '0'
baseline_bleus = np.array(baseline_bleus)
results[exp.name]['BLEU'] = f'{baseline_bleus.mean():2.2f} ({baseline_bleus.std():.4f})'
# Evaluate each decoding type and keep multiple run scores
for typ in types:
scores = defaultdict(list)
for run in runs:
act_fname = f'{exp}/{run}.{test_set}.{typ}.acts'
hyp_fname = f'{exp}/{run}.{test_set}.{typ}.gs'
# Compute BLEU
bleu = compute_bleu(Path(hyp_fname), refs)
scores['BLEU'].append(bleu)
if os.path.exists(act_fname):
# Compute delay metrics
run_scores = [s.compute_from_file(act_fname) for s in scorers]
for sc in run_scores:
scores[sc.name].append(sc.score)
scores['Q2AVP'] = bleu / scores['AVP'][-1]
# aggregate
scores = {k: np.array(v) for k, v in scores.items()}
means = {k: v.mean() for k, v in scores.items()}
sdevs = {k: v.std() for k, v in scores.items()}
str_scores = {m: f'{means[m]:4.2f} ({sdevs[m]:.2f})' for m in scores.keys()}
results[f'{exp.name}_{typ}'] = str_scores
headers = ['Name'] + [sc.name for sc in scorers] + ['BLEU', 'Q2AVP']
results = [[name, *[scores[key] for key in headers[1:]]] for name, scores in results.items()]
# alphabetical sort
results = sorted(results, key=lambda x: x[0].rsplit('_', 1)[-1])
# print
print(tabulate.tabulate(results, headers=headers))
| 34.310606 | 97 | 0.59108 |
6c56a8517956b8fdd74335b60fe24a921ed77b5c | 3,713 | py | Python | canvas_course_site_wizard/views.py | Harvard-University-iCommons/django-canvas-course-site-wizard | 0210849e959407e5a850188f50756eb69b9a4dc2 | [
"MIT"
] | null | null | null | canvas_course_site_wizard/views.py | Harvard-University-iCommons/django-canvas-course-site-wizard | 0210849e959407e5a850188f50756eb69b9a4dc2 | [
"MIT"
] | 5 | 2018-05-10T19:49:43.000Z | 2021-01-29T19:39:34.000Z | canvas_course_site_wizard/views.py | Harvard-University-iCommons/django-canvas-course-site-wizard | 0210849e959407e5a850188f50756eb69b9a4dc2 | [
"MIT"
] | null | null | null | import logging
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.shortcuts import redirect
from .controller import (
create_canvas_course,
start_course_template_copy,
finalize_new_canvas_course,
get_canvas_course_url
)
from .mixins import CourseSiteCreationAllowedMixin
from icommons_ui.mixins import CustomErrorPageMixin
from .exceptions import NoTemplateExistsForSchool
from .models import CanvasCourseGenerationJob
from braces.views import LoginRequiredMixin
logger = logging.getLogger(__name__)
| 51.569444 | 121 | 0.736062 |
6c58884fde7690dcd1123dcef567073872ba2ad9 | 7,389 | py | Python | brie/utils/count.py | huangyh09/brie | 59563baafcdb95d1d75a81203e5cc29983f66c2f | [
"Apache-2.0"
] | 38 | 2017-01-06T00:18:46.000Z | 2022-01-25T19:44:10.000Z | brie/utils/count.py | huangyh09/brie | 59563baafcdb95d1d75a81203e5cc29983f66c2f | [
"Apache-2.0"
] | 28 | 2017-01-11T09:12:57.000Z | 2022-02-14T14:53:48.000Z | brie/utils/count.py | huangyh09/brie | 59563baafcdb95d1d75a81203e5cc29983f66c2f | [
"Apache-2.0"
] | 12 | 2018-02-13T20:23:00.000Z | 2022-01-05T18:39:19.000Z | import sys
import numpy as np
from .sam_utils import load_samfile, fetch_reads
def _check_SE_event(gene):
"""Check SE event"""
if (len(gene.trans) != 2 or
gene.trans[0].exons.shape[0] != 3 or
gene.trans[1].exons.shape[0] != 2 or
np.mean(gene.trans[0].exons[[0, 2], :] ==
gene.trans[1].exons) != 1):
return False
else:
return True
def _get_segment(exons, read):
"""Get the length of segments by devidinig a read into exons.
The segments include one for each exon and two edges.
"""
if read is None:
return None
_seglens = [0] * (exons.shape[0] + 2)
_seglens[0] = np.sum(read.positions < exons[0, 0])
_seglens[-1] = np.sum(read.positions > exons[-1, -1])
for i in range(exons.shape[0]):
_seglens[i + 1] = np.sum(
(read.positions >= exons[i, 0]) * (read.positions <= exons[i, 1]))
return _seglens
def check_reads_compatible(transcript, reads, edge_hang=10, junc_hang=2):
    """Check if reads are compatible with a transcript.

    A read is flagged incompatible when (1) too many of its aligned
    positions fall outside the transcript's exons, (2) it overhangs a
    transcript edge with too little exonic support, or (3) its coverage
    pattern implies an internal exon was skipped.

    :param transcript: transcript object exposing an ``exons`` array (n x 2).
    :param reads: list of aligned reads, each exposing ``positions``.
    :param edge_hang: minimum exonic bases required for edge-overhanging reads.
    :param junc_hang: tolerance (in bases) used for the junction/skip checks.
    :return: boolean numpy array, one entry per read (True = compatible).
    """
    is_compatible = [True] * len(reads)
    for i in range(len(reads)):
        # Per-segment position counts: [upstream, exon_1 .. exon_n, downstream]
        # (positions in intronic gaps are counted in no segment).
        _segs = _get_segment(transcript.exons, reads[i])

        # (1) check mismatch to regions not in this transcript: at least
        # junc_hang aligned bases fall outside all segments.
        if len(reads[i].positions) - sum(_segs) >= junc_hang:
            is_compatible[i] = False
            continue

        # (2) check if edge hang is too short: the read pokes past an edge
        # while having fewer than edge_hang bases inside the exons.
        if (_segs[0] > 0 or _segs[-1] > 0) and sum(_segs[1:-1]) < edge_hang:
            is_compatible[i] = False
            continue

        # (3) check if an internal exon has been skipped: both neighbouring
        # exon segments hold >= junc_hang bases while exon j-1 itself is
        # under-covered by at least junc_hang bases relative to its length.
        if len(_segs) > 4:
            for j in range(2, len(_segs) - 2):
                if (_segs[j-1] >= junc_hang and _segs[j+1] >= junc_hang and
                    transcript.exons[j-1, 1] - transcript.exons[j-1, 0] -
                    _segs[j] >= junc_hang):
                    is_compatible[i] = False
                    break

    return np.array(is_compatible)
def SE_reads_count(gene, samFile, edge_hang=10, junc_hang=2, **kwargs):
    """Count the categorical reads mapped to a splicing event.

    Extra keyword arguments are forwarded to ``fetch_reads``, e.g.
    rm_duplicate=True, inner_only=True,
    mapq_min=0, mismatch_max=5, rlen_min=1, is_mated=True

    :param gene: gene object describing an exon-skipping (SE) event.
    :param samFile: alignment file handle understood by ``fetch_reads``.
    :param edge_hang: minimum exonic anchor for edge-overhanging reads.
    :param junc_hang: junction tolerance (see ``check_reads_compatible``).
    :return: boolean matrix with one row per read; column 0 marks
        compatibility with isoform 1 (inclusion), column 1 with
        isoform 2 (exclusion).
    """
    # Check SE event; NOTE: exits the whole process on malformed input.
    if _check_SE_event(gene) == False:
        print("This is not exon-skipping event!")
        exit()

    # Fetch reads (TODO: customise fetch_reads function, e.g., FLAG)
    reads = fetch_reads(samFile, gene.chrom, gene.start, gene.stop, **kwargs)

    # Paired reads: both mates must be compatible with an isoform
    # (element-wise boolean multiplication acts as a logical AND).
    is_isoform1 = check_reads_compatible(gene.trans[0], reads["reads1"])
    is_isoform2 = check_reads_compatible(gene.trans[1], reads["reads1"])
    if len(reads["reads2"]) > 0:
        is_isoform1 *= check_reads_compatible(gene.trans[0], reads["reads2"])
        is_isoform2 *= check_reads_compatible(gene.trans[1], reads["reads2"])

    # Unpaired mates ("reads1u"/"reads2u") are judged on their own and
    # appended to the compatibility vectors.
    is_isoform1 = np.append(is_isoform1,
        check_reads_compatible(gene.trans[0], reads["reads1u"]))
    is_isoform2 = np.append(is_isoform2,
        check_reads_compatible(gene.trans[1], reads["reads1u"]))

    is_isoform1 = np.append(is_isoform1,
        check_reads_compatible(gene.trans[0], reads["reads2u"]))
    is_isoform2 = np.append(is_isoform2,
        check_reads_compatible(gene.trans[1], reads["reads2u"]))

    # return Reads matrix (reads x isoforms)
    Rmat = np.zeros((len(is_isoform1), 2), dtype=bool)
    Rmat[:, 0] = is_isoform1
    Rmat[:, 1] = is_isoform2
    return Rmat
def SE_probability(gene, rlen=75, edge_hang=10, junc_hang=2):
    """Get read categorical probability of each isoform.

    In an exon-skipping (SE) event there are two isoforms:
    isoform1 for exon inclusion and isoform2 for exon exclusion.
    Only single-end reads are treated here; for paired-end reads the most
    informative mate is used, namely the mate mapped to the least number
    of isoform(s).

    isoform1: l1 + l2 + l3 + rlen - 2 * edge_hang
        p1: l2 + rlen - 2 * junc_hang
        p3: l1 + l3 - 2 * edge_hang + 2 * junc_hang
    isoform2: l1 + l3 + rlen - 2 * edge_hang
        p1: rlen - 2 * junc_hang
        p3: l1 + l3 - 2 * edge_hang + 2 * junc_hang

    :param gene: gene object describing an SE event (two transcripts,
        validated by ``_check_SE_event``).
    :param rlen: read length.
    :param edge_hang: minimum overhang at transcript edges.
    :param junc_hang: minimum overhang across junctions.
    :return: 2x3 numpy array; rows are isoforms, columns are the read
        categories (isoform1-specific, isoform2-specific, ambiguous).
    """
    # Validate that the gene really is a canonical exon-skipping event.
    if not _check_SE_event(gene):
        # BUGFIX: "%(gene.geneID)" previously sat *inside* the string
        # literal, so the gene id was never interpolated into the message.
        print("This is not exon-skipping event: %s!" % gene.geneID)
        # NOTE(review): exiting the interpreter from a library function is
        # drastic; raising ValueError would be friendlier to callers.
        exit()

    # Exon lengths of the inclusion isoform (l2 is the skipped exon).
    l1, l2, l3 = gene.trans[0].exons[:, 1] - gene.trans[0].exons[:, 0]
    prob_mat = np.zeros((2, 3))

    # Isoform 1 (inclusion)
    len_isoform1 = l1 + l2 + l3 + rlen - 2 * edge_hang
    prob_mat[0, 0] = (l2 + rlen - 2 * junc_hang) / len_isoform1
    prob_mat[0, 2] = (l1 + l3 - 2 * edge_hang + 2 * junc_hang) / len_isoform1

    # Isoform 2 (exclusion)
    len_isoform2 = l1 + l3 + rlen - 2 * edge_hang
    prob_mat[1, 1] = (rlen - 2 * junc_hang) / len_isoform2
    prob_mat[1, 2] = (l1 + l3 - 2 * edge_hang + 2 * junc_hang) / len_isoform2

    return prob_mat
def SE_effLen(gene, rlen=75, edge_hang=10, junc_hang=2):
    """Get the effective length matrix for the three read categories of an
    exon-skipping (SE) event.

    Two isoforms:
        isoform1 -- exon inclusion; isoform2 -- exon exclusion.
    Three read groups:
        group1 -- uniquely from isoform1;
        group2 -- uniquely from isoform2;
        group3 -- ambiguous identity.

    Only single-end reads are treated here; for paired-end reads the most
    informative mate is used, namely the mate mapped to the least number
    of isoform(s).

    isoform1: l1 + l2 + l3 + rlen - 2 * edge_hang
        read group1: l2 + rlen - 2 * junc_hang
        read group3: l1 + l3 - 2 * edge_hang + 2 * junc_hang
    isoform2: l1 + l3 + rlen - 2 * edge_hang
        read group2: rlen - 2 * junc_hang
        read group3: l1 + l3 - 2 * edge_hang + 2 * junc_hang

    :return: 2x3 numpy array of effective segment lengths
        (rows = isoforms, columns = read groups).
    """
    # Validate that the gene really is a canonical exon-skipping event.
    if not _check_SE_event(gene):
        # BUGFIX: "%(gene.geneID)" previously sat *inside* the string
        # literal, so the gene id was never interpolated into the message.
        print("This is not exon-skipping event: %s!" % gene.geneID)
        exit()

    # Exon lengths of the inclusion isoform (l2 is the skipped exon).
    l1, l2, l3 = gene.trans[0].exons[:, 1] - gene.trans[0].exons[:, 0]
    isoLen_mat = np.zeros((2, 3))

    # Per-group effective segment lengths.  (The total isoform lengths
    # documented above are only needed for the commented normalisation.)
    isoLen_mat[0, 0] = l2 + rlen - 2 * junc_hang
    isoLen_mat[1, 1] = rlen - 2 * junc_hang
    isoLen_mat[0, 2] = l1 + l3 - 2 * edge_hang + 2 * junc_hang
    isoLen_mat[1, 2] = l1 + l3 - 2 * edge_hang + 2 * junc_hang

    # prob_mat = isoLen_mat / isoLen_mat.sum(1, keepdims=True)
    return isoLen_mat
| 35.354067 | 79 | 0.603194 |
6c5b5d2beb7892b3713dc1291924921532e74885 | 1,795 | py | Python | encommon/tests/test_times.py | enasisnetwork/encommon-py | c2bb1412171c84fe2917a23b535a6db1b5f523c1 | [
"MIT"
] | null | null | null | encommon/tests/test_times.py | enasisnetwork/encommon-py | c2bb1412171c84fe2917a23b535a6db1b5f523c1 | [
"MIT"
] | null | null | null | encommon/tests/test_times.py | enasisnetwork/encommon-py | c2bb1412171c84fe2917a23b535a6db1b5f523c1 | [
"MIT"
] | null | null | null | #==============================================================================#
# Enasis Network Common Libraries #
# Python Functions Time Processing #
#==============================================================================#
# Primary Functions for Time Processing #
# : - - - - - - - - - - - - - - - - - - -- - - - - - - - - - - - - - - - - - - #
# : Standard Time Converting timeformat #
#==============================================================================#
#------------------------------------------------------------------------------#
# Primary Functions for Time Processing #
#------------------------------------------------------------------------------#
#
#~~ Standard Time Converting ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Conditionally perform the conversions to and from epoch and timestamp string
#-----------------------------------------------------------------------------
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
#------------------------------------------------------------------------------#
| 54.393939 | 80 | 0.325905 |
6c5bb6b2d92f0865bef01adbf1214af8685dd82e | 2,661 | py | Python | source/dashboard.py | R0htg0r/Automatic-comments-for-Instagram- | 0a4e02d45f02be1462fb44fc6ebf5c8eb11fbd04 | [
"Apache-2.0"
] | 3 | 2021-04-03T19:39:03.000Z | 2021-04-06T13:03:43.000Z | source/dashboard.py | R0htg0r/Automatic-comments-for-Instagram- | 0a4e02d45f02be1462fb44fc6ebf5c8eb11fbd04 | [
"Apache-2.0"
] | null | null | null | source/dashboard.py | R0htg0r/Automatic-comments-for-Instagram- | 0a4e02d45f02be1462fb44fc6ebf5c8eb11fbd04 | [
"Apache-2.0"
] | null | null | null | from colorama import Fore, Back, Style, init
import pyautogui
import time
import os
os.system("mode 120, 30")
Poxtrop()
| 42.238095 | 114 | 0.311161 |
6c5c369d85c41ace1c62ddc67471055b462a3df1 | 1,527 | py | Python | ledshimdemo/display_options.py | RatJuggler/led-shim-effects | 3c63f5f2ce3f35f52e784489deb9212757c18cd2 | [
"MIT"
] | 1 | 2021-04-17T16:18:14.000Z | 2021-04-17T16:18:14.000Z | ledshimdemo/display_options.py | RatJuggler/led-shim-effects | 3c63f5f2ce3f35f52e784489deb9212757c18cd2 | [
"MIT"
] | 12 | 2019-07-26T18:01:56.000Z | 2019-08-31T15:35:17.000Z | ledshimdemo/display_options.py | RatJuggler/led-shim-demo | 3c63f5f2ce3f35f52e784489deb9212757c18cd2 | [
"MIT"
] | null | null | null | import click
from typing import Callable, List
from .effect_parade import AbstractEffectParade
# Click options shared by the display commands (parade style, duration,
# repeat count, brightness, orientation).  Attach them to a command function
# with `add_options(DISPLAY_OPTIONS)`.
DISPLAY_OPTIONS = [
    click.option('-p', '--parade', type=click.Choice(AbstractEffectParade.get_parade_options()),
                 help="How the effects are displayed.", default=AbstractEffectParade.get_default_option(),
                 show_default=True),
    click.option('-d', '--duration', type=click.IntRange(1, 180),
                 help="How long to display each effect for, in seconds (1-180).", default=10, show_default=True),
    click.option('-r', '--repeat', type=click.IntRange(1, 240),
                 help="How many times to run the effects before stopping (1-240).", default=1, show_default=True),
    click.option('-b', '--brightness', type=click.IntRange(1, 10),
                 help="How bright the effects will be (1-10).", default=8, show_default=True),
    click.option('-i', '--invert', is_flag=True, help="Change the display orientation.")
]
def add_options(options: List[click.option]) -> Callable:
    """
    Build a decorator that attaches a list of Click options to a command.
    :param options: Click option decorators to apply.
    :return: Decorator function.
    """
    def _add_options(func: Callable):
        """
        Attach every Click option in *options* to the given function.
        :param func: Command function to decorate.
        :return: The function with all options attached.
        """
        decorated = func
        for apply_option in options[::-1]:
            decorated = apply_option(decorated)
        return decorated
    return _add_options
| 41.27027 | 114 | 0.64833 |
6c5e382a6852be827146dfca1422cff18cd4ad2e | 587 | py | Python | download_data_folder.py | MelvinYin/Defined_Proteins | 75da20be82a47d85d27176db29580ab87d52b670 | [
"BSD-3-Clause"
] | 2 | 2021-01-05T02:55:57.000Z | 2021-04-16T15:49:08.000Z | download_data_folder.py | MelvinYin/Defined_Proteins | 75da20be82a47d85d27176db29580ab87d52b670 | [
"BSD-3-Clause"
] | null | null | null | download_data_folder.py | MelvinYin/Defined_Proteins | 75da20be82a47d85d27176db29580ab87d52b670 | [
"BSD-3-Clause"
] | 1 | 2021-01-05T08:12:38.000Z | 2021-01-05T08:12:38.000Z | import boto3
import os
import tarfile
if __name__ == "__main__":
    # SECURITY(review): AWS credentials are hard-coded below and committed to
    # source control -- they should be revoked and replaced by environment
    # variables or an IAM role (boto3 picks those up automatically).
    s3 = boto3.client('s3', aws_access_key_id="AKIAY6UR252SQUQ3OSWZ",
                      aws_secret_access_key="08LQj"
                      "+ryk9SMojG18vERXKKzhNSYk5pLhAjrIAVX")
    # Download the archive from the 'definedproteins' bucket into the
    # current working directory.
    output_path = "./data.tar.gz"
    with open(output_path, 'wb') as f:
        s3.download_fileobj('definedproteins', "data.tar.gz", f)
    assert os.path.isfile(output_path)
    print("Download succeeded")
    # Unpack the archive in place, then remove the downloaded tarball.
    # NOTE(review): tarfile.extractall on an untrusted archive is vulnerable
    # to path traversal; consider validating member names first.
    tar = tarfile.open(output_path, "r:gz")
    tar.extractall()
    tar.close()
    os.remove(output_path)
6c600ba2b9e8dfbbc98654347a117e7d18a03ded | 8,247 | py | Python | splotch/utils_visium.py | adaly/cSplotch | c79a5cbd155f2cd5bcc1d8b04b1824923feb1442 | [
"BSD-3-Clause"
] | 1 | 2021-12-20T16:13:16.000Z | 2021-12-20T16:13:16.000Z | splotch/utils_visium.py | adaly/cSplotch | c79a5cbd155f2cd5bcc1d8b04b1824923feb1442 | [
"BSD-3-Clause"
] | null | null | null | splotch/utils_visium.py | adaly/cSplotch | c79a5cbd155f2cd5bcc1d8b04b1824923feb1442 | [
"BSD-3-Clause"
] | null | null | null | import os, sys
import logging
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from scipy.ndimage import label
from .utils import watershed_tissue_sections, get_spot_adjacency_matrix
# Read in a series of Loupe annotation files and return the set of all unique categories.
# NOTE: "Undefined"
# Annotataion matrix from Loupe annotation file
# Converts from pseudo-hex indexing of Visium (in which xdim is doubled and odd-indexed)
# rows are offset by 1) to standard array indexing with odd rows implicitly shifted.
# Converts from pseudo-hex indexing of Visium (in which xdim is doubled and odd-indexed)
# rows are offset by 1) to Cartesian coordinates where neighbors are separated by unit distance.
''' Determines connected components by recursively checking neighbors in a hex grid.
bin_oddr_matrix - binary odd-right indexed matrix where 1 indicates annotated spot.
'''
''' Analog of detect_tissue_sections for hexagonally packed ST grids (Visium)
'''
''' Create a boolean vector indicating which spots from the coordinate list belong to the
tissue section being considered (tissue_idx, spots_tissue_section_labeled obtained by
connected component analysis in detect_tissue_sections_hex).
'''
''' Return spot adjacency matrix given a list of coordinates in pseudo-hex:
'''
from scipy.ndimage.measurements import label
from splotch.utils import read_array, filter_arrays, detect_tissue_sections, get_tissue_section_spots
import glob
if __name__ == "__main__":
    # Smoke test on a local Visium dataset.
    # NOTE(review): unique_annots_loupe, read_annot_matrix_loupe,
    # detect_tissue_sections_hex, get_tissue_section_spots_hex and
    # get_spot_adjacency_matrix_hex are not visible in this chunk --
    # presumably defined earlier in this module; confirm.
    annot_files = glob.glob('../data/Visium_test/*.csv')
    aars = unique_annots_loupe(annot_files)
    loupe_file = '../data/Visium_test/V014-CGND-MA-00765-A_loupe_AARs.csv'
    position_file = '../data/Visium_test/V014-CGND-MA-00765-A/outs/spatial/tissue_positions_list.csv'
    annot_frame = read_annot_matrix_loupe(loupe_file, position_file, aars)
    # Column labels are "x_y" strings; parse them into float coordinates.
    array_coordinates_float = np.array([list(map(float, c.split("_"))) for c in annot_frame.columns.values])
    unique_labels, spots_labeled = detect_tissue_sections_hex(array_coordinates_float, True, 600)
    # Visual check of the labeled tissue sections.
    plt.figure()
    plt.imshow(spots_labeled)
    plt.show()
    for tissue_idx in unique_labels:
        tissue_section_spots = get_tissue_section_spots_hex(tissue_idx,array_coordinates_float,
            spots_labeled)
        tissue_section_coordinates_float = array_coordinates_float[tissue_section_spots]
        tissue_section_coordinates_string = ["%.2f_%.2f" % (c[0],c[1]) for c in tissue_section_coordinates_float]
        tissue_section_W = get_spot_adjacency_matrix_hex(tissue_section_coordinates_float)
        # Sanity check: total of the adjacency matrix entries.
        print(np.sum(tissue_section_W))
        df = pd.DataFrame(tissue_section_W, index=tissue_section_coordinates_string,
            columns=tissue_section_coordinates_string)
| 36.0131 | 122 | 0.751789 |
6c609ad8257f94c3be0be69725b48962c792c7f1 | 1,729 | py | Python | floa/routes.py | rsutton/loa | 31ca8cc3f7be011b21f22ed2ce509d135a4b866b | [
"MIT"
] | null | null | null | floa/routes.py | rsutton/loa | 31ca8cc3f7be011b21f22ed2ce509d135a4b866b | [
"MIT"
] | null | null | null | floa/routes.py | rsutton/loa | 31ca8cc3f7be011b21f22ed2ce509d135a4b866b | [
"MIT"
] | null | null | null | from flask import (
Blueprint,
render_template,
request,
session,
current_app as app
)
from flask_login import current_user
from floa.extensions import loa
from floa.models.library import Library
# Blueprint for the routes served from the site root.
bp = Blueprint(name="home", import_name=__name__, url_prefix="/")
| 24.7 | 79 | 0.657606 |
6c619fe8bbdf105e5a1586be4e70bb3d3697916a | 3,496 | py | Python | api/async/__init__.py | lampwins/orangengine-ui | 8c864cd297176aa0ff9ead9682f2085f9fd3f1c0 | [
"MIT"
] | 1 | 2017-10-28T00:21:43.000Z | 2017-10-28T00:21:43.000Z | api/async/__init__.py | lampwins/orangengine-ui | 8c864cd297176aa0ff9ead9682f2085f9fd3f1c0 | [
"MIT"
] | null | null | null | api/async/__init__.py | lampwins/orangengine-ui | 8c864cd297176aa0ff9ead9682f2085f9fd3f1c0 | [
"MIT"
] | 4 | 2017-01-26T23:31:32.000Z | 2019-04-17T14:02:00.000Z |
import logging
import orangengine
from api.models import Device as DeviceModel
from celery.utils.log import get_task_logger
from api import debug
# Module-level Celery task logger; verbosity is raised when the app-wide
# `debug` flag (imported from the api package) is set.
celery_logger = get_task_logger(__name__)
if debug:
    celery_logger.setLevel(logging.DEBUG)
    celery_logger.debug('Enabled Debug mode')
| 33.615385 | 92 | 0.663043 |
6c62a1650704041514fc09b42720dad2d27e5799 | 753 | py | Python | app1/migrations/0060_auto_20201222_2131.py | vashuteotia123/zbcvit | da29b3281ccc87481a264b63c5b6c3a549945f33 | [
"MIT"
] | 6 | 2021-09-16T16:46:56.000Z | 2022-02-06T13:00:08.000Z | app1/migrations/0060_auto_20201222_2131.py | vashuteotia123/zbcvit | da29b3281ccc87481a264b63c5b6c3a549945f33 | [
"MIT"
] | null | null | null | app1/migrations/0060_auto_20201222_2131.py | vashuteotia123/zbcvit | da29b3281ccc87481a264b63c5b6c3a549945f33 | [
"MIT"
] | 1 | 2021-09-14T09:26:58.000Z | 2021-09-14T09:26:58.000Z | # Generated by Django 2.2.7 on 2020-12-22 16:01
import datetime
from django.db import migrations, models
| 25.965517 | 100 | 0.589641 |
6c63b62274efc319d7d5ff5ab63d36ad70596229 | 240 | py | Python | stacks/tests/test_decode_string.py | ahcode0919/python-ds-algorithms | 0d617b78c50b6c18da40d9fa101438749bfc82e1 | [
"MIT"
] | null | null | null | stacks/tests/test_decode_string.py | ahcode0919/python-ds-algorithms | 0d617b78c50b6c18da40d9fa101438749bfc82e1 | [
"MIT"
] | null | null | null | stacks/tests/test_decode_string.py | ahcode0919/python-ds-algorithms | 0d617b78c50b6c18da40d9fa101438749bfc82e1 | [
"MIT"
] | 3 | 2020-10-07T20:24:45.000Z | 2020-12-16T04:53:19.000Z | from stacks.decode_string import decode_string
| 30 | 61 | 0.7 |
6c65225c18ab757299cb8993ab36ee8beae952c4 | 239 | py | Python | receiver_udp.py | pabitra0177/ITR-internship | 3d1909b9e4a1b980ad4f6cb4b8c1fb17811c2d75 | [
"MIT"
] | null | null | null | receiver_udp.py | pabitra0177/ITR-internship | 3d1909b9e4a1b980ad4f6cb4b8c1fb17811c2d75 | [
"MIT"
] | null | null | null | receiver_udp.py | pabitra0177/ITR-internship | 3d1909b9e4a1b980ad4f6cb4b8c1fb17811c2d75 | [
"MIT"
] | null | null | null | #
import socket
# Simple UDP receiver: bind to localhost:5001 and print every datagram.
# NOTE(review): this is Python 2 code (print statements).
ip = "127.0.0.1"
port = 5001
s = socket.socket(socket.AF_INET,socket.SOCK_DGRAM)
s.bind((ip,port))
# NOTE(review): `i` is never used after initialization.
i=0
while True:
    # Block until a datagram (up to 1024 bytes) arrives.
    data, addr = s.recvfrom(1024)
    print "Received from ",addr
    print "Received ",data
# NOTE(review): unreachable — the loop above never terminates.
s.close()
6c6938ad771712cddf43056b1ad20a6d5a62ca66 | 4,240 | py | Python | yolov3/utils/checkpoint.py | hysts/pytorch_yolov3 | 6d4c7a1e42d366894effac8ca52f7116f891b5ab | [
"MIT"
] | 13 | 2019-03-22T15:22:22.000Z | 2021-09-30T21:15:37.000Z | yolov3/utils/checkpoint.py | hysts/pytorch_yolov3 | 6d4c7a1e42d366894effac8ca52f7116f891b5ab | [
"MIT"
] | null | null | null | yolov3/utils/checkpoint.py | hysts/pytorch_yolov3 | 6d4c7a1e42d366894effac8ca52f7116f891b5ab | [
"MIT"
] | null | null | null | import copy
import logging
import pathlib
import torch
import torch.nn as nn
from yolov3.config import get_default_config
from yolov3.utils.config_node import ConfigNode
| 36.551724 | 79 | 0.624764 |
6c6a82e95bf8ebf0eb518403b616adac59f096b0 | 505 | py | Python | autograd/tests/test_z_playground.py | pmaederyork/Dragrongrad | 32794d561f8d0273592ed55d315013eab2c24b8b | [
"MIT"
] | 3 | 2018-12-17T16:24:11.000Z | 2020-06-03T22:40:50.000Z | autograd/tests/test_z_playground.py | cs207-project-group4/project-repo | d5ee88d2a7d16477d816d830ba90d241a05e3b48 | [
"MIT"
] | 2 | 2018-10-18T17:59:26.000Z | 2018-12-08T16:06:34.000Z | autograd/tests/test_z_playground.py | cs207-project-group4/project-repo | d5ee88d2a7d16477d816d830ba90d241a05e3b48 | [
"MIT"
] | 1 | 2019-08-19T06:06:13.000Z | 2019-08-19T06:06:13.000Z | # -*- coding: utf-8 -*-
from autograd.blocks.trigo import sin, cos
from autograd.variable import Variable
import numpy as np
import autograd as ad
from autograd import config
t=sub(2)
t.parent()
| 15.78125 | 42 | 0.552475 |
6c6a90b147afe488a76460582fd0b95042612fc0 | 135 | py | Python | PySpace/using_sys.py | dralee/LearningRepository | 4324d3c5ac1a12dde17ae70c1eb7f3d36a047ba4 | [
"Apache-2.0"
] | null | null | null | PySpace/using_sys.py | dralee/LearningRepository | 4324d3c5ac1a12dde17ae70c1eb7f3d36a047ba4 | [
"Apache-2.0"
] | null | null | null | PySpace/using_sys.py | dralee/LearningRepository | 4324d3c5ac1a12dde17ae70c1eb7f3d36a047ba4 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python3
# using_sys.py
import sys
print('')
for i in sys.argv:
print(i)
print('\n\nPython',sys.path,'\n') | 15 | 37 | 0.674074 |
6c6f498aea5f5f14a181bf4e682dea6414249ebe | 1,749 | py | Python | gaussian_filter.py | baiching/Paper-Implementations | 56136a88a64885270adbefd6999815a1ad6f56a2 | [
"MIT"
] | null | null | null | gaussian_filter.py | baiching/Paper-Implementations | 56136a88a64885270adbefd6999815a1ad6f56a2 | [
"MIT"
] | null | null | null | gaussian_filter.py | baiching/Paper-Implementations | 56136a88a64885270adbefd6999815a1ad6f56a2 | [
"MIT"
] | null | null | null | import math
import numbers
import torch
from torch import nn
from torch.nn import functional as F
def gaussian_filter(in_channel, out_channel, kernel_size=15, sigma=3):
    """Build a fixed (non-trainable) depthwise 2D Gaussian blur layer.

    input :
        in_channel  : Number of input channels
        out_channel : Number of output channels; with ``groups=in_channel``
                      this must be a multiple of ``in_channel`` (typically
                      equal to it)
        kernel_size : side length of the square filter (H x H)
        sigma       : standard deviation of the Gaussian
    output:
        returns : ``nn.Conv2d`` whose weights hold a normalized Gaussian
                  kernel and whose gradients are disabled
    """
    # Create an x, y coordinate grid of shape (kernel_size, kernel_size, 2).
    coords = torch.arange(kernel_size)
    x_grid = coords.repeat(kernel_size).view(kernel_size, kernel_size)
    y_grid = x_grid.t()
    xy_grid = torch.stack([x_grid, y_grid], dim=-1).float()

    mean = (kernel_size - 1) / 2.
    variance = sigma ** 2.

    # 2D Gaussian kernel: the product of two 1D Gaussians in x and y.
    gaussian_kernel = (1. / (2. * math.pi * variance)) * \
        torch.exp(
            -torch.sum((xy_grid - mean) ** 2., dim=-1) /
            (2 * variance)
        )
    # Normalize so the kernel sums to 1 (preserves overall intensity).
    gaussian_kernel = gaussian_kernel / torch.sum(gaussian_kernel)

    # Reshape to the depthwise-conv weight layout expected by Conv2d with
    # groups=in_channel: (out_channels, in_channels/groups = 1, kH, kW).
    # BUGFIX: the kernel was previously repeated `in_channel` times, which
    # produced a wrongly-shaped weight whenever out_channel != in_channel.
    gaussian_kernel = gaussian_kernel.view(1, 1, kernel_size, kernel_size)
    gaussian_kernel = gaussian_kernel.repeat(out_channel, 1, 1, 1)

    conv = nn.Conv2d(in_channels=in_channel, out_channels=out_channel,
                     kernel_size=kernel_size, groups=in_channel, bias=False)
    conv.weight.data = gaussian_kernel
    conv.weight.requires_grad = False
    return conv
6c6ff29fbade9a404f47dd54164a91e8e0704f4b | 664 | py | Python | opfu/stock.py | XavierDingRotman/OptionsFutures | bab0de0d66efe39f05e9ddf59460ec76547d9ada | [
"Apache-2.0"
] | 1 | 2020-07-05T20:54:15.000Z | 2020-07-05T20:54:15.000Z | opfu/stock.py | XavierDingRotman/OptionsFutures | bab0de0d66efe39f05e9ddf59460ec76547d9ada | [
"Apache-2.0"
] | null | null | null | opfu/stock.py | XavierDingRotman/OptionsFutures | bab0de0d66efe39f05e9ddf59460ec76547d9ada | [
"Apache-2.0"
] | null | null | null | from opfu.security import Security
| 23.714286 | 55 | 0.554217 |
6c7066dd2f2223bc38f4edca28dbdaad3e0c39bc | 172 | py | Python | ABC103/ABC103a.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | ABC103/ABC103a.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | ABC103/ABC103a.py | VolgaKurvar/AtCoder | 21acb489f1594bbb1cdc64fbf8421d876b5b476d | [
"Unlicense"
] | null | null | null | # ABC103a
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10**6)
a = list(map(int, input().split()))
a.sort(reverse=True)
print(abs(a[1]-a[0])+abs(a[1]-a[2]))
| 19.111111 | 36 | 0.674419 |
6c707e42c5437ebc563efca0ace739aedca17496 | 283 | py | Python | awsthreatprep/config.py | cclauss/ThreatPrep | b1881be239e7b86d86acc70a207989d459bd9d79 | [
"MIT"
] | 50 | 2016-08-05T03:33:00.000Z | 2022-02-16T13:52:15.000Z | awsthreatprep/config.py | cclauss/ThreatPrep | b1881be239e7b86d86acc70a207989d459bd9d79 | [
"MIT"
] | null | null | null | awsthreatprep/config.py | cclauss/ThreatPrep | b1881be239e7b86d86acc70a207989d459bd9d79 | [
"MIT"
] | 14 | 2017-06-26T02:54:43.000Z | 2021-11-17T07:38:52.000Z | import os
# IAM credential-hygiene thresholds, in days; checks elsewhere compare
# account/credential age against these values.
config = {
    # iam
    'ACCOUNT_INACTIVE_DAYS': 30, # Accounts are inactive if not used for 30 days
    'PASSWORD_ROTATION_DAYS': 90, # Passwords should be rotated every 90 days
    'ACCESS_KEY_ROTATION_DAYS': 90 # Access keys should be rotated every 90 days
}
| 31.444444 | 85 | 0.696113 |
6c72586f407f6e08ecae9c71f47245060e33b3dd | 28,356 | py | Python | widgets/RichTextCtrl.py | iubica/wx-portfolio | 12101986db72bcaffd9b744d514d6f9f651ad5a1 | [
"MIT"
] | 3 | 2018-03-19T07:57:10.000Z | 2021-07-05T08:55:14.000Z | widgets/RichTextCtrl.py | iubica/wx-portfolio | 12101986db72bcaffd9b744d514d6f9f651ad5a1 | [
"MIT"
] | 6 | 2020-03-24T15:40:18.000Z | 2021-12-13T19:46:09.000Z | widgets/RichTextCtrl.py | iubica/wx-portfolio | 12101986db72bcaffd9b744d514d6f9f651ad5a1 | [
"MIT"
] | 4 | 2018-03-29T21:59:55.000Z | 2019-12-16T14:56:38.000Z | #!/usr/bin/env python
from six import BytesIO
import wx
import wx.richtext as rt
import images
#----------------------------------------------------------------------
#----------------------------------------------------------------------
#----------------------------------------------------------------------
def runTest(frame, nb, log):
    # Demo-framework entry point: build and return the demo panel.
    # (`frame` is part of the standard runTest signature but unused here.)
    return TestPanel(nb, log)
#----------------------------------------------------------------------
# HTML shown in the demo framework's "Overview" tab for this demo.
overview = """<html><body>
<h2><center>wx.richtext.RichTextCtrl</center></h2>
</body></html>
"""
if __name__ == '__main__':
    # Launch this demo standalone through the wx demo runner, passing this
    # script's name plus any extra command-line arguments through.
    import sys,os
    import run
    run.main(['', os.path.basename(sys.argv[0])] + sys.argv[1:])
| 38.684857 | 302 | 0.608513 |
6c74c309dcd00dafc4c1aae00a0c378fd733102d | 1,105 | py | Python | src/user/models.py | fga-gpp-mds/2017.2-Grupo12 | a90f94d0d497f625ab82ef44a907561f3bfa835f | [
"MIT"
] | 6 | 2017-10-02T12:07:40.000Z | 2017-12-14T11:40:07.000Z | src/user/models.py | fga-gpp-mds/2017.2-Grupo12 | a90f94d0d497f625ab82ef44a907561f3bfa835f | [
"MIT"
] | 92 | 2017-09-30T19:14:21.000Z | 2017-12-14T04:41:16.000Z | src/user/models.py | fga-gpp-mds/2017.2-Grupo12 | a90f94d0d497f625ab82ef44a907561f3bfa835f | [
"MIT"
] | 3 | 2017-09-06T00:49:38.000Z | 2018-07-13T00:32:37.000Z | from django.db import models
from django.contrib.auth.models import User
| 29.078947 | 78 | 0.673303 |
6c768de90390e5fd0ea2640bab37a8869d234309 | 1,784 | py | Python | lcd/nodemcu_gpio_lcd_test.py | petrkr/python_lcd | 92e5d0211e5cef4dcc9078905f4bd53dc2cc78b4 | [
"MIT"
] | 237 | 2015-07-19T21:33:01.000Z | 2022-03-30T00:19:46.000Z | lcd/nodemcu_gpio_lcd_test.py | petrkr/python_lcd | 92e5d0211e5cef4dcc9078905f4bd53dc2cc78b4 | [
"MIT"
] | 25 | 2015-07-19T20:44:31.000Z | 2022-01-26T10:42:07.000Z | lcd/nodemcu_gpio_lcd_test.py | petrkr/python_lcd | 92e5d0211e5cef4dcc9078905f4bd53dc2cc78b4 | [
"MIT"
] | 107 | 2015-09-05T12:54:55.000Z | 2022-03-28T15:36:13.000Z | """Implements a HD44780 character LCD connected via NodeMCU GPIO pins."""
from machine import Pin
from utime import sleep, ticks_ms
from nodemcu_gpio_lcd import GpioLcd
# Wiring used for this example:
#
# 1 - Vss (aka Ground) - Connect to one of the ground pins on you NodeMCU board.
# 2 - VDD - Connect to 3V
# 3 - VE (Contrast voltage) - I'll discuss this below
# 4 - RS (Register Select) connect to D0 (as per call to GpioLcd)
# 5 - RW (Read/Write) - connect to ground
# 6 - EN (Enable) connect to D1 (as per call to GpioLcd)
# 7 - D0 - leave unconnected
# 8 - D1 - leave unconnected
# 9 - D2 - leave unconnected
# 10 - D3 - leave unconnected
# 11 - D4 - connect to D2 (as per call to GpioLcd)
# 12 - D5 - connect to D3 (as per call to GpioLcd)
# 13 - D6 - connect to D4 (as per call to GpioLcd)
# 14 - D7 - connect to D5 (as per call to GpioLcd)
# 15 - A (BackLight Anode) - Connect to 3V
# 16 - K (Backlight Cathode) - Connect to Ground
#
# On 14-pin LCDs, there is no backlight, so pins 15 & 16 don't exist.
#
# The Contrast line (pin 3) typically connects to the center tap of a
# 10K potentiometer, and the other 2 legs of the 10K potentiometer are
# connected to pins 1 and 2 (Ground and VDD)
def test_main():
    """Exercise the LCD: show a greeting, then a running seconds counter.

    Never returns; the final loop refreshes the elapsed-time display once
    per second until the board is reset.
    """
    print("Running test_main")
    # Pin numbers follow the wiring table above (NodeMCU GPIO numbering).
    lcd = GpioLcd(rs_pin=Pin(16),
                  enable_pin=Pin(5),
                  d4_pin=Pin(4),
                  d5_pin=Pin(0),
                  d6_pin=Pin(2),
                  d7_pin=Pin(14),
                  num_lines=2, num_columns=20)
    lcd.putstr("It Works!\nSecond Line")
    sleep(3)
    lcd.clear()
    # Refresh the elapsed-seconds readout once per second, right-aligned
    # in a 7-character field. (An unused loop counter was removed.)
    while True:
        lcd.move_to(0, 0)
        lcd.putstr("%7d" % (ticks_ms() // 1000))
        sleep(1)
6c77d3d22c710ab0e8e3582be4b79df9edb68531 | 11,579 | py | Python | apps/life_sci/examples/reaction_prediction/rexgen_direct/utils.py | LunaBlack/dgl | bd1e48a51e348b0e8e25622325adeb5ddea1c0ea | [
"Apache-2.0"
] | 2 | 2021-12-09T12:36:13.000Z | 2022-03-01T21:22:36.000Z | apps/life_sci/examples/reaction_prediction/rexgen_direct/utils.py | sherry-1001/dgl | 60d2e7d3c928d43bbb18e7ab17c066451c49f649 | [
"Apache-2.0"
] | null | null | null | apps/life_sci/examples/reaction_prediction/rexgen_direct/utils.py | sherry-1001/dgl | 60d2e7d3c928d43bbb18e7ab17c066451c49f649 | [
"Apache-2.0"
] | 2 | 2020-12-07T09:34:01.000Z | 2020-12-13T06:18:58.000Z | import dgl
import errno
import numpy as np
import os
import random
import torch
from collections import defaultdict
from rdkit import Chem
def mkdir_p(path):
    """Create a folder (and any missing parents) for the given path.

    Parameters
    ----------
    path: str
        Folder to create
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # Mirror `mkdir -p`: an already-existing directory is fine,
        # any other failure is fatal.
        if exc.errno == errno.EEXIST and os.path.isdir(path):
            print('Directory {} already exists.'.format(path))
        else:
            raise
    else:
        print('Created directory {}'.format(path))
def setup(args, seed=0):
    """Set up the experiment.

    1. Decide whether to use CPU or GPU for training
    2. Fix random seed for python, NumPy and PyTorch.
    3. Create the result directory.

    Parameters
    ----------
    seed : int
        Random seed to use.

    Returns
    -------
    args
        Updated configuration
    """
    biggest_k = max(args['top_ks'])
    assert args['max_k'] >= biggest_k, \
        'Expect max_k to be no smaller than the possible options ' \
        'of top_ks, got {:d} and {:d}'.format(args['max_k'], biggest_k)

    args['device'] = 'cuda:0' if torch.cuda.is_available() else 'cpu'

    # Seed every RNG we rely on for reproducibility.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed(seed)

    mkdir_p(args['result_path'])
    return args
def collate(data):
    """Collate multiple datapoints into a single batch.

    Each datapoint is a 7-tuple: (reaction string, graph edits, RDKit
    molecule for all reactants, molecular DGLGraph, complete DGLGraph,
    atom-pair features, atom-pair labels).

    Returns
    -------
    reactions : list of str
    graph_edits : list of str
    mols : list of rdkit.Chem.rdchem.Mol
    batch_mol_graphs : DGLGraph
        Batched molecular graphs.
    batch_complete_graphs : DGLGraph
        Batched complete graphs, with atom-pair features in edata['feats'].
    batch_atom_pair_labels : float32 tensor of shape (V, 10)
        Labels of atom pairs in the batch of graphs.
    """
    reactions, graph_edits, mols, mol_graphs, complete_graphs, \
        atom_pair_feats, atom_pair_labels = map(list, zip(*data))

    def _batch(graphs):
        # Batch the graphs and install zero initializers for new features.
        bg = dgl.batch(graphs)
        bg.set_n_initializer(dgl.init.zero_initializer)
        bg.set_e_initializer(dgl.init.zero_initializer)
        return bg

    batch_mol_graphs = _batch(mol_graphs)
    batch_complete_graphs = _batch(complete_graphs)
    batch_complete_graphs.edata['feats'] = torch.cat(atom_pair_feats, dim=0)
    batch_atom_pair_labels = torch.cat(atom_pair_labels, dim=0)
    return reactions, graph_edits, mols, batch_mol_graphs, \
           batch_complete_graphs, batch_atom_pair_labels
def reaction_center_prediction(device, model, mol_graphs, complete_graphs):
    """Soft prediction on reaction center.

    Pops the node ('hv'), edge ('he') and atom-pair ('feats') features off
    the input graphs, moves them to ``device`` and runs ``model``.

    Returns
    -------
    scores : float32 tensor of shape (E_full, 5)
        Per atom-pair scores for the 5 possible bond changes (break bond,
        form single/double/triple/aromatic bond).
    biased_scores : float32 tensor of shape (E_full, 5)
        Same scores with a bias added for pairs involving the same atom.
    """
    hv = mol_graphs.ndata.pop('hv').to(device)
    he = mol_graphs.edata.pop('he').to(device)
    pair_feats = complete_graphs.edata.pop('feats').to(device)
    return model(mol_graphs, complete_graphs, hv, he, pair_feats)
def rough_eval_on_a_loader(args, model, data_loader):
    """Rough mid-training evaluation over a whole data loader.

    Unlike the final evaluation, no prior-knowledge filtering of candidate
    atom pairs is performed here.

    Returns
    -------
    str
        Formatted message with the top-k accuracies.
    """
    model.eval()
    num_correct = {k: [] for k in args['top_ks']}
    for batch_data in data_loader:
        _, _, _, mol_graphs, complete_graphs, atom_pair_labels = batch_data
        with torch.no_grad():
            _, biased_pred = reaction_center_prediction(
                args['device'], model, mol_graphs, complete_graphs)
        rough_eval(complete_graphs, biased_pred, atom_pair_labels, num_correct)

    return '|' + ''.join(
        ' acc@{:d} {:.4f} |'.format(k, np.mean(counts))
        for k, counts in num_correct.items())
def eval(complete_graphs, preds, reactions, graph_edits, num_correct, max_k, easy):
    """Evaluate top-k accuracies for reaction center prediction.

    Parameters
    ----------
    complete_graphs : DGLGraph
        DGLGraph for a batch of complete graphs
    preds : float32 tensor of shape (E_full, 5)
        Soft predictions for reaction center, E_full being the number of possible
        atom-pairs and 5 being the number of possible bond changes
    reactions : list of str
        List of reactions.
    graph_edits : list of str
        List of graph edits in the reactions.
    num_correct : dict
        Counting the number of datapoints for meeting top-k accuracies.
    max_k : int
        Maximum number of atom pairs to be selected. This is intended to be larger
        than max(num_correct.keys()) as we will filter out many atom pairs due to
        considerations such as avoiding duplicates.
    easy : bool
        If True, reactants not contributing atoms to the product will be excluded in
        top-k atom pair selection, which will make the task easier.
    """
    # NOTE(review): this function shadows the builtin `eval`; renaming it
    # would break existing callers, so it is kept as-is.
    # 0 for losing the bond
    # 1, 2, 3, 1.5 separately for forming a single, double, triple or aromatic bond.
    bond_change_to_id = {0.0: 0, 1:1, 2:2, 3:3, 1.5:4}
    id_to_bond_change = {v: k for k, v in bond_change_to_id.items()}
    num_change_types = len(bond_change_to_id)
    batch_size = complete_graphs.batch_size
    # `start` tracks the edge offset of the current graph inside the
    # batched `preds` tensor.
    start = 0
    for i in range(batch_size):
        # Decide which atom-pairs will be considered.
        reaction_i = reactions[i]
        reaction_atoms_i = []
        reaction_bonds_i = defaultdict(bool)
        # A reaction SMILES has the form "reactants>reagents>product".
        reactants_i, _, product_i = reaction_i.split('>')
        product_mol_i = Chem.MolFromSmiles(product_i)
        product_atoms_i = set([atom.GetAtomMapNum() for atom in product_mol_i.GetAtoms()])
        for reactant in reactants_i.split('.'):
            reactant_mol = Chem.MolFromSmiles(reactant)
            reactant_atoms = [atom.GetAtomMapNum() for atom in reactant_mol.GetAtoms()]
            # In "easy" mode, skip reactants that contribute no atoms to
            # the product; otherwise consider every reactant.
            if (len(set(reactant_atoms) & product_atoms_i) > 0) or (not easy):
                reaction_atoms_i.extend(reactant_atoms)
                for bond in reactant_mol.GetBonds():
                    # Canonical bond key: (smaller map num, larger map num, order).
                    end_atoms = sorted([bond.GetBeginAtom().GetAtomMapNum(),
                                        bond.GetEndAtom().GetAtomMapNum()])
                    bond = tuple(end_atoms + [bond.GetBondTypeAsDouble()])
                    reaction_bonds_i[bond] = True
        num_nodes = complete_graphs.batch_num_nodes[i]
        end = start + complete_graphs.batch_num_edges[i]
        # Flatten this graph's (num_pairs, 5) scores so a single top-k
        # ranks (pair, change-type) combinations jointly.
        preds_i = preds[start:end, :].flatten()
        candidate_bonds = []
        topk_values, topk_indices = torch.topk(preds_i, max_k)
        for j in range(max_k):
            preds_i_j = topk_indices[j].cpu().item()
            # A bond change can be either losing the bond or forming a
            # single, double, triple or aromatic bond
            change_id = preds_i_j % num_change_types
            change_type = id_to_bond_change[change_id]
            # Recover the (atom1, atom2) pair from the flattened index;
            # atom map numbers are 1-based, hence the +1.
            pair_id = preds_i_j // num_change_types
            atom1 = pair_id // num_nodes + 1
            atom2 = pair_id % num_nodes + 1
            # Avoid duplicates and an atom cannot form a bond with itself
            if atom1 >= atom2:
                continue
            if atom1 not in reaction_atoms_i:
                continue
            if atom2 not in reaction_atoms_i:
                continue
            candidate = (int(atom1), int(atom2), float(change_type))
            # A "change" that matches an existing reactant bond is a no-op.
            if reaction_bonds_i[candidate]:
                continue
            candidate_bonds.append(candidate)
        # Parse the ground-truth edits ("a1-a2-change;...") into the same
        # canonical tuple form used for candidates.
        gold_bonds = []
        gold_edits = graph_edits[i]
        for edit in gold_edits.split(';'):
            atom1, atom2, change_type = edit.split('-')
            atom1, atom2 = int(atom1), int(atom2)
            gold_bonds.append((min(atom1, atom2), max(atom1, atom2), float(change_type)))
        # Count a hit at k only if ALL gold bonds appear in the top-k candidates.
        for k in num_correct.keys():
            if set(gold_bonds) <= set(candidate_bonds[:k]):
                num_correct[k] += 1
        start = end
def reaction_center_final_eval(args, model, data_loader, easy):
    """Final top-k evaluation of reaction center prediction.

    args : dict
        Configurations for the experiment.
    model : nn.Module
        Model for reaction center prediction.
    data_loader : torch.utils.data.DataLoader
        Loader for fetching and batching data.
    easy : bool
        If True, reactants not contributing atoms to the product are
        excluded from top-k atom pair selection (an easier task).

    Returns
    -------
    msg : str
        Summary of the top-k evaluation.
    """
    model.eval()
    num_correct = {k: 0 for k in args['top_ks']}
    for batch_data in data_loader:
        reactions, graph_edits, _, mol_graphs, complete_graphs, _ = batch_data
        with torch.no_grad():
            _, biased_pred = reaction_center_prediction(
                args['device'], model, mol_graphs, complete_graphs)
        eval(complete_graphs, biased_pred, reactions,
             graph_edits, num_correct, args['max_k'], easy)

    total = len(data_loader.dataset)
    return '|' + ''.join(
        ' acc@{:d} {:.4f} |'.format(k, correct / total)
        for k, correct in num_correct.items())
| 37.112179 | 97 | 0.648847 |
6c7908e8770d3b372e9f758cbbc3bb105b2fcb1e | 8,602 | py | Python | scripts/CMU/preprocess.py | Vidhan/allennlp | 3f360d6da2b06ecb8afe03e7802791b9c5cd74d1 | [
"Apache-2.0"
] | null | null | null | scripts/CMU/preprocess.py | Vidhan/allennlp | 3f360d6da2b06ecb8afe03e7802791b9c5cd74d1 | [
"Apache-2.0"
] | null | null | null | scripts/CMU/preprocess.py | Vidhan/allennlp | 3f360d6da2b06ecb8afe03e7802791b9c5cd74d1 | [
"Apache-2.0"
] | 1 | 2018-04-30T08:46:34.000Z | 2018-04-30T08:46:34.000Z | import json
import os
import re
import uuid
from knowledge_graph_attr import KnowledgeGraph, Dijkstra
total = 0.0
ignored = 0.0
if __name__ == "__main__":
p = Preprocessor()
path = "/Users/prasoon/Desktop/train"
files = os.listdir(path)
student_train_json = {'data': []}
bug_train_json = {'data': []}
dept_train_json = {'data': []}
meet_train_json = {'data': []}
shop_train_json = {'data': []}
student_dev_json = {'data': []}
bug_dev_json = {'data': []}
dept_dev_json = {'data': []}
meet_dev_json = {'data': []}
shop_dev_json = {'data': []}
for index, each_file in enumerate(files):
if not os.path.isfile(each_file):
print("Dir", each_file)
inner_files = os.listdir(path + "/" + each_file)
for filename in inner_files:
if not filename.endswith("with_hints"):
print("Ignored file", filename)
continue
if filename.startswith('student'):
train_json = student_train_json
elif filename.startswith('bug'):
train_json = bug_train_json
elif filename.startswith('department'):
train_json = dept_train_json
elif filename.startswith('meetings'):
train_json = meet_train_json
elif filename.startswith('shopping'):
train_json = shop_train_json
else:
print("Ignored file", filename)
continue
if len(train_json['data']) > 100:
if filename.startswith('student'):
train_json = student_dev_json
elif filename.startswith('bug'):
train_json = bug_dev_json
elif filename.startswith('department'):
train_json = dept_dev_json
elif filename.startswith('meetings'):
train_json = meet_dev_json
elif filename.startswith('shopping'):
train_json = shop_dev_json
else:
print("Ignored file", filename)
continue
if len(train_json['data']) > 20:
continue
real_path = path + "/" + each_file + "/" + filename
print("Preprocessing:", index, filename)
train_json['data'].append(p.preprocess(real_path))
path += "/"
print(ignored, "/", total)
save_json(student_train_json, path + 'final/student_train.json')
save_json(bug_train_json, path + 'final/bug_train.json')
save_json(dept_train_json, path + 'final/department_train.json')
save_json(meet_train_json, path + 'final/meetings_train.json')
save_json(shop_train_json, path + 'final/shopping_train.json')
save_json(student_dev_json, path + 'final/student_dev.json')
save_json(bug_dev_json, path + 'final/bug_dev.json')
save_json(dept_dev_json, path + 'final/department_dev.json')
save_json(meet_dev_json, path + 'final/meetings_dev.json')
save_json(shop_dev_json, path + 'final/shopping_dev.json')
train = {'data': student_train_json['data'] + bug_train_json['data'] + dept_train_json['data'] +
meet_train_json['data']}
dev = shop_train_json
save_json(train, path + 'final/train.json')
save_json(dev, path + 'final/dev.json')
| 34.546185 | 100 | 0.514299 |
6c79a93effba00b7b6196ac9c718d0c037c656b9 | 5,168 | py | Python | src/figures/violin_plot_sp_performance.py | espottesmith/hydrobench | e117774c94cff11debd764d231757174ec211e99 | [
"MIT"
] | 1 | 2022-03-16T19:19:15.000Z | 2022-03-16T19:19:15.000Z | src/figures/violin_plot_sp_performance.py | espottesmith/hydrobench | e117774c94cff11debd764d231757174ec211e99 | [
"MIT"
] | null | null | null | src/figures/violin_plot_sp_performance.py | espottesmith/hydrobench | e117774c94cff11debd764d231757174ec211e99 | [
"MIT"
] | null | null | null | import csv
import os
import difflib
import statistics
import numpy as np
import matplotlib.pyplot as plt
SMALL_SIZE = 12
MEDIUM_SIZE = 14
LARGE_SIZE = 18
plt.rc('font', size=SMALL_SIZE) # controls default text sizes
# plt.rc('title', titlesize=MEDIUM_SIZE) # fontsize of the axes title
plt.rc('axes', labelsize=LARGE_SIZE, titlesize=LARGE_SIZE) # fontsize of the x and y labels
plt.rc('xtick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
plt.rc('ytick', labelsize=MEDIUM_SIZE) # fontsize of the tick labels
base_dir = "/Users/ewcss/data/ssbt/20220211_benchmark"
methods = {"GGA": ["PBE", "PBE-D3(BJ)", "BLYP", "BLYP-D3(BJ)", "B97-D", "B97-D3", "mPW91", "mPW91-D3(BJ)", "VV10", "rVV10"],
"meta-GGA": ["M06-L", "M06-L-D3(0)", "SCAN", "SCAN-D3(BJ)", "TPSS", "TPSS-D3(BJ)", "MN12-L", "MN12-L-D3(BJ)", "B97M-rV"],
"hybrid GGA": ["PBE0", "PBE0-D3(BJ)", "B3LYP", "B3LYP-D3(BJ)", "CAM-B3LYP", "CAM-B3LYP-D3(0)", "mPW1PW91", "mPW1PW91-D3(BJ)", "wB97X", "wB97XD", "wB97XD3", "wB97XV"],
"hybrid meta-GGA": ["M06-2X", "M06-2X-D3(0)", "M06-HF", "M08-SO", "M11", "MN15", "BMK", "BMK-D3(BJ)", "TPSSh", "TPSSh-D3(BJ)", "SCAN0", "mPWB1K", "mPWB1K-D3(BJ)", "wB97M-V"]}
vac_mae = {x: dict() for x in methods}
vac_rel = {x: dict() for x in methods}
pcm_mae = {x: dict() for x in methods}
pcm_rel = {x: dict() for x in methods}
# Load per-functional mean absolute errors (vacuum) from CSV, keyed by
# functional family. Row 0 is the header; "average" and composite "3c"
# rows are skipped; the last column holds the averaged error.
with open(os.path.join(base_dir, "abserrs_vacuum.csv")) as file:
    reader = csv.reader(file)
    for i, row in enumerate(reader):
        if i == 0:
            continue
        elif row[0].lower() == "average" or "3c" in row[0].lower():
            continue
        funct = row[0]
        # if funct == "M06-HF":
        #     continue
        avg = float(row[-1])
        for group, functs in methods.items():
            if funct in functs:
                vac_mae[group][funct] = avg
# Same parsing for the relative-error (vacuum) CSV.
with open(os.path.join(base_dir, "abserrs_rel_vacuum.csv")) as file:
    reader = csv.reader(file)
    for i, row in enumerate(reader):
        if i == 0:
            continue
        elif row[0].lower() == "average" or "3c" in row[0].lower():
            continue
        funct = row[0]
        avg = float(row[-1])
        # if funct == "M06-HF":
        #     continue
        for group, functs in methods.items():
            if funct in functs:
                vac_rel[group][funct] = avg
# with open(os.path.join(base_dir, "abserrs_IEF-PCM.csv")) as file:
# reader = csv.reader(file)
# for i, row in enumerate(reader):
# if i == 0:
# continue
# elif row[0].lower() == "average" or "3c" in row[0].lower():
# continue
# funct = row[0]
# avg = float(row[-1])
#
# # if funct == "M06-HF":
# # continue
#
# for group, functs in methods.items():
# if funct in functs:
# pcm_mae[group][funct] = avg
#
# with open(os.path.join(base_dir, "abserrs_rel_IEF-PCM.csv")) as file:
# reader = csv.reader(file)
# for i, row in enumerate(reader):
# if i == 0:
# continue
# elif row[0].lower() == "average" or "3c" in row[0].lower():
# continue
# funct = row[0]
# avg = float(row[-1])
#
# # if funct == "M06-HF":
# # continue
#
# for group, functs in methods.items():
# if funct in functs:
# pcm_rel[group][funct] = avg
# Two stacked violin plots: absolute errors (top) and relative errors
# (bottom), one violin per functional family.
fig, axs = plt.subplots(2, 1, figsize=(14, 6), sharex=True)
for i, dset in enumerate([vac_mae, vac_rel]):
    ax = axs[i]
    if i == 0:
        ax.set_ylabel("MAE (eV)")
    else:
        ax.set_ylabel("MRAE (unitless)")
    xs = ["GGA", "meta-GGA", "hybrid GGA", "hybrid meta-GGA"]
    # NOTE(review): avgs/lowlims/uplims are never used below.
    avgs = list()
    lowlims = list()
    uplims = list()
    data = list()
    for group in xs:
        data.append(np.array(sorted(list(dset[group].values()))))
    ax.violinplot(data, [1,2,3,4], showmeans=False, showmedians=False, showextrema=False)
    # Quartiles per family for the overlaid box-plot style markers.
    quartile1 = np.zeros(4)
    medians = np.zeros(4)
    quartile3 = np.zeros(4)
    # NOTE(review): the loop variable `i` here shadows the outer loop's
    # `i`; harmless only because the outer `i` is not used afterwards.
    for i, d in enumerate(data):
        q1, m, q3 = np.percentile(d, [25, 50, 75])
        quartile1[i] = q1
        medians[i] = m
        quartile3[i] = q3
    # NOTE(review): adjacent_values is defined elsewhere in this script —
    # presumably it clips the whiskers to the data range; confirm.
    whiskers = np.array([adjacent_values(sorted_array, q1, q3)
                         for sorted_array, q1, q3 in zip(data, quartile1, quartile3)])
    whiskers_min, whiskers_max = whiskers[:, 0], whiskers[:, 1]
    inds = np.arange(1, len(medians) + 1)
    # Median dot, thick IQR bar, thin whisker line.
    ax.scatter(inds, medians, marker='o', color='white', s=30, zorder=3)
    ax.vlines(inds, quartile1, quartile3, color='k', linestyle='-', lw=5)
    ax.vlines(inds, whiskers_min, whiskers_max, color='k', linestyle='-', lw=1)
    ax.set_xticks([1, 2, 3, 4])
    ax.set_xticklabels(xs)
plt.tight_layout()
fig.savefig("sp_performance_violin.png", dpi=150)
plt.show()
6c7aa53b02ade1969b440eeb2dca4bdd3802359c | 205 | py | Python | submissions/abc083/a.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 1 | 2021-05-10T01:16:28.000Z | 2021-05-10T01:16:28.000Z | submissions/abc083/a.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | 3 | 2021-05-11T06:14:15.000Z | 2021-06-19T08:18:36.000Z | submissions/abc083/a.py | m-star18/atcoder | 08e475810516602fa088f87daf1eba590b4e07cc | [
"Unlicense"
] | null | null | null | # sys.stdin.readline()
import sys
input = sys.stdin.readline
a, b, c, d = map(int, input().split())
if a+b > c+d:
ans = 'Left'
elif a+b == c+d:
ans = 'Balanced'
else:
ans = 'Right'
print(ans)
| 15.769231 | 38 | 0.57561 |
6c7b64c2f62afaf0967618c5e7f57887d35fa040 | 3,972 | py | Python | dafny_comparison/print_table.py | gleissen/goolong | 2bc38024204f9747ed9818502c5df3d36b96dd7d | [
"Apache-2.0"
] | 1 | 2019-05-21T18:16:58.000Z | 2019-05-21T18:16:58.000Z | dafny_comparison/print_table.py | gleissen/goolong | 2bc38024204f9747ed9818502c5df3d36b96dd7d | [
"Apache-2.0"
] | 2 | 2020-08-06T15:19:12.000Z | 2020-08-06T15:23:19.000Z | dafny_comparison/print_table.py | gokhankici/goolong | ac5689c374ddaa0156693f234be392059f318b3a | [
"Apache-2.0"
] | 2 | 2020-10-27T09:06:58.000Z | 2021-12-07T16:30:38.000Z | #!/usr/bin/env python
import copy
import os.path as op
NAME_FMT = "%-20s"
def update_stats(filename, stat):
    """Tally annotation occurrences found in *filename* into *stat*.

    Each line that ends with ``"<stat.comment> <annotation>"`` (for any
    annotation listed in ``FileStats.ANNOTS``) bumps that annotation's
    counter in *stat*. Files that do not exist are silently skipped.
    """
    if not op.isfile(filename):
        return
    with open(filename, 'r') as handle:
        for raw_line in handle:
            stripped = raw_line.rstrip()
            for annotation in FileStats.ANNOTS:
                suffix = "%s %s" % (stat.comment, annotation)
                if stripped.endswith(suffix):
                    stat[annotation] += 1
                    break
if __name__ == '__main__':
    # Benchmark files live in 'icet/' and 'dafny/' next to this script.
    THIS_FOLDER = op.dirname(op.abspath(op.realpath(__file__)))
    ICET_FOLDER = op.join(THIS_FOLDER, 'icet')
    DAFNY_FOLDER = op.join(THIS_FOLDER, 'dafny')
    # One entry per benchmark protocol: ((icet file, dafny file),
    # display name, fresh per-benchmark stats accumulator).
    FILES = [(('twophase.icet', 'twophase.dfy'),
              'Two-Phase Commit',
              GlobalStats()),
             (('raft.icet', 'raft.dfy'),
              'Raft Leader Election',
              GlobalStats()),
             (('paxos.icet', 'paxos.dfy'),
              'Single-Decree Paxos',
              GlobalStats())]
    stat_total = GlobalStats()
    # Table header (note: Python 2 print statements throughout).
    print " | ".join([" " * 20, "%-36s" % "IceT", "%-49s" % "Dafny"])
    print " " * 20, "-" * 90
    print " | ".join(["%-20s" % "Name", stat_total.header()])
    print "-" * 111
    # One row per benchmark: count annotations in both encodings, print
    # the row, and fold it into the running total.
    for ((icet_filename, dafny_filename), name, both_stat) in FILES:
        update_stats(op.join(ICET_FOLDER, icet_filename), both_stat.icet_stats)
        update_stats(op.join(DAFNY_FOLDER, dafny_filename), both_stat.dafny_stats)
        print " | ".join([NAME_FMT % name, both_stat.row()])
        stat_total += both_stat
    print "-" * 111
    print " | ".join([NAME_FMT % "Total", stat_total.row()])
6c7c6ab3c977d309a6e23ab36c08b279c63de1a3 | 3,822 | py | Python | src/po_utils/common_actions/element_interactions.py | matthew-bahloul/browser-utils | 22372d1a6718d8a7fd4eebf116c728aaa06e68ee | [
"MIT"
] | null | null | null | src/po_utils/common_actions/element_interactions.py | matthew-bahloul/browser-utils | 22372d1a6718d8a7fd4eebf116c728aaa06e68ee | [
"MIT"
] | null | null | null | src/po_utils/common_actions/element_interactions.py | matthew-bahloul/browser-utils | 22372d1a6718d8a7fd4eebf116c728aaa06e68ee | [
"MIT"
] | null | null | null | """
by_locator : tuple --> (<selenium By object>, <selector string>)
x_offset : int --> integer value of x offset in pixels
y_offset : int --> integer value of y offset in pixels
x_destination : int --> integer value of x location on page
y_desitination : int --> integer value of y location on page
by_locator_source : tuple --> (<selenium By object>, <selector string>)
by_locator_target : tuple --> (<selenium By object>, <selector string>)
clear_first : bool --> toggle for clearing input field before writing text to it
press_enter : bool --> toggle for sending the ENTER key to an input field after writing to it
"""
from selenium.webdriver import ActionChains
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.keys import Keys
from po_utils.common_actions.waits import wait_for_page_to_load, wait_until_displayed, wait_until_not_displayed
| 53.083333 | 205 | 0.75641 |
6c7e366d11f836cc2b4028018db9d96639fae992 | 174 | py | Python | Topics/Custom generators/Even numbers/main.py | valenciarichards/hypernews-portal | 0b6c4d8aefe4f8fc7dc90d6542716e98f52515b3 | [
"MIT"
] | 1 | 2021-07-26T03:06:14.000Z | 2021-07-26T03:06:14.000Z | Topics/Custom generators/Even numbers/main.py | valenciarichards/hypernews-portal | 0b6c4d8aefe4f8fc7dc90d6542716e98f52515b3 | [
"MIT"
] | null | null | null | Topics/Custom generators/Even numbers/main.py | valenciarichards/hypernews-portal | 0b6c4d8aefe4f8fc7dc90d6542716e98f52515b3 | [
"MIT"
] | null | null | null | n = int(input())
for number in range(n):
    # NOTE(review): `even(number)` builds a fresh generator each pass and
    # only its first value is consumed -- presumably the exercise's `even`
    # generator (defined elsewhere) is parameterised so this prints the
    # first n even numbers; confirm against its definition.
    print(next(even(number)))
# Don't forget to print out the first n numbers one by one here
| 13.384615 | 63 | 0.62069 |
6c7f914b76e891552a3b496827a2a433ae7084c1 | 2,096 | py | Python | cronman/cron_jobs/run_cron_tasks.py | ryancheley/django-cronman | 5be5d9d5eecba0f110808c9e7a97ef89ef620ade | [
"BSD-3-Clause"
] | 17 | 2018-09-25T16:28:36.000Z | 2022-01-31T14:43:24.000Z | cronman/cron_jobs/run_cron_tasks.py | ryancheley/django-cronman | 5be5d9d5eecba0f110808c9e7a97ef89ef620ade | [
"BSD-3-Clause"
] | 14 | 2018-11-04T14:45:14.000Z | 2022-02-01T04:02:47.000Z | cronman/cron_jobs/run_cron_tasks.py | ryancheley/django-cronman | 5be5d9d5eecba0f110808c9e7a97ef89ef620ade | [
"BSD-3-Clause"
] | 3 | 2018-09-25T16:28:44.000Z | 2022-02-01T04:08:23.000Z | # -*- coding: utf-8 -*-
# vi:si:et:sw=4:sts=4:ts=4
from __future__ import unicode_literals
from django.db import connections
from django.utils import timezone
from django.utils.functional import cached_property
from cronman.config import app_settings
from cronman.job import BaseCronJob
from cronman.models import CronTask
from cronman.spawner import CronSpawner
from cronman.utils import cron_jobs_module_config
| 31.757576 | 74 | 0.648378 |
6c8301238acf3bc4525ac9e26175e629b0f3e112 | 2,893 | py | Python | day23.py | alexa-infra/advent-of-code-2018 | f14e8c87b655c479097ae713572bb0260ec993fc | [
"MIT"
] | null | null | null | day23.py | alexa-infra/advent-of-code-2018 | f14e8c87b655c479097ae713572bb0260ec993fc | [
"MIT"
] | null | null | null | day23.py | alexa-infra/advent-of-code-2018 | f14e8c87b655c479097ae713572bb0260ec993fc | [
"MIT"
] | null | null | null | import re
# Matches one nanobot line of the puzzle input, e.g. "pos=<1,-2,3>, r=4",
# capturing x, y, z and the radius r as named groups.
parse_re = re.compile(
    r'pos\=\<(?P<x>-?\d+),(?P<y>-?\d+),(?P<z>-?\d+)\>, r\=(?P<r>\d+)'
)
if __name__ == '__main__':
    # test1/test2/main are defined elsewhere in this file.
    test1()
    test2()
    main()
| 27.037383 | 69 | 0.412375 |
6c83ee69fde6360b183bb19fa3bcf09e78de7fd6 | 381 | py | Python | setup.py | connormullett/lib_elo_calculator | 1a699f233dd440b4295e8958b02422ce64b27c70 | [
"MIT"
] | null | null | null | setup.py | connormullett/lib_elo_calculator | 1a699f233dd440b4295e8958b02422ce64b27c70 | [
"MIT"
] | null | null | null | setup.py | connormullett/lib_elo_calculator | 1a699f233dd440b4295e8958b02422ce64b27c70 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from setuptools import find_packages, setup

# Packaging metadata for the lib_elo_calculator distribution.
setup(
    name="lib_elo_calculator",
    # Ship only the library package itself (excludes tests/ etc.).
    packages=find_packages(include=['lib_elo_calculator']),
    version='0.1.0',
    description='contains functions and formulas for calculating elo',
    author='Connor Mullett',
    license='MIT',
    # `python setup.py test` runs pytest over the tests/ suite.
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    test_suite='tests'
)
6c85751be92171445c98d3494d9be709e143efc5 | 1,526 | py | Python | examples/intro-example/dags/tutorial.py | rfim/QoalaMoviesKaggle | 3cf5486f012487c5585bbe86d3a2bc1c58979bac | [
"MIT"
] | null | null | null | examples/intro-example/dags/tutorial.py | rfim/QoalaMoviesKaggle | 3cf5486f012487c5585bbe86d3a2bc1c58979bac | [
"MIT"
] | null | null | null | examples/intro-example/dags/tutorial.py | rfim/QoalaMoviesKaggle | 3cf5486f012487c5585bbe86d3a2bc1c58979bac | [
"MIT"
] | null | null | null | tot_name = os.path.join(os.path.dirname(__file__),'src/data', file_name)
# open the json datafile and read it in
with open(tot_name, 'r') as inputfile:
    doc = json.load(inputfile)
# Pull out and coerce the fields we persist for each movie record.
id_movie = int(doc['id'])
movie_name = str(doc['original_title'])
year = str(doc['production_companies']['production_countries']['release date'])
country_origin = str(doc['production_companies']['origin_country'])
# NOTE(review): both categories read the identical JSON path -- presumably
# they should index two different genre entries; confirm the document schema.
category_1 = str(doc['genres']['name'])
category_2 = str(doc['genres']['name'])
movie_rating = float(doc['popularity'])
avg_rating = float(doc['production_companies']['production_countries']['vote_average'])
total_clicks = float(doc['production_companies']['production_countries']['vote_count'])
# check for nan's in the numeric values and then enter into the database
valid_data = True
#for valid in np.isnan([lat, lon, humid, press, min_temp, max_temp, temp]):
#    if valid is False:
#        valid_data = False
#        break;
row = (id_movie, movie_name, year, country_origin, category_1, category_2,
       movie_rating, avg_rating, total_clicks)
# BUG FIX: the VALUES clause previously listed ten %s placeholders for the
# nine columns / nine row values, which makes the DB-API driver raise at
# execution time (PEP 249 requires placeholders to match the parameters).
insert_cmd = """INSERT INTO movies
                (id_movie, movie_name, year,
                country_origin, category_1, category_2,
                movie_rating, avg_rating, total_clicks)
                VALUES
                (%s, %s, %s, %s, %s, %s, %s, %s, %s);"""
print(insert_cmd, row)
if valid_data:
    pg_hook.run(insert_cmd, parameters=row)
| 40.157895 | 92 | 0.671035 |
6c8601c43b4ff494fe3c99410a606a7250f4d9f9 | 20,189 | py | Python | hclf/multiclass.py | tfmortie/hclf | 68bdb61c12c4b8fefbb94f1ac8aa30baed8077c5 | [
"MIT"
] | null | null | null | hclf/multiclass.py | tfmortie/hclf | 68bdb61c12c4b8fefbb94f1ac8aa30baed8077c5 | [
"MIT"
] | null | null | null | hclf/multiclass.py | tfmortie/hclf | 68bdb61c12c4b8fefbb94f1ac8aa30baed8077c5 | [
"MIT"
] | null | null | null | """
Code for hierarchical multi-class classifiers.
Author: Thomas Mortier
Date: Feb. 2021
TODO:
* Add option for set-valued prediction
* Feature: allow tree structures with non-unique node labels (currently, warning is thrown)
"""
import time
import warnings
import numpy as np
from .utils import HLabelEncoder, PriorityQueue
from sklearn.base import BaseEstimator, ClassifierMixin, clone
from sklearn.utils import _message_with_time
from sklearn.utils.validation import check_X_y, check_array, check_random_state
from sklearn.exceptions import NotFittedError, FitFailedWarning
from sklearn.metrics import accuracy_score
from joblib import Parallel, delayed, parallel_backend
from collections import ChainMap
| 42.864119 | 182 | 0.553668 |
6c86a24e42a439643a1c92f29bdfc4a1de454d48 | 964 | py | Python | tests/conftest.py | jwizzle/nerdchess | 045726326abc3ff94af30bda0c66beff1ca52978 | [
"WTFPL"
] | null | null | null | tests/conftest.py | jwizzle/nerdchess | 045726326abc3ff94af30bda0c66beff1ca52978 | [
"WTFPL"
] | null | null | null | tests/conftest.py | jwizzle/nerdchess | 045726326abc3ff94af30bda0c66beff1ca52978 | [
"WTFPL"
] | null | null | null | """Fixtures for pytest."""
import pytest
from nerdchess.board import Board
from nerdchess import pieces
| 25.368421 | 67 | 0.631743 |
6c86a8871548627b9a0755d57d564bc3d174dbdd | 2,649 | py | Python | imports/language_check.py | ahmed-amr1/schtabtag | d5f1e550fccaf58cbcf9fac39528b921659cec7c | [
"MIT"
] | null | null | null | imports/language_check.py | ahmed-amr1/schtabtag | d5f1e550fccaf58cbcf9fac39528b921659cec7c | [
"MIT"
] | null | null | null | imports/language_check.py | ahmed-amr1/schtabtag | d5f1e550fccaf58cbcf9fac39528b921659cec7c | [
"MIT"
] | null | null | null |
"""
if src == "auto":
src = "Auto detect language"
if src == "en":
src = "English - English"
if src == "de":
src = "German - Deutsch"
if src == "ar":
src = "Arabic - "
if src == "es":
src = "Spanish - espaol, castellano"
if src == "ru":
src = "Russian - "
if src == "pl":
src = "Polish - Polski"
if src == "it":
src = "Italian - Italiano"
if src == "ja":
src = "Japanese - "
if src == "ga":
src = "Irish - Gaeilge"
if src == "hi":
src = "Hindi - , "
if src == "he":
src = "Hebrew - "
if src == "fr":
src = "French - Franais"
if src == "nl":
src = "Dutch - Nederlands"
if src == "cs":
src = "Czech - esky, etina"
if src == "da":
src = "Danish - Dansk"
if src == "zh":
src = "Chinese - , Zhngwn"
if src == "fa":
src = "Persian - "
if dst == "en":
dst = "English - English"
if dst == "de":
dst = "German - Deutsch"
if dst == "ar":
dst = "Arabic - "
if dst == "es":
dst = "Spanish - espaol, castellano"
if dst == "ru":
dst = "Russian - "
if dst == "pl":
dst = "Polish - Polski"
if dst == "it":
dst = "Italian - Italiano"
if dst == "ja":
dst = "Japanese - "
if dst == "ga":
dst = "Irish - Gaeilge"
if dst == "hi":
dst = "Hindi - , "
if dst == "he":
dst = "Hebrew - "
if dst == "fr":
dst = "French - Franais"
if dst == "nl":
dst = "Dutch - Nederlands"
if dst == "cs":
dst = "Czech - esky, etina"
if dst == "da":
dst = "Danish - Dansk"
if dst == "zh":
dst = "Chinese - , Zhngwn"
if dst == "fa":
dst = "Persian - "
""" | 23.442478 | 42 | 0.495659 |
6c86adac816e4b256e05f833e885292823f8146c | 1,003 | py | Python | puppo/decorator_functions/display_decorators.py | JHowell45/Pupper | 5c863eba8651a5b1130c04321cc6cefacb71c7b2 | [
"MIT"
] | null | null | null | puppo/decorator_functions/display_decorators.py | JHowell45/Pupper | 5c863eba8651a5b1130c04321cc6cefacb71c7b2 | [
"MIT"
] | 1 | 2021-06-01T21:54:15.000Z | 2021-06-01T21:54:15.000Z | puppo/decorator_functions/display_decorators.py | JHowell45/Pupper | 5c863eba8651a5b1130c04321cc6cefacb71c7b2 | [
"MIT"
] | null | null | null | """Decorator unctions for displaying commands."""
from functools import wraps
from shutil import get_terminal_size
import click
def command_handler(command_title, colour='green'):
    """Use this decorator for surrounding the functions with banners.

    :param command_title: text shown (title-cased) inside the top banner.
    :param colour: banner colour -- unused in the visible code; presumably
        consumed by the wrapped function. TODO confirm.
    """
    def decorator(function):
        """Nested decorator function."""
        # Size the banner so the title sits centred across the terminal.
        terminal_width = int(get_terminal_size()[0])
        title = ' {} '.format(command_title)
        banner_length = int((terminal_width - len(title)) / 2)
        banner = '-' * banner_length
        command_banner = '|{0}{1}{0}|'.format(
            banner, title.title())
        # Bottom rule matched to the banner's width.
        lower_banner = '|{}|'.format('-' * int(len(command_banner) - 2))
        # NOTE(review): `wrapper` is not defined anywhere in the visible
        # source -- the inner wrapped function appears to be missing from
        # this copy; as written this raises NameError when applied.
        return wrapper
    return decorator
| 34.586207 | 72 | 0.617149 |
6c87ac082f2ea2bf7c87cad18eaf0cdd7451709c | 869 | py | Python | opennem/api/schema.py | paulculmsee/opennem | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | [
"MIT"
] | 22 | 2020-06-30T05:27:21.000Z | 2022-02-21T12:13:51.000Z | opennem/api/schema.py | paulculmsee/opennem | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | [
"MIT"
] | 71 | 2020-08-07T13:06:30.000Z | 2022-03-15T06:44:49.000Z | opennem/api/schema.py | paulculmsee/opennem | 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | [
"MIT"
] | 13 | 2020-06-30T03:28:32.000Z | 2021-12-30T08:17:16.000Z | from typing import List, Optional
from pydantic import BaseModel, Field
| 21.725 | 86 | 0.696203 |
6c88a8da20ae18c022b5a983db40aed8a4ffb346 | 304 | py | Python | test-examples/issue_678_reproduce.py | tlambert03/image-demos | a2974bcc7f040fd4d14e659c4cbfeabcf726c707 | [
"BSD-3-Clause"
] | null | null | null | test-examples/issue_678_reproduce.py | tlambert03/image-demos | a2974bcc7f040fd4d14e659c4cbfeabcf726c707 | [
"BSD-3-Clause"
] | null | null | null | test-examples/issue_678_reproduce.py | tlambert03/image-demos | a2974bcc7f040fd4d14e659c4cbfeabcf726c707 | [
"BSD-3-Clause"
] | null | null | null | """
Test adding 4D followed by 5D image layers to the viewer
Initially only 2 sliders should be present, then a third slider should be
created.
"""
import numpy as np
from skimage import data
import napari

with napari.gui_qt():
    # 5-D random volume: napari exposes one slider per non-displayed axis
    # (three sliders for the trailing 100x100 planes).
    viewer = napari.view_image(np.random.random((2, 10, 50, 100, 100)))
6c8a1fd6e1a402d55f7841fa1d528a488bdf0b86 | 49,401 | py | Python | tests/subsystem_tests.py | Goodpaster/QSoME | 8b24d58dfab5ac0d90fd84b8519b25864eee6f74 | [
"Apache-2.0"
] | 7 | 2018-09-28T21:40:08.000Z | 2021-06-10T10:44:39.000Z | tests/subsystem_tests.py | Goodpaster/QSoME | 8b24d58dfab5ac0d90fd84b8519b25864eee6f74 | [
"Apache-2.0"
] | 1 | 2021-07-06T12:28:32.000Z | 2021-07-29T20:34:13.000Z | tests/subsystem_tests.py | Goodpaster/QSoME | 8b24d58dfab5ac0d90fd84b8519b25864eee6f74 | [
"Apache-2.0"
] | 1 | 2021-04-08T12:28:44.000Z | 2021-04-08T12:28:44.000Z | # A module to tests the methods of the Subsystem
import unittest
import os
import shutil
import re
from copy import copy
from qsome import cluster_subsystem, cluster_supersystem
from pyscf import gto, lib, scf, dft, cc, mp, mcscf, tools
from pyscf.cc import ccsd_t, uccsd_t
import numpy as np
import tempfile
| 43.182692 | 172 | 0.638125 |
6c8c154f105569426c30727bc7ab8defbef28f73 | 1,051 | py | Python | scripts/undeploy_service.py | Suremaker/consul-deployment-agent | 466c36d3fcb9f8bfa144299dde7cb94f4341907b | [
"Apache-2.0"
] | 6 | 2016-10-10T09:26:07.000Z | 2018-09-20T08:59:42.000Z | scripts/undeploy_service.py | Suremaker/consul-deployment-agent | 466c36d3fcb9f8bfa144299dde7cb94f4341907b | [
"Apache-2.0"
] | 11 | 2016-10-10T12:11:07.000Z | 2018-05-09T22:11:02.000Z | scripts/undeploy_service.py | Suremaker/consul-deployment-agent | 466c36d3fcb9f8bfa144299dde7cb94f4341907b | [
"Apache-2.0"
] | 16 | 2016-09-28T16:00:58.000Z | 2019-02-25T16:52:12.000Z | #!/usr/bin/env python
import argparse
import consulate
# Parse CLI flags, compute the consul KV key for the service deployment,
# and delete it. `Options` is defined elsewhere in this file.
options = Options()
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--name', required=True, help='service name')
parser.add_argument('-s', '--slice', help='slice name (optional)')
parser.add_argument('-r', '--role', required=True, help='server role name')
parser.add_argument('-e', '--environment', required=True, help='environment name')
# Parse straight into the Options instance via the namespace hook.
args = parser.parse_args(namespace=options)
print('[Initiating service removal]')
print(' Service: %s' % args.name)
print(' Slice: %s' % args.slice)
print(' Role: %s' % args.role)
print(' Environment: %s' % args.environment)
consul_session = consulate.Consul()
# NOTE(review): 'enviroments' is misspelled, but the writer side presumably
# uses the same prefix -- do not "fix" it without checking the deployer.
if args.slice is None:
    deployment_key = 'enviroments/{0}/roles/{1}/services/{2}'.format(args.environment, args.role, args.name)
else:
    deployment_key = 'enviroments/{0}/roles/{1}/services/{2}/{3}'.format(args.environment, args.role, args.name, args.slice)
# Deleting the KV entry triggers the agents watching this prefix.
del consul_session.kv[deployment_key]
print('Service removal triggered.')
| 30.911765 | 124 | 0.713606 |
6c8d08da4457f70f71f8796a1ee31a832ff90488 | 190 | py | Python | day08/test04.py | jaywoong/python | 99daedd5a9418b72b2d5c3b800080e730eb9b3ea | [
"Apache-2.0"
] | null | null | null | day08/test04.py | jaywoong/python | 99daedd5a9418b72b2d5c3b800080e730eb9b3ea | [
"Apache-2.0"
] | null | null | null | day08/test04.py | jaywoong/python | 99daedd5a9418b72b2d5c3b800080e730eb9b3ea | [
"Apache-2.0"
] | null | null | null | from value import Account
acc1 = Account(10000, 3.2)
print(acc1)
acc1.__balance = 100000000
print(acc1)
print(acc1.getBalance())
print(acc1.getInterest())
acc1.setInterest(2.8)
print(acc1)
| 17.272727 | 26 | 0.763158 |
6c8e315e18d51be8398247d53085f6019815be6e | 2,717 | py | Python | tests/functional/conftest.py | charmed-kubernetes/ceph-csi-operator | 06a6a9fed6055e3f0e0bfde835d7f607febcf6ea | [
"Apache-2.0"
] | null | null | null | tests/functional/conftest.py | charmed-kubernetes/ceph-csi-operator | 06a6a9fed6055e3f0e0bfde835d7f607febcf6ea | [
"Apache-2.0"
] | null | null | null | tests/functional/conftest.py | charmed-kubernetes/ceph-csi-operator | 06a6a9fed6055e3f0e0bfde835d7f607febcf6ea | [
"Apache-2.0"
] | 1 | 2022-03-24T19:17:47.000Z | 2022-03-24T19:17:47.000Z | # Copyright 2021 Martin Kalcok
# See LICENSE file for licensing details.
"""Pytest fixtures for functional tests."""
# pylint: disable=W0621
import logging
import tempfile
from pathlib import Path
import pytest
from kubernetes import client, config
from pytest_operator.plugin import OpsTest
logger = logging.getLogger(__name__)
| 36.226667 | 98 | 0.670593 |
6657771a019db8ff3764b551b4d27a9c8de3eee0 | 3,922 | py | Python | caronte/allauth/utils.py | simodalla/django-caronte | e47175849605924c26441c3a3d6d94f4340b9df7 | [
"BSD-3-Clause"
] | null | null | null | caronte/allauth/utils.py | simodalla/django-caronte | e47175849605924c26441c3a3d6d94f4340b9df7 | [
"BSD-3-Clause"
] | null | null | null | caronte/allauth/utils.py | simodalla/django-caronte | e47175849605924c26441c3a3d6d94f4340b9df7 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, absolute_import
from django.contrib.auth import get_user_model
from django.contrib.admin.templatetags.admin_urls import admin_urlname
from django.core.mail import mail_admins
from django.core.urlresolvers import reverse
from django.shortcuts import redirect
from django.template import loader, Context
from django.utils.html import strip_tags
from allauth.exceptions import ImmediateHttpResponse
from ..models import LoginAuthorization, LogUnauthorizedLogin, AuthorizedDomain
User = get_user_model()
def copy_fields(self, source_user, fields=None, dest_update=True):
    """
    Copy the named attribute ``fields`` from ``source_user`` onto
    ``self.user`` and report whether anything changed.

    :param source_user: User instance to read attribute values from.
    :param fields: iterable of attribute names to copy; ``None``/empty
        means nothing is copied.
    :param dest_update: when True (default), ``self.user.save()`` is
        called if at least one field actually changed.
    :return: True if any field on ``self.user`` was updated.
    """
    fields = fields or []
    changed = False
    for field in fields:
        social_field = getattr(source_user, field)
        # Only touch fields whose value differs, so an unchanged user
        # never triggers a needless save().
        if getattr(self.user, field) != social_field:
            setattr(self.user, field, social_field)
            changed = True
    if changed and dest_update:
        self.user.save()
    return changed
665885ddd8b1d1e99097726c1613e0a5986ad3d5 | 15,918 | py | Python | Task/data.py | sndnyang/GMMC | e9cd85c9d55a7de411daad490c8db84dfe9c0455 | [
"Apache-2.0"
] | 4 | 2021-05-09T16:00:12.000Z | 2021-12-16T12:31:25.000Z | Task/data.py | sndnyang/GMMC | e9cd85c9d55a7de411daad490c8db84dfe9c0455 | [
"Apache-2.0"
] | null | null | null | Task/data.py | sndnyang/GMMC | e9cd85c9d55a7de411daad490c8db84dfe9c0455 | [
"Apache-2.0"
] | null | null | null | from tensorflow.python.platform import flags
from tensorflow.contrib.data.python.ops import batching
import tensorflow as tf
import json
from torch.utils.data import Dataset
import pickle
import os.path as osp
import os
import numpy as np
import time
from scipy.misc import imread, imresize
from torchvision.datasets import CIFAR10, MNIST, SVHN, CIFAR100, ImageFolder
from torchvision import transforms
import torch
import torchvision
FLAGS = flags.FLAGS
# Silence TensorFlow's C++ logging (3 = errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
# Dataset Options
flags.DEFINE_string('dsprites_path',
                    '/root/data/dsprites-dataset/dsprites_ndarray_co1sh3sc6or40x32y32_64x64.npz',
                    'path to dsprites characters')
flags.DEFINE_string('imagenet_datadir', '/root/imagenet_big', 'whether cutoff should always in image')
# dSprites factor-restriction toggles: fix every latent factor except one.
flags.DEFINE_bool('dshape_only', False, 'fix all factors except for shapes')
flags.DEFINE_bool('dpos_only', False, 'fix all factors except for positions of shapes')
flags.DEFINE_bool('dsize_only', False, 'fix all factors except for size of objects')
flags.DEFINE_bool('drot_only', False, 'fix all factors except for rotation of objects')
flags.DEFINE_bool('dsprites_restrict', False, 'fix all factors except for rotation of objects')
flags.DEFINE_string('imagenet_path', '/root/imagenet', 'path to imagenet images')
flags.DEFINE_string('load_path', '/root/imagenet', 'path to imagenet images')
flags.DEFINE_string('load_type', 'npy', 'npy or png')
flags.DEFINE_bool('single', False, 'single ')
flags.DEFINE_string('datasource', 'random', 'default or noise or negative or single')
# Data augmentation options
# flags.DEFINE_bool('cutout_inside', False, 'whether cutoff should always in image')
# flags.DEFINE_float('cutout_prob', 1.0, 'probability of using cutout')
# flags.DEFINE_integer('cutout_mask_size', 16, 'size of cutout')
# flags.DEFINE_bool('cutout', False, 'whether to add cutout regularizer to data')
# Placeholder flags with empty help text -- presumably consumed elsewhere
# in this project; their semantics are not visible here.
flags.DEFINE_string('eval', '', '')
flags.DEFINE_string('init', '', '')
flags.DEFINE_string('norm', '', '')
flags.DEFINE_string('n_steps', '', '')
flags.DEFINE_string('reinit_freq', '', '')
flags.DEFINE_string('print_every', '', '')
flags.DEFINE_string('n_sample_steps', '', '')
# NOTE(review): the dash in 'gpu-id' is unusual for TF flags (most flag
# names use underscores) -- confirm it parses as intended.
flags.DEFINE_integer('gpu-id', 0, '')
| 33.441176 | 102 | 0.537379 |
6658e5fae0f2feb228f15d275ba5e7cdca6b1e61 | 3,751 | py | Python | controller/Specialty.py | ryltar/GSB-Planning-API | 919ad95e4e7bdcac43028fa4026bb800ec6bdb2a | [
"Apache-2.0"
] | null | null | null | controller/Specialty.py | ryltar/GSB-Planning-API | 919ad95e4e7bdcac43028fa4026bb800ec6bdb2a | [
"Apache-2.0"
] | null | null | null | controller/Specialty.py | ryltar/GSB-Planning-API | 919ad95e4e7bdcac43028fa4026bb800ec6bdb2a | [
"Apache-2.0"
] | null | null | null | from flask import jsonify, g, request
from flask_restful import Resource
from Authentication import *
from Service import *
def get_service():
    """Return the request-scoped ``Service`` cached on Flask's ``g``,
    creating it lazily on first use."""
    if hasattr(g, 'service'):
        return g.service
    service = Service()
    g.service = service
    return service
def specialty_queries():
    """Return the request-scoped ``SpecialtyQueries`` cached on Flask's
    ``g``, creating it lazily on first use."""
    if hasattr(g, 'spec_queries'):
        return g.spec_queries
    queries = SpecialtyQueries()
    g.spec_queries = queries
    return queries
| 31.788136 | 89 | 0.640363 |