Dataset schema (one row per source file):

| Column | Dtype | Range / values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 11 classes |
| lang | string | 1 value |
| max_stars_repo_path | string | length 3 to 251 |
| max_stars_repo_name | string | length 4 to 130 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 251 |
| max_issues_repo_name | string | length 4 to 130 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 116k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 251 |
| max_forks_repo_name | string | length 4 to 130 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 1 to 1.05M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.04M |
| alphanum_fraction | float64 | 0 to 1 |
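A minimal sketch of how a dataset with this schema can be streamed and filtered with the Hugging Face `datasets` library; the dataset id below is a placeholder, since the dump itself does not name the dataset.

from datasets import load_dataset

ds = load_dataset("org/dataset-name", split="train", streaming=True)  # placeholder dataset id
for row in ds:
    if row["ext"] == "py" and row["max_stars_count"]:
        print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
        break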
hexsha: 27981338330ee315b120f4f29b8d0163c165b34b | size: 4,453 | ext: py | lang: Python
(per-group fields below: path | repo | head_hexsha | licenses | count | event_min | event_max)
max_stars: st_model.py | saras108/Sentiment_Analysis | 7e4e84637161cd005ebbcd303f68417726b5f098 | ["MIT"] | null | null | null
max_issues: st_model.py | saras108/Sentiment_Analysis | 7e4e84637161cd005ebbcd303f68417726b5f098 | ["MIT"] | null | null | null
max_forks: st_model.py | saras108/Sentiment_Analysis | 7e4e84637161cd005ebbcd303f68417726b5f098 | ["MIT"] | null | null | null
# importing necessary libraries
import numpy as np
import pandas as pd
import string
import streamlit as st

header = st.container()
dataset = st.container()
feature = st.container()
model_training = st.container()

with header:
    st.title("Emotion detection using Text")

with dataset:
    st.header("Emotion Detection Datasets")
    df = get_data("1-P-3-ISEAR.csv")
    df.columns = ['sn', 'Target', 'Sentence']
    df.drop('sn', inplace=True, axis=1)
    df.head()
    df.duplicated().sum()
    df.drop_duplicates(inplace=True)
    st.subheader("Let's check if the dataset is fairly distributed.")
    col1, col2 = st.columns(2)
    target_count = df['Target'].value_counts()
    col1.table(target_count)
    col2.text("Line chart of the total output counts")
    col2.line_chart(target_count)
    st.markdown("From the above data, we can easily say the data is fairly distributed.")

with feature:
    st.header("Learning about the features and converting them")
    # df['Sentence'] = df['Sentence'].apply(lowercase)
    df['Sentence'] = df['Sentence'].apply(lowercase).apply(remove_punc)
    # Removing the stop words
    import nltk
    nltk.download('omw-1.4')
    nltk.download('stopwords')
    from nltk.corpus import stopwords
    df['Sentence'] = df['Sentence'].apply(remove_stopwords)
    # Lemmatization, i.e. changing words into their root form
    from nltk.stem import WordNetLemmatizer
    nltk.download('wordnet')
    from nltk.corpus import wordnet
    lemmatizer = WordNetLemmatizer()
    df['Sentence'] = df['Sentence'].apply(lemmatize)
    st.markdown('As part of data pre-processing, we have done the following:')
    st.text(" - Converting the sentences to lowercase")
    st.text(" - Removing the punctuation")
    st.text(" - Removing the stop words")
    st.text(" - Lemmatization, i.e. changing words into their root form")
    st.markdown("After all these steps our data looks like:")
    st.dataframe(df.head())
    st.markdown("After doing a train/test split we will apply TF-IDF, a technique to transform text into a meaningful vector of numbers. TF-IDF penalizes words that come up too often and don't carry much information, so it rescales the frequency of common words, which makes scoring more balanced.")

with model_training:
    from sklearn.model_selection import train_test_split
    X = df['Sentence']
    y = df['Target']
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=10)
    from sklearn.feature_extraction.text import TfidfVectorizer
    tfidf = TfidfVectorizer(min_df=2, max_df=0.5, ngram_range=(1, 2))
    train_tfidf = tfidf.fit_transform(X_train)
    test_tfidf = tfidf.transform(X_test)
    from sklearn.linear_model import LogisticRegression
    logistic = LogisticRegression(max_iter=1000)
    logistic.fit(train_tfidf, y_train)
    from sklearn.naive_bayes import MultinomialNB
    nb = MultinomialNB()
    nb.fit(train_tfidf, y_train)
    st.header('Checking the accuracy using different models.')
    import joblib
    joblib.dump(logistic, './mymodel/logistic_model.joblib')
    joblib.dump(nb, './mymodel/naive_bayes_model.joblib')
    joblib.dump(tfidf, './mymodel/tfidf_model.joblib')
    sel_col, disp_col = st.columns(2)
    with sel_col:
        sel_col.subheader("Logistic Regression")
        sel_col.markdown("Logistic Regression train accuracy")
        sel_col.write(logistic.score(train_tfidf, y_train))
        sel_col.markdown("Logistic Regression test accuracy")
        sel_col.write(logistic.score(test_tfidf, y_test))
    with disp_col:
        disp_col.subheader("Naive Bayes")
        disp_col.markdown("Naive Bayes train accuracy")
        disp_col.write(nb.score(train_tfidf, y_train))
        disp_col.markdown("Naive Bayes test accuracy")
        disp_col.write(nb.score(test_tfidf, y_test))
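# NOTE: the helpers called above (get_data, lowercase, remove_punc, remove_stopwords,
# lemmatize) are not shown in this excerpt. A minimal sketch of what they might look like,
# assuming plain pandas/NLTK implementations; the bodies below are assumptions, not the
# original definitions.
def get_data(path):
    return pd.read_csv(path)

def lowercase(text):
    return text.lower()

def remove_punc(text):
    return text.translate(str.maketrans('', '', string.punctuation))

def remove_stopwords(text):
    stop_words = set(stopwords.words('english'))
    return ' '.join(word for word in text.split() if word not in stop_words)

def lemmatize(text):
    return ' '.join(lemmatizer.lemmatize(word) for word in text.split())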
avg_line_length: 29.885906 | max_line_length: 302 | alphanum_fraction: 0.688974

hexsha: 279b274296a91748dfbdabae0134ce96287057e9 | size: 540 | ext: py | lang: Python
max_stars: config.py | sheepmen/SpiderManage | 850d6357fd1117c16684dabb5d1e79de1854e61c | ["MIT"] | 1 | 2018-06-13T00:38:53.000Z | 2018-06-13T00:38:53.000Z
max_issues: config.py | sheepmen/SpiderManage | 850d6357fd1117c16684dabb5d1e79de1854e61c | ["MIT"] | null | null | null
max_forks: config.py | sheepmen/SpiderManage | 850d6357fd1117c16684dabb5d1e79de1854e61c | ["MIT"] | null | null | null
#!/usr/bin/env python
# -*- coding:utf-8 -*-
SECRET_KEY = 'some secret key'
TEMPLATES_AUTO_RELOAD = True
PROJECT_NAME = 'SpiderManage'
# Redis Config
REDIS_HOST = '120.25.227.8'
REDIS_PORT = 6379
REDIS_PASSWORD = 'xuxinredis'
# SQLite
# SQLALCHEMY_DATABASE_URI = 'sqlite:///C:/Users/sheep3/workplace/SpiderManage/data.db'
# SQLALCHEMY_TRACK_MODIFICATIONS = True
# MYSQL
SQLALCHEMY_DATABASE_URI = 'mysql://root:xuxin.mysql@120.25.227.8:3306/spider_db?charset=utf8'
SQLALCHEMY_COMMIT_ON_TEARDOWN = True
SQLALCHEMY_TRACK_MODIFICATIONS = True
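# Usage sketch (not part of config.py): a Flask app typically loads a module like this with
# app.config.from_object; the snippet below is an assumption about how SpiderManage consumes
# it, not code from the repository.
from flask import Flask

app = Flask(__name__)
app.config.from_object('config')      # pulls in SECRET_KEY, SQLALCHEMY_DATABASE_URI, etc.
print(app.config['PROJECT_NAME'])     # -> 'SpiderManage'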
avg_line_length: 30 | max_line_length: 93 | alphanum_fraction: 0.777778

hexsha: 279b776bfdce89147881347913d489e839a74293 | size: 3,989 | ext: py | lang: Python
max_stars: PyPrometheus.py | RusDavies/PyPrometheus | 8c0bb9489f42423942982829024d7359a374d7b1 | ["MIT"] | null | null | null
max_issues: PyPrometheus.py | RusDavies/PyPrometheus | 8c0bb9489f42423942982829024d7359a374d7b1 | ["MIT"] | null | null | null
max_forks: PyPrometheus.py | RusDavies/PyPrometheus | 8c0bb9489f42423942982829024d7359a374d7b1 | ["MIT"] | null | null | null
from PyPrometheusQueryClient import PrometheusQueryClient
import json
from pathlib import Path
from datetime import datetime
avg_line_length: 37.990476 | max_line_length: 134 | alphanum_fraction: 0.613688

hexsha: 279d9301e8e9b967d31f0c36c000b8b79e8eab38 | size: 5,557 | ext: py | lang: Python
max_stars: tests/validate_schema_guide.py | dieghernan/citation-file-format | cfad34b82aa882d8169a0bcb6a21ad19cb4ff401 | ["CC-BY-4.0"] | 257 | 2017-12-18T14:09:32.000Z | 2022-03-28T17:58:19.000Z
max_issues: tests/validate_schema_guide.py | Seanpm2001-DIGITAL-Command-Language/citation-file-format | 52647a247e9b1a5b04154934f39615b5ee8c4d65 | ["CC-BY-4.0"] | 307 | 2017-10-16T12:17:40.000Z | 2022-03-18T11:18:49.000Z
max_forks: tests/validate_schema_guide.py | Seanpm2001-DIGITAL-Command-Language/citation-file-format | 52647a247e9b1a5b04154934f39615b5ee8c4d65 | ["CC-BY-4.0"] | 344 | 2018-09-19T03:00:26.000Z | 2022-03-31T01:39:11.000Z
import pytest
import os
import json
import jsonschema
from ruamel.yaml import YAML
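# Sketch of the validation step such a test typically performs (not part of this excerpt;
# the file names below are assumptions): load the YAML instance with ruamel.yaml and check
# it against the JSON Schema with jsonschema.
yaml = YAML(typ="safe")
with open("CITATION.cff") as f:
    instance = yaml.load(f)
with open("schema.json") as f:
    schema = json.load(f)
jsonschema.validate(instance=instance, schema=schema)   # raises ValidationError on failure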
avg_line_length: 57.28866 | max_line_length: 171 | alphanum_fraction: 0.479935

hexsha: 279ec732b8ac9028c087cef84952b665f1b41600 | size: 188 | ext: py | lang: Python
max_stars: vkrpg/scripts/hello.py | Augmeneco/VKRPG | b071a490ae45a574a028af1eb831fff96782c06c | ["Apache-2.0"] | null | null | null
max_issues: vkrpg/scripts/hello.py | Augmeneco/VKRPG | b071a490ae45a574a028af1eb831fff96782c06c | ["Apache-2.0"] | null | null | null
max_forks: vkrpg/scripts/hello.py | Augmeneco/VKRPG | b071a490ae45a574a028af1eb831fff96782c06c | ["Apache-2.0"] | null | null | null
import vkrpg
avg_line_length: 31.333333 | max_line_length: 87 | alphanum_fraction: 0.659574

hexsha: 27a5750fd3834a5dd24fb63cbde3fd11a0fdfdd0 | size: 4,613 | ext: py | lang: Python
max_stars: flaskprediction/routes.py | killswitchh/flask-prediction-app | a8bdff96fa2dc05544991a705970d1550ac9a034 | ["MIT"] | null | null | null
max_issues: flaskprediction/routes.py | killswitchh/flask-prediction-app | a8bdff96fa2dc05544991a705970d1550ac9a034 | ["MIT"] | 1 | 2020-08-29T18:39:05.000Z | 2020-08-30T09:43:47.000Z
max_forks: flaskprediction/routes.py | killswitchh/flask-prediction-app | a8bdff96fa2dc05544991a705970d1550ac9a034 | ["MIT"] | null | null | null
import secrets
from flask import Flask , render_template , url_for , send_from_directory
from flaskprediction import app
from flaskprediction.utils.predict import Predictor
from flaskprediction.forms import CarDetailsForm , TitanicDetailsForm , BostonDetailsForm , HeightDetailsForm, CatImageForm
from PIL import Image
import os
def save_picture(form_picture):
random_hex = secrets.token_hex(8)
_, f_ext = os.path.splitext(form_picture.filename)
picture_fn = random_hex + f_ext
picture_path = os.path.join(app.root_path, 'static/pics', picture_fn)
output_size = (64, 64)
i = Image.open(form_picture)
i.thumbnail(output_size)
i.save(picture_path)
return picture_path
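# Usage sketch for save_picture (not part of routes.py): any file-like object with a
# .filename attribute works, e.g. a werkzeug FileStorage as produced by a Flask upload.
# The file name below is a placeholder.
from werkzeug.datastructures import FileStorage

with open('cat.jpg', 'rb') as fh:
    upload = FileStorage(stream=fh, filename='cat.jpg')
    saved_path = save_picture(upload)    # saved under app.root_path/static/pics/<hex>.jpg
print(saved_path)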
avg_line_length: 39.767241 | max_line_length: 182 | alphanum_fraction: 0.681986

hexsha: 27a61a2b957091652c5a6b1dfcf40f524e7bd75a | size: 14,349 | ext: py | lang: Python
max_stars: air/gather.py | krisbukovi/AutomatedIngestReport | 87cc65c9028382a0860069d86b69b8517d93f59c | ["MIT"] | null | null | null
max_issues: air/gather.py | krisbukovi/AutomatedIngestReport | 87cc65c9028382a0860069d86b69b8517d93f59c | ["MIT"] | null | null | null
max_forks: air/gather.py | krisbukovi/AutomatedIngestReport | 87cc65c9028382a0860069d86b69b8517d93f59c | ["MIT"] | null | null | null
import requests
import re
from time import sleep
from datetime import datetime
import shutil
import elasticsearch2
from elasticsearch_dsl import Search, Q
from collections import OrderedDict
from sqlalchemy import create_engine
from subprocess import Popen, PIPE, STDOUT
import shlex
import glob
import csv
# from apiclient.discovery import build
from utils import Filename, FileType, Date, conf, logger, sort
avg_line_length: 45.122642 | max_line_length: 160 | alphanum_fraction: 0.566451

hexsha: 27a686fae11eeb59ce17fe8f4cf6412be0900891 | size: 8,180 | ext: py | lang: Python
max_stars: python_aternos/atconf.py | DarkCat09/python-aternos | a75d729e938a181449f304e849762dd9bb0e51f3 | ["Apache-2.0"] | 11 | 2021-10-01T13:04:44.000Z | 2022-03-31T19:19:48.000Z
max_issues: python_aternos/atconf.py | DarkCat09/python-aternos | a75d729e938a181449f304e849762dd9bb0e51f3 | ["Apache-2.0"] | 7 | 2021-10-01T14:00:20.000Z | 2022-03-21T12:29:24.000Z
max_forks: python_aternos/atconf.py | DarkCat09/python-aternos | a75d729e938a181449f304e849762dd9bb0e51f3 | ["Apache-2.0"] | 4 | 2022-01-07T13:47:39.000Z | 2022-02-22T21:51:28.000Z
import enum
import re
import lxml.html
from typing import Any, Dict, List, Union, Optional
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from .atserver import AternosServer
DAT_PREFIX = 'Data:'
DAT_GR_PREFIX = 'Data:GameRules:'
# checking timezone format
tzcheck = re.compile(r'(^[A-Z]\w+\/[A-Z]\w+$)|^UTC$')
# converting option types
convert = {
'config-option-number': int,
'config-option-select': int,
'config-option-toggle': bool
}
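# Quick illustration (not part of atconf.py) of how the two module-level helpers behave:
# tzcheck accepts either an Area/Location timezone or the literal 'UTC', and convert maps
# an option's CSS class to the Python type used to coerce its value.
assert tzcheck.match('Europe/Berlin') is not None
assert tzcheck.match('UTC') is not None
assert tzcheck.match('not a timezone') is None
assert convert['config-option-number']('25') == 25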
avg_line_length: 24.638554 | max_line_length: 82 | alphanum_fraction: 0.667604

hexsha: 27a6c1cdc477a10a4c9b691137650bb8e9980229 | size: 11,859 | ext: py | lang: Python
max_stars: examples/cadre_dymos.py | johnjasa/CADRE | a4ffd61582b8474953fc309aa540838a14f29dcf | ["Apache-2.0"] | null | null | null
max_issues: examples/cadre_dymos.py | johnjasa/CADRE | a4ffd61582b8474953fc309aa540838a14f29dcf | ["Apache-2.0"] | null | null | null
max_forks: examples/cadre_dymos.py | johnjasa/CADRE | a4ffd61582b8474953fc309aa540838a14f29dcf | ["Apache-2.0"] | null | null | null
from __future__ import print_function
import unittest
import numpy as np
from openmdao.api import Problem, Group, pyOptSparseDriver, DirectSolver, SqliteRecorder
from dymos import Phase
from dymos.utils.indexing import get_src_indices_by_row
from dymos.phases.components import ControlInterpComp
from CADRE.odes_dymos.cadre_orbit_ode import CadreOrbitODE
from CADRE.attitude_dymos.angular_velocity_comp import AngularVelocityComp
from CADRE.odes_dymos.cadre_systems_ode import CadreSystemsODE
GM = 398600.44
rmag = 7000.0
period = 2 * np.pi * np.sqrt(rmag ** 3 / GM)
vcirc = np.sqrt(GM / rmag)
duration = period
duration = 6 * 3600.0
p = Problem(model=Group())
p.driver = pyOptSparseDriver()
p.driver.options['optimizer'] = 'SNOPT'
p.driver.options['dynamic_simul_derivs'] = True
p.driver.opt_settings['Major iterations limit'] = 1000
p.driver.opt_settings['Major feasibility tolerance'] = 1.0E-4
p.driver.opt_settings['Major optimality tolerance'] = 1.0E-4
p.driver.opt_settings['Major step limit'] = 0.1
p.driver.opt_settings['iSumm'] = 6
p.driver.recording_options['includes'] = ['*']
p.driver.recording_options['record_objectives'] = True
p.driver.recording_options['record_constraints'] = True
p.driver.recording_options['record_desvars'] = True
recorder = SqliteRecorder("cases.sql")
p.driver.add_recorder(recorder)
NUM_SEG = 30
TRANSCRIPTION_ORDER = 3
orbit_phase = Phase('radau-ps',
ode_class=CadreOrbitODE,
num_segments=NUM_SEG,
transcription_order=TRANSCRIPTION_ORDER,
compressed=False)
p.model.add_subsystem('orbit_phase', orbit_phase)
orbit_phase.set_time_options(fix_initial=True, fix_duration=True, duration_ref=duration)
orbit_phase.set_state_options('r_e2b_I', defect_scaler=1000, fix_initial=True, units='km')
orbit_phase.set_state_options('v_e2b_I', defect_scaler=1000, fix_initial=True, units='km/s')
# orbit_phase.set_state_options('SOC', defect_scaler=1, fix_initial=True, units=None)
# orbit_phase.add_design_parameter('P_bat', opt=False, units='W')
orbit_phase.add_control('Gamma', opt=True, lower=-90, upper=90, units='deg', ref0=-90, ref=90,
continuity=True, rate_continuity=True)
# Add a control interp comp to interpolate the rates of O_BI from the orbit phase.
faux_control_options = {'O_BI': {'units': None, 'shape': (3, 3)}}
p.model.add_subsystem('obi_rate_interp_comp',
ControlInterpComp(control_options=faux_control_options,
time_units='s',
grid_data=orbit_phase.grid_data),
promotes_outputs=[('control_rates:O_BI_rate', 'Odot_BI')])
control_input_nodes_idxs = orbit_phase.grid_data.subset_node_indices['control_input']
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3, 3))
p.model.connect('orbit_phase.rhs_all.O_BI', 'obi_rate_interp_comp.controls:O_BI',
src_indices=src_idxs, flat_src_indices=True)
p.model.connect('orbit_phase.time.dt_dstau',
('obi_rate_interp_comp.dt_dstau', 'w_B_rate_interp_comp.dt_dstau'))
# Use O_BI and Odot_BI to compute the angular velocity vector
p.model.add_subsystem('angular_velocity_comp',
AngularVelocityComp(num_nodes=orbit_phase.grid_data.num_nodes))
p.model.connect('orbit_phase.rhs_all.O_BI', 'angular_velocity_comp.O_BI')
p.model.connect('Odot_BI', 'angular_velocity_comp.Odot_BI')
# Add another interpolation comp to compute the rate of w_B
faux_control_options = {'w_B': {'units': '1/s', 'shape': (3,)}}
p.model.add_subsystem('w_B_rate_interp_comp',
ControlInterpComp(control_options=faux_control_options,
time_units='s',
grid_data=orbit_phase.grid_data),
promotes_outputs=[('control_rates:w_B_rate', 'wdot_B')])
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3,))
p.model.connect('angular_velocity_comp.w_B', 'w_B_rate_interp_comp.controls:w_B',
src_indices=src_idxs, flat_src_indices=True)
# Now add the systems phase
systems_phase = Phase('radau-ps',
ode_class=CadreSystemsODE,
num_segments=NUM_SEG,
transcription_order=TRANSCRIPTION_ORDER,
compressed=False)
p.model.add_subsystem('systems_phase', systems_phase)
systems_phase.set_time_options(fix_initial=True, fix_duration=True, duration_ref=duration)
systems_phase.set_state_options('SOC', defect_ref=10, lower=0.2, fix_initial=True, units=None)
systems_phase.set_state_options('w_RW', defect_ref=10000, fix_initial=True, units='1/s')
systems_phase.set_state_options('data', defect_ref=10, fix_initial=True, units='Gibyte')
systems_phase.set_state_options('temperature', ref0=273, ref=373, defect_ref=1000,
fix_initial=True, units='degK')
systems_phase.add_design_parameter('LD', opt=False, units='d')
systems_phase.add_design_parameter('fin_angle', opt=True, lower=0., upper=np.pi / 2.)
systems_phase.add_design_parameter('antAngle', opt=True, lower=-np.pi / 4, upper=np.pi / 4)
systems_phase.add_design_parameter('cellInstd', opt=True, lower=0.0, upper=1.0, ref=1.0)
# Add r_e2b_I and O_BI as non-optimized controls, allowing them to be connected to external sources
systems_phase.add_control('r_e2b_I', opt=False, units='km')
systems_phase.add_control('O_BI', opt=False)
systems_phase.add_control('w_B', opt=False)
systems_phase.add_control('wdot_B', opt=False)
systems_phase.add_control('P_comm', opt=True, lower=0.0, upper=30.0, units='W')
systems_phase.add_control('Isetpt', opt=True, lower=1.0E-4, upper=0.4, units='A')
systems_phase.add_objective('data', loc='final', ref=-1.0)
# Connect r_e2b_I and O_BI values from all nodes in the orbit phase to the input values
# in the attitude phase.
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3,))
p.model.connect('orbit_phase.states:r_e2b_I', 'systems_phase.controls:r_e2b_I',
src_indices=src_idxs, flat_src_indices=True)
p.model.connect('angular_velocity_comp.w_B', 'systems_phase.controls:w_B',
src_indices=src_idxs, flat_src_indices=True)
p.model.connect('wdot_B', 'systems_phase.controls:wdot_B',
src_indices=src_idxs, flat_src_indices=True)
src_idxs = get_src_indices_by_row(control_input_nodes_idxs, shape=(3, 3))
p.model.connect('orbit_phase.rhs_all.O_BI', 'systems_phase.controls:O_BI',
src_indices=src_idxs, flat_src_indices=True)
p.model.options['assembled_jac_type'] = 'csc'
p.model.linear_solver = DirectSolver(assemble_jac=True)
p.setup(check=True)
# from openmdao.api import view_model
# view_model(p.model)
# Initialize values in the orbit phase
p['orbit_phase.t_initial'] = 0.0
p['orbit_phase.t_duration'] = duration
# p['systems_phase.states:w_RW'][:, 0] = 0.0
# p['systems_phase.states:w_RW'][:, 1] = 0.0
# p['systems_phase.states:w_RW'][:, 2] = 0.0
# Default starting orbit
# [ 2.89078958e+03 5.69493134e+03 -2.55340189e+03 2.56640460e-01
# 3.00387409e+00 6.99018448e+00]
p['orbit_phase.states:r_e2b_I'][:, 0] = 2.89078958e+03
p['orbit_phase.states:r_e2b_I'][:, 1] = 5.69493134e+03
p['orbit_phase.states:r_e2b_I'][:, 2] = -2.55340189e+03
p['orbit_phase.states:v_e2b_I'][:, 0] = 2.56640460e-01
p['orbit_phase.states:v_e2b_I'][:, 1] = 3.00387409e+00
p['orbit_phase.states:v_e2b_I'][:, 2] = 6.99018448e+00
# Initialize values in the systems phase
p['systems_phase.t_initial'] = 0.0
p['systems_phase.t_duration'] = duration
# p['systems_phase.states:w_RW'][:, 0] = 0.0
# p['systems_phase.states:w_RW'][:, 1] = 0.0
# p['systems_phase.states:w_RW'][:, 2] = 0.0
p['systems_phase.states:SOC'] = systems_phase.interpolate(ys=[1, .5], nodes='state_input')
p['systems_phase.states:w_RW'] = 100.0
p['systems_phase.states:data'] = systems_phase.interpolate(ys=[0, 10], nodes='state_input')
p['systems_phase.states:temperature'] = 273.0
# p['systems_phase.states:v_e2b_I'][:, 0] = 0.0
# p['systems_phase.states:v_e2b_I'][:, 1] = vcirc
# p['systems_phase.states:v_e2b_I'][:, 2] = 0.0
p['systems_phase.controls:P_comm'] = 0.01
p['systems_phase.controls:Isetpt'] = 0.1
p['systems_phase.design_parameters:LD'] = 5233.5
p['systems_phase.design_parameters:fin_angle'] = np.radians(70.0)
p['systems_phase.design_parameters:cellInstd'] = 0.0
p.run_model()
# Simulate the orbit phase to get an (exact) guess for the orbit history solution.
exp_out = orbit_phase.simulate()
# import matplotlib.pyplot as plt
# from mpl_toolkits import mplot3d
#
# plt.figure()
# ax = plt.axes(projection='3d')
# # plt.plot(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], 'b-')
# ax.plot3D(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], exp_out.get_values('r_e2b_I')[:, 2], 'b-')
# plt.show()
p['orbit_phase.states:r_e2b_I'] = orbit_phase.interpolate(ys=exp_out.get_values('r_e2b_I'), xs=exp_out.get_values('time'), nodes='state_input')
p['orbit_phase.states:v_e2b_I'] = orbit_phase.interpolate(ys=exp_out.get_values('v_e2b_I'), xs=exp_out.get_values('time'), nodes='state_input')
p.run_driver()
r_e2b_I = p.model.orbit_phase.get_values('r_e2b_I')
v_e2b_I = p.model.orbit_phase.get_values('v_e2b_I')
rmag_e2b = p.model.orbit_phase.get_values('rmag_e2b_I')
# exp_out = systems_phase.simulate(times=500)
import matplotlib.pyplot as plt
plt.figure()
plt.plot(orbit_phase.get_values('r_e2b_I')[:, 0], orbit_phase.get_values('r_e2b_I')[:, 1], 'ro')
plt.figure()
# plt.plot(exp_out.get_values('time')[:, 0], exp_out.get_values('data')[:, 1], 'b-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('data'), 'ro')
plt.figure()
# plt.plot(exp_out.get_values('time')[:, 0], exp_out.get_values('data')[:, 1], 'b-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_comm'), 'r-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_sol'), 'b-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_RW'), 'g-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('P_bat'), 'k-')
plt.figure()
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('SOC'), 'r-')
plt.plot(systems_phase.get_values('time'), systems_phase.get_values('dXdt:SOC'), 'r--')
plt.show()
# plt.figure()
# plt.plot(exp_out.get_values('time'), exp_out.get_values('SOC'), 'b-')
# plt.plot(phase.get_values('time'), phase.get_values('SOC'), 'ro')
# assert_rel_error(self, rmag_e2b, rmag * np.ones_like(rmag_e2b), tolerance=1.0E-9)
# delta_trua = 2 * np.pi * (duration / period)
# assert_rel_error(self, r_e2b_I[-1, :],
# rmag * np.array([np.cos(delta_trua), np.sin(delta_trua), 0]),
# tolerance=1.0E-9)
# assert_rel_error(self, v_e2b_I[-1, :],
# vcirc * np.array([-np.sin(delta_trua), np.cos(delta_trua), 0]),
# tolerance=1.0E-9)
# def test_partials(self):
# np.set_printoptions(linewidth=10000, edgeitems=1024)
# cpd = self.p.check_partials(compact_print=True, out_stream=None)
# assert_check_partials(cpd, atol=1.0E-4, rtol=1.0)
#
# def test_simulate(self):
# phase = self.p.model.orbit_phase
# exp_out = phase.simulate(times=500)
#
# import matplotlib.pyplot as plt
#
# plt.figure()
# plt.plot(exp_out.get_values('r_e2b_I')[:, 0], exp_out.get_values('r_e2b_I')[:, 1], 'b-')
# plt.plot(phase.get_values('r_e2b_I')[:, 0], phase.get_values('r_e2b_I')[:, 1], 'ro')
#
# # plt.figure()
# # plt.plot(exp_out.get_values('time'), exp_out.get_values('SOC'), 'b-')
# # plt.plot(phase.get_values('time'), phase.get_values('SOC'), 'ro')
#
# plt.show()
avg_line_length: 42.812274 | max_line_length: 143 | alphanum_fraction: 0.70436

hexsha: 27a730a5c6d3019f232b6aef55d357908663ff70 | size: 959 | ext: py | lang: Python
max_stars: deso/Media.py | AdityaChaudhary0005/DeSo.py | 5cb3c757fb21bad472da921c0148675c8957eb17 | ["MIT"] | 11 | 2021-11-12T18:20:22.000Z | 2022-03-16T02:12:06.000Z
max_issues: deso/Media.py | AdityaChaudhary0005/DeSo.py | 5cb3c757fb21bad472da921c0148675c8957eb17 | ["MIT"] | 6 | 2021-11-25T04:30:44.000Z | 2021-12-15T12:33:24.000Z
max_forks: deso/Media.py | AdityaChaudhary0005/DeSo.py | 5cb3c757fb21bad472da921c0148675c8957eb17 | ["MIT"] | 8 | 2021-11-19T19:14:50.000Z | 2022-01-31T21:27:32.000Z
from deso.utils import getUserJWT
import requests
avg_line_length: 31.966667 | max_line_length: 95 | alphanum_fraction: 0.554745

hexsha: 27a8998af1db32b395a9af2dbb6c8a21bc35a70c | size: 169 | ext: py | lang: Python
max_stars: workSpace/boot.py | khutson/macequilt | a4a090ddf296fcea763825fda4243bc84b4d5f0d | ["MIT"] | null | null | null
max_issues: workSpace/boot.py | khutson/macequilt | a4a090ddf296fcea763825fda4243bc84b4d5f0d | ["MIT"] | null | null | null
max_forks: workSpace/boot.py | khutson/macequilt | a4a090ddf296fcea763825fda4243bc84b4d5f0d | ["MIT"] | null | null | null
# This file is executed on every boot (including wake-boot from deepsleep)
import esp
esp.osdebug(None)
import wifi
wifi.connect(repl=False)
import gc
gc.collect()
avg_line_length: 13 | max_line_length: 74 | alphanum_fraction: 0.763314

hexsha: 27a8cc8eee02c003f65618c441f8c80b6ada0052 | size: 1,790 | ext: py | lang: Python
max_stars: s3-scan-tar/tests/test_models.py | omBratteng/mottak | b7d2e1d063b31c2ad89c66e5414297612f91ebe9 | ["Apache-2.0"] | 4 | 2021-03-05T15:39:24.000Z | 2021-09-15T06:11:45.000Z
max_issues: s3-scan-tar/tests/test_models.py | omBratteng/mottak | b7d2e1d063b31c2ad89c66e5414297612f91ebe9 | ["Apache-2.0"] | 631 | 2020-04-27T10:39:18.000Z | 2022-03-31T14:51:38.000Z
max_forks: s3-scan-tar/tests/test_models.py | omBratteng/mottak | b7d2e1d063b31c2ad89c66e5414297612f91ebe9 | ["Apache-2.0"] | 3 | 2020-02-20T15:48:03.000Z | 2021-12-16T22:50:40.000Z
import pytest
from app.models import AVScanResult
avg_line_length: 29.833333 | max_line_length: 62 | alphanum_fraction: 0.655307

hexsha: 27a97aed4e6639ade2261db847e3a6e16989a40c | size: 1,424 | ext: py | lang: Python
max_stars: autoload/activate_this.py | BonaBeavis/vim-venom | a4ed892bd844de51c92e7b59dbc975db02c939b9 | ["Vim"] | 24 | 2020-04-26T11:50:40.000Z | 2022-02-22T08:05:36.000Z
max_issues: autoload/activate_this.py | BonaBeavis/vim-venom | a4ed892bd844de51c92e7b59dbc975db02c939b9 | ["Vim"] | 5 | 2021-01-26T12:41:12.000Z | 2022-01-11T15:40:43.000Z
max_forks: autoload/activate_this.py | BonaBeavis/vim-venom | a4ed892bd844de51c92e7b59dbc975db02c939b9 | ["Vim"] | 4 | 2020-05-02T21:45:36.000Z | 2022-03-25T13:51:00.000Z
# -*- coding: utf-8 -*-
"""Activate virtualenv for current interpreter:
Source: https://github.com/pypa/virtualenv
Use exec(open(this_file).read(), {'__file__': this_file}).
"""
import os
import site
import sys
try:
abs_file = os.path.abspath(__file__)
except NameError:
raise AssertionError(
"You must use exec(open(this_file).read(), {'__file__': this_file}))")
# Prepend bin to PATH (this file is inside the bin directory)
bin_dir = os.path.dirname(abs_file)
os.environ["PATH"] = os.pathsep.join(
[bin_dir] + os.environ.get("PATH", "").split(os.pathsep))
# Virtual env is right above bin directory
base = os.path.dirname(bin_dir)
os.environ["VIRTUAL_ENV"] = base
# Concat site-packages library path
IS_WIN = sys.platform == "win32"
IS_PYPY = hasattr(sys, "pypy_version_info")
IS_JYTHON = sys.platform.startswith("java")
if IS_JYTHON or IS_WIN:
site_packages = os.path.join(base, "Lib", "site-packages")
elif IS_PYPY:
site_packages = os.path.join(base, "site-packages")
else:
python_lib = "python{}.{}".format(*sys.version_info)
site_packages = os.path.join(base, "lib", python_lib, "site-packages")
# Add the virtual environment libraries to the host python import mechanism
prev_length = len(sys.path)
site.addsitedir(site_packages)
sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]
sys.real_prefix = sys.prefix
sys.prefix = base
# vim: set ts=4 sw=4 tw=80 et :
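# Usage sketch, following the module docstring above (the venv path is a placeholder):
activator = '/path/to/venv/bin/activate_this.py'
exec(open(activator).read(), {'__file__': activator})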
avg_line_length: 30.297872 | max_line_length: 78 | alphanum_fraction: 0.714185

hexsha: 27abc06bb50512111945d911b3687183e05cd80c | size: 2,731 | ext: py | lang: Python
max_stars: tattrdb/models.py | gmjosack/tattrdb | 88d46eb049d05a1f0531531c49c2209c2bbbf562 | ["MIT"] | 1 | 2018-11-24T02:33:15.000Z | 2018-11-24T02:33:15.000Z
max_issues: tattrdb/models.py | gmjosack/tattrdb | 88d46eb049d05a1f0531531c49c2209c2bbbf562 | ["MIT"] | null | null | null
max_forks: tattrdb/models.py | gmjosack/tattrdb | 88d46eb049d05a1f0531531c49c2209c2bbbf562 | ["MIT"] | null | null | null
from sqlalchemy import create_engine
from sqlalchemy import (
Table, Column, Integer, String, Text, Boolean,
ForeignKey, Enum, DateTime
)
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship, backref, sessionmaker
Session = sessionmaker()
Model = declarative_base()
def _sync(connection):
""" This will build the database for whatever connection you pass."""
Model.metadata.create_all(connection.bind)
host_tags = Table("host_tags", Model.metadata,
Column("host_id", Integer, ForeignKey("hosts.id"), primary_key=True),
Column("tag_id", Integer, ForeignKey("tags.id"), primary_key=True)
)
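# Usage sketch (not part of models.py): bind the Session to an engine and build the schema
# with _sync against an in-memory SQLite database.
engine = create_engine('sqlite://')
Session.configure(bind=engine)
session = Session()
_sync(session.connection())   # Connection.bind is the engine, so this creates all tables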
avg_line_length: 28.154639 | max_line_length: 103 | alphanum_fraction: 0.656902

hexsha: 27abe035638fda37c09ec1990dca44e2161d8667 | size: 30 | ext: py | lang: Python
max_stars: onepassword/__init__.py | jemmyw/1pass | 8dbfa5e062ce08e26c5619dbdb2b27323e5b3dc9 | ["MIT"] | 1 | 2016-11-14T22:16:48.000Z | 2016-11-14T22:16:48.000Z
max_issues: onepassword/__init__.py | elliotchance/1pass | 4bd45a52476c410c6e5b51f90fd46cbdd436807f | ["MIT"] | null | null | null
max_forks: onepassword/__init__.py | elliotchance/1pass | 4bd45a52476c410c6e5b51f90fd46cbdd436807f | ["MIT"] | null | null | null
from keychain import Keychain
avg_line_length: 15 | max_line_length: 29 | alphanum_fraction: 0.866667

hexsha: 27ae7ed160d61ff6977fb0ea0dc61ee80279d33b | size: 152,955 | ext: py | lang: Python
max_stars: modules/cockatoo/_knitnetwork.py | fstwn/Cockatoo | 0c5f9c515053bfc31e62d20fddc4ae9bece09d88 | ["MIT"] | 9 | 2020-09-26T03:41:21.000Z | 2021-11-29T06:52:35.000Z
max_issues: modules/cockatoo/_knitnetwork.py | fstwn/Cockatoo | 0c5f9c515053bfc31e62d20fddc4ae9bece09d88 | ["MIT"] | 9 | 2020-08-10T19:38:03.000Z | 2022-02-24T08:41:32.000Z
max_forks: modules/cockatoo/_knitnetwork.py | fstwn/Cockatoo | 0c5f9c515053bfc31e62d20fddc4ae9bece09d88 | ["MIT"] | 3 | 2020-12-26T08:43:56.000Z | 2021-10-17T19:37:52.000Z
# PYTHON STANDARD LIBRARY IMPORTS ---------------------------------------------
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import deque
from collections import OrderedDict
from math import radians
from math import pi
from operator import itemgetter
# DUNDER ----------------------------------------------------------------------
__all__ = [
"KnitNetwork"
]
# THIRD PARTY MODULE IMPORTS --------------------------------------------------
import networkx as nx
# LOCAL MODULE IMPORTS --------------------------------------------------------
from cockatoo._knitnetworkbase import KnitNetworkBase
from cockatoo._knitmappingnetwork import KnitMappingNetwork
from cockatoo._knitdinetwork import KnitDiNetwork
from cockatoo.environment import RHINOINSIDE
from cockatoo.exception import KnitNetworkError
from cockatoo.exception import KnitNetworkGeometryError
from cockatoo.exception import NoEndNodesError
from cockatoo.exception import NoWeftEdgesError
from cockatoo.exception import MappingNetworkError
from cockatoo.utilities import pairwise
# RHINO IMPORTS ---------------------------------------------------------------
if RHINOINSIDE:
import rhinoinside
rhinoinside.load()
from Rhino.Geometry import Brep as RhinoBrep
from Rhino.Geometry import Curve as RhinoCurve
from Rhino.Geometry import Line as RhinoLine
from Rhino.Geometry import Interval as RhinoInterval
from Rhino.Geometry import Mesh as RhinoMesh
from Rhino.Geometry import NurbsSurface as RhinoNurbsSurface
from Rhino.Geometry import Point3d as RhinoPoint3d
from Rhino.Geometry import Polyline as RhinoPolyline
from Rhino.Geometry import Surface as RhinoSurface
from Rhino.Geometry import Vector3d as RhinoVector3d
else:
from Rhino.Geometry import Brep as RhinoBrep
from Rhino.Geometry import Curve as RhinoCurve
from Rhino.Geometry import Line as RhinoLine
from Rhino.Geometry import Interval as RhinoInterval
from Rhino.Geometry import Mesh as RhinoMesh
from Rhino.Geometry import NurbsSurface as RhinoNurbsSurface
from Rhino.Geometry import Point3d as RhinoPoint3d
from Rhino.Geometry import Polyline as RhinoPolyline
from Rhino.Geometry import Surface as RhinoSurface
from Rhino.Geometry import Vector3d as RhinoVector3d
# CLASS DECLARATION -----------------------------------------------------------
def ToString(self):
"""
Return a textual description of the network.
Returns
-------
description : str
A textual description of the network.
Notes
-----
Used for overloading the Grasshopper display in data parameters.
"""
return repr(self)
# INITIALIZATION OF POSITION CONTOUR EDGES --------------------------------
def initialize_position_contour_edges(self):
"""
Creates all initial position contour edges as neither 'warp' nor 'weft'
by iterating over all nodes in the network and grouping them based on
their 'position' attribute.
Notes
-----
This method is automatically called when creating a KnitNetwork using
the create_from_contours method!
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# get all nodes by position
posList = self.all_nodes_by_position(data=True)
for i, pos in enumerate(posList):
for j, node in enumerate(pos):
k = j + 1
if k < len(pos):
self.create_contour_edge(node, pos[k])
# INITIALIZATION OF 'WEFT' EDGES BETWEEN 'LEAF' NODES ---------------------
def initialize_leaf_connections(self):
"""
Create all initial connections of the 'leaf' nodes by iterating over
all position contours and creating 'weft' edges between the 'leaf'
nodes of the position contours.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# get all leaves
leafNodes = self.all_leaves_by_position(True)
# loop through all the positions leaves
for i, lpos in enumerate(leafNodes):
j = i + 1
# loop through pairs of leaves
if j < len(leafNodes):
startLeaf = lpos[0]
endLeaf = lpos[1]
nextStart = leafNodes[j][0]
nextEnd = leafNodes[j][1]
# add edges to the network
self.create_weft_edge(startLeaf, nextStart)
self.create_weft_edge(endLeaf, nextEnd)
# INITIALIZATION OF PRELIMINARY 'WEFT' EDGES ------------------------------
def attempt_weft_connection(self, node, candidate, source_nodes,
max_connections=4, verbose=False):
"""
Method for attempting a 'weft' connection to a candidate
node based on certain parameters.
Parameters
----------
node : :obj:`tuple`
2-tuple representing the source node for the possible 'weft' edge.
candidate : :obj:`tuple`
2-tuple representing the target node for the possible 'weft' edge.
source_nodes : :obj:`list`
List of nodes on the position contour of node. Used to check if
the candidate node already has a connection.
max_connections : int, optional
The new 'weft' connection will only be made if the candidate nodes
number of connected neighbors is below this.
Defaults to ``4``.
verbose : bool, optional
If ``True``, this routine and all its subroutines will print
messages about what is happening to the console.
Defaults to ``False``.
Returns
-------
bool
``True`` if the connection has been made,
``False`` otherwise.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
# get connected neighbors
connecting_neighbors = self[candidate[0]]
# only do something if the maximum is not reached
if len(connecting_neighbors) < max_connections:
# determine if the node is already connected to a node from
# the input source nodes
isConnected = False
for cn in connecting_neighbors:
if cn in [v[0] for v in source_nodes]:
isConnected = True
# print info on verbose setting
v_print("Candidate node {} is ".format(candidate[0]) +
"already connected! " +
"Skipping to next " +
"node...")
break
# check the flag and act accordingly
if not isConnected:
# print info on verbose setting
v_print("Connecting node {} to best ".format(node[0]) +
"candidate {}.".format(candidate[0]))
# if all conditions are met, make the 'weft' connection
if node[1]["position"] < candidate[1]["position"]:
self.create_weft_edge(node, candidate)
else:
self.create_weft_edge(candidate, node)
return True
else:
return False
else:
return False
def _create_initial_weft_connections(self,
contour_set,
force_continuous_start=False,
force_continuous_end=False,
max_connections=4,
precise=False,
verbose=False):
"""
Private method for creating initial 'weft' connections for the supplied
set of contours, starting from the first contour in the set and
propagating to the last contour in the set.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
if len(contour_set) < 2:
v_print("Not enough contours in contour set!")
return
# print info on verbose output
v_print("Creating initial 'weft' connections for contour set...")
# loop over all nodes of positions (list of lists of tuples)
for i, pos in enumerate(contour_set):
# pos is a list of tuples (nodes)
if i < len(contour_set):
j = i + 1
if j == len(contour_set):
break
# get initial and target nodes without 'leaf' nodes
initial_nodes = contour_set[i][1:-1]
target_nodes = contour_set[j][1:-1]
# options for continuous start and end
if force_continuous_start:
initial_nodes = initial_nodes[1:]
target_nodes = target_nodes[1:]
if force_continuous_end:
initial_nodes = initial_nodes[:-1]
target_nodes = target_nodes[:-1]
# skip if one of the contours has no nodes
if len(initial_nodes) == 0 or len(target_nodes) == 0:
continue
# define forbidden node index
forbidden_node = -1
# loop through all nodes on the current position
for k, node in enumerate(initial_nodes):
# print info on verbose setting
v_print("Processing node {} on position {}:".format(
node[0], node[1]["position"]))
# get the geometry for the current node
thisPt = node[1]["geo"]
# filtering according to forbidden nodes
target_nodes = [tn for tn in target_nodes
if tn[0] >= forbidden_node]
if len(target_nodes) == 0:
continue
# get four closest nodes on adjacent contour
if precise:
allDists = [thisPt.DistanceTo(tv[1]["geo"])
for tv in target_nodes]
else:
allDists = [thisPt.DistanceToSquared(tv[1]["geo"])
for tv in target_nodes]
# sort the target nodes by distance to current node
allDists, sorted_target_nodes = zip(
*sorted(zip(allDists,
target_nodes),
key=itemgetter(0)))
# the four closest nodes are the possible connections
possible_connections = sorted_target_nodes[:4]
# print info on verbose setting
v_print("Possible connections: {}".format(
[pc[0] for pc in possible_connections]))
# handle edge case where there is no possible
# connection or just one
if len(possible_connections) == 0:
# skip if there are no possible connections
continue
elif len(possible_connections) == 1:
# attempt to connect to only possible candidate
fCand = possible_connections[0]
res = self.attempt_weft_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node
if res:
forbidden_node = fCand[0]
continue
# get the contours current direction
if k < len(initial_nodes)-1:
contourDir = RhinoLine(
thisPt,
initial_nodes[k+1][1]["geo"]).Direction
elif k == len(initial_nodes)-1:
contourDir = RhinoLine(
initial_nodes[k-1][1]["geo"], thisPt).Direction
contourDir.Unitize()
# get the directions of the possible connections
candidatePoints = [pc[1]["geo"]
for pc in possible_connections]
candidateDirections = [RhinoLine(
thisPt, cp).Direction for cp in candidatePoints]
[cd.Unitize() for cd in candidateDirections]
# get the angles between contour dir and possible conn dir
normals = [RhinoVector3d.CrossProduct(
contourDir, cd) for cd in candidateDirections]
angles = [RhinoVector3d.VectorAngle(
contourDir, cd, n) for cd, n in zip(
candidateDirections, normals)]
# compute deltas as a measure of perpendicularity
deltas = [abs(a - (0.5 * pi)) for a in angles]
# sort possible connections by distance, then by delta
allDists, deltas, angles, most_perpendicular = zip(
*sorted(zip(
allDists,
deltas,
angles,
possible_connections[:]),
key=itemgetter(0, 1)))
# get node neighbors
nNeighbors = self[node[0]]
# compute angle difference
aDelta = angles[0] - angles[1]
# CONNECTION FOR LEAST ANGLE CHANGE -----------------------
if len(nNeighbors) > 2 and aDelta < radians(6.0):
# print info on verbose setting
v_print("Using procedure for least angle " +
"change connection...")
# get previous connected edge and its direction
prevEdges = self.node_weft_edges(node[0], data=True)
if len(prevEdges) > 1:
raise KnitNetworkError(
"More than one previous 'weft' connection! " +
"This was unexpeced...")
prevDir = prevEdges[0][2]["geo"].Direction
else:
prevDir = prevEdges[0][2]["geo"].Direction
prevDir.Unitize()
# get directions for the best two candidates
mpA = most_perpendicular[0]
mpB = most_perpendicular[1]
dirA = RhinoLine(thisPt, mpA[1]["geo"]).Direction
dirB = RhinoLine(thisPt, mpB[1]["geo"]).Direction
dirA.Unitize()
dirB.Unitize()
# get normals for angle measurement
normalA = RhinoVector3d.CrossProduct(prevDir, dirA)
normalB = RhinoVector3d.CrossProduct(prevDir, dirB)
# measure the angles
angleA = RhinoVector3d.VectorAngle(
prevDir,
dirA,
normalA)
angleB = RhinoVector3d.VectorAngle(
prevDir,
dirB,
normalB)
# select final candidate for connection by angle
if angleA < angleB:
fCand = mpA
else:
fCand = mpB
# attempt to connect to final candidate
res = self.attempt_weft_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node for next pass
if res:
forbidden_node = fCand[0]
# CONNECTION FOR MOST PERPENDICULAR --------------------
else:
# print info on verbose setting
v_print("Using procedure for most " +
"perpendicular connection...")
# define final candidate
fCand = most_perpendicular[0]
# attempt to connect to final candidate node
res = self.attempt_weft_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node if connection has been made
if res:
forbidden_node = fCand[0]
def _create_second_pass_weft_connections(self,
contour_set,
include_leaves=False,
least_connected=False,
precise=False,
verbose=False):
"""
Private method for creating second pass 'weft' connections for the
given set of contours.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
v_print = print if verbose else lambda *a, **k: None
# get attributes only once
position_attributes = nx.get_node_attributes(self, "position")
num_attributes = nx.get_node_attributes(self, "num")
if len(contour_set) < 2:
v_print("Not enough contours in contour set!")
return
# print info on verbose output
v_print("Creating second pass 'weft' connections for contour set...")
# loop over all nodes of positions (list of lists of tuples)
for i, pos in enumerate(contour_set):
# get initial nodes
initial_nodes = contour_set[i]
# get target position candidates
if (i > 0 and i < len(contour_set)-1 and
i != 0 and i != len(contour_set)-1):
target_positionA = contour_set[i-1][0][1]["position"]
target_positionB = contour_set[i+1][0][1]["position"]
elif i == 0:
target_positionA = None
target_positionB = contour_set[i+1][0][1]["position"]
elif i == len(contour_set)-1:
target_positionA = contour_set[i-1][0][1]["position"]
target_positionB = None
# loop through all nodes on current position
for k, node in enumerate(initial_nodes):
# print info on verbose setting
v_print(
"Processing node {} on position {}:".format(
node[0], node[1]["position"]))
# get connecting edges on target position
conWeftEdges = self.node_weft_edges(node[0], data=True)
conPos = []
if len(conWeftEdges) == 0 and verbose:
# print info on verbose setting
v_print("No previously connected weft edges...")
for weftEdge in conWeftEdges:
weftEdgeFrom = weftEdge[0]
weftEdgeTo = weftEdge[1]
if weftEdgeFrom != node[0]:
posEdgeTarget = position_attributes[weftEdgeFrom]
elif weftEdgeTo != node[0]:
posEdgeTarget = position_attributes[weftEdgeTo]
if posEdgeTarget not in conPos:
conPos.append(posEdgeTarget)
# select target position and continue in edge case scenarios
target_positions = []
if target_positionA == None:
if target_positionB in conPos:
v_print("Node is connected. Skipping...")
continue
target_positions.append(target_positionB)
elif target_positionB == None:
if target_positionA in conPos:
v_print("Node is connected. Skipping...")
continue
target_positions.append(target_positionA)
elif ((target_positionA in conPos) and
(target_positionB in conPos)):
v_print("Node is connected. Skipping...")
continue
elif ((target_positionB in conPos) and
(target_positionA not in conPos)):
target_positions.append(target_positionA)
elif ((target_positionA in conPos) and
(target_positionB not in conPos)):
target_positions.append(target_positionB)
elif (target_positionA != None and
target_positionB != None and len(conPos) == 0):
target_positions = [target_positionA, target_positionB]
# print info on verbose setting
if verbose and len(target_positions) > 1:
v_print("Two target positions: {}, {}".format(
*target_positions))
elif verbose and len(target_positions) == 1:
v_print("Target position: {}".format(target_positions[0]))
# skip if there are no target positions
if len(target_positions) == 0:
v_print("No target position! Skipping...")
continue
# only proceed if there is a target position
for target_position in target_positions:
# get target nodes
target_nodes = self.nodes_on_position(
target_position, True)
# get the point geo of this node
thisPt = node[1]["geo"]
# get a window of possible connections on the target
# position by looking for the previous node on this contour
# connected to target position, then propagating along
# the target position to the next node that is connected
# to this position. these two nodes will define the window
# NOTE: the current node should never have a connection
# to target position (theoretically!), otherwise it should
# have fallen through the checks by now
# print info on verbose setting
v_print("Target position is {}. ".format(target_position) +
"Computing window...")
# get the previous node on this contour
prevNode = initial_nodes[k-1]
# assume that the previous node has a connection
prevCon = self.node_weft_edges(prevNode[0], data=True)
# get possible connections from previous connection
possible_connections = []
for edge in prevCon:
edgeFrom = edge[0]
edgeTo = edge[1]
if edgeFrom != prevNode[0]:
prevNodeTargetPos = position_attributes[edgeFrom]
prevNodeTargetIndex = num_attributes[edgeFrom]
elif edgeTo != prevNode[0]:
prevNodeTargetPos = position_attributes[edgeTo]
prevNodeTargetIndex = num_attributes[edgeTo]
if prevNodeTargetPos == target_position:
possible_connections.append(
target_nodes[prevNodeTargetIndex])
# the farthest connection of the previous node is the first
# point for our window
if len(possible_connections) > 1:
possible_connections.sort(key=lambda x: x[1]["num"])
possible_connections.reverse()
start_of_window = possible_connections[0]
elif len(possible_connections) == 1:
start_of_window = possible_connections[0]
elif len(possible_connections) == 0:
# print info on verbose setting
v_print("No possible connection, skipping...")
continue
# get the next node on this pos that is
# connected to target position
if k < len(initial_nodes)-1:
future_nodes = initial_nodes[k+1:]
for futurenode in future_nodes:
filteredWeftEdges = []
futureWeftEdges = self.node_weft_edges(
futurenode[0], data=True)
for futureweft in futureWeftEdges:
fwn = (futureweft[1], self.node[futureweft[1]])
fwn_pos = fwn[1]["position"]
fwn_num = fwn[1]["num"]
if (fwn_pos == target_position and
fwn_num == start_of_window[1]["num"]):
# if the start of the window is found,
# it is the only possible connection
filteredWeftEdges = [futureweft]
break
if (fwn_pos == target_position and
fwn_num > start_of_window[1]["num"]):
filteredWeftEdges.append(futureweft)
else:
continue
if (not filteredWeftEdges or
len(filteredWeftEdges) == 0):
end_of_window = None
continue
# sort the filtered weft edges based on the 'num'
# attribute of their target node
filteredWeftEdges.sort(
key=lambda x: self.node[x[1]]["num"])
# get the end of the window from the first edge on
# the target position
end_of_window = (
filteredWeftEdges[0][1],
self.node[filteredWeftEdges[0][1]])
break
else:
end_of_window = None
# define the window
if end_of_window == None:
window = [start_of_window]
elif end_of_window == start_of_window:
window = [start_of_window]
else:
window = [(n, d) for n, d
in self.nodes_iter(data=True)
if n >= start_of_window[0]
and n <= end_of_window[0]]
if len(window) == 0:
# print info on verbose setting
v_print("Length of window is 0, skipping...")
elif len(window) == 1:
# print info on verbose setting
v_print("Window has only one node.")
v_print("Connecting to node {}".format(window[0][0]) +
" on position {}...".format(
window[0][1]["position"]))
# connect weft edge
if node[1]["position"] < window[0][1]["position"]:
self.create_weft_edge(node, window[0])
else:
self.create_weft_edge(window[0], node)
else:
# print info on verbose setting
v_print("Processing window nodes: {}".format(
[w[0] for w in window]))
# sort nodes in window by distance
if precise:
allDists = [thisPt.DistanceTo(pc[1]["geo"])
for pc in window]
else:
allDists = [thisPt.DistanceToSquared(pc[1]["geo"])
for pc in window]
allDists, window = zip(*sorted(zip(allDists, window),
key=itemgetter(0)))
if least_connected:
wn_count = [len(self[n[0]]) for n in window]
wn_count, allDists, window = zip(
*sorted(zip(allDists, wn_count, window),
key=itemgetter(0, 1)))
# set final candidate node
fCand = window[0]
else:
# get the contours current direction
if k < len(initial_nodes)-1:
contourDir = RhinoLine(
thisPt,
initial_nodes[k+1][1]["geo"]).Direction
elif k == len(initial_nodes)-1:
contourDir = RhinoLine(
initial_nodes[k-1][1]["geo"],
thisPt).Direction
contourDir.Unitize()
# get the directions of the possible connections
candidatePoints = [pc[1]["geo"]
for pc in window]
candidateDirections = [
RhinoLine(thisPt, cp).Direction
for cp in candidatePoints]
[cd.Unitize() for cd in candidateDirections]
# get the angles between contour dir and window dir
normals = [RhinoVector3d.CrossProduct(
contourDir, cd)
for cd in candidateDirections]
angles = [RhinoVector3d.VectorAngle(
contourDir, cd, n) for cd, n in zip(
candidateDirections, normals)]
# compute deltas as a measure of perpendicularity
deltas = [abs(a - (0.5 * pi)) for a in angles]
# sort window by distance, then by delta
allDists, deltas, most_perpendicular = zip(*sorted(
zip(allDists,
deltas,
window),
key=itemgetter(0, 1)))
# set final candidate node for connection
fCand = most_perpendicular[0]
# print info on verbose setting
v_print("Connecting to node " +
"{} on position {}...".format(
fCand[0],
fCand[1]["position"]))
# connect weft edge to best target
if node[1]["position"] < fCand[1]["position"]:
self.create_weft_edge(node, fCand)
else:
self.create_weft_edge(fCand, node)
def initialize_weft_edges(self,
start_index=None,
propagate_from_center=False,
force_continuous_start=False,
force_continuous_end=False,
angle_threshold=radians(6.0),
max_connections=4,
least_connected=False,
precise=False,
verbose=False):
"""
Attempts to create all the preliminary 'weft' connections for the
network.
Parameters
----------
start_index : int, optional
This value defines at which index the list of contours is split.
If no index is supplied, will split the list at the longest
contour.
Defaults to ``None``.
propagate_from_center : bool, optional
If ``True``, will propagate left and right set of contours from
the center contour defined by start_index or the longest contour
( < | > ). Otherwise, the propagation of the contours left to the
center will start at the left boundary ( > | > ).
Defaults to ``False``
force_continuous_start : bool, optional
If ``True``, forces the first row of stitches to be continuous.
Defaults to ``False``.
force_continuous_end : bool, optional
If ``True``, forces the last row of stitches to be continuous.
Defaults to ``False``.
max_connections : int, optional
The maximum connections a node is allowed to have to be considered
for an additional 'weft' connection.
Defaults to ``4``.
least_connected : bool, optional
If ``True``, uses the least connected node from the found
candidates.
Defaults to ``False``
precise : bool, optional
If ``True``, the distance between nodes will be calculated using
the Rhino.Geometry.Point3d.DistanceTo method, otherwise the much
faster Rhino.Geometry.Point3d.DistanceToSquared method is used.
Defaults to ``False``.
verbose : bool, optional
If ``True``, this routine and all its subroutines will print
messages about what is happening to the console. Great for
debugging and analysis.
Defaults to ``False``.
Raises
------
KnitNetworkError
If the supplied splitting index is too high.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# get all the positions / contours
AllPositions = self.all_nodes_by_position(data=True)
if start_index == None:
# get index of longest contour
start_index = self.longest_position_contour()[0]
elif start_index >= len(AllPositions):
raise KnitNetworkError("Supplied splitting index is too high!")
# if continuous start is True, connect the whole first row
if force_continuous_start:
chain = [pos[1] for pos in AllPositions]
for pair in pairwise(chain):
self.create_weft_edge(pair[0], pair[1])
# if continuous end is True, connect the whole last row
if force_continuous_end:
chain = [pos[-2] for pos in AllPositions]
for pair in pairwise(chain):
self.create_weft_edge(pair[0], pair[1])
# split position list into two sets based on start index
leftContours = AllPositions[0:start_index+1]
# optional propagation from center
# NOTE: this has shown problems / weird stitch geometries
if propagate_from_center:
leftContours.reverse()
rightContours = AllPositions[start_index:]
# create the initial weft connections
self._create_initial_weft_connections(
leftContours,
force_continuous_start=force_continuous_start,
force_continuous_end=force_continuous_end,
max_connections=max_connections,
precise=precise,
verbose=verbose)
self._create_initial_weft_connections(
rightContours,
force_continuous_start=force_continuous_start,
force_continuous_end=force_continuous_end,
max_connections=max_connections,
precise=precise,
verbose=verbose)
# create second pass weft connections
self._create_second_pass_weft_connections(
leftContours,
least_connected=least_connected,
precise=precise,
verbose=verbose)
self._create_second_pass_weft_connections(
rightContours,
least_connected=least_connected,
precise=precise,
verbose=verbose)
return True
# INITIALIZATION OF PRELIMINARY 'WARP' EDGES ------------------------------
def initialize_warp_edges(self, contour_set=None, verbose=False):
"""
Method for initializing first 'warp' connections once all preliminary
'weft' connections are made.
Parameters
----------
contour_set : :obj:`list`, optional
List of lists of nodes to initialize 'warp' edges. If none are
supplied, all nodes ordered by their 'position' attributes are
used.
Defaults to ``None``.
verbose : bool, optional
If ``True``, will print verbose output to the console.
Defaults to ``False``.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# if no contour set is provided, use all contours of this network
if contour_set == None:
contour_set = self.all_nodes_by_position(data=True)
# loop through all positions in the set of contours
for i, pos in enumerate(contour_set):
# get all nodes on current contour
initial_nodes = contour_set[i]
# loop through all nodes on this contour
for k, node in enumerate(initial_nodes):
connected_edges = self.edges(node[0], data=True)
numweft = len(self.node_weft_edges(node[0]))
if (len(connected_edges) > 4 or numweft > 2
or i == 0 or i == len(contour_set)-1):
# set 'end' attribute for this node
self.node[node[0]]["end"] = True
# loop through all candidate edges
for j, edge in enumerate(connected_edges):
# if it's not a 'weft' edge, assign attributes
if not edge[2]["weft"]:
connected_node = edge[1]
# set 'end' attribute on the connected node
self.node[connected_node]["end"] = True
# set 'warp' attribute to current edge
self[edge[0]][edge[1]]["warp"] = True
# ASSIGNING OF 'SEGMENT' ATTRIBUTES FOR MAPPING NETWORK -------------------
def _traverse_weft_edge_until_end(self, start_end_node, start_node,
seen_segments, way_nodes=None,
way_edges=None, end_nodes=None):
"""
Private method for traversing a path of 'weft' edges until another
'end' node is discovered.
"""
# initialize output lists
if way_nodes == None:
way_nodes = deque()
way_nodes.append(start_node[0])
if way_edges == None:
way_edges = deque()
if end_nodes == None:
end_nodes = deque()
# get the connected edges and filter them, sort out the ones that
# already have a 'segment' attribute assigned
connected_weft_edges = self.node_weft_edges(start_node[0], data=True)
filtered_weft_edges = []
for cwe in connected_weft_edges:
if cwe[2]["segment"] != None:
continue
if cwe in way_edges:
continue
elif (cwe[1], cwe[0], cwe[2]) in way_edges:
continue
filtered_weft_edges.append(cwe)
if len(filtered_weft_edges) > 1:
print(filtered_weft_edges)
print("More than one filtered candidate weft edge! " +
"Segment complete...?")
elif len(filtered_weft_edges) == 1:
fwec = filtered_weft_edges[0]
connected_node = (fwec[1], self.node[fwec[1]])
# if the connected node is an end node, the segment is finished
if connected_node[1]["end"]:
# find out which order to set segment attributes
if start_end_node > connected_node[0]:
segStart = connected_node[0]
segEnd = start_end_node
else:
segStart = start_end_node
segEnd = connected_node[0]
if (segStart, segEnd) in seen_segments:
segIndex = len([s for s in seen_segments
if s == (segStart, segEnd)])
else:
segIndex = 0
# append the relevant data to the lists
end_nodes.append(connected_node[0])
way_edges.append(fwec)
seen_segments.append((segStart, segEnd))
# set final 'segment' attributes to all the way nodes
for waynode in way_nodes:
self.node[waynode]["segment"] = (segStart,
segEnd,
segIndex)
# set final 'segment' attributes to all the way edges
for wayedge in way_edges:
self[wayedge[0]][wayedge[1]]["segment"] = (segStart,
segEnd,
segIndex)
# return the seen segments
return seen_segments
else:
# set the initial segment attribute to the node
self.node[connected_node[0]]["segment"] = (start_end_node,
None,
None)
# set the initial segment attribute to the edge
self[fwec[0]][fwec[1]]["segment"] = (start_end_node,
None,
None)
# append the relevant data to the lists
way_nodes.append(connected_node[0])
way_edges.append(fwec)
# call this method recursively until a 'end' node is found
return self._traverse_weft_edge_until_end(
start_end_node,
connected_node,
seen_segments,
way_nodes,
way_edges,
end_nodes)
else:
return seen_segments
def traverse_weft_edges_and_set_attributes(self, start_end_node):
"""
Traverse a path of 'weft' edges starting from an 'end' node until
another 'end' node is discovered. Set 'segment' attributes to nodes
and edges along the way.
Parameters
----------
start_end_node : :obj:`tuple`
2-tuple representing the node to start the traversal.
"""
# get connected weft edges and sort them by their connected node
weft_connections = self.node_weft_edges(start_end_node[0], data=True)
weft_connections.sort(key=lambda x: x[1])
# loop through all connected weft edges
seen_segments = []
for cwe in weft_connections:
# check if connected weft edge already has a segment attribute
if cwe[2]["segment"]:
continue
# get connected node
connected_node = (cwe[1], self.node[cwe[1]])
# check the connected node. if it is an end node, we are done
if connected_node[1]["end"]:
# get segment start and end
if start_end_node[0] > connected_node[0]:
segStart = connected_node[0]
segEnd = start_end_node[0]
else:
segStart = start_end_node[0]
segEnd = connected_node[0]
# get segment index
if (segStart, segEnd) in seen_segments:
segIndex = len([s for s in seen_segments
if s == (segStart, segEnd)])
else:
segIndex = 0
# set the final segment attribute to the edge
self[cwe[0]][cwe[1]]["segment"] = (segStart, segEnd, segIndex)
seen_segments.append((segStart, segEnd))
# if the connected node is not an end node, we need to travel
# until we find one
else:
seen_segments = self._traverse_weft_edge_until_end(
start_end_node[0],
connected_node,
seen_segments,
way_edges=[cwe])
def assign_segment_attributes(self):
"""
Get the segmentation for loop generation and assign 'segment'
attributes to 'weft' edges and nodes.
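Raises
------
NoWeftEdgesError
If the network contains no 'weft' edges.
NoEndNodesError
If the network contains no 'end' nodes.
Examples
--------
Hypothetical usage sketch; ``KN`` is assumed to be a KnitNetwork instance
with preliminary 'weft' and 'warp' edges already initialized:
>>> KN.assign_segment_attributes()
>>> KN.create_mapping_network()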
"""
if len(self.weft_edges) == 0:
errMsg = ("No 'weft' edges in KnitNetwork! Segmentation " +
"is impossible.")
raise NoWeftEdgesError(errMsg)
if len(self.end_nodes) == 0:
errMsg = ("No 'end' nodes in KnitNetwork! Segmentation " +
"is impossible.")
raise NoEndNodesError(errMsg)
# remove contour and 'warp' edges and store them
warp_storage = []
contour_storage = []
for edge in self.edges(data=True):
if not edge[2]["weft"]:
if edge[2]["warp"]:
warp_storage.append(edge)
else:
contour_storage.append(edge)
self.remove_edge(edge[0], edge[1])
# get all 'end' nodes ordered by their 'position' attribute
all_ends_by_position = self.all_ends_by_position(data=True)
# loop through all 'end' nodes
for position in all_ends_by_position:
for endnode in position:
self.traverse_weft_edges_and_set_attributes(endnode)
# add all previously removed edges back into the network
[self.add_edge(edge[0], edge[1], attr_dict=edge[2])
for edge in warp_storage + contour_storage]
# CREATION OF MAPPING NETWORK ---------------------------------------------
def create_mapping_network(self):
"""
Creates the corresponding mapping network for the final loop generation
from a KnitNetwork instance with fully assigned 'segment' attributes.
The created mapping network will be part of the KnitNetwork instance.
It can be accessed using the mapping_network property.
Returns
-------
success : bool
``True`` if the mapping network has been successfully created,
``False`` otherwise.
Notes
-----
All nodes without an 'end' attribute as well as all 'weft' edges are
removed by this step. Final nodes as well as final 'weft' and 'warp'
edges can only be created using the mapping network.
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
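Examples
--------
Hypothetical usage sketch; ``KN`` is assumed to be a KnitNetwork instance
with fully assigned 'segment' attributes:
>>> success = KN.create_mapping_network()
>>> mapnet = KN.mapping_network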
"""
# create a new KnitMappingNetwork instance
MappingNetwork = KnitMappingNetwork()
# get all edges of the current network by segment
weft_edges = sorted(self.weft_edges, key=lambda x: x[2]["segment"])
warp_edges = self.warp_edges
# initialize deque container for segment ids
segment_ids = deque()
# loop through all 'weft' edges and fill container with unique ids
for edge in weft_edges:
segment_id = edge[2]["segment"]
if segment_id not in segment_ids:
segment_ids.append(segment_id)
# error checking
if len(segment_ids) == 0:
errMsg = (
"The network contains no 'weft' edges with a 'segment' " +
"attribute assigned to them. A KnitMappingNetwork can " +
"only be created from a KnitNetwork with initialized " +
"'weft' edges for courses and corresponding 'warp' " +
"edges connecting their 'end' nodes.")
raise NoWeftEdgesError(errMsg)
# loop through all unique segment ids
for id in segment_ids:
# get the corresponding edges for this id and sort them
segment_edges = [e for e in weft_edges if e[2]["segment"] == id]
segment_edges.sort(key=lambda x: x[0])
# extract start and end nodes
start_node = (id[0], self.node[id[0]])
endNode = (id[1], self.node[id[1]])
# get all the geometry of the individual edges
segment_geo = [e[2]["geo"] for e in segment_edges]
# create a segment contour edge in the mapping network
res = MappingNetwork.create_segment_contour_edge(
start_node,
endNode,
id,
segment_geo)
if not res:
errMsg = ("SegmentContourEdge at segment id {} could not be " +
"created!")
raise KnitNetworkError(errMsg)
# add all warp edges to the mapping network to avoid lookup hassle
for warp_edge in warp_edges:
if warp_edge[0] > warp_edge[1]:
warp_from = warp_edge[1]
warp_to = warp_edge[0]
else:
warp_from = warp_edge[0]
warp_to = warp_edge[1]
MappingNetwork.add_edge(warp_from, warp_to, attr_dict=warp_edge[2])
# set mapping network property for this instance
self.mapping_network = MappingNetwork
# ditch all edges that are not 'warp' and nodes without 'end' attribute
[self.remove_node(n) for n, d in self.nodes_iter(data=True)
if not d["end"]]
[self.remove_edge(s, e) for s, e, d in self.edges_iter(data=True)
if not d["warp"]]
return True
# MAPPING NETWORK PROPERTY ------------------------------------------------
def _get_mapping_network(self):
"""
Gets the associated mapping network for this KnitNetwork instance.
"""
return self._mapping_network
def _set_mapping_network(self, mapping_network):
"""
Setter for this instance's associated mapping network.
"""
# set mapping network to instance
if (isinstance(mapping_network, KnitMappingNetwork)
or mapping_network == None):
self._mapping_network = mapping_network
else:
raise ValueError("Input is not of type KnitMappingNetwork!")
mapping_network = property(_get_mapping_network,
_set_mapping_network,
None,
"The associated mapping network of this " +
"KnitNetwork instance.")
# RETRIEVAL OF NODES AND EDGES FROM MAPPING NETWORK -----------------------
def all_nodes_by_segment(self, data=False, edges=False):
"""
Returns all nodes of the network ordered by 'segment' attribute.
Note: 'end' nodes are not included!
Parameters
----------
data : bool, optional
If ``True``, the nodes contained in the output will be represented
as 2-tuples in the form of (node_identifier, node_data).
Defaults to ``False``
edges : bool, optional
If ``True``, the returned output list will contain 3-tuples in the
form of (segment_value, segment_nodes, segment_edge).
Defaults to ``False``.
Returns
-------
nodes_by_segment : :obj:`list` of :obj:`tuple`
List of 2-tuples in the form of (segment_value, segment_nodes) or
3-tuples in the form of (segment_value, segment_nodes,
segment_edge) depending on the ``edges`` argument.
Raises
------
MappingNetworkError
If the mapping network is not available for this instance.
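Examples
--------
Hypothetical usage sketch; ``KN`` is assumed to be a KnitNetwork instance
whose mapping network has already been created:
>>> values, nodes = zip(*KN.all_nodes_by_segment(data=True))
>>> values, nodes, edges = zip(
...     *KN.all_nodes_by_segment(data=True, edges=True))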
"""
# retrieve mappingnetwork
mapnet = self.mapping_network
if not mapnet:
errMsg = ("Mapping network has not been built for this instance!")
raise MappingNetworkError(errMsg)
allSegments = mapnet.segment_contour_edges
allSegmentNodes = [(n, d) for n, d
in self.nodes_iter(data=True) if d["segment"]]
segdict = {}
for n in allSegmentNodes:
if n[1]["segment"] not in segdict:
segdict[n[1]["segment"]] = [n]
else:
segdict[n[1]["segment"]].append(n)
anbs = []
if data and edges:
for segment in allSegments:
segval = segment[2]["segment"]
try:
segnodes = sorted(segdict[segval])
except KeyError:
segnodes = []
anbs.append((segval, segnodes, segment))
elif data and not edges:
for segment in allSegments:
segval = segment[2]["segment"]
try:
segnodes = sorted(segdict[segval])
except KeyError:
segnodes = []
anbs.append((segval, segnodes))
elif not data and edges:
for segment in allSegments:
segval = segment[2]["segment"]
try:
segnodes = sorted(segdict[segval])
except KeyError:
segnodes = []
anbs.append((segval, [sn[0] for sn in segnodes], segment))
elif not data and not edges:
for segment in allSegments:
segval = segment[2]["segment"]
try:
segnodes = sorted(segdict[segval])
except KeyError:
segnodes = []
anbs.append((segval, [sn[0] for sn in segnodes]))
return anbs
# STITCH WIDTH SAMPLING ---------------------------------------------------
def sample_segment_contours(self, stitch_width):
"""
Samples the segment contours of the mapping network with the given
stitch width. The resulting points are added to the network as nodes
and a 'segment' attribute is assigned to them based on their origin
segment contour edge.
Parameters
----------
stitch_width : float
The width of a single stitch inside the knit.
Raises
------
MappingNetworkError
If the mapping network is not available for this instance.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
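Examples
--------
Hypothetical usage sketch; ``KN`` is assumed to be a KnitNetwork instance
with a created mapping network, sampled at 2.0 model units per stitch:
>>> KN.sample_segment_contours(2.0)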
"""
# retrieve mapping network
mapnet = self.mapping_network
if not mapnet:
errMsg = ("Mapping network has not been built for this " +
"instance, sampling segment contours is impossible!")
raise MappingNetworkError(errMsg)
# get the highest index of all the nodes in the network
maxNode = max(self.nodes())
# get all the segment geometry ordered by segment number
segment_contours = mapnet.segment_contour_edges
# sample all segments with the stitch width
nodeindex = maxNode + 1
for i, seg in enumerate(segment_contours):
# get the geometry of the contour and reparametrize its domain
geo = seg[2]["geo"]
geo = geo.ToPolylineCurve()
geo.Domain = RhinoInterval(0.0, 1.0)
# compute the division points
crvlen = geo.GetLength()
density = int(round(crvlen / stitch_width))
if density == 0:
continue
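# NOTE: since includeEnds is False, only the interior division points
# are added as new nodes here; the 'end' nodes delimiting the contour
# already exist in the network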
divT = geo.DivideByCount(density, False)
divPts = [geo.PointAt(t) for t in divT]
# set leaf attribute
# TODO: better leaf strategy - this works but assigns false
# leaf nodes. usually not a problem but it should be fixed anyway
if self.node[seg[0]]["leaf"] and self.node[seg[1]]["leaf"]:
nodeLeaf = True
else:
nodeLeaf = False
# add all the nodes to the network
for j, pt in enumerate(divPts):
# add node to network
self.node_from_point3d(
nodeindex,
pt,
position=None,
num=j,
leaf=nodeLeaf,
start=False,
end=False,
segment=seg[2]["segment"],
increase=False,
decrease=False,
color=None)
# increment node index
nodeindex += 1
# CREATION OF FINAL 'WEFT' CONNECTIONS ------------------------------------
def create_final_weft_connections(self):
"""
Loop through all the segment contour edges and create all 'weft'
connections for this network.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
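Examples
--------
Hypothetical usage sketch; ``KN`` is assumed to be a KnitNetwork instance
whose segment contours have already been sampled (see
sample_segment_contours):
>>> KN.create_final_weft_connections()
>>> KN.create_final_warp_connections()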
"""
# get all nodes by segment contour
SegmentValues, AllNodesBySegment = zip(*self.all_nodes_by_segment(
data=True))
# loop through all the segment contours
for i, segment in enumerate(AllNodesBySegment):
segval = SegmentValues[i]
firstNode = (segval[0], self.node[segval[0]])
lastNode = (segval[1], self.node[segval[1]])
if len(segment) == 0:
self.create_weft_edge(firstNode, lastNode, segval)
elif len(segment) == 1:
self.create_weft_edge(firstNode, segment[0], segval)
self.create_weft_edge(segment[0], lastNode, segval)
else:
# loop through all nodes on the current segment and create
# the final 'weft' edges
for j, node in enumerate(segment):
if j == 0:
self.create_weft_edge(firstNode, node, segval)
self.create_weft_edge(node, segment[j+1], segval)
elif j < len(segment)-1:
self.create_weft_edge(node, segment[j+1], segval)
elif j == len(segment)-1:
self.create_weft_edge(node, lastNode, segval)
# CREATION OF FINAL 'WARP' CONNECTIONS ------------------------------------
def attempt_warp_connection(self, node, candidate, source_nodes,
max_connections=4, verbose=False):
"""
Method for attempting a 'warp' connection to a candidate
node based on certain parameters.
Parameters
----------
node : node
The starting node for the possible 'warp' edge.
candidate : node
The target node for the possible 'warp' edge.
source_nodes : :obj:`list`
List of nodes on the position contour of node. Used to check if
the candidate node already has a connection.
max_connections : int, optional
The new 'warp' connection will only be made if the candidate node's
number of connected neighbors is below this.
Defaults to ``4``.
verbose : bool, optional
If ``True``, this routine and all its subroutines will print
messages about what is happening to the console.
Defaults to ``False``.
Returns
-------
result : bool
``True`` if the connection has been made, ``False`` otherwise.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
connecting_neighbors = self[candidate[0]]
if len(connecting_neighbors) < max_connections:
isConnected = False
for cn in connecting_neighbors:
if cn in [v[0] for v in source_nodes]:
isConnected = True
# print info on verbose setting
v_print("Candidate node {} is ".format(candidate[0]) +
"already connected! Skipping to next node...")
break
if not isConnected:
# print info on verbose setting
v_print("Connecting node {} to best candidate {}.".format(
node[0],
candidate[0]))
# finally create the warp edge for good
self.create_warp_edge(node, candidate)
return True
else:
return False
else:
return False
def _create_initial_warp_connections(self, segment_pair, max_connections=4,
precise=False, verbose=False):
"""
Private method for creating first pass 'warp' connections for the
supplied pair of segment chains.
The pair is only defined as a list of nodes; the nodes have to be
supplied with their attribute data!
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
if len(segment_pair) < 2:
v_print("Not enough contour segments in supplied set!")
return
# print info on verbose output
v_print("Creating initial 'warp' connections for contour set...")
# get initial and target nodes without 'end' nodes
initial_nodes = segment_pair[0]
target_nodes = segment_pair[1]
# define forbidden node index
forbidden_node = -1
# do nothing if one of the sets is empty
if len(initial_nodes) == 0 or len(target_nodes) == 0:
return
# loop through all nodes on the current segment
for k, node in enumerate(initial_nodes):
# get geometry from current node
thisPt = node[1]["geo"]
# print info on verbose setting
v_print("Processing node {} on segment {}:".format(
node[0],
node[1]["segment"]))
# filtering according to forbidden nodes
if forbidden_node != -1:
target_nodes = [tnode for tx, tnode in enumerate(target_nodes)
if tx >= target_nodes.index(forbidden_node)]
if len(target_nodes) == 0:
continue
# compute distances to target nodes
if precise:
allDists = [thisPt.DistanceTo(tn[1]["geo"])
for tn in target_nodes]
else:
allDists = [thisPt.DistanceToSquared(tn[1]["geo"])
for tn in target_nodes]
# sort nodes by distance
allDists, sorted_target_nodes = zip(*sorted(
zip(allDists, target_nodes),
key=itemgetter(0)))
# the four nearest nodes are the possible connections
possible_connections = sorted_target_nodes[:4]
# print info on verbose setting
v_print("Possible connections: {}".format([pc[0] for pc in
possible_connections]))
# handle edge case where there is no possible connection or just
# one
if len(possible_connections) == 0:
continue
elif len(possible_connections) == 1:
# attempt to connect to only possible candidate
fCand = possible_connections[0]
res = self.attempt_warp_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node
if res:
forbidden_node = fCand
continue
# get the segment contours current direction
if k < len(initial_nodes)-1:
contourDir = RhinoLine(thisPt,
initial_nodes[k+1][1]["geo"]).Direction
elif k == len(initial_nodes)-1:
contourDir = RhinoLine(
initial_nodes[k-1][1]["geo"], thisPt).Direction
contourDir.Unitize()
# get the directions of the possible connections
candidatePoints = [pc[1]["geo"] for pc in possible_connections]
candidateDirections = [RhinoLine(
thisPt, cp).Direction for cp in candidatePoints]
[cd.Unitize() for cd in candidateDirections]
# get the angles between segment contour dir and possible conn dir
normals = [RhinoVector3d.CrossProduct(
contourDir, cd) for cd in candidateDirections]
angles = [RhinoVector3d.VectorAngle(
contourDir, cd, n) for cd, n in zip(
candidateDirections, normals)]
# compute deltas as a measure of perpendicularity
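# a delta of 0 means the candidate direction is exactly perpendicular
# to the contour direction (the angle between them equals pi / 2)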
deltas = [abs(a - (0.5 * pi)) for a in angles]
# sort possible connections first by distance, then by delta
(allDists,
deltas,
angles,
most_perpendicular) = zip(*sorted(zip(allDists,
deltas,
angles,
possible_connections[:]),
key=itemgetter(0, 1)))
# compute angle difference
aDelta = angles[0] - angles[1]
# get node neighbors
nNeighbors = self[node[0]]
# CONNECTION FOR LEAST ANGLE CHANGE -------------------------------
if len(nNeighbors) > 2 and aDelta < radians(6.0):
# print info on verbose setting
v_print("Using procedure for least angle " +
"change connection...")
# get previous connected edge and its direction
prevEdges = self.node_warp_edges(node[0], data=True)
if len(prevEdges) > 1:
print("More than one previous " +
"'warp' connection! This was unexpected..." +
"Taking the first one..?")
prevDir = prevEdges[0][2]["geo"].Direction
else:
prevDir = prevEdges[0][2]["geo"].Direction
prevDir.Unitize()
# get directions for the best two candidates
mpA = most_perpendicular[0]
mpB = most_perpendicular[1]
dirA = RhinoLine(thisPt, mpA[1]["geo"]).Direction
dirB = RhinoLine(thisPt, mpB[1]["geo"]).Direction
dirA.Unitize()
dirB.Unitize()
# get normals for angle measurement
normalA = RhinoVector3d.CrossProduct(prevDir, dirA)
normalB = RhinoVector3d.CrossProduct(prevDir, dirB)
# measure the angles
angleA = RhinoVector3d.VectorAngle(prevDir, dirA, normalA)
angleB = RhinoVector3d.VectorAngle(prevDir, dirB, normalB)
# select final candidate for connection
if angleA < angleB:
fCand = mpA
else:
fCand = mpB
# attempt connection to final candidate
res = self.attempt_warp_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node
if res:
forbidden_node = fCand
continue
# CONNECTION FOR MOST PERPENDICULAR -------------------------------
else:
# print info on verbose setting
v_print("Using procedure for most " +
"perpendicular connection...")
# define final candidate node
fCand = most_perpendicular[0]
# attempt connection to final candidate
res = self.attempt_warp_connection(
node,
fCand,
initial_nodes,
max_connections=max_connections,
verbose=verbose)
# set forbidden node
if res:
forbidden_node = fCand
def _create_second_pass_warp_connection(self, source_nodes, source_index,
window, precise=False,
verbose=False, reverse=False):
"""
Private method for creating second pass 'warp' connections for the
given set of contours.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
if len(window) == 0:
# print info on verbose setting
v_print("Length of window is 0, skipping...")
elif len(window) == 1:
# print info on verbose setting
v_print("Window has only one node.")
v_print("Connecting to node {}.".format(window[0][0]))
# connect 'warp' edge
if reverse:
self.create_warp_edge(window[0], source_nodes[source_index])
else:
self.create_warp_edge(source_nodes[source_index], window[0])
else:
# retrieve the point of the current source node
thisPt = source_nodes[source_index][1]["geo"]
# print info on verbose setting
v_print("Processing window nodes: {}".format(
[w[0] for w in window]))
# sort nodes in window by distance
if precise:
allDists = [thisPt.DistanceTo(pc[1]["geo"])
for pc in window]
else:
allDists = [thisPt.DistanceToSquared(pc[1]["geo"])
for pc in window]
allDists, window = zip(*sorted(zip(allDists, window),
key=itemgetter(0)))
# get the contours current direction
if source_index < len(source_nodes)-1:
sourceDir = RhinoLine(
thisPt,
source_nodes[source_index+1][1]["geo"]).Direction
elif source_index == len(source_nodes)-1:
sourceDir = RhinoLine(source_nodes[source_index-1][1]["geo"],
thisPt).Direction
sourceDir.Unitize()
# get the directions of the possible connections
candidatePoints = [pc[1]["geo"] for pc in window]
candidateDirections = [RhinoLine(thisPt, cp).Direction for cp
in candidatePoints]
[cd.Unitize() for cd in candidateDirections]
# get the angles between contour dir and window dir
normals = [RhinoVector3d.CrossProduct(sourceDir, cd)
for cd in candidateDirections]
angles = [RhinoVector3d.VectorAngle(sourceDir, cd, n) for cd, n
in zip(candidateDirections, normals)]
# compute deltas as a measure of perpendicularity
deltas = [abs(a - (0.5 * pi)) for a in angles]
# sort window by distance, then by delta
allDists, deltas, most_perpendicular = zip(*sorted(
zip(allDists,
deltas,
window),
key=itemgetter(0, 1)))
# set final candidate node for connection
fCand = most_perpendicular[0]
# print info on verbose setting
v_print("Connecting to node " +
"{} on segment {}...".format(fCand[0],
fCand[1]["segment"]))
# connect warp edge to best target
if reverse:
self.create_warp_edge(fCand, source_nodes[source_index])
else:
self.create_warp_edge(source_nodes[source_index], fCand)
def create_final_warp_connections(self, max_connections=4,
include_end_nodes=True, precise=False,
verbose=False):
"""
Create the final 'warp' connections by building chains of segment
contour edges and connecting them.
For each source chain, a target chain is found using an
'educated guessing' strategy. This means that the possible target
chains are guessed by leveraging known topology facts about the network
and its special 'end' nodes.
Parameters
----------
max_connections : int, optional
The number of maximum previous connections a candidate node for a
'warp' connection is allowed to have.
Defaults to ``4``.
include_end_nodes : bool, optional
If ``True``, 'end' nodes between adjacent segment contours in a
source chain will be included in the first pass of connecting
'warp' edges.
Defaults to ``True``.
precise : bool, optional
If ``True``, the distance between nodes will be calculated using
the Rhino.Geometry.Point3d.DistanceTo method, otherwise the much
faster Rhino.Geometry.Point3d.DistanceToSquared method is used.
Defaults to ``False``.
verbose : bool, optional
If ``True``, this routine and all its subroutines will print
messages about what is happening to the console. Great for
debugging and analysis.
Defaults to ``False``.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
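Examples
--------
Hypothetical usage sketch; ``KN`` is assumed to be a KnitNetwork instance
on which the final 'weft' connections have already been created:
>>> KN.create_final_warp_connections(max_connections=4,
...                                  include_end_nodes=True,
...                                  precise=False,
...                                  verbose=False)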
"""
# define verbose print function
v_print = print if verbose else lambda *a, **k: None
# get all segment ids, nodes per segment and edges
SegmentValues, AllNodesBySegment, SegmentContourEdges = zip(
*self.all_nodes_by_segment(data=True, edges=True))
# build a dictionary of the segments by their index
SegmentDict = dict(zip(SegmentValues,
zip(SegmentContourEdges, AllNodesBySegment)))
# build source and target chains
source_chains, target_chain_dict = self.mapping_network.build_chains(
False,
True)
# initialize container dict for connected chains
connected_chains = dict()
# initialize segment mapping dictionaries
source_to_target = OrderedDict()
target_to_source = OrderedDict()
source_to_key = dict()
target_to_key = dict()
# ITERATE OVER SOURCE SEGMENT CHAINS ----------------------------------
# loop through all source chains and find targets in target chains
# using an 'educated guess strategy'
for i, source_chain in enumerate(source_chains):
# get the first and last node ('end' nodes)
firstNode = (source_chain[0][0][0],
self.node[source_chain[0][0][0]])
lastNode = (source_chain[0][-1][1],
self.node[source_chain[0][-1][1]])
# get the chain value of the current chain
chain_value = source_chain[1]
# extract the ids of the current chain
current_ids = tuple(source_chain[0])
# extract the current chains geometry
current_chain_geo_list = [SegmentDict[id][0][2]["geo"]
for id in current_ids]
current_chain_geo = RhinoCurve.JoinCurves(
[ccg.ToPolylineCurve() for ccg in current_chain_geo_list])[0]
current_chain_spt = current_chain_geo.PointAtNormalizedLength(0.5)
# retrieve the current segments from the segment dictionary by id
current_segment_nodes = [SegmentDict[id][1] for id in current_ids]
# retrieve the current nodes from the list of current segments
current_nodes = []
for j, csn in enumerate(current_segment_nodes):
if include_end_nodes and j > 0:
current_nodes.append((current_ids[j][0],
self.node[current_ids[j][0]]))
[current_nodes.append(n) for n in csn]
# reset the target key
target_key = None
# print info on verbose setting
v_print("--------------------------------------------------------")
v_print("Processing segment chain {} ...".format(source_chain))
# CASE 1 - ENCLOSED SHORT ROW <====> ALL CASES --------------------
# look for possible targets using a guess about the chain value
possible_target_keys = [key for key in target_chain_dict
if key[0] == chain_value[0]
and key[1] == chain_value[1]
and key not in connected_chains]
if len(possible_target_keys) > 0:
# find the correct chain by using geometric distance
possible_target_chains = [target_chain_dict[tk] for tk
in possible_target_keys]
# for every chain in the possible target chains, get the
# geometry and compute a sample distance
filtered_target_keys = []
possible_target_chain_dists = []
for j, ptc in enumerate(possible_target_chains):
# retrieve possible target geometry and join into one crv
ptc_geo_list = [SegmentDict[id][0][2]["geo"] for id in ptc]
if ptc_geo_list == current_chain_geo_list:
continue
ptc_geo = RhinoCurve.JoinCurves(
[ptcg.ToPolylineCurve() for ptcg in ptc_geo_list])[0]
# get a sample point and measure the distance to the
# source chain sample point
ptc_spt = ptc_geo.PointAtNormalizedLength(0.5)
if precise:
ptc_dist = current_chain_spt.DistanceTo(ptc_spt)
else:
ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt)
# append the filtered key to the key list
filtered_target_keys.append(possible_target_keys[j])
# append the measured distance to the distance list
possible_target_chain_dists.append(ptc_dist)
if len(filtered_target_keys) > 0:
# sort filtered target keys using the distances
possible_target_chain_dists, filtered_target_keys = zip(
*sorted(zip(
possible_target_chain_dists,
filtered_target_keys),
key=itemgetter(0)))
# set target key
target_key = filtered_target_keys[0]
else:
target_key = None
else:
target_key = None
# attempt warp connections if we have found a correct key
if target_key:
# get the guessed target chain from the chain dictionary
target_chain = target_chain_dict[target_key]
# extract the ids for node retrieval
target_ids = tuple([seg for seg in target_chain])
# retrieve the target nodes from the segment dictionary by id
target_segment_nodes = [SegmentDict[id][1]
for id in target_ids]
target_nodes = []
for j, tsn in enumerate(target_segment_nodes):
if include_end_nodes and j > 0:
target_nodes.append((
target_ids[j][0], self.node[target_ids[j][0]]))
[target_nodes.append(n) for n in tsn]
# print info on verbose setting
v_print("<=====> detected. Connecting to " +
"segment chain {}.".format(target_key))
# we have successfully verified our target segment and
# can create some warp edges!
segment_pair = [current_nodes, target_nodes]
# fill mapping dictionaries
if current_ids not in source_to_target:
source_to_target[current_ids] = target_ids
if current_ids not in source_to_key:
source_to_key[current_ids] = chain_value
if target_ids not in target_to_source:
target_to_source[target_ids] = current_ids
if target_ids not in target_to_key:
target_to_key[target_ids] = target_key
# create initial warp connections between the chains
connected_chains[target_key] = True
self._create_initial_warp_connections(
segment_pair,
max_connections=max_connections,
precise=precise,
verbose=verbose)
continue
# CASE 2 - SHORT ROW TO THE RIGHT <=====/ ALL CASES ---------------
# look for possible targets using a guess about the chain value
possible_target_keys = [key for key in target_chain_dict
if key[0] == chain_value[0]
and key[1] == chain_value[1]+1
and key not in connected_chains]
if len(possible_target_keys) == 1:
target_key = possible_target_keys[0]
elif len(possible_target_keys) > 1:
# find the correct chain by using geometric distance
possible_target_chains = [target_chain_dict[tk] for tk
in possible_target_keys]
# for every chain in the possible target chains, get the
# geometry and compute a sample distance
possible_target_chain_dists = []
for ptc in possible_target_chains:
# retrieve possible target geometry and join into one crv
ptc_geo = [SegmentDict[id][0][2]["geo"] for id in ptc]
ptc_geo = RhinoCurve.JoinCurves([pg.ToPolylineCurve()
for pg in ptc_geo])[0]
# get a sample point and measure the distance to the
# source chain sample point
ptc_spt = ptc_geo.PointAtNormalizedLength(0.5)
if precise:
ptc_dist = current_chain_spt.DistanceTo(ptc_spt)
else:
ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt)
# append the measured distance to the list
possible_target_chain_dists.append(ptc_dist)
# sort possible target keys using the distances
possible_target_chain_dists, possible_target_keys = zip(
*sorted(zip(possible_target_chain_dists,
possible_target_keys),
key=itemgetter(0)))
target_key = possible_target_keys[0]
else:
target_key = None
# attempt warp connections if we have found a correct key
if target_key:
# get the guessed target chain from the chain dictionary
target_chain = target_chain_dict[target_key]
# extract the ids for node retrieval
target_ids = tuple([seg for seg in target_chain])
# retrieve the target nodes from the segment dictionary by id
target_segment_nodes = [SegmentDict[id][1]
for id in target_ids]
target_nodes = []
for j, tsn in enumerate(target_segment_nodes):
if include_end_nodes and j > 0:
target_nodes.append((target_ids[j][0],
self.node[target_ids[j][0]]))
[target_nodes.append(n) for n in tsn]
targetFirstNode = target_ids[0][0]
targetLastNode = target_ids[-1][1]
# check if firstNode and targetFirstNode are connected via a
# 'warp' edge to verify
if (targetFirstNode == firstNode[0]
and targetLastNode in self[lastNode[0]]):
# print info on verbose setting
v_print("<=====/ detected. Connecting " +
"to segment {}.".format(target_key))
# we have successfully verified our target segment and
# can create some warp edges!
segment_pair = [current_nodes, target_nodes]
connected_chains[target_key] = True
# fill mapping dictionaries
if current_ids not in source_to_target:
source_to_target[current_ids] = target_ids
if current_ids not in source_to_key:
source_to_key[current_ids] = chain_value
if target_ids not in target_to_source:
target_to_source[target_ids] = current_ids
if target_ids not in target_to_key:
target_to_key[target_ids] = target_key
# create initial 'warp' connections between the chains
self._create_initial_warp_connections(
segment_pair,
max_connections=max_connections,
precise=precise,
verbose=verbose)
continue
else:
v_print("No real connection for <=====/. Next case...")
# CASE 3 - SHORT ROW TO THE LEFT /====> ALL CASES -----------------
# look for possible targets using a guess about the chain value
possible_target_keys = [key for key in target_chain_dict
if key[0] == chain_value[0]+1
and key[1] == chain_value[1]
and key not in connected_chains]
if len(possible_target_keys) == 1:
target_key = possible_target_keys[0]
elif len(possible_target_keys) > 1:
# find the correct chain by using geometric distance
possible_target_chains = [target_chain_dict[tk] for tk
in possible_target_keys]
# for every chain in the possible target chains, get the
# geometry and compute a sample distance
possible_target_chain_dists = []
for ptc in possible_target_chains:
# retrieve possible target geometry and join into one crv
ptc_geo = [SegmentDict[id][0][2]["geo"] for id in ptc]
ptc_geo = RhinoCurve.JoinCurves(
[pg.ToPolylineCurve() for pg in ptc_geo])[0]
# get a sample point and measure the distance to the
# source chain sample point
ptc_spt = ptc_geo.PointAtNormalizedLength(0.5)
if precise:
ptc_dist = current_chain_spt.DistanceTo(ptc_spt)
else:
ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt)
# append the measured distance to the list
possible_target_chain_dists.append(ptc_dist)
# sort possible target keys using the distances
possible_target_chain_dists, possible_target_keys = zip(
*sorted(zip(possible_target_chain_dists,
possible_target_keys),
key=itemgetter(0)))
target_key = possible_target_keys[0]
else:
target_key = None
# attempt warp connections if we have found a correct key
if target_key:
# get the guessed target chain from the chain dictionary
target_chain = target_chain_dict[target_key]
# extract the ids for node retrieval
target_ids = tuple([seg for seg in target_chain])
# retrieve the target nodes from the segment dictionary by id
target_segment_nodes = [SegmentDict[id][1]
for id in target_ids]
target_nodes = []
for j, tsn in enumerate(target_segment_nodes):
if include_end_nodes and j > 0:
target_nodes.append((target_ids[j][0],
self.node[target_ids[j][0]]))
[target_nodes.append(n) for n in tsn]
targetFirstNode = target_ids[0][0]
targetLastNode = target_ids[-1][1]
# check if firstNode and targetFirstNode are connected via a
# 'warp' edge to verify
if (targetFirstNode in self[firstNode[0]]
and targetLastNode == lastNode[0]):
# print info on verbose setting
v_print("/=====> detected. Connecting " +
"to segment {}.".format(target_key))
# we have successfully verified our target segment and
# can create some warp edges!
segment_pair = [current_nodes, target_nodes]
connected_chains[target_key] = True
# fill mapping dictionaries
if current_ids not in source_to_target:
source_to_target[current_ids] = target_ids
if current_ids not in source_to_key:
source_to_key[current_ids] = chain_value
if target_ids not in target_to_source:
target_to_source[target_ids] = current_ids
if target_ids not in target_to_key:
target_to_key[target_ids] = target_key
self._create_initial_warp_connections(
segment_pair,
max_connections=max_connections,
precise=precise,
verbose=verbose)
continue
else:
v_print("No real connection for /=====>. Next case...")
# CASE 4 - REGULAR ROW /=====/ ALL CASES --------------------------
# look for possible targets using a guess about the chain value
possible_target_keys = [key for key in target_chain_dict
if key[0] == chain_value[0]+1
and key[1] == chain_value[1]+1
and key not in connected_chains]
if len(possible_target_keys) == 1:
target_key = possible_target_keys[0]
elif len(possible_target_keys) > 1:
# find the correct chain by using geometric distance
possible_target_chains = [target_chain_dict[tk] for tk
in possible_target_keys]
# for every chain in the possible target chains, get the
# geometry and compute a sample distance
possible_target_chain_dists = []
for ptc in possible_target_chains:
# retrieve possible target geometry and join into one crv
ptc_geo = [SegmentDict[id][0][2]["geo"] for id in ptc]
ptc_geo = RhinoCurve.JoinCurves([pg.ToPolylineCurve()
for pg in ptc_geo])[0]
# get a sample point and measure the distance to the
# source chain sample point
ptc_spt = ptc_geo.PointAtNormalizedLength(0.5)
if precise:
ptc_dist = current_chain_spt.DistanceTo(ptc_spt)
else:
ptc_dist = current_chain_spt.DistanceToSquared(ptc_spt)
# append the measured distance to the list
possible_target_chain_dists.append(ptc_dist)
# sort possible target keys using the distances
possible_target_chain_dists, possible_target_keys = zip(
*sorted(zip(possible_target_chain_dists,
possible_target_keys),
key=itemgetter(0)))
target_key = possible_target_keys[0]
else:
target_key = None
# attempt warp connections if we have found a correct key
if target_key:
# get the guessed target chain from the chain dictionary
target_chain = target_chain_dict[target_key]
# extract the ids for node retrieval
target_ids = tuple([seg for seg in target_chain])
# retrieve the target nodes from the segment dictionary by id
target_segment_nodes = [SegmentDict[id][1]
for id in target_ids]
target_nodes = []
for j, tsn in enumerate(target_segment_nodes):
if include_end_nodes and j > 0:
target_nodes.append((target_ids[j][0],
self.node[target_ids[j][0]]))
[target_nodes.append(n) for n in tsn]
# set target first and last node ('end' nodes)
targetFirstNode = target_ids[0][0]
targetLastNode = target_ids[-1][1]
# check if firstNode and targetFirstNode are connected via a
# 'warp' edge to verify
if (targetFirstNode in self[firstNode[0]]
and targetLastNode in self[lastNode[0]]):
# print info on verbose setting
v_print("/=====/ detected. Connecting " +
"to segment {}.".format(target_key))
# we have successfully verified our target segment and
# can create some warp edges!
segment_pair = [current_nodes, target_nodes]
connected_chains[target_key] = True
# fill mapping dictionaries
if current_ids not in source_to_target:
source_to_target[current_ids] = target_ids
if current_ids not in source_to_key:
source_to_key[current_ids] = chain_value
if target_ids not in target_to_source:
target_to_source[target_ids] = current_ids
if target_ids not in target_to_key:
target_to_key[target_ids] = target_key
self._create_initial_warp_connections(
segment_pair,
max_connections=max_connections,
precise=precise,
verbose=verbose)
continue
else:
v_print("No real connection for /=====/. No cases match.")
# INVOKE SECOND PASS FOR SOURCE ---> TARGET ---------------------------
for i, current_chain in enumerate(source_to_target):
v_print("--------------------------------------------------------")
v_print("S>T Current Chain: {}".format(current_chain))
# build a list of nodes containing all nodes in the current chain
# including all 'end' nodes
current_chain_nodes = []
for j, ccid in enumerate(current_chain):
current_chain_nodes.append((ccid[0], self.node[ccid[0]]))
[current_chain_nodes.append(n) for n in SegmentDict[ccid][1]]
current_chain_nodes.append((current_chain[-1][1],
self.node[current_chain[-1][1]]))
# retrieve target chain from the source to target mapping
target_chain = source_to_target[current_chain]
cckey = source_to_key[current_chain]
tckey = target_to_key[target_chain]
# build a list of nodes containing all nodes in the target chain
# including all 'end' nodes
target_chain_nodes = []
for j, tcid in enumerate(target_chain):
target_chain_nodes.append((tcid[0], self.node[tcid[0]]))
[target_chain_nodes.append(n) for n in SegmentDict[tcid][1]]
target_chain_nodes.append((target_chain[-1][1],
self.node[target_chain[-1][1]]))
# initialize start of window marker
start_of_window = -1
# loop through all nodes on the current chain
for k, node in enumerate(current_chain_nodes):
# find out if the current node is already principally connected
node_connected = False
# if the node is the first or the last node, it is defined as
# connected per-se
if k == 0 or k == len(current_chain_nodes)-1:
node_connected = True
# find out if the current node is already connected to the
# target chain, get node warp edges and their target nodes
node_warp_edges = self.node_warp_edges(node[0], data=False)
warp_edge_targets = [we[1] for we in node_warp_edges]
# loop over warp edge targets to get the start of the window
for wet in warp_edge_targets:
# loop over target chain nodes
for n, tcn in enumerate(target_chain_nodes):
# if a warp edge target is in the target chain,
# the node is connected and the start of window for
# the next node is defined
if wet == tcn[0]:
if n > start_of_window or start_of_window == -1:
start_of_window = n
node_connected = True
# if the node is not connected to the target chain, we
# need to find the end of the window
if not node_connected:
v_print("Node: {}".format(node[0]))
v_print("Start of window: {}".format(start_of_window))
# re-check start of window for <.====/ case
if len(target_chain_nodes) >= 2 and start_of_window == -1:
if target_chain_nodes[0] == current_chain_nodes[0]:
start_of_window = 1
else:
start_of_window = 0
end_of_window = None
# loop over target chain nodes
for n, tcn in enumerate(target_chain_nodes):
if n >= start_of_window:
if tcn[0] == current_chain_nodes[-1][0]:
end_of_window = n
# get all warp edges of the current target node
# and their targets
tcn_warp_edges = self.node_warp_edges(tcn[0],
data=False)
tcn_warp_edge_targets = [we[1] for we
in tcn_warp_edges]
# loop over warp edge targets
for twet in tcn_warp_edge_targets:
if (twet in [cn[0] for cn
in current_chain_nodes]):
end_of_window = n
break
if end_of_window and end_of_window > start_of_window:
break
# re-check end of window for /====.> case
if end_of_window:
tcn_we = target_chain_nodes[end_of_window]
ccn_end = current_chain_nodes[-1]
ccn_len = len(current_chain_nodes)
if tcn_we == ccn_end and k == ccn_len-2:
end_of_window -= 1
if end_of_window < start_of_window:
start_of_window = -1
end_of_window = None
# if we have a valid window, set the target nodes
if start_of_window != -1 and end_of_window != None:
if end_of_window == len(target_chain_nodes)-1:
window = target_chain_nodes[start_of_window:]
else:
window = target_chain_nodes[start_of_window:
end_of_window+1]
v_print("End of window: {}".format(end_of_window))
# execute connection to target
if cckey <= tckey:
rev = False
else:
rev = True
v_print("Connecting chain {} to chain {}".format(
cckey,
tckey))
self._create_second_pass_warp_connection(
current_chain_nodes,
k,
window,
precise=precise,
verbose=verbose,
reverse=rev)
else:
# print info on verbose setting
v_print("No valid window for current chain!")
# INVOKE SECOND PASS FOR TARGET ---> SOURCE ---------------------------
for i, current_chain in enumerate(target_to_source):
v_print("--------------------------------------------------------")
v_print("T>S Current Chain: {}".format(current_chain))
# build a list of nodes containing all nodes in the current chain
# including all 'end' nodes
current_chain_nodes = []
for j, ccid in enumerate(current_chain):
current_chain_nodes.append((ccid[0], self.node[ccid[0]]))
[current_chain_nodes.append(n) for n in SegmentDict[ccid][1]]
current_chain_nodes.append((current_chain[-1][1],
self.node[current_chain[-1][1]]))
# retrieve target chain from the source to target mapping
target_chain = target_to_source[current_chain]
cckey = target_to_key[current_chain]
tckey = source_to_key[target_chain]
# build a list of nodes containing all nodes in the target chain
# including all 'end' nodes
target_chain_nodes = []
for j, tcid in enumerate(target_chain):
target_chain_nodes.append((tcid[0], self.node[tcid[0]]))
[target_chain_nodes.append(n) for n in SegmentDict[tcid][1]]
target_chain_nodes.append((target_chain[-1][1],
self.node[target_chain[-1][1]]))
# initialize start of window marker
start_of_window = -1
# loop through all nodes on the current chain
for k, node in enumerate(current_chain_nodes):
# find out if the current node is already principally connected
node_connected = False
if k == 0 or k == len(current_chain_nodes)-1:
node_connected = True
# find out if the current node is already connected to the
# target chain
node_warp_edges = self.node_warp_edges(node[0], data=False)
warp_edge_targets = [we[1] for we in node_warp_edges]
# loop over warp edge targets
for wet in warp_edge_targets:
# if warp edge target is in target chain nodes, node
# is connected and the start of our window for the next
# node
for n, tcn in enumerate(target_chain_nodes):
if wet == tcn[0]:
if n > start_of_window or start_of_window == -1:
start_of_window = n
node_connected = True
# if the node is not connected to the target chain, we
# need to find the end of the window
if not node_connected:
# print info on verbose output
v_print("Node: {}".format(node[0]))
v_print("Start of window: {}".format(start_of_window))
# re-check start of window for <.====/ case
if len(target_chain_nodes) >= 2 and start_of_window == -1:
if target_chain_nodes[0] == current_chain_nodes[0]:
start_of_window = 1
else:
start_of_window = 0
end_of_window = None
# loop over target chain nodes
for n, tcn in enumerate(target_chain_nodes):
if n >= start_of_window:
if tcn[0] == current_chain_nodes[-1][0]:
end_of_window = n
# get all warp edges of the current target node and
# their targets
tcn_warp_edges = self.node_warp_edges(tcn[0],
data=False)
tcn_warp_edge_targets = [we[1] for we
in tcn_warp_edges]
# loop over warp edge targets of current target
# node
for twet in tcn_warp_edge_targets:
# if warp edge target is in current chain,
# it is the end of the window
if (twet in [cn[0] for cn
in current_chain_nodes]):
end_of_window = n
break
if end_of_window and end_of_window > start_of_window:
break
# re-check end of window for /====.> case
if end_of_window:
tcn_we = target_chain_nodes[end_of_window]
ccn_end = current_chain_nodes[-1]
ccn_len = len(current_chain_nodes)
if tcn_we == ccn_end and k == ccn_len-2:
end_of_window -= 1
if end_of_window < start_of_window:
start_of_window = -1
end_of_window = None
# if there is a valid window, set the target chain nodes
if start_of_window != -1 and end_of_window != None:
if end_of_window == len(target_chain_nodes)-1:
window = target_chain_nodes[start_of_window:]
else:
window = target_chain_nodes[start_of_window:
end_of_window+1]
# print info on verbose output
v_print("End of window: {}".format(end_of_window))
# execute connection
if cckey < tckey:
rev = False
else:
rev = True
v_print("Connecting chain {} to chain {}.".format(
cckey,
tckey))
self._create_second_pass_warp_connection(
current_chain_nodes,
k,
window,
precise=precise,
verbose=verbose,
reverse=rev)
else:
v_print("No valid window for current chain!")
# FIND FACES OF NETWORK ---------------------------------------------------
def to_KnitDiNetwork(self):
"""
Constructs and returns a directed KnitDiNetwork based on this network
by duplicating all edges so that [u -> v] and [v -> u] for every
edge [u - v] in this undirected network.
Returns
-------
directed_network : :class:`KnitDiNetwork`
The directed representation of this network.
"""
# create a directed network with duplicate edges in opposing directions
dirnet = KnitDiNetwork()
dirnet.name = self.name
dirnet.add_nodes_from(self)
dirnet.add_edges_from((u, v, data)
for u, nbrs in self.adjacency_iter()
for v, data in nbrs.items())
dirnet.graph = self.graph
dirnet.node = self.node
dirnet.mapping_network = self.mapping_network
return dirnet
def find_cycles(self, mode=-1):
"""
Finds the cycles (faces) of this network by utilizing a wall-follower
mechanism.
Parameters
----------
mode : int, optional
Determines how the neighbors of each node are sorted when finding
cycles for the network.
``-1`` corresponds to using the world XY plane.
``0`` corresponds to using a plane normal to the origin node's closest
point on the reference geometry.
``1`` corresponds to using a plane normal to the average of the origin
and neighbor nodes' closest points on the reference geometry.
``2`` corresponds to using an average plane between a plane fit to the
origin and its neighbor nodes and a plane normal to the origin
node's closest point on the reference geometry.
Defaults to ``-1``.
Warning
-------
Modes other than ``-1`` are only possible if this network has an
underlying reference geometry in form of a Mesh or NurbsSurface. The
reference geometry should be assigned when initializing the network by
assigning the geometry to the "reference_geometry" attribute of the
network.
Notes
-----
Based on an implementation inside the COMPAS framework.
For more info see [16]_.
"""
return self.to_KnitDiNetwork().find_cycles(mode=mode)
def create_mesh(self, mode=-1, max_valence=4):
"""
Constructs a mesh from this network by finding cycles and using them as
mesh faces.
Parameters
----------
mode : int, optional
Determines how the neighbors of each node are sorted when finding
cycles for the network.
``-1`` corresponds to using the world XY plane.
``0`` corresponds to using a plane normal to the origin node's closest
point on the reference geometry.
``1`` corresponds to using a plane normal to the average of the origin
and neighbor nodes' closest points on the reference geometry.
``2`` corresponds to using an average plane between a plane fit to the
origin and its neighbor nodes and a plane normal to the origin
node's closest point on the reference geometry.
Defaults to ``-1``.
max_valence : int, optional
Sets the maximum edge valence of the faces. If this is set to > 4,
n-gon faces (more than 4 edges) are allowed. Otherwise, their
cycles are treated as invalid and will be ignored.
Defaults to ``4``.
Warning
-------
Modes other than ``-1`` are only possible if this network has an
underlying reference geometry in form of a Mesh or NurbsSurface. The
reference geometry should be assigned when initializing the network by
assigning the geometry to the "reference_geometry" attribute of the
network.
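Examples
--------
Hypothetical usage sketch; ``KN`` is assumed to be a fully connected
KnitNetwork instance with final 'weft' and 'warp' edges:
>>> mesh = KN.create_mesh(mode=-1, max_valence=4)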
"""
return self.to_KnitDiNetwork().create_mesh(mode=mode,
max_valence=max_valence)
# DUALITY -----------------------------------------------------------------
def create_dual(self, mode=-1, merge_adj_creases=False,
mend_trailing_rows=False):
"""
Creates the dual of this KnitNetwork while translating current edge
attributes to the edges of the dual network.
Parameters
----------
mode : int, optional
Determines how the neighbors of each node are sorted when finding
cycles for the network.
``-1`` corresponds to using the world XY plane.
``0`` corresponds to using a plane normal to the origin node's closest
point on the reference geometry.
``1`` corresponds to using a plane normal to the average of the origin
and neighbor nodes' closest points on the reference geometry.
``2`` corresponds to using an average plane between a plane fit to the
origin and its neighbor nodes and a plane normal to the origin
node's closest point on the reference geometry.
Defaults to ``-1``.
merge_adj_creases : bool, optional
If ``True``, will merge adjacent 'increase' and 'decrease' nodes
connected by a 'weft' edge into a single node. This effectively
simplifies the pattern, as a decrease is unnecessary to perform
if an increase is right beside it - both nodes can be replaced by a
single regular node (stitch).
Defaults to ``False``.
mend_trailing_rows : bool, optional
If ``True``, will attempt to mend trailing rows by reconnecting
nodes.
Defaults to ``False``.
Returns
-------
dual_network : :class:`KnitDiNetwork`
The dual network of this KnitNetwork.
Warning
-------
Modes other than -1 (default) are only possible if this network has an
underlying reference geometry in form of a Mesh or NurbsSurface. The
reference geometry should be assigned when initializing the network by
assigning the geometry to the 'reference_geometry' attribute of the
network.
Notes
-----
Closely resembles the implementation described in *Automated Generation
of Knit Patterns for Non-developable Surfaces* [1]_. Also see
*KnitCrete - Stay-in-place knitted formworks for complex concrete
structures* [2]_.
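Examples
--------
Hypothetical usage sketch; ``KN`` is assumed to be a fully connected
KnitNetwork instance with final 'weft' and 'warp' edges:
>>> dual = KN.create_dual(mode=-1, merge_adj_creases=True)
>>> mesh = dual.create_mesh(mode=-1, max_valence=4)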
"""
# first find the cycles of this network
cycles = self.find_cycles(mode=mode)
# get node data for all nodes once
node_data = {k: self.node[k] for k in self.nodes_iter()}
# create new directed KnitDiNetwork for dual network
DualNetwork = KnitDiNetwork(
reference_geometry=self.graph["reference_geometry"])
# create mapping dict for edges to adjacent cycles
edge_to_cycle = {(u, v): None for u, v in self.edges_iter()}
edge_to_cycle.update({(v, u): None for u, v in self.edges_iter()})
# CREATE NODES OF DUAL ------------------------------------------------
# for each cycle, find the centroid node
for ckey in sorted(cycles.keys()):
cycle = cycles[ckey]
clen = len(cycle)
# skip invalid cycles (ngons and self-loops)
if clen > 4 or clen < 3:
continue
# loop over cycle edges and fill mapping dicts
closed_cycle = cycle[:]
closed_cycle.append(cycle[0])
for u, v in pairwise(closed_cycle):
edge_to_cycle[(u, v)] = ckey
# get coords of cycle nodes
cycle_coords = [[node_data[k]["x"],
node_data[k]["y"],
node_data[k]["z"]] for k in cycle]
# compute centroid
cx, cy, cz = zip(*cycle_coords)
centroid = [sum(cx) / clen, sum(cy) / clen, sum(cz) / clen]
centroid_pt = RhinoPoint3d(*centroid)
# get node 'leaf' attributes
is_leaf = True in [node_data[k]["leaf"] for k in cycle]
# get node 'color' attributes. only if all colors of the cycle
# match, the color attribute will be set!
colors = [node_data[k]["color"] for k in cycle]
if all(x == colors[0] for x in colors):
cycle_color = colors[0]
else:
cycle_color = None
# add node to dual network
DualNetwork.node_from_point3d(ckey,
centroid_pt,
position=None,
num=None,
leaf=is_leaf,
start=False,
end=False,
segment=None,
increase=False,
decrease=False,
color=cycle_color)
# CREATE EDGES IN DUAL ------------------------------------------------
# loop over original edges and create corresponding edges in dual
for u, v, d in self.edges_iter(data=True):
u, v = self.edge_geometry_direction(u, v)
cycle_a = edge_to_cycle[(u, v)]
cycle_b = edge_to_cycle[(v, u)]
if cycle_a != None and cycle_b != None:
node_a = (cycle_a, DualNetwork.node[cycle_a])
node_b = (cycle_b, DualNetwork.node[cycle_b])
if d["warp"]:
DualNetwork.create_weft_edge(node_b, node_a)
elif d["weft"]:
DualNetwork.create_warp_edge(node_a, node_b)
# SET ATTRIBUTES OF DUAL NODES ----------------------------------------
# loop over all nodes of the network and set crease and end attributes
for node in DualNetwork.nodes_iter():
node_data = DualNetwork.node[node]
warp_in = DualNetwork.node_warp_edges_in(node)
warp_out = DualNetwork.node_warp_edges_out(node)
weft_in = DualNetwork.node_weft_edges_in(node)
weft_out = DualNetwork.node_weft_edges_out(node)
warplen = len(warp_in) + len(warp_out)
weftlen = len(weft_in) + len(weft_out)
# 2 warp edges and 1 weft edge >> end
if warplen == 2 and weftlen == 1:
node_data["end"] = True
if weft_out:
node_data["start"] = True
# 1 warp edge and 1 weft edge >> end and increase / decrease
elif warplen == 1 and weftlen == 1:
node_data["end"] = True
if weft_out:
node_data["start"] = True
if warp_out and not node_data["leaf"]:
node_data["increase"] = True
elif warp_in and not node_data["leaf"]:
node_data["decrease"] = True
# 2 warp edges and 0 weft edges >> end
elif warplen == 2 and weftlen == 0:
node_data["end"] = True
node_data["start"] = True
# 1 warp edge and 0 weft edges >> end
elif warplen == 1 and weftlen == 0:
node_data["end"] = True
node_data["start"] = True
# 0 warp edges and 1 weft edge >> end
elif warplen == 0 and weftlen == 1:
node_data["end"] = True
if weft_out:
node_data["start"] = True
# 1 warp edge and 2 weft edges >> increase or decrease
elif warplen == 1 and weftlen == 2:
if not node_data["leaf"]:
if warp_out:
node_data["increase"] = True
elif warp_in:
node_data["decrease"] = True
# MERGE ADJACENT INCREASES/DECREASES ----------------------------------
if merge_adj_creases:
increase_nodes = [inc for inc in DualNetwork.nodes_iter(data=True)
if inc[1]["increase"]]
for increase, data in increase_nodes:
pred = DualNetwork.predecessors(increase)
suc = DualNetwork.successors(increase)
pred = [p for p in pred if DualNetwork.node[p]["decrease"]]
suc = [s for s in suc if DualNetwork.node[s]["decrease"]]
# merge only with pred or with suc but not both
if (len(pred) == 1 and
DualNetwork.edge[pred[0]][increase]["weft"]):
# merge nodes, edge is pred, increase
pred = pred[0]
pd = DualNetwork.node[pred]
# remove the connecting edge
DualNetwork.remove_edge(pred, increase)
# get the points of the nodes
increase_pt = data["geo"]
pred_pt = pd["geo"]
# compute the new merged point
new_vec = RhinoVector3d(increase_pt - pred_pt)
new_pt = pred_pt + (new_vec * 0.5)
# replace the increase with the new pt and invert the
# increase attribute
data["geo"] = new_pt
data["x"] = new_pt.X
data["y"] = new_pt.Y
data["z"] = new_pt.Z
data["increase"] = False
# edit the edges of the increase
for edge in DualNetwork.edges_iter(increase, data=True):
edge[2]["geo"] = RhinoLine(
data["geo"],
DualNetwork.node[edge[1]]["geo"])
# edit edges of decrease
for edge in DualNetwork.in_edges_iter(pred, data=True):
if edge[2]["warp"]:
fromNode = (edge[0], DualNetwork.node[edge[0]])
toNode = (increase, data)
DualNetwork.create_warp_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
elif edge[2]["weft"]:
fromNode = (edge[0], DualNetwork.node[edge[0]])
toNode = (increase, data)
DualNetwork.create_weft_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
DualNetwork.remove_node(pred)
elif (not pred and len(suc) == 1 and
DualNetwork.edge[increase][suc[0]]["weft"]):
# merge nodes, edge is increase, suc
suc = suc[0]
sd = DualNetwork.node[suc]
# remove the connecting edge
DualNetwork.remove_edge(increase, suc)
# get the points of the nodes
increase_pt = data["geo"]
suc_pt = sd["geo"]
# compute the new merged point
new_vec = RhinoVector3d(suc_pt - increase_pt)
new_pt = increase_pt + (new_vec * 0.5)
# replace the increase with the new pt and invert the
# increase attribute
data["geo"] = new_pt
data["x"] = new_pt.X
data["y"] = new_pt.Y
data["z"] = new_pt.Z
data["increase"] = False
# edit the edges of the increase
for edge in DualNetwork.edges_iter(increase, data=True):
edge[2]["geo"] = RhinoLine(
data["geo"],
DualNetwork.node[edge[1]]["geo"])
for edge in DualNetwork.in_edges_iter(increase, data=True):
edge[2]["geo"] = RhinoLine(
DualNetwork.node[edge[0]]["geo"],
data["geo"])
# edit incoming edges of decrease
for edge in DualNetwork.in_edges_iter(suc, data=True):
if edge[2]["warp"]:
fromNode = (edge[0], DualNetwork.node[edge[0]])
toNode = (increase, data)
DualNetwork.create_warp_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
elif edge[2]["weft"]:
fromNode = (edge[0], DualNetwork.node[edge[0]])
toNode = (increase, data)
DualNetwork.create_weft_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
# edit outgoing edges of decrease
for edge in DualNetwork.edges_iter(suc, data=True):
if edge[2]["warp"]:
fromNode = (increase, data)
toNode = (edge[1], DualNetwork.node[edge[1]])
DualNetwork.create_warp_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
elif edge[2]["weft"]:
fromNode = (increase, data)
toNode = (edge[1], DualNetwork.node[edge[1]])
DualNetwork.create_weft_edge(fromNode, toNode)
DualNetwork.remove_edge(edge[0], edge[1])
DualNetwork.remove_node(suc)
# ATTEMPT TO MEND TRAILING ROWS ---------------------------------------
if mend_trailing_rows:
# TODO: find a safer / more robust implementation attempt!
errMsg = ("This option is not satisfyingly implemented for this " +
"method, yet. Therefore, it is deactivated for now.")
raise NotImplementedError(errMsg)
# get all nodes which are 'leaf' and 'end' (right side)
# and all nodes which are 'leaf' and 'start' (left side)
trailing = sorted([(n, d) for n, d in
DualNetwork.nodes_iter(data=True)
if d["leaf"]
and d["end"]], key=lambda x: x[0])
trailing_left = deque([t for t in trailing if t[1]["start"]])
trailing_right = deque([t for t in trailing if not t[1]["start"]])
# from the trailing left nodes...
# travel one outgoing 'weft'
# from there travel one incoming 'warp'
# if the resulting node is 'start', 'end' and has 3 edges in total
# >> take its outgoing 'warp' edge (we already traveled that so
# we should already have it)
# >> connect it to the trailing left node
# >> remove the 'leaf' attribute from the trailing node as it is no
# longer trailing
# >> add the 'increase' attribute to the previous target of the
# 'warp' edge
while len(trailing_left) > 0:
# pop an item from the deque
trail = trailing_left.popleft()
# travel one outgoing 'weft' edge
weft_out = DualNetwork.node_weft_edges_out(trail[0], data=True)
if not weft_out:
continue
weft_out = weft_out[0]
# check the target of the 'weft' edge for incoming 'warp'
warp_in = DualNetwork.node_warp_edges_in(
weft_out[1],
data=True)
warp_out = DualNetwork.node_warp_edges_out(
weft_out[1],
data=True)
if not warp_in:
continue
warp_in = warp_in[0]
candidate = (warp_in[0], DualNetwork.node[warp_in[0]])
nce = len(DualNetwork.in_edges(warp_in[0]))
nce += len(DualNetwork.edges(warp_in[0]))
# if this condition holds, we have a trailing increase
if (candidate[1]["start"]
and candidate[1]["end"]
and nce == 3):
# remove found 'warp' edge
DualNetwork.remove_edge(warp_in[0], warp_in[1])
# assign 'increase' attribute to former 'warp' edge target
DualNetwork.node[warp_in[1]]["increase"] = True
# connect candidate to trail with new 'warp' edge
DualNetwork.create_warp_edge(candidate, trail)
# remove 'leaf' attribute of former trail
trail[1]["leaf"] = False
else:
if warp_out:
warp_out = warp_out[0]
candidate = (warp_out[1],
DualNetwork.node[warp_out[1]])
nce = len(DualNetwork.in_edges(warp_out[1]))
nce += len(DualNetwork.edges(warp_out[1]))
# if this condition holds, we have a trailing decrease
if (candidate[1]["start"]
and candidate[1]["end"]
and nce == 3):
# remove found 'warp' edge
DualNetwork.remove_edge(warp_out[0], warp_out[1])
# assign 'decrease' attribute to former 'warp'
# edge source
DualNetwork.node[warp_out[0]]["decrease"] = True
# connect former trail to candidate with new
# 'warp' edge
DualNetwork.create_warp_edge(trail, candidate)
# remove 'leaf' attribute of former trail
trail[1]["leaf"] = False
while len(trailing_right) > 0:
# pop an item from the deque
trail = trailing_right.popleft()
# travel one incoming 'weft' edge
weft_in = DualNetwork.node_weft_edges_in(trail[0], data=True)
if not weft_in:
continue
weft_in = weft_in[0]
# check the target of the 'weft' edge for incoming 'warp'
warp_in = DualNetwork.node_warp_edges_in(weft_in[0],
data=True)
warp_out = DualNetwork.node_warp_edges_out(weft_in[0],
data=True)
if not warp_in:
continue
warp_in = warp_in[0]
candidate = (warp_in[0], DualNetwork.node[warp_in[0]])
nce = len(DualNetwork.in_edges(warp_in[0]))
nce += len(DualNetwork.edges(warp_in[0]))
# if this condition holds, we have a trailing increase
if candidate[1]["end"] and nce == 3:
# remove found 'warp' edge
DualNetwork.remove_edge(warp_in[0], warp_in[1])
# assign 'increase' attribute to former 'warp' edge target
DualNetwork.node[warp_in[1]]["increase"] = True
# connect candidate to trail with new 'warp' edge
DualNetwork.create_warp_edge(candidate, trail)
# remove 'leaf' attribute of former trail
trail[1]["leaf"] = False
else:
if warp_out:
warp_out = warp_out[0]
candidate = (warp_out[1],
DualNetwork.node[warp_out[1]])
nce = len(DualNetwork.in_edges(warp_out[1]))
nce += len(DualNetwork.edges(warp_out[1]))
# if this condition holds, we have a trailing decrease
if (candidate[1]["start"]
and candidate[1]["end"]
and nce == 3):
# remove found 'warp' edge
DualNetwork.remove_edge(warp_out[0], warp_out[1])
# assign 'decrease' attribute to former 'warp'
# edge source
DualNetwork.node[warp_out[0]]["decrease"] = True
# connect former trail to candidate with new
# 'warp' edge
DualNetwork.create_warp_edge(trail, candidate)
# remove 'leaf' attribute of former trail
trail[1]["leaf"] = False
return DualNetwork
# MAIN ------------------------------------------------------------------------
if __name__ == '__main__':
pass
| 45.454681
| 79
| 0.495106
|
27af82c734c9c172d86f1e925df82c41889d2af8
| 5,388
|
py
|
Python
|
main.py
|
GuruOfPython/Python-Tkinter-GUI
|
de17e819cc6008274077d8347d722e779cb9166b
|
[
"MIT"
] | null | null | null |
main.py
|
GuruOfPython/Python-Tkinter-GUI
|
de17e819cc6008274077d8347d722e779cb9166b
|
[
"MIT"
] | null | null | null |
main.py
|
GuruOfPython/Python-Tkinter-GUI
|
de17e819cc6008274077d8347d722e779cb9166b
|
[
"MIT"
] | null | null | null |
# from binary_tree import *
#
# root = Node(8)
#
# root.insert(3)
# root.insert(10)
# root.insert(1)
# root.insert(6)
# root.insert(4)
# root.insert(7)
# root.insert(14)
# root.insert(13)
# node, parent = root.lookup(6)
# print(node, parent)
# root.print_tree()
#
# root.delete(10)
#
# root.print_tree()
import tkinter as tk
from tkinter import *
# import tkMessageBox as messagesbox
import tkinter.messagebox as messagebox
from tkinter import ttk
from tkinter import simpledialog
from treeview import TreeView
from random import shuffle
from naive import NaiveBST, perfect_inserter
from random import *
import random
if __name__ == '__main__':
app = main_GUI(None)
app.title("Binary Search Tree")
app.mainloop()
| 32.853659
| 103
| 0.615256
|
27b12ffdc16386ed1ffaa3ad7820397e93894fcc
| 4,634
|
py
|
Python
|
cbagent/collectors/sgimport_latency.py
|
sharujayaram/perfrunner
|
8fe8ff42a5c74c274b569ba2c45cd43b320f48eb
|
[
"Apache-2.0"
] | null | null | null |
cbagent/collectors/sgimport_latency.py
|
sharujayaram/perfrunner
|
8fe8ff42a5c74c274b569ba2c45cd43b320f48eb
|
[
"Apache-2.0"
] | null | null | null |
cbagent/collectors/sgimport_latency.py
|
sharujayaram/perfrunner
|
8fe8ff42a5c74c274b569ba2c45cd43b320f48eb
|
[
"Apache-2.0"
] | 1
|
2019-05-20T13:44:29.000Z
|
2019-05-20T13:44:29.000Z
|
import requests
import json
from concurrent.futures import ProcessPoolExecutor as Executor
from concurrent.futures import ThreadPoolExecutor
from time import sleep, time
from couchbase.bucket import Bucket
from cbagent.collectors import Latency, Collector
from logger import logger
from perfrunner.helpers.misc import uhex
from spring.docgen import Document
from cbagent.metadata_client import MetadataClient
from cbagent.stores import PerfStore
from perfrunner.settings import (
ClusterSpec,
PhaseSettings,
TargetIterator,
TestConfig,
)
| 30.486842
| 95
| 0.594087
|
27b2e2a025ad448d149dbcc0d2fb399829c3c2bf
| 370
|
py
|
Python
|
clean_junos_routes.py
|
JNPRAutomate/event_driven_automation_with_a_TIG_stack
|
4e2cebdec4dc8d681d71374a7c342f016b8b649e
|
[
"MIT"
] | 4
|
2019-08-23T10:55:48.000Z
|
2021-06-24T01:00:12.000Z
|
clean_junos_routes.py
|
JNPRAutomate/event_driven_automation_with_a_TIG_stack
|
4e2cebdec4dc8d681d71374a7c342f016b8b649e
|
[
"MIT"
] | null | null | null |
clean_junos_routes.py
|
JNPRAutomate/event_driven_automation_with_a_TIG_stack
|
4e2cebdec4dc8d681d71374a7c342f016b8b649e
|
[
"MIT"
] | 5
|
2019-03-25T11:12:00.000Z
|
2021-12-23T03:01:14.000Z
|
from jnpr.junos import Device
from jnpr.junos.utils.config import Config
clean_routing_table()
| 24.666667
| 76
| 0.716216
|
27b4b4442e8234ce781c98d6ea27cb6fba57c3a9
| 5,000
|
py
|
Python
|
Tools/renew-navi-npc.py
|
vakhet/ragnarok-navigation
|
df7d3ff95a9bd1c0497744113ad664a31d248de6
|
[
"MIT"
] | 3
|
2017-12-02T16:40:32.000Z
|
2020-02-11T17:44:02.000Z
|
Tools/renew-navi-npc.py
|
vakhet/ragnarok-navigation
|
df7d3ff95a9bd1c0497744113ad664a31d248de6
|
[
"MIT"
] | null | null | null |
Tools/renew-navi-npc.py
|
vakhet/ragnarok-navigation
|
df7d3ff95a9bd1c0497744113ad664a31d248de6
|
[
"MIT"
] | null | null | null |
"""
Author : vakhet at gmail.com
This script reads all of your NPC names from the original rAthena folder
and updates the corresponding lines in navi_npc_krpri.lub
wherever the map name and coordinates match
"""
import re
import os
import random
import sqlite3
NPC_match = r'^[\w\d_]+,\d+,\d+,\d+\tscript\t[\w\d_ -]+#*[\w\d_ -]*\t[\d,{]+$'
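# Illustrative, hypothetical rAthena script line that the pattern above is intended
# to match (fields are tab-separated):
#   "prontera,155,183,4<TAB>script<TAB>Kafra Employee#prt1<TAB>117,{"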
allfiles = []
log = open('result.log', 'w', errors='ignore')
conn = sqlite3.connect('db.sqlite')
db = conn.cursor()
intro = '''
Renew navi_npc_krpri.lub | Version 0.2 | (C) 2017 vakhet @ gmail.com
Changes:
v0.2 - *.new file now creates in same folder with original *.lub
'''
outro = '''
Check results in result.log
NEW file generated: navi_npc_krpri.new
'''
db.executescript('''
DROP TABLE IF EXISTS npc;
CREATE TABLE npc (
id INTEGER NOT NULL PRIMARY KEY AUTOINCREMENT UNIQUE,
map TEXT,
thing1 INTEGER,
thing2 INTEGER,
thing3 INTEGER,
name TEXT,
shadow TEXT,
x INTEGER,
y INTEGER
)
''')
# The Beginning
print(intro)
while True:
path_rathena = input('Enter path to NPC: ')
if not os.path.exists(path_rathena):
print('Wrong path!\n\n')
continue
else:
break
while True:
path_navi = input('Enter path to navi_npc_krpri.lub: ')
if not os.path.exists(path_navi+'\\navi_npc_krpri.lub'):
print('Wrong path!\n\n')
continue
else:
break
stage_1() # scan for *.txt in \npc directory
stage_2() # build DB from navi_npc_krpri.lub
stage_3() # update NPC names in DB from *.txt
stage_4() # building navi_npc_krpri.new
print('See the complete list of changes in result.log')
print('NEW file generated: navi_npc_krpri.new')
input('\nPress any key')
| 28.571429
| 82
| 0.523
|
27b65665bc694cadf63afa04994bea99c68ab2c9
| 85
|
py
|
Python
|
rpicarserver/backlight/__init__.py
|
krixian/rpi-car-server
|
8058bec91462b5f2645119de898779d4bf292a84
|
[
"MIT"
] | null | null | null |
rpicarserver/backlight/__init__.py
|
krixian/rpi-car-server
|
8058bec91462b5f2645119de898779d4bf292a84
|
[
"MIT"
] | null | null | null |
rpicarserver/backlight/__init__.py
|
krixian/rpi-car-server
|
8058bec91462b5f2645119de898779d4bf292a84
|
[
"MIT"
] | null | null | null |
from rpicarserver import ext
| 17
| 31
| 0.752941
|
27b801a71ed41ab9ae80dc219943a39cdead01b2
| 712
|
py
|
Python
|
tests/components/rtsp_to_webrtc/test_diagnostics.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/rtsp_to_webrtc/test_diagnostics.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
tests/components/rtsp_to_webrtc/test_diagnostics.py
|
MrDelik/core
|
93a66cc357b226389967668441000498a10453bb
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Test nest diagnostics."""
from typing import Any
from .conftest import ComponentSetup
from tests.common import MockConfigEntry
from tests.components.diagnostics import get_diagnostics_for_config_entry
THERMOSTAT_TYPE = "sdm.devices.types.THERMOSTAT"
| 25.428571
| 87
| 0.716292
|
27b8c7ca0cbfe891ed4189a0d771be178c3ebb62
| 556
|
py
|
Python
|
Modulo_5/semana_3/pandas/pd.py
|
AutodidactaMx/cocid_python
|
11628f465ff362807a692c79ede26bf30dd8e26a
|
[
"MIT"
] | null | null | null |
Modulo_5/semana_3/pandas/pd.py
|
AutodidactaMx/cocid_python
|
11628f465ff362807a692c79ede26bf30dd8e26a
|
[
"MIT"
] | null | null | null |
Modulo_5/semana_3/pandas/pd.py
|
AutodidactaMx/cocid_python
|
11628f465ff362807a692c79ede26bf30dd8e26a
|
[
"MIT"
] | 1
|
2022-03-04T00:57:18.000Z
|
2022-03-04T00:57:18.000Z
|
import pandas as pd
import numpy as np
df = pd.read_csv('poblacion.csv')
pd.options.display.float_format = '{:,.1f}'.format
df = pd.read_csv('poblacion.csv')
df['year'] = pd.Categorical(df['year'].apply(str))
idx_filtro = df['Country'].isin(['Mexico','Panama'])
df_filtro_country = df[idx_filtro]
df_filtro_country =df_filtro_country.set_index(['Country','year']).sort_index(ascending= [False,True])
print(df_filtro_country.unstack('Country'))
ids = pd.IndexSlice
print(df_filtro_country.loc[ids['Albania':'Azerbaijan','2015':'2016'],:].sort_index())
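# pd.IndexSlice builds label-based slicers for a MultiIndex: here
# ids['Albania':'Azerbaijan', '2015':'2016'] asks for rows whose Country level lies
# between 'Albania' and 'Azerbaijan' and whose year level lies between '2015' and '2016'.
# (Note that the frame was filtered to Mexico/Panama above, so these Country labels
# are no longer present in the index.)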
| 30.888889
| 102
| 0.733813
|
27b8f98dbc5944c52c7fdf99ecb0474a2db0ffed
| 3,477
|
py
|
Python
|
reachweb/models.py
|
kamauvick/ReachOutDash
|
ceb7da731982bc9d1b1bb4185f34822b4dcf6526
|
[
"MIT"
] | null | null | null |
reachweb/models.py
|
kamauvick/ReachOutDash
|
ceb7da731982bc9d1b1bb4185f34822b4dcf6526
|
[
"MIT"
] | 9
|
2020-02-12T02:44:31.000Z
|
2022-03-12T00:03:57.000Z
|
reachweb/models.py
|
kamauvick/ReachOutDash
|
ceb7da731982bc9d1b1bb4185f34822b4dcf6526
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.models import User
from django.db import models
| 30.5
| 98
| 0.632442
|
27bb547681e27f63805f0e3f2bcfba62a6d181f3
| 4,876
|
py
|
Python
|
distances/symmetric_amd_distance.py
|
npielawski/py_alpha_amd_release
|
6fb5b3cdef65ba8902daea050785dd73970002c2
|
[
"MIT"
] | 14
|
2019-02-12T20:30:23.000Z
|
2021-11-04T01:10:34.000Z
|
distances/symmetric_amd_distance.py
|
npielawski/py_alpha_amd_release
|
6fb5b3cdef65ba8902daea050785dd73970002c2
|
[
"MIT"
] | 2
|
2021-05-12T05:02:59.000Z
|
2021-10-11T14:40:10.000Z
|
distances/symmetric_amd_distance.py
|
npielawski/py_alpha_amd_release
|
6fb5b3cdef65ba8902daea050785dd73970002c2
|
[
"MIT"
] | 7
|
2019-02-20T12:19:28.000Z
|
2021-02-09T10:12:06.000Z
|
#
# Py-Alpha-AMD Registration Framework
# Author: Johan Ofverstedt
# Reference: Fast and Robust Symmetric Image Registration Based on Distances Combining Intensity and Spatial Information
#
# Copyright 2019 Johan Ofverstedt
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
#
#
# Symmetric Average Minimal Distances (AMD) Distance implemented as a class.
#
import numpy as np
| 41.322034
| 179
| 0.689295
|
27bbbbc489c4d13faa511e2c2877df5e0ce8a2dd
| 431
|
py
|
Python
|
shop/migrations/0005_product_discounted_price.py
|
RitvikDayal/The-Stone-Shop
|
fed85d5ebb993fb1ce235f5e8a8ebc06a76fb956
|
[
"MIT"
] | 2
|
2020-08-27T21:02:54.000Z
|
2020-08-27T21:03:44.000Z
|
shop/migrations/0005_product_discounted_price.py
|
RitvikDayal/The-Stone-Shop
|
fed85d5ebb993fb1ce235f5e8a8ebc06a76fb956
|
[
"MIT"
] | null | null | null |
shop/migrations/0005_product_discounted_price.py
|
RitvikDayal/The-Stone-Shop
|
fed85d5ebb993fb1ce235f5e8a8ebc06a76fb956
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.8 on 2020-07-28 12:46
from django.db import migrations, models
| 21.55
| 50
| 0.605568
|
27bc982db629d22f64003fb61afd5dee8511c5de
| 131
|
py
|
Python
|
tudo/ex060b.py
|
Ramon-Erik/Exercicios-Python
|
158a7f1846dd3d486aa0517fa337d46d73aab649
|
[
"MIT"
] | 1
|
2021-07-08T00:35:57.000Z
|
2021-07-08T00:35:57.000Z
|
tudo/ex060b.py
|
Ramon-Erik/Exercicios-Python
|
158a7f1846dd3d486aa0517fa337d46d73aab649
|
[
"MIT"
] | null | null | null |
tudo/ex060b.py
|
Ramon-Erik/Exercicios-Python
|
158a7f1846dd3d486aa0517fa337d46d73aab649
|
[
"MIT"
] | null | null | null |
from math import factorial
n = int(input('Digite um número, para obter seu fatorial: '))
print('{}! = {}'.format(n, factorial(n)))
| 32.75
| 61
| 0.679389
|
27bd073801f417f0a990ea8f8617bbc868baa23e
| 157
|
py
|
Python
|
setup.py
|
messa/bloom
|
ce975471d0fabac436bcbd3040d22c6e5a97e47c
|
[
"MIT"
] | 1
|
2021-03-14T13:54:42.000Z
|
2021-03-14T13:54:42.000Z
|
setup.py
|
messa/bloom
|
ce975471d0fabac436bcbd3040d22c6e5a97e47c
|
[
"MIT"
] | 1
|
2021-03-15T09:02:24.000Z
|
2021-03-16T07:41:46.000Z
|
setup.py
|
messa/bloom
|
ce975471d0fabac436bcbd3040d22c6e5a97e47c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
from setuptools import setup, Extension
setup(
ext_modules=[
Extension('bloom._hashc', ['bloom/_hashcmodule.c'])
])
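# Typical (illustrative) build invocations for this extension module:
#   python3 setup.py build_ext --inplace   # compile bloom/_hashc in place
#   pip install .                          # build and install the package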
| 17.444444
| 59
| 0.66242
|
27be070f86ae724315deda03de85e57e9b0b008d
| 5,645
|
py
|
Python
|
misc/util.py
|
winder/indexer
|
18f48f026f022cdeef92dcac558d3900d6ea798d
|
[
"MIT"
] | 87
|
2020-08-20T19:14:02.000Z
|
2022-03-30T21:31:59.000Z
|
misc/util.py
|
hassoon1986/indexer
|
0a58e9a78ba7684c7f4cfb4fe7cb24b3d4622d9b
|
[
"MIT"
] | 615
|
2020-06-03T14:13:29.000Z
|
2022-03-31T12:08:38.000Z
|
misc/util.py
|
hassoon1986/indexer
|
0a58e9a78ba7684c7f4cfb4fe7cb24b3d4622d9b
|
[
"MIT"
] | 58
|
2020-06-03T21:33:48.000Z
|
2022-03-26T15:39:50.000Z
|
#!/usr/bin/env python3
import atexit
import logging
import os
import random
import subprocess
import sys
import time
import msgpack
logger = logging.getLogger(__name__)
def unmsgpack(ob):
"convert dict from msgpack.loads() with byte string keys to text string keys"
if isinstance(ob, dict):
od = {}
for k,v in ob.items():
k = maybedecode(k)
okv = False
if (not okv) and (k == 'note'):
try:
v = unmsgpack(mloads(v))
okv = True
except:
pass
if (not okv) and k in ('type', 'note'):
try:
v = v.decode()
okv = True
except:
pass
if not okv:
v = unmsgpack(v)
od[k] = v
return od
if isinstance(ob, list):
return [unmsgpack(v) for v in ob]
#if isinstance(ob, bytes):
# return base64.b64encode(ob).decode()
return ob
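# Illustrative example (assuming maybedecode() simply decodes byte-string keys):
#   unmsgpack({b'type': b'pay', b'amt': 10}) -> {'type': 'pay', 'amt': 10}
# i.e. keys become text, 'type'/'note' values are decoded, other values pass through.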
# whoever calls this will need to import boto and get the s3 client
| 34.845679
| 107
| 0.589725
|
27c0f66f70a59c9a16bcacfd772c973fa3bad2e9
| 11,093
|
py
|
Python
|
coconut/_pyparsing.py
|
evhub/coconut
|
27a4af9dc06667870f736f20c862930001b8cbb2
|
[
"Apache-2.0"
] | 3,624
|
2015-02-22T07:06:18.000Z
|
2022-03-31T03:38:00.000Z
|
coconut/_pyparsing.py
|
evhub/coconut
|
27a4af9dc06667870f736f20c862930001b8cbb2
|
[
"Apache-2.0"
] | 627
|
2015-03-31T01:18:53.000Z
|
2022-03-28T07:48:31.000Z
|
coconut/_pyparsing.py
|
evhub/coconut
|
27a4af9dc06667870f736f20c862930001b8cbb2
|
[
"Apache-2.0"
] | 162
|
2016-03-02T05:22:55.000Z
|
2022-03-31T23:42:55.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------------------------------------------------
# INFO:
# -----------------------------------------------------------------------------------------------------------------------
"""
Author: Evan Hubinger
License: Apache 2.0
Description: Wrapper around PyParsing that selects the best available implementation.
"""
# -----------------------------------------------------------------------------------------------------------------------
# IMPORTS:
# -----------------------------------------------------------------------------------------------------------------------
from __future__ import print_function, absolute_import, unicode_literals, division
from coconut.root import * # NOQA
import os
import sys
import traceback
import functools
import inspect
from warnings import warn
from collections import defaultdict
from coconut.constants import (
PURE_PYTHON,
PYPY,
use_fast_pyparsing_reprs,
use_packrat_parser,
packrat_cache_size,
default_whitespace_chars,
varchars,
min_versions,
pure_python_env_var,
enable_pyparsing_warnings,
use_left_recursion_if_available,
)
from coconut.util import get_clock_time # NOQA
from coconut.util import (
ver_str_to_tuple,
ver_tuple_to_str,
get_next_version,
)
# warning: do not name this file cPyparsing or pyparsing or it might collide with the following imports
try:
if PURE_PYTHON:
raise ImportError("skipping cPyparsing check due to " + pure_python_env_var + " = " + os.environ.get(pure_python_env_var, ""))
import cPyparsing as _pyparsing
from cPyparsing import * # NOQA
from cPyparsing import __version__
PYPARSING_PACKAGE = "cPyparsing"
PYPARSING_INFO = "Cython cPyparsing v" + __version__
except ImportError:
try:
import pyparsing as _pyparsing
from pyparsing import * # NOQA
from pyparsing import __version__
PYPARSING_PACKAGE = "pyparsing"
PYPARSING_INFO = "Python pyparsing v" + __version__
except ImportError:
traceback.print_exc()
__version__ = None
PYPARSING_PACKAGE = "cPyparsing"
PYPARSING_INFO = None
# -----------------------------------------------------------------------------------------------------------------------
# VERSION CHECKING:
# -----------------------------------------------------------------------------------------------------------------------
min_ver = min(min_versions["pyparsing"], min_versions["cPyparsing"][:3]) # inclusive
max_ver = get_next_version(max(min_versions["pyparsing"], min_versions["cPyparsing"][:3])) # exclusive
cur_ver = None if __version__ is None else ver_str_to_tuple(__version__)
if cur_ver is None or cur_ver < min_ver:
min_ver_str = ver_tuple_to_str(min_ver)
raise ImportError(
"Coconut requires pyparsing/cPyparsing version >= " + min_ver_str
+ ("; got " + PYPARSING_INFO if PYPARSING_INFO is not None else "")
+ " (run '{python} -m pip install --upgrade {package}' to fix)".format(python=sys.executable, package=PYPARSING_PACKAGE),
)
elif cur_ver >= max_ver:
max_ver_str = ver_tuple_to_str(max_ver)
warn(
"This version of Coconut was built for pyparsing/cPyparsing versions < " + max_ver_str
+ ("; got " + PYPARSING_INFO if PYPARSING_INFO is not None else "")
+ " (run '{python} -m pip install {package}<{max_ver}' to fix)".format(python=sys.executable, package=PYPARSING_PACKAGE, max_ver=max_ver_str),
)
# -----------------------------------------------------------------------------------------------------------------------
# SETUP:
# -----------------------------------------------------------------------------------------------------------------------
if cur_ver >= (3,):
MODERN_PYPARSING = True
_trim_arity = _pyparsing.core._trim_arity
_ParseResultsWithOffset = _pyparsing.core._ParseResultsWithOffset
else:
MODERN_PYPARSING = False
_trim_arity = _pyparsing._trim_arity
_ParseResultsWithOffset = _pyparsing._ParseResultsWithOffset
USE_COMPUTATION_GRAPH = (
not MODERN_PYPARSING # not yet supported
and not PYPY # experimentally determined
)
if enable_pyparsing_warnings:
if MODERN_PYPARSING:
_pyparsing.enable_all_warnings()
else:
_pyparsing._enable_all_warnings()
_pyparsing.__diag__.warn_name_set_on_empty_Forward = False
if MODERN_PYPARSING and use_left_recursion_if_available:
ParserElement.enable_left_recursion()
elif use_packrat_parser:
ParserElement.enablePackrat(packrat_cache_size)
ParserElement.setDefaultWhitespaceChars(default_whitespace_chars)
Keyword.setDefaultKeywordChars(varchars)
# -----------------------------------------------------------------------------------------------------------------------
# FAST REPRS:
# -----------------------------------------------------------------------------------------------------------------------
if PY2:
def fast_repr(cls):
"""A very simple, fast __repr__/__str__ implementation."""
return "<" + cls.__name__ + ">"
else:
fast_repr = object.__repr__
_old_pyparsing_reprs = []
def set_fast_pyparsing_reprs():
"""Make pyparsing much faster by preventing it from computing expensive nested string representations."""
for obj in vars(_pyparsing).values():
try:
if issubclass(obj, ParserElement):
_old_pyparsing_reprs.append((obj, (obj.__repr__, obj.__str__)))
obj.__repr__ = functools.partial(fast_repr, obj)
obj.__str__ = functools.partial(fast_repr, obj)
except TypeError:
pass
def unset_fast_pyparsing_reprs():
"""Restore pyparsing's default string representations for ease of debugging."""
for obj, (repr_method, str_method) in _old_pyparsing_reprs:
obj.__repr__ = repr_method
obj.__str__ = str_method
if use_fast_pyparsing_reprs:
set_fast_pyparsing_reprs()
# -----------------------------------------------------------------------------------------------------------------------
# PROFILING:
# -----------------------------------------------------------------------------------------------------------------------
_timing_info = [None] # in list to allow reassignment
def add_timing_to_method(cls, method_name, method):
"""Add timing collection to the given method.
It's a monstrosity, but it's only used for profiling."""
from coconut.terminal import internal_assert # hide to avoid circular import
args, varargs, keywords, defaults = inspect.getargspec(method)
internal_assert(args[:1] == ["self"], "cannot add timing to method", method_name)
if not defaults:
defaults = []
num_undefaulted_args = len(args) - len(defaults)
def_args = []
call_args = []
fix_arg_defaults = []
defaults_dict = {}
for i, arg in enumerate(args):
if i >= num_undefaulted_args:
default = defaults[i - num_undefaulted_args]
def_args.append(arg + "=_timing_sentinel")
defaults_dict[arg] = default
fix_arg_defaults.append(
"""
if {arg} is _timing_sentinel:
{arg} = _exec_dict["defaults_dict"]["{arg}"]
""".strip("\n").format(
arg=arg,
),
)
else:
def_args.append(arg)
call_args.append(arg)
if varargs:
def_args.append("*" + varargs)
call_args.append("*" + varargs)
if keywords:
def_args.append("**" + keywords)
call_args.append("**" + keywords)
new_method_name = "new_" + method_name + "_func"
_exec_dict = globals().copy()
_exec_dict.update(locals())
new_method_code = """
def {new_method_name}({def_args}):
{fix_arg_defaults}
_all_args = (lambda *args, **kwargs: args + tuple(kwargs.values()))({call_args})
_exec_dict["internal_assert"](not any(_arg is _timing_sentinel for _arg in _all_args), "error handling arguments in timed method {new_method_name}({def_args}); got", _all_args)
_start_time = _exec_dict["get_clock_time"]()
try:
return _exec_dict["method"]({call_args})
finally:
_timing_info[0][str(self)] += _exec_dict["get_clock_time"]() - _start_time
{new_method_name}._timed = True
""".format(
fix_arg_defaults="\n".join(fix_arg_defaults),
new_method_name=new_method_name,
def_args=", ".join(def_args),
call_args=", ".join(call_args),
)
exec(new_method_code, _exec_dict)
setattr(cls, method_name, _exec_dict[new_method_name])
return True
def collect_timing_info():
"""Modifies pyparsing elements to time how long they're executed for.
It's a monstrosity, but it's only used for profiling."""
from coconut.terminal import logger # hide to avoid circular imports
logger.log("adding timing to pyparsing elements:")
_timing_info[0] = defaultdict(float)
for obj in vars(_pyparsing).values():
if isinstance(obj, type) and issubclass(obj, ParserElement):
added_timing = False
for attr_name in dir(obj):
attr = getattr(obj, attr_name)
if (
callable(attr)
and not isinstance(attr, ParserElement)
and not getattr(attr, "_timed", False)
and attr_name not in (
"__getattribute__",
"__setattribute__",
"__init_subclass__",
"__subclasshook__",
"__class__",
"__setattr__",
"__getattr__",
"__new__",
"__init__",
"__str__",
"__repr__",
"__hash__",
"__eq__",
"_trim_traceback",
"_ErrorStop",
"enablePackrat",
"inlineLiteralsUsing",
"setDefaultWhitespaceChars",
"setDefaultKeywordChars",
"resetCache",
)
):
added_timing |= add_timing_to_method(obj, attr_name, attr)
if added_timing:
logger.log("\tadded timing to", obj)
def print_timing_info():
"""Print timing_info collected by collect_timing_info()."""
print(
"""
=====================================
Timing info:
(timed {num} total pyparsing objects)
=====================================
""".rstrip().format(
num=len(_timing_info[0]),
),
)
sorted_timing_info = sorted(_timing_info[0].items(), key=lambda kv: kv[1])
for method_name, total_time in sorted_timing_info:
print("{method_name}:\t{total_time}".format(method_name=method_name, total_time=total_time))
| 35.554487
| 180
| 0.554945
|
27c1c2dd0bdd326bf942be3440f758392e7db45f
| 4,948
|
py
|
Python
|
tests/test_explicit_hll.py
|
aholyoke/python-hll
|
30793aeb18103600fce0f3ad0b0c9e99e8b756fe
|
[
"MIT"
] | 13
|
2019-11-19T07:38:46.000Z
|
2022-02-11T13:23:25.000Z
|
tests/test_explicit_hll.py
|
aholyoke/python-hll
|
30793aeb18103600fce0f3ad0b0c9e99e8b756fe
|
[
"MIT"
] | 4
|
2019-12-12T04:19:34.000Z
|
2021-06-09T17:52:52.000Z
|
tests/test_explicit_hll.py
|
aholyoke/python-hll
|
30793aeb18103600fce0f3ad0b0c9e99e8b756fe
|
[
"MIT"
] | 6
|
2019-11-06T21:33:25.000Z
|
2022-02-21T14:43:35.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
from python_hll.hlltype import HLLType
from python_hll.hll import HLL
from python_hll.serialization import SerializationUtil
"""Unit tests for BitVector."""
def test_add_basic():
"""
Tests basic set semantics of ``HLL.add_raw()``.
"""
# Adding a single positive value to an empty set should work.
hll = new_hll(128) # arbitrary
hll.add_raw(1) # positive
assert hll.cardinality() == 1
# Adding a single negative value to an empty set should work.
hll = new_hll(128) # arbitrary
hll.add_raw(-1) # negative
assert hll.cardinality() == 1
# Adding a duplicate value to a set should be a no-op.
hll = new_hll(128) # arbitrary
hll.add_raw(1) # positive
hll.add_raw(1) # dupe
assert hll.cardinality() == 1
def test_union():
"""
Tests ``HLL.union()``.
"""
# Unioning two distinct sets should work
hll_a = new_hll(128) # arbitrary
hll_b = new_hll(128) # arbitrary
hll_a.add_raw(1)
hll_a.add_raw(2)
hll_b.add_raw(3)
hll_a.union(hll_b)
assert hll_a.cardinality() == 3
# Unioning two sets whose union doesn't exceed the cardinality cap should not promote
hll_a = new_hll(128) # arbitrary
hll_b = new_hll(128) # arbitrary
hll_a.add_raw(1)
hll_a.add_raw(2)
hll_b.add_raw(1)
hll_a.union(hll_b)
assert hll_a.cardinality() == 2
assert hll_a.get_type() == HLLType.EXPLICIT
# Unioning two sets whose union exceeds the cardinality cap should promote
hll_a = new_hll(128) # arbitrary
hll_b = new_hll(128) # arbitrary
for i in range(0, 128):
hll_a.add_raw(i)
hll_b.add_raw(i+128)
hll_a.union(hll_b)
assert hll_a.get_type() == HLLType.SPARSE
def test_clear():
"""
Tests ``HLL.clear()``
"""
hll = new_hll(128) # arbitrary
hll.add_raw(1)
hll.clear()
assert hll.cardinality() == 0
def test_to_from_bytes():
"""
    Tests ``HLL.to_bytes()`` and ``HLL.from_bytes()``.
"""
schema_version = SerializationUtil.DEFAULT_SCHEMA_VERSION
type = HLLType.EXPLICIT
padding = schema_version.padding_bytes(type)
bytes_per_word = 8
# Should work on an empty set
hll = new_hll(128)
bytes = hll.to_bytes(schema_version)
assert len(bytes) == padding # no elements, just padding
in_hll = HLL.from_bytes(bytes)
assert_elements_equal(hll, in_hll)
# Should work on a partially filled set
hll = new_hll(128)
for i in range(0, 3):
hll.add_raw(i)
bytes = hll.to_bytes(schema_version)
assert len(bytes) == padding + bytes_per_word * 3
in_hll = HLL.from_bytes(bytes)
assert_elements_equal(hll, in_hll)
# Should work on a full set
explicit_threshold = 128
hll = new_hll(explicit_threshold)
for i in range(0, explicit_threshold):
hll.add_raw(27 + i)
bytes = hll.to_bytes(schema_version)
assert len(bytes) == padding + bytes_per_word * explicit_threshold
in_hll = HLL.from_bytes(bytes)
assert_elements_equal(hll, in_hll)
def test_random_values():
"""
Tests correctness against `set()`.
"""
explicit_threshold = 4096
canonical = set()
hll = new_hll(explicit_threshold)
seed = 1 # constant so results are reproducible
random.seed(seed)
max_java_long = 9223372036854775807
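    # 9223372036854775807 == 2**63 - 1, the largest signed 64-bit (Java long) value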
for i in range(0, explicit_threshold):
random_long = random.randint(1, max_java_long)
canonical.add(random_long)
hll.add_raw(random_long)
canonical_cardinality = len(canonical)
assert hll.cardinality() == canonical_cardinality
def test_promotion():
"""
Tests promotion to ``HLLType.SPARSE`` and ``HLLType.FULL``.
"""
explicit_threshold = 128
hll = HLL.create_for_testing(11, 5, explicit_threshold, 256, HLLType.EXPLICIT)
for i in range(0, explicit_threshold + 1):
hll.add_raw(i)
assert hll.get_type() == HLLType.SPARSE
hll = HLL(11, 5, 4, False, HLLType.EXPLICIT) # expthresh=4 => explicit_threshold=8
for i in range(0, 9):
hll.add_raw(i)
assert hll.get_type() == HLLType.FULL
# ------------------------------------------------------------
# assertion helpers
def assert_elements_equal(hll_a, hll_b):
"""
Asserts that values in both sets are exactly equal.
"""
assert hll_a._explicit_storage == hll_b._explicit_storage
def new_hll(explicit_threshold):
"""
Builds a ``HLLType.EXPLICIT`` ``HLL`` instance with the specified
explicit threshold.
:param explicit_threshold: explicit threshold to use for the constructed
``HLL``. This must be greater than zero.
:type explicit_threshold: int
:returns: A default-sized ``HLLType.EXPLICIT`` empty ``HLL`` instance. This
will never be ``None``.
:rtype: HLL
"""
return HLL.create_for_testing(11, 5, explicit_threshold, 256, HLLType.EXPLICIT)
| 27.337017
| 89
| 0.653597
|
27c4d9ac4a8dbf1e90e1e11bbe903ce9523aee39
| 4,792
|
py
|
Python
|
weixin/HelloMyDear.py
|
FantasyZsp/py-utils
|
4ccebd298780508d58400d2d8967f59ca7c6603d
|
[
"Apache-2.0"
] | null | null | null |
weixin/HelloMyDear.py
|
FantasyZsp/py-utils
|
4ccebd298780508d58400d2d8967f59ca7c6603d
|
[
"Apache-2.0"
] | null | null | null |
weixin/HelloMyDear.py
|
FantasyZsp/py-utils
|
4ccebd298780508d58400d2d8967f59ca7c6603d
|
[
"Apache-2.0"
] | null | null | null |
from weixin.utils.WeiXinUtils import *
# 5.main()
# 5.main()
if __name__ == '__main__':
# names = input("")
# hours = int(input(""))
# minutes = int(input(""))
# number = input("")
# hello(names, hours, minutes, number)
names = input("")
hours = int(input(""))
minutes = int(input(""))
number = input("")
print(names)
print(hours)
print(minutes)
print(number)
g = getYMD()
g1 = get_iciba_everyday_chicken_soup()
# number
name = 'http://t.weather.sojson.com/api/weather/city/' + number
# get_sentence
g2 = get_sentence(name)
times = g2['cityInfo']
for key, name in times.items():
city = times['city']
parent = times['parent']
#
time1 = g2['data']
for key, name in time1.items():
shidu = time1['shidu']
pm25 = time1['pm25']
quality = time1['quality']
ganmao = time1['ganmao']
time1 = g2['data']
time2 = time1.get('forecast', '')
time2 = time2[0]
itchat.auto_login(hotReload=True)
for key, name in time2.items():
high = time2['high']
low = time2['low']
fx = time2['fx']
fl = time2['fl']
type = time2['type']
notice = time2['type']
#
users = itchat.search_friends(names) #
userName = users[0]['UserName']
while True:
t = datetime.datetime.now()
t1 = t.strftime('%Y-%m-%d %H:%M:%S')
hour = t.hour
minute = t.minute
second = t.second
print('%d:%d:%d' % (hour, minute, second))
if hour == hours and minute == minutes:
itchat.send_msg("%s" % g, toUserName=userName)
itchat.send_msg('%s' % g1, toUserName=userName)
itchat.send_msg('%s\n'
'%s\n'
'%s\n '
'%s\n'
'%s\n '
'%s\n'
'%s \n'
'PM2.5: %s\n'
'%s \n'
'%s\n'
'%s - %s ' % (parent, city, high, low, fx, fl, shidu, pm25,
quality, ganmao, type, notice), toUserName=userName)
break
else:
time.sleep(5) # 5
continue
itchat.run()
time.sleep(86400)
| 31.320261
| 97
| 0.47788
|
27c607ecb317226ea26c46b6beec6b1d9d516ae8
| 7,892
|
py
|
Python
|
Medicine-manag-django/pharma/views.py
|
DanielDDHM/my-projects-py
|
f6c3af7f6cd61c69234d25c956027e8c7e626470
|
[
"MIT"
] | null | null | null |
Medicine-manag-django/pharma/views.py
|
DanielDDHM/my-projects-py
|
f6c3af7f6cd61c69234d25c956027e8c7e626470
|
[
"MIT"
] | null | null | null |
Medicine-manag-django/pharma/views.py
|
DanielDDHM/my-projects-py
|
f6c3af7f6cd61c69234d25c956027e8c7e626470
|
[
"MIT"
] | null | null | null |
from .models import Dealer
from .models import Employee
from .models import Customer
from .models import Medicine
from .models import Purchase
from django.shortcuts import render
from django.db import IntegrityError
| 28.490975
| 61
| 0.637988
|
27c811e47a423871511471f8e6a47527924900eb
| 202
|
py
|
Python
|
resume/display/views.py
|
Varun789/Profile
|
990818d233ac0279ef4d55641e1e284850bdbfb2
|
[
"BSD-3-Clause"
] | null | null | null |
resume/display/views.py
|
Varun789/Profile
|
990818d233ac0279ef4d55641e1e284850bdbfb2
|
[
"BSD-3-Clause"
] | null | null | null |
resume/display/views.py
|
Varun789/Profile
|
990818d233ac0279ef4d55641e1e284850bdbfb2
|
[
"BSD-3-Clause"
] | null | null | null |
from django.shortcuts import render
from .models import Profile
# Create your views here.
| 22.444444
| 60
| 0.737624
|
27c8bd46f163cd7e8d07a019d3a50bfc0ca3baa3
| 458
|
py
|
Python
|
libStash/books/migrations/0023_auto_20210320_1241.py
|
Dev-Rem/libStash
|
a364e9997c1c91b09f5db8a004deb4df305fa8cf
|
[
"MIT"
] | null | null | null |
libStash/books/migrations/0023_auto_20210320_1241.py
|
Dev-Rem/libStash
|
a364e9997c1c91b09f5db8a004deb4df305fa8cf
|
[
"MIT"
] | null | null | null |
libStash/books/migrations/0023_auto_20210320_1241.py
|
Dev-Rem/libStash
|
a364e9997c1c91b09f5db8a004deb4df305fa8cf
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.0.4 on 2021-03-20 12:41
import phone_field.models
from django.db import migrations
| 22.9
| 104
| 0.637555
|
27c9faa515cbfcb516d2a78da11f8590793a0cac
| 6,912
|
py
|
Python
|
src/PyTorch2ONNX/PyTorch2ONNX_Run_in_ONNX_RUNTIME.py
|
Yulv-git/Model_Inference_Deployment
|
623f9955dfb60fe7af9d17415bfec58fc4c86c1b
|
[
"MIT"
] | 4
|
2022-02-05T14:16:05.000Z
|
2022-03-27T13:35:06.000Z
|
src/PyTorch2ONNX/PyTorch2ONNX_Run_in_ONNX_RUNTIME.py
|
Yulv-git/Model_Inference_Deployment
|
623f9955dfb60fe7af9d17415bfec58fc4c86c1b
|
[
"MIT"
] | null | null | null |
src/PyTorch2ONNX/PyTorch2ONNX_Run_in_ONNX_RUNTIME.py
|
Yulv-git/Model_Inference_Deployment
|
623f9955dfb60fe7af9d17415bfec58fc4c86c1b
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# coding=utf-8
'''
Author: Shuangchi He / Yulv
Email: yulvchi@qq.com
Date: 2022-01-28 14:21:09
Motto: Entities should not be multiplied unnecessarily.
LastEditors: Shuangchi He
LastEditTime: 2022-04-06 11:40:23
FilePath: /Model_Inference_Deployment/src/PyTorch2ONNX/PyTorch2ONNX_Run_in_ONNX_RUNTIME.py
Description: Init from https://pytorch.org/tutorials/advanced/super_resolution_with_onnxruntime.html
Exporting a model from PyTorch to ONNX and running it using ONNX RUNTIME.
'''
import argparse
import os
import numpy as np
from PIL import Image
import torch.nn as nn
import torch.nn.init as init
import torch.utils.model_zoo as model_zoo
import torchvision.transforms as transforms
import onnx
import torch.onnx
import onnxruntime
from utils import check_dir, torchtensor2numpy
# Super Resolution model definition in PyTorch
def PyTorch2ONNX(torch_model, dummy_input_to_model, onnx_save_dir, check_onnx_model=True):
''' Export the model. (PyTorch2ONNX) '''
torch.onnx.export(
torch_model, # model being run.
dummy_input_to_model, # model input (or a tuple for multiple inputs).
onnx_save_dir, # where to save the model (can be a file or file-like object).
export_params=True, # store the trained parameter weights inside the model file.
opset_version=10, # the ONNX version to export the model to.
do_constant_folding=True, # whether to execute constant folding for optimization.
input_names=['input'], # the model's input names.
output_names=['output'], # the model's output names.
dynamic_axes={ # variable length axes.
'input': {0: 'batch_size'},
'output': {0: 'batch_size'}})
    if check_onnx_model:  # Verify the model's structure and confirm that the model has a valid schema.
onnx_model = onnx.load(onnx_save_dir)
onnx.checker.check_model(onnx_model)
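# Illustrative call (the SuperResolutionNet definition itself is not shown here; a
# dummy single-channel 1x1x224x224 input matches the 224x224 Y-channel images used below):
#   x = torch.randn(1, 1, 224, 224)
#   PyTorch2ONNX(torch_model, x, 'output/super_resolution.onnx')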
def Verify_ONNX_in_ONNX_RUNTIME(onnx_dir, dummy_input_to_model, torch_out):
''' Verify ONNX Runtime and PyTorch are computing the same value for the model. '''
# Create an inference session.
ort_session = onnxruntime.InferenceSession(onnx_dir)
# Compute ONNX Runtime output prediction.
ort_inputs = {ort_session.get_inputs()[0].name: torchtensor2numpy(dummy_input_to_model)}
ort_outs = ort_session.run(None, ort_inputs)
# Compare ONNX Runtime and PyTorch results
np.testing.assert_allclose(torchtensor2numpy(torch_out), ort_outs[0], rtol=1e-03, atol=1e-05)
print("Exported model has been tested with ONNXRuntime, and the result looks good!")
def Run_ONNX_in_ONNX_RUNTIME(onnx_dir, img_path, img_save_path):
''' Running the model on an image using ONNX Runtime. '''
# Take the tensor representing the greyscale resized image.
img = Image.open(img_path)
resize = transforms.Resize([224, 224])
img = resize(img)
img_ycbcr = img.convert('YCbCr')
img_y, img_cb, img_cr = img_ycbcr.split()
to_tensor = transforms.ToTensor()
img_y = to_tensor(img_y)
img_y.unsqueeze_(0)
# Create an inference session.
ort_session = onnxruntime.InferenceSession(onnx_dir)
# Run the ONNX model in ONNX Runtime.
ort_inputs = {ort_session.get_inputs()[0].name: torchtensor2numpy(img_y)}
ort_outs = ort_session.run(None, ort_inputs)
img_out_y = ort_outs[0]
# Get the output image.
img_out_y = Image.fromarray(np.uint8((img_out_y[0] * 255.0).clip(0, 255)[0]), mode='L')
final_img = Image.merge(
"YCbCr", [
img_out_y,
img_cb.resize(img_out_y.size, Image.BICUBIC),
img_cr.resize(img_out_y.size, Image.BICUBIC),
]).convert("RGB")
# Save the image, compare this with the output image from mobile device.
final_img.save(img_save_path)
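# Illustrative call, mirroring the argparse defaults parsed below:
#   Run_ONNX_in_ONNX_RUNTIME('output/super_resolution.onnx', 'data/cat.jpg',
#                            'output/cat_superres_with_ort.jpg')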
if __name__ == "__main__":
parse = argparse.ArgumentParser(description='PyTorch2ONNX_Run_in_ONNX_RUNTIME')
parse.add_argument('--img_path', type=str, default='{}/data/cat.jpg'.format(os.path.dirname(os.path.abspath(__file__))))
parse.add_argument('--check_onnx_model', type=bool, default=True)
parse.add_argument('--output_dir', type=str, default='{}/output'.format(os.path.dirname(os.path.abspath(__file__))))
args = parse.parse_args()
check_dir(args.output_dir)
args.onnx_save_dir = '{}/super_resolution.onnx'.format(args.output_dir)
args.img_save_path = '{}/cat_superres_with_ort.jpg'.format(args.output_dir)
main(args)
| 41.638554
| 124
| 0.684172
|
27cc788dc3d49e45198c96fa1cec36fea676e304
| 2,085
|
py
|
Python
|
scripts/dataset.py
|
MarcGroef/deeplearning
|
d1ef095fbe0f7e9b56017808d976efe7502e6b81
|
[
"MIT"
] | null | null | null |
scripts/dataset.py
|
MarcGroef/deeplearning
|
d1ef095fbe0f7e9b56017808d976efe7502e6b81
|
[
"MIT"
] | null | null | null |
scripts/dataset.py
|
MarcGroef/deeplearning
|
d1ef095fbe0f7e9b56017808d976efe7502e6b81
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
from sklearn.model_selection import StratifiedKFold
# Set dataset seed
np.random.seed(seed=842102)
if __name__ == "__main__":
    dataset = Dataset()
| 35.948276
| 130
| 0.659472
|
27cf1141da0cf1cbeff01d7fcd33d6536ff17b4d
| 1,962
|
py
|
Python
|
src/python/utils/image.py
|
Lamzigit/manifold_learning
|
f699fe4f25dbabdbc2dc9635c4e654b59806e17d
|
[
"MIT"
] | 10
|
2017-06-14T08:04:44.000Z
|
2021-07-06T07:13:16.000Z
|
src/python/utils/image.py
|
Lamzigit/manifold_learning
|
f699fe4f25dbabdbc2dc9635c4e654b59806e17d
|
[
"MIT"
] | 1
|
2020-11-18T13:08:43.000Z
|
2020-11-18T13:12:39.000Z
|
src/python/utils/image.py
|
Lamzigit/manifold_learning
|
f699fe4f25dbabdbc2dc9635c4e654b59806e17d
|
[
"MIT"
] | 3
|
2017-06-14T08:04:53.000Z
|
2019-11-18T13:21:15.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Thu Oct 15 14:03:52 2015
@author: jemanjohnson
"""
import numpy as np
import matplotlib.pyplot as plt
import os
import scipy.io
from sklearn import preprocessing
from time import time
from sklearn.preprocessing import MinMaxScaler
# Image Reshape Function
def img_as_array(img, gt=False):
"""Takes a N*M*D image
where:
* N - number of rows
* M - number of columns
* D - dimension of data
Returns:
--------
Image as an array with dimensions -
(N*M) by D
"""
if gt == False:
img_array = img.reshape(
img.shape[0]*img.shape[1], img.shape[2])
else:
img_array = img.reshape(
img.shape[0]*img.shape[1])
return img_array
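# Illustrative shapes: an RGB image of shape (100, 200, 3) becomes a (20000, 3) array,
# while a (100, 200) ground-truth map (gt=True) becomes a flat (20000,) label vector.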
# Image Normalization function
def standardize(data):
"""
Quick function to standardize my data between 0 and 1
"""
return MinMaxScaler().fit_transform(data)
# Define HSI X and y Ground Truth pairing function
def img_gt_idx(img, img_gt, printinfo=False):
"""Takes a flattened image array and
extracts the image indices that correspond
to the ground truth that we have.
"""
# Find the non-zero entries
n_samples = (img_gt>0).sum()
# Find the classification labels
classlabels = np.unique(img_gt[img_gt>0])
# Create X matrix containing the features
X = img[img_gt>0,:]
# Create y matrix containing the labels
y = img_gt[img_gt>0]
# Print out useful information
if printinfo:
print('We have {n} ground-truth samples.'.format(
n=n_samples))
print('The training data includes {n} classes: {classes}'.format(
n=classlabels.size, classes=classlabels.T))
print('Dimensions of matrix X: {sizeX}'.format(sizeX=X.shape))
print('Dimensions of matrix y: {sizey}'.format(sizey=y.shape))
return X, y
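# Illustrative shapes: with n labelled pixels and D spectral bands, X is (n, D) and y is (n,).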
#
| 25.480519
| 73
| 0.618756
|
27cf8bbca13f1461fc47b01d2bfbcfa734035197
| 1,767
|
py
|
Python
|
test/finsignia/gae/controllers.py
|
finsignia/finsignia-gae
|
e8cf6a3855cb2844a3a7e113f26b600bd952d371
|
[
"MIT"
] | 1
|
2016-05-08T21:47:10.000Z
|
2016-05-08T21:47:10.000Z
|
test/finsignia/gae/controllers.py
|
finsignia/finsignia-gae
|
e8cf6a3855cb2844a3a7e113f26b600bd952d371
|
[
"MIT"
] | null | null | null |
test/finsignia/gae/controllers.py
|
finsignia/finsignia-gae
|
e8cf6a3855cb2844a3a7e113f26b600bd952d371
|
[
"MIT"
] | null | null | null |
"""
Tests for the finsignia.gae.controllers module.
"""
import os
import sys
from finsignia.gae import loader
import unittest
def test_cases():
return [ApplicationControllerTest, ResourceControllerTest]
if '__main__' == __name__:
unittest.main()
| 29.949153
| 97
| 0.730051
|
27d2e55ac297493daba610855afc860802f2e6c9
| 2,074
|
py
|
Python
|
tests/test_visualize_poll.py
|
UBC-MDS/tweepypoll
|
62ea4ea0ab381eecf8f24bd13da0a0cdfb18eaa6
|
[
"MIT"
] | null | null | null |
tests/test_visualize_poll.py
|
UBC-MDS/tweepypoll
|
62ea4ea0ab381eecf8f24bd13da0a0cdfb18eaa6
|
[
"MIT"
] | 30
|
2022-01-14T17:10:08.000Z
|
2022-02-02T21:17:05.000Z
|
tests/test_visualize_poll.py
|
UBC-MDS/tweepypoll
|
62ea4ea0ab381eecf8f24bd13da0a0cdfb18eaa6
|
[
"MIT"
] | 1
|
2022-01-14T16:10:11.000Z
|
2022-01-14T16:10:11.000Z
|
from tweepypoll.tweepypoll import visualize_poll
import pandas as pd
import altair as alt
def test_visualize_poll():
"""Test visualize_poll on a dictionary input"""
sample_poll_obj = [
{
"text": "Important research!!!",
"duration": 1440,
"date": "2022-01-22T04:01:08.000Z",
"poll options": [
{"position": 1, "label": "Cookies", "votes": 29},
{"position": 2, "label": "Cupcakes", "votes": 5},
{"position": 3, "label": "Donuts", "votes": 24},
{"position": 4, "label": "Ice Cream", "votes": 25},
],
"user": "GregShahade",
"total": 83,
}
]
test_plot = visualize_poll(sample_poll_obj)
# test settings on altair plot
assert isinstance(
test_plot[0], alt.Chart
), "The type of the output mush be a altair chart"
assert (
test_plot[0].encoding.x.shorthand == "votes"
), "The votes should be mapped to the x axis"
assert (
test_plot[0].encoding.y.shorthand == "label"
), "The label should be mapped to the y axis"
assert test_plot[0].mark == "bar", "mark should be a bar"
assert (
test_plot[0].encoding.color.title == "Options"
), "Option should be the legend title"
# check if show_user=True, correct user name is printed
assert sample_poll_obj[0]["user"] == "GregShahade", "The user name is not correct."
# check if show_date=True, correct date and time is printed
assert (
pd.Timestamp(sample_poll_obj[0]["date"]).strftime("%Y-%m-%d %H:%M:%S")
== "2022-01-22 04:01:08"
), "Date and time is not correct."
# check if show_duration=True, correct duration is printed
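    # (the sample poll stores its duration in minutes: 1440 / 60 == 24.0 hours)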
assert sample_poll_obj[0]["duration"] / 60 == 24.0, "Duration is not correct."
# check if calculated total votes is equal to the input dict
df = pd.DataFrame(sample_poll_obj[0]["poll options"])
assert (
df["votes"].sum() == sample_poll_obj[0]["total"]
), "Total response is not correct."
| 35.152542
| 87
| 0.590646
|
27d3c4c2fd777115e15bd0efa78ebba378f85ab4
| 5,629
|
py
|
Python
|
quilt/avahiservice.py
|
rossdylan/quilt
|
463e3cfe419410b41ee6945ab96d51692d46b036
|
[
"MIT"
] | 2
|
2015-07-23T03:49:42.000Z
|
2015-11-05T18:49:53.000Z
|
quilt/avahiservice.py
|
rossdylan/quilt
|
463e3cfe419410b41ee6945ab96d51692d46b036
|
[
"MIT"
] | null | null | null |
quilt/avahiservice.py
|
rossdylan/quilt
|
463e3cfe419410b41ee6945ab96d51692d46b036
|
[
"MIT"
] | null | null | null |
"""
Avahi Network Service Scripting
"""
import Queue
import threading
import avahi, dbus, gobject
from dbus import DBusException
from dbus.mainloop.glib import DBusGMainLoop
__all__ = ["QuiltAvahiServer", "QuiltAvahiClient"]
TYPE = '_quilt._tcp'
from threading import Thread
| 35.626582
| 105
| 0.551252
|
27d5437b9102d270c520f2be5bfb611ac3f22737
| 612
|
py
|
Python
|
tests/test.py
|
Robert-96/altwalker-appium-example
|
17bb3087d13fed62a4cb98ac0d25b7aa3b8a937d
|
[
"MIT"
] | null | null | null |
tests/test.py
|
Robert-96/altwalker-appium-example
|
17bb3087d13fed62a4cb98ac0d25b7aa3b8a937d
|
[
"MIT"
] | null | null | null |
tests/test.py
|
Robert-96/altwalker-appium-example
|
17bb3087d13fed62a4cb98ac0d25b7aa3b8a937d
|
[
"MIT"
] | null | null | null |
from appium import webdriver
from .utils import PATH
desired_caps = dict(
platformName='Android',
platformVersion='10',
automationName='uiautomator2',
deviceName='Android Emulator',
app=PATH('app/ApiDemos-debug.apk.zip')
)
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
| 19.741935
| 73
| 0.668301
|
27d5f9dbcf40cc145235b4fffe9387c62c414d60
| 6,161
|
py
|
Python
|
tests/pte-onPrem-test-package/scripts/create_SCFile.py
|
gbl1124/hfrd
|
327d7c1e18704d2e31a2649b40ae1d90353ebe24
|
[
"Apache-2.0"
] | 5
|
2019-08-02T20:53:57.000Z
|
2021-06-25T05:16:46.000Z
|
tests/pte-onPrem-test-package/scripts/create_SCFile.py
|
anandbanik/hfrd
|
7bc1f13bfc9c7d902aec0363d27b089ef68c7eec
|
[
"Apache-2.0"
] | null | null | null |
tests/pte-onPrem-test-package/scripts/create_SCFile.py
|
anandbanik/hfrd
|
7bc1f13bfc9c7d902aec0363d27b089ef68c7eec
|
[
"Apache-2.0"
] | 14
|
2019-07-01T01:40:50.000Z
|
2020-03-24T06:14:32.000Z
|
import json
import os
import argparse
HOME = os.environ['HOME']+'/results/'
parser = argparse.ArgumentParser(description="Python script generates the SCFiles using MSPIDs")
parser.add_argument("-m", "--mspids", nargs="+", required=True, help="1 or more MSPIDs")
parser.add_argument("-n", "--networkId", metavar='', required=True, help="Network ID")
args = parser.parse_args()
if __name__ == "__main__":
scFileCreator = SCFileCreator()
| 50.917355
| 127
| 0.603636
|
27d6cff2a09a34968b82d0b674f282b1d2271a34
| 9,721
|
py
|
Python
|
catalogue/forms.py
|
lh00257/superharris
|
cc8794ac6a63fa157ed6d0ef75f5089253ff987d
|
[
"MIT"
] | null | null | null |
catalogue/forms.py
|
lh00257/superharris
|
cc8794ac6a63fa157ed6d0ef75f5089253ff987d
|
[
"MIT"
] | null | null | null |
catalogue/forms.py
|
lh00257/superharris
|
cc8794ac6a63fa157ed6d0ef75f5089253ff987d
|
[
"MIT"
] | null | null | null |
import re #Regular expression library
from django import forms
from django.contrib.auth.models import User
from django.core.exceptions import ObjectDoesNotExist
from django.template import RequestContext
from django.contrib.auth.forms import AuthenticationForm
from catalogue.models import Submitted
from models import GlobularCluster as GC
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Div, Submit, HTML, Button, Row, Field, Fieldset
from crispy_forms.bootstrap import AppendedText, PrependedText, FormActions
#from crispy_forms.bootstrap import InlineField
#class login_page(forms.Form):
# username = forms.CharField(label='Username', max_length=30)
#password = forms.CharField(widget=forms.PasswordInput)
#model = User
#widgets = {
# 'password': forms.PasswordInput(),
#}
| 47.419512
| 110
| 0.632342
|
27d78b89ba7b997214a4c7166893ac8b3158ac3f
| 38,343
|
py
|
Python
|
sgan/models.py
|
peaceminusones/Group-GAN-GCN
|
ff0abf90bb830729d082d1fa46e41c749c738895
|
[
"MIT"
] | 2
|
2021-05-25T09:10:15.000Z
|
2021-09-25T07:53:35.000Z
|
sgan/models.py
|
peaceminusones/Group-GAN-GCN
|
ff0abf90bb830729d082d1fa46e41c749c738895
|
[
"MIT"
] | null | null | null |
sgan/models.py
|
peaceminusones/Group-GAN-GCN
|
ff0abf90bb830729d082d1fa46e41c749c738895
|
[
"MIT"
] | null | null | null |
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
"""
modified by zyl 2021/3/2
"""
# class BatchMultiHeadGraphAttention(nn.Module):
# """
# graph attetion layer(GAL)
# """
# def __init__(self, n_head, f_in, f_out, attn_dropout, bias=True):
# super(BatchMultiHeadGraphAttention, self).__init__()
# self.n_head = n_head
# self.f_in = f_in
# self.f_out = f_out
# self.w = nn.Parameter(torch.Tensor(n_head, f_in, f_out))
# self.a_src = nn.Parameter(torch.Tensor(n_head, f_out, 1))
# self.a_dst = nn.Parameter(torch.Tensor(n_head, f_out, 1))
# self.leaky_relu = nn.LeakyReLU(negative_slope=0.2)
# self.softmax = nn.Softmax(dim=-1)
# self.dropout = nn.Dropout(attn_dropout)
# if bias:
# self.bias = nn.Parameter(torch.Tensor(f_out))
# nn.init.constant_(self.bias, 0)
# else:
# self.register_parameter("bias", None)
# nn.init.xavier_uniform_(self.w, gain=1.414)
# nn.init.xavier_uniform_(self.a_src, gain=1.414)
# nn.init.xavier_uniform_(self.a_dst, gain=1.414)
# def forward(self, h, adj):
# bs, n = h.size()[:2]
# h_prime = torch.matmul(h.unsqueeze(1), self.w)
# attn_src = torch.matmul(h_prime, self.a_src)
# attn_dst = torch.matmul(h_prime, self.a_dst)
# attn = attn_src.expand(-1, -1, -1, n) + attn_dst.expand(-1, -1, -1, n).permute(0, 1, 3, 2)
# attn = self.leaky_relu(attn)
# attn = self.softmax(attn)
# attn = self.dropout(attn)
# attn = torch.matmul(torch.squeeze(attn, dim=0), adj)
# attn = torch.unsqueeze(attn, 0)
# output = torch.matmul(attn, h_prime)
# if self.bias is not None:
# return output + self.bias, attn
# else:
# return output, attn
# def __repr__(self):
# return (
# self.__class__.__name__
# + " ("
# + str(self.n_head)
# + " -> "
# + str(self.f_in)
# + " -> "
# + str(self.f_out)
# + ")"
# )
# """
# modified by zyl 2021/2/6 graph attention network
# """
# class GAT(nn.Module):
# def __init__(self, n_units, n_heads, dropout=0.2, alpha=0.2):
# super(GAT, self).__init__()
# self.n_layer = len(n_units) - 1
# self.dropout = dropout
# self.layer_stack = nn.ModuleList()
# for i in range(self.n_layer):
# f_in = n_units[i] * n_heads[i - 1] if i else n_units[i]
# self.layer_stack.append(
# BatchMultiHeadGraphAttention(
# n_heads[i], f_in=f_in, f_out=n_units[i + 1], attn_dropout=dropout
# )
# )
# self.norm_list = [
# torch.nn.InstanceNorm1d(32).cuda(),
# torch.nn.InstanceNorm1d(64).cuda(),
# ]
# def forward(self, x, adj):
# bs, n = x.size()[:2]
# for i, gat_layer in enumerate(self.layer_stack):
# # x = self.norm_list[i](x.permute(0, 2, 1)).permute(0, 2, 1)
# x, attn = gat_layer(x, adj)
# if i + 1 == self.n_layer:
# x = x.squeeze(dim=1)
# else:
# x = F.elu(x.contiguous().view(bs, n, -1))
# x = F.dropout(x, self.dropout, training=self.training)
# else:
# return x
# """
# modified by zyl 2021/2/6 graph attention network encoder
# """
# class GATEncoder(nn.Module):
# def __init__(self, n_units, n_heads, dropout, alpha):
# super(GATEncoder, self).__init__()
# self.gat_intra = GAT([40,72,16], n_heads, dropout, alpha)
# self.gat_inter = GAT([16,72,16], n_heads, dropout, alpha)
# self.out_embedding = nn.Linear(16*2, 24)
# def normalize(self, adj, dim):
# N = adj.size()
# adj2 = torch.sum(adj, dim) #
# norm = adj2.unsqueeze(1).float() #
# norm = norm.pow(-1) #
# norm_adj = adj.mul(norm) #
# return norm_adj
# def forward(self, obs_traj_embedding, seq_start_end, end_pos, end_group):
# graph_embeded_data = []
# for start, end in seq_start_end.data:
# curr_seq_embedding_traj = obs_traj_embedding[:, start:end, :]
# h_states = torch.squeeze(obs_traj_embedding, dim=0)
# num_ped = end - start
# curr_end_group = end_group[start:end]
# eye_mtx = torch.eye(num_ped, device=end_group.device).bool()
# A_g = curr_end_group.repeat(1, num_ped)
# B_g = curr_end_group.transpose(1, 0).repeat(num_ped, 1)
# M_intra = (A_g == B_g) & (A_g != 0) | eye_mtx
# A_intra = self.normalize(M_intra, dim=1).cuda()
# curr_seq_graph_intra = self.gat_intra(curr_seq_embedding_traj, A_intra)
# # print("curr_seq_embedding_traj:", curr_seq_embedding_traj.size())
# # print("curr_seq_graph_intra:", curr_seq_graph_intra.size())
# R_intra_unique = torch.unique(M_intra, sorted=False, dim=0)
# n_group = R_intra_unique.size()[0]
# R_intra_unique.unsqueeze_(1)
# R_intra = []
# for i in range(n_group-1, -1, -1):
# R_intra.append(R_intra_unique[i])
# R_intra = torch.cat(R_intra, dim=0)
# R_intra = self.normalize(R_intra, dim=1).cuda()
# curr_seq_graph_state_in = torch.matmul(R_intra, torch.squeeze(curr_seq_graph_intra, dim=0))
# curr_seq_graph_state_in = torch.unsqueeze(curr_seq_graph_state_in, 0)
# M_inter = torch.ones((n_group, n_group), device=end_group.device).bool()
# A_inter = self.normalize(M_inter, dim=1).cuda()
# curr_seq_graph_out = self.gat_inter(curr_seq_graph_state_in, A_inter)
# curr_seq_graph_inter = torch.matmul(R_intra.T, torch.squeeze(curr_seq_graph_out, dim=0))
# curr_seq_graph_inter = torch.unsqueeze(curr_seq_graph_inter, 0)
# curr_gat_state = torch.cat([curr_seq_graph_intra, curr_seq_graph_inter],dim=2)
# curr_gat_state = torch.squeeze(curr_gat_state, dim=0)
# curr_gat_state = self.out_embedding(curr_gat_state)
# curr_gat_state = torch.unsqueeze(curr_gat_state, 0)
# graph_embeded_data.append(curr_gat_state)
# graph_embeded_data = torch.cat(graph_embeded_data, dim=1)
# return graph_embeded_data
| 38.652218
| 126
| 0.586235
|
27d7ec475999a81872908a78c697615fa5aa0984
| 4,258
|
py
|
Python
|
netflow/mkipfixtypes/ipfixtypes.py
|
kohler/click-packages
|
cec70da7cf460548ef08f1ddad6924db29d5c0c5
|
[
"MIT"
] | 13
|
2015-02-26T23:12:09.000Z
|
2021-04-18T04:37:12.000Z
|
netflow/mkipfixtypes/ipfixtypes.py
|
kohoumas/click-packages
|
6bb5c4ba286e5dbc74efd1708921d530425691f6
|
[
"MIT"
] | null | null | null |
netflow/mkipfixtypes/ipfixtypes.py
|
kohoumas/click-packages
|
6bb5c4ba286e5dbc74efd1708921d530425691f6
|
[
"MIT"
] | 7
|
2015-08-25T09:29:41.000Z
|
2021-04-18T04:37:13.000Z
|
#!/usr/bin/python
#
# Generates ipfixtypes.hh from IPFIX spec and schema
#
# Copyright (c) 2006 Mazu Networks, Inc.
#
# $Id: ipfixtypes.py,v 1.1 2006/05/12 16:43:44 eddietwo Exp $
#
import xml.dom.minidom
import sys
import time
def main():
if len(sys.argv) < 2:
print "Usage: %s [OPTION]... [FILE]..." % sys.argv[0]
sys.exit(0)
dataTypes = {}
fieldTypes = {}
for file in sys.argv[1:]:
spec = IPFIXSpecification(file)
for field in spec.fieldDefinitions():
if dataTypes.has_key(field.dataType):
dataTypes[field.dataType].append(field.name)
else:
dataTypes[field.dataType] = [field.name]
fieldTypes[int(field.fieldId)] = field.name
for dataType in spec.dataTypes():
if not dataTypes.has_key(dataType):
dataTypes[dataType] = []
# IPFIX_unsigned8,
data_types = ["IPFIX_%s" % dataType for dataType in dataTypes]
data_types = ",\n ".join(data_types)
# IPFIX_octetDeltaCount = 1,
field_types = fieldTypes.items()
field_types.sort()
field_types = ["IPFIX_%s = %d" % (name, fieldId) for fieldId, name in field_types]
field_types = ",\n ".join(field_types)
# case IPFIX_octetDeltaCount:
# case IPFIX_packetDeltaCount:
# ...
# return IPFIX_unsigned64;
ipfix_datatypes = []
for dataType, names in dataTypes.iteritems():
if names:
ipfix_datatypes += ["case IPFIX_%s:" % name for name in names]
ipfix_datatypes.append(" return IPFIX_%s;" % dataType)
ipfix_datatypes = "\n ".join(ipfix_datatypes)
# case IPFIX_octetDeltaCount: return "octetDeltaCount";
ipfix_names = ["case IPFIX_%s: return \"%s\";" % \
(name, name) for name in fieldTypes.values()]
ipfix_names = "\n ".join(ipfix_names)
# else if (strcmp(name, "octetDeltaCount") == 0) { return IPFIX_octetDeltaCount; }
ipfix_types = ["else if (strcmp(name, \"%s\") == 0) { return IPFIX_%s; }" % \
(name, name) for name in fieldTypes.values()]
ipfix_types = "\n ".join(ipfix_types)
date = time.asctime()
print """
// DO NOT EDIT. Generated at %(date)s.
#ifndef IPFIXTYPES_HH
#define IPFIXTYPES_HH
CLICK_DECLS
enum IPFIX_dataType {
IPFIX_unknown = 0,
%(data_types)s
};
enum IPFIX_fieldType {
%(field_types)s
};
static inline IPFIX_dataType
ipfix_datatype(uint16_t type) {
switch (type) {
%(ipfix_datatypes)s
}
return IPFIX_unknown;
}
static inline const char *
ipfix_name(uint16_t type) {
switch (type) {
%(ipfix_names)s
}
return "unknown";
}
static inline uint16_t
ipfix_type(const char *name) {
if (0) { }
%(ipfix_types)s
else { return 0; }
}
CLICK_ENDDECLS
#endif
""".strip() % locals()
if __name__ == '__main__':
main()
| 26.447205
| 106
| 0.625881
|
27d8e4bb5627f304929e5b7f3fa1b41d586d410e
| 694
|
py
|
Python
|
core_lib/web_helpers/constants_media_type.py
|
shubham-surya/core-lib
|
543db80706746a937e5ed16bd50f2de8d58b32e4
|
[
"MIT"
] | null | null | null |
core_lib/web_helpers/constants_media_type.py
|
shubham-surya/core-lib
|
543db80706746a937e5ed16bd50f2de8d58b32e4
|
[
"MIT"
] | 9
|
2021-03-11T02:29:17.000Z
|
2022-03-22T19:01:18.000Z
|
core_lib/web_helpers/constants_media_type.py
|
shubham-surya/core-lib
|
543db80706746a937e5ed16bd50f2de8d58b32e4
|
[
"MIT"
] | 2
|
2022-01-27T11:19:00.000Z
|
2022-02-11T11:33:09.000Z
|
import enum
| 34.7
| 69
| 0.729107
|
27da1fb06b835a7c7c1c2845d17975f0ff1c9b74
| 2,940
|
py
|
Python
|
pylons-emlo/emlo/workspace/indexing/src/conversionhelper.py
|
culturesofknowledge/emlo-server
|
8a88ca98a5211086195793e4bed5960550638936
|
[
"MIT"
] | null | null | null |
pylons-emlo/emlo/workspace/indexing/src/conversionhelper.py
|
culturesofknowledge/emlo-server
|
8a88ca98a5211086195793e4bed5960550638936
|
[
"MIT"
] | null | null | null |
pylons-emlo/emlo/workspace/indexing/src/conversionhelper.py
|
culturesofknowledge/emlo-server
|
8a88ca98a5211086195793e4bed5960550638936
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
'''
Created on 24 Aug 2010
@author: Matthew Wilcoxson
functions convert from one value to another in the form:
def conversion(value):
#do something
return new_value
'''
import time
| 30
| 119
| 0.611905
|
27de194719485c100a81b84fd59429f4b32b78e0
| 992
|
py
|
Python
|
modules/wxpy-index/wxpy_index/version.py
|
john04047210/mira_wepy_server
|
385b8561e63f9164102e60681e2704c55fec0577
|
[
"MIT"
] | 1
|
2018-05-22T11:25:59.000Z
|
2018-05-22T11:25:59.000Z
|
modules/wxpy-index/wxpy_index/version.py
|
john04047210/mira_wepy_server
|
385b8561e63f9164102e60681e2704c55fec0577
|
[
"MIT"
] | null | null | null |
modules/wxpy-index/wxpy_index/version.py
|
john04047210/mira_wepy_server
|
385b8561e63f9164102e60681e2704c55fec0577
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2018 QiaoPeng.
#
# Invenio is free software; you can redistribute it
# and/or modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston,
# MA 02111-1307, USA.
"""Version information for Wxpy-Index.
This file is imported by ``wxpy_index.__init__``,
and parsed by ``setup.py``.
"""
from __future__ import absolute_import, print_function
__version__ = '0.1.0.dev20180000'
| 33.066667
| 72
| 0.746976
|
27dfaf52615924607a73e76ca9bec8a17c8c3880
| 11,305
|
py
|
Python
|
estimate.py
|
DS3Lab/feebee
|
eb210d07a7f9956ca2d0681ccf446330c8427a8b
|
[
"Apache-2.0"
] | 1
|
2022-03-24T06:15:37.000Z
|
2022-03-24T06:15:37.000Z
|
estimate.py
|
DS3Lab/feebee
|
eb210d07a7f9956ca2d0681ccf446330c8427a8b
|
[
"Apache-2.0"
] | null | null | null |
estimate.py
|
DS3Lab/feebee
|
eb210d07a7f9956ca2d0681ccf446330c8427a8b
|
[
"Apache-2.0"
] | 1
|
2021-12-20T12:11:55.000Z
|
2021-12-20T12:11:55.000Z
|
from absl import app
from absl import flags
from absl import logging
import csv
import importlib
import numpy as np
import os.path as path
import random
from sklearn.model_selection import train_test_split
import time
from transformations.reader.matrix import test_argument_and_file, load_and_log
import transformations.label_noise as label_noise
import methods.knn as knn
import methods.knn_extrapolate as knn_extrapolate
import methods.ghp as ghp
import methods.kde as kde
import methods.onenn as onenn
import methods.lr_model as lr_model
FLAGS = flags.FLAGS
flags.DEFINE_string("path", ".", "Path to the matrices directory")
flags.DEFINE_string("features_train", None, "Name of the train features numpy matrix exported file (npy)")
flags.DEFINE_string("features_test", None, "Name of the test features numpy matrix exported file (npy)")
flags.DEFINE_string("labels_train", None, "Name of the train labels numpy matrix exported file (npy)")
flags.DEFINE_string("labels_test", None, "Name of the test labels numpy matrix exported file (npy)")
flags.DEFINE_list("noise_levels", [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0], "Run at different noise levels")
flags.DEFINE_integer("noise_runs", 5, "Number of runs for different noise levels")
flags.DEFINE_string("output_file", None, "File to write the output in CSV format (including headers)")
flags.DEFINE_bool("output_overwrite", True, "Writes (if True) or appends (if False) to the specified output file if any")
flags.DEFINE_enum("method", None, ["knn", "knn_loo", "knn_extrapolate", "ghp", "kde_knn_loo", "kde", "onenn", "lr_model"], "Method to estimate the bayes error (results in either 1 value or a lower and upper bound)")
if __name__ == "__main__":
app.run(main)
| 51.153846
| 215
| 0.665369
|
27dfb13b1540ca2ae940981337f040231ef6dd46
| 2,610
|
py
|
Python
|
allmodels_image.py
|
GustavZ/Tensorflow-Object-Detection
|
3aab434b20e510d3953b4265dd73a1c7c315067d
|
[
"MIT"
] | 187
|
2017-12-26T17:41:09.000Z
|
2019-03-06T04:44:25.000Z
|
allmodels_image.py
|
a554142589/realtime_object_detection
|
d2bd7e58df9af1848e473fa7627aa2433192903d
|
[
"MIT"
] | 38
|
2018-02-01T17:05:01.000Z
|
2019-02-15T21:58:25.000Z
|
allmodels_image.py
|
a554142589/realtime_object_detection
|
d2bd7e58df9af1848e473fa7627aa2433192903d
|
[
"MIT"
] | 65
|
2018-01-19T06:03:44.000Z
|
2019-03-06T04:58:31.000Z
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 10 09:45:23 2018
@author: www.github.com/GustavZ
"""
import os
import sys
import numpy as np
from rod.config import Config
from rod.helper import get_model_list, check_if_optimized_model
from rod.model import ObjectDetectionModel, DeepLabModel
ROOT_DIR = os.getcwd()
#MODELS_DIR = os.path.join(ROOT_DIR,'models')
MODELS_DIR = '/home/gustav/workspace/eetfm_automation/nmsspeed_test'
INPUT_TYPE = 'image'
# Read sequential Models or Gather all Models from models/
config = Config('od')
if config.SEQ_MODELS:
model_names = config.SEQ_MODELS
else:
model_names = get_model_list(MODELS_DIR)
# Sequential testing
for model_name in model_names:
print("> testing model: {}".format(model_name))
# conditionals
optimized=False
single_class=False
# Test Model
if 'hands' in model_name or 'person' in model_name:
single_class=True
if 'deeplab' in model_name:
config = create_test_config('dl',model_name,optimized,single_class)
model = DeepLabModel(config).prepare_model(INPUT_TYPE)
else:
config = create_test_config('od',model_name,optimized,single_class)
model = ObjectDetectionModel(config).prepare_model(INPUT_TYPE)
# Check if there is an optimized graph
model_dir = os.path.join(os.getcwd(),'models',model_name)
optimized = check_if_optimized_model(model_dir)
# Again for the optimized graph
if optimized:
if 'deeplab' in model_name:
config = create_test_config('dl',model_name,optimized,single_class)
model = DeepLabModel(config).prepare_model(INPUT_TYPE)
else:
config = create_test_config('od',model_name,optimized,single_class)
model = ObjectDetectionModel(config).prepare_model(INPUT_TYPE)
model.run()
| 32.222222
| 79
| 0.668966
|
27e04f3e71ee9ae2490b13c55437303fba48ca2d
| 5,953
|
py
|
Python
|
train.py
|
Jing-lun/GPR_3D_Model_Reconstruction
|
24259bdbdf5e993e286e556ee1bae720892a16b9
|
[
"Unlicense"
] | 1
|
2021-09-30T10:22:54.000Z
|
2021-09-30T10:22:54.000Z
|
train.py
|
Jing-lun/GPR_3D_Model_Reconstruction
|
24259bdbdf5e993e286e556ee1bae720892a16b9
|
[
"Unlicense"
] | 1
|
2021-07-23T13:10:58.000Z
|
2021-07-23T13:10:58.000Z
|
train.py
|
Jing-lun/GPR_3D_Model_Reconstruction
|
24259bdbdf5e993e286e556ee1bae720892a16b9
|
[
"Unlicense"
] | null | null | null |
# Copyright 2021, Robotics Lab, City College of New York
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Originating Author: Jinglun Feng, (jfeng1@ccny.cuny.edu)
import argparse
import logging
import os
import sys
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from tqdm import tqdm
from torch.autograd import Variable
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader, random_split
from torchvision.utils import save_image
from model import UNet3D
from utils.data_loader import BasicDataset
from utils.utils import PointLoss
from eval import eval_net
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
args = args_setting()
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logging.info(f'Let\'s use {torch.cuda.device_count()} GPUs!')
net = UNet3D(residual='conv')
net = torch.nn.DataParallel(net)
if args.load != '':
net.load_state_dict(
torch.load(args.load, map_location=device)
)
logging.info(f'Model loaded from {args.load}')
logging.info(f'Network Structure:\n'
f'\t{net}\n')
net.to(device=device)
try:
train_net(net=net,
epochs=args.epochs,
batch_size=args.batchsize,
lr=args.lr,
device=device)
except KeyboardInterrupt:
torch.save(net.state_dict(), 'INTERRUPTED.pth')
logging.info('Saved interrupt')
try:
sys.exit(0)
except SystemExit:
os._exit(0)
| 36.975155
| 111
| 0.607425
|
27e1b8a412e18403318f1bb9f3adb67ae8c94d10
| 805
|
py
|
Python
|
Python/1-Fundamentals/Week 2/workshop2/banking_pkg/account.py
|
armirh/Nucamp-SQL-Devops-Training
|
6c2dc5793c732bfb4c4d365acbb346a95fbf4bf2
|
[
"MIT"
] | 2
|
2022-01-19T02:33:11.000Z
|
2022-01-19T02:33:13.000Z
|
Python/1-Fundamentals/Week 2/workshop2/banking_pkg/account.py
|
armirh/Nucamp-SQL-Devops-Training
|
6c2dc5793c732bfb4c4d365acbb346a95fbf4bf2
|
[
"MIT"
] | null | null | null |
Python/1-Fundamentals/Week 2/workshop2/banking_pkg/account.py
|
armirh/Nucamp-SQL-Devops-Training
|
6c2dc5793c732bfb4c4d365acbb346a95fbf4bf2
|
[
"MIT"
] | null | null | null |
import sys
| 23.676471
| 70
| 0.607453
|
27e21695cafe73f0d93a69b097c0c530103f4723
| 2,098
|
py
|
Python
|
scrapers/who.py
|
kollivier/sushi-chef-who-covid-advice
|
cb50d5fdfe992767eff50bc33e323a682752d0b2
|
[
"MIT"
] | null | null | null |
scrapers/who.py
|
kollivier/sushi-chef-who-covid-advice
|
cb50d5fdfe992767eff50bc33e323a682752d0b2
|
[
"MIT"
] | null | null | null |
scrapers/who.py
|
kollivier/sushi-chef-who-covid-advice
|
cb50d5fdfe992767eff50bc33e323a682752d0b2
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
from ricecooker.classes import nodes
| 32.78125
| 86
| 0.597712
|
27e33b028e6c906a2e346f640e4d67536b199914
| 23,817
|
py
|
Python
|
dxtbx/tests/model/experiment/test_experiment_list.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
dxtbx/tests/model/experiment/test_experiment_list.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
dxtbx/tests/model/experiment/test_experiment_list.py
|
jbeilstenedmands/cctbx_project
|
c228fb15ab10377f664c39553d866281358195aa
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import six.moves.cPickle as pickle
from glob import glob
import os
import pytest
from dxtbx.model import Experiment, ExperimentList
from dxtbx.model.experiment_list import ExperimentListFactory, \
ExperimentListDumper, ExperimentListDict
# def test_experimentlist_index(experiment_list):
# # Check the indices of existing experiments
# assert experiment_list.index(experiment_list[0]) is 0
# assert experiment_list.index(experiment_list[1]) is 1
# assert experiment_list.index(experiment_list[2]) is 2
# assert experiment_list.index(experiment_list[3]) is 1
# assert experiment_list.index(experiment_list[4]) is 0
# # Check index of non-existing experiment
# try:
# experiment_list.index(Experiment())
# assert False
# except ValueError:
# pass
| 32.715659
| 89
| 0.719528
|
27e6185cd1321c58ae5c06b94cfd558705c422cd
| 365
|
py
|
Python
|
Divergence analysis/splitreference.py
|
MarniTausen/CloverAnalysisPipeline
|
ae169b46c7be40cdf0d97101480be12df87fc58e
|
[
"Unlicense"
] | 4
|
2018-03-26T08:54:50.000Z
|
2021-07-28T13:34:07.000Z
|
Divergence analysis/splitreference.py
|
MarniTausen/CloverAnalysisPipeline
|
ae169b46c7be40cdf0d97101480be12df87fc58e
|
[
"Unlicense"
] | null | null | null |
Divergence analysis/splitreference.py
|
MarniTausen/CloverAnalysisPipeline
|
ae169b46c7be40cdf0d97101480be12df87fc58e
|
[
"Unlicense"
] | 4
|
2017-10-26T12:59:39.000Z
|
2021-07-12T08:40:56.000Z
|
from sys import argv
if __name__=="__main__":
make_new_reference_files(argv[1], argv[2], argv[3], argv[4])
| 26.071429
| 68
| 0.635616
|
27e766575366ccd5d46f9e7a446bbcc0f07d388e
| 1,323
|
py
|
Python
|
__pg/appids.py
|
briandorsey/cloud-playground
|
6e8ee5fcc6bb4e96bd10dcdf9eda451870c3cc1e
|
[
"Apache-2.0"
] | 5
|
2017-03-02T15:57:44.000Z
|
2020-02-14T05:17:28.000Z
|
__pg/appids.py
|
briandorsey/cloud-playground
|
6e8ee5fcc6bb4e96bd10dcdf9eda451870c3cc1e
|
[
"Apache-2.0"
] | null | null | null |
__pg/appids.py
|
briandorsey/cloud-playground
|
6e8ee5fcc6bb4e96bd10dcdf9eda451870c3cc1e
|
[
"Apache-2.0"
] | 3
|
2017-05-20T11:23:07.000Z
|
2022-01-13T12:00:57.000Z
|
"""Module which defines collaborating app ids.
This module is used by:
settings.py
scripts/deploy.sh
"""
import os
# List of (playground appid, mimic app id, playground app id alias)
_APP_ID_TUPLES = [
# production environment
('try-appengine', 'shared-playground', 'cloud-playground'),
# development environment
('fredsa-bliss', 'fredsa-hr', None),
('dansanderson-bliss', 'dansanderson-mimic', None),
]
# Our app id
_APP_ID = os.environ['APPLICATION_ID'].split('~')[-1]
# support regular 'appspot.com' app ids only
assert ':' not in _APP_ID, ('{} app ids are unsupported'
.format(_APP_ID.split(':')[0]))
app_ids = _GetTupleFor(_APP_ID)
# The application where the playground IDE runs
PLAYGROUND_APP_ID = app_ids[0]
# The application where user code runs
MIMIC_APP_ID = app_ids[1]
# The application alias where the playground IDE runs
PLAYGROUND_APP_ID_ALIAS = app_ids[2]
# Whether we're using two collaborating app ids
TWO_COLLABORATING_APP_IDS = PLAYGROUND_APP_ID != MIMIC_APP_ID
def PrintAppIds():
"""Prints a new line delimited list of known app ids."""
print '\n'.join(set((PLAYGROUND_APP_ID, MIMIC_APP_ID)))
| 25.442308
| 67
| 0.708995
|
27e7f431903fe9377416892525c526c246e0ed24
| 21,183
|
py
|
Python
|
_states/novav21.py
|
NDPF/salt-formula-nova
|
265d9e6c2cbd41d564ee389b210441d9f7378433
|
[
"Apache-2.0"
] | 4
|
2017-04-27T14:27:04.000Z
|
2017-11-04T18:23:09.000Z
|
_states/novav21.py
|
NDPF/salt-formula-nova
|
265d9e6c2cbd41d564ee389b210441d9f7378433
|
[
"Apache-2.0"
] | 22
|
2017-02-01T09:04:52.000Z
|
2019-05-10T09:04:01.000Z
|
_states/novav21.py
|
NDPF/salt-formula-nova
|
265d9e6c2cbd41d564ee389b210441d9f7378433
|
[
"Apache-2.0"
] | 35
|
2017-02-05T23:11:16.000Z
|
2019-04-04T17:21:36.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from six.moves import zip_longest
import time
import salt
from salt.exceptions import CommandExecutionError
LOG = logging.getLogger(__name__)
KEYSTONE_LOADED = False
def __virtual__():
"""Only load if the nova module is in __salt__"""
if 'keystonev3.project_get_details' in __salt__:
global KEYSTONE_LOADED
KEYSTONE_LOADED = True
return 'novav21'
def _get_keystone_project_id_by_name(project_name, cloud_name):
if not KEYSTONE_LOADED:
LOG.error("Keystone module not found, can not look up project ID "
"by name")
return None
project = __salt__['keystonev3.project_get_details'](
project_name, cloud_name=cloud_name)
if not project:
return None
return project['project']['id']
def cell_present(name='cell1', transport_url='none:///', db_engine='mysql',
db_name='nova_upgrade', db_user='nova', db_password=None,
db_address='0.0.0.0'):
"""Ensure nova cell is present
For newly created cells this state also runs discover_hosts and
map_instances."""
cell_info = __salt__['cmd.shell'](
"nova-manage cell_v2 list_cells --verbose | "
"awk '/%s/ {print $4,$6,$8}'" % name).split()
db_connection = (
'%(db_engine)s+pymysql://%(db_user)s:%(db_password)s@'
'%(db_address)s/%(db_name)s?charset=utf8' % {
'db_engine': db_engine, 'db_user': db_user,
'db_password': db_password, 'db_address': db_address,
'db_name': db_name})
args = {'transport_url': transport_url, 'db_connection': db_connection}
# There should be at least 1 component printed to cell_info
if len(cell_info) >= 1:
cell_info = dict(zip_longest(
('cell_uuid', 'existing_transport_url', 'existing_db_connection'),
cell_info))
cell_uuid, existing_transport_url, existing_db_connection = cell_info
command_string = ''
if existing_transport_url != transport_url:
command_string = (
'%s --transport-url %%(transport_url)s' % command_string)
if existing_db_connection != db_connection:
command_string = (
'%s --database_connection %%(db_connection)s' % command_string)
if not command_string:
return _no_change(name, 'Nova cell')
try:
__salt__['cmd.shell'](
('nova-manage cell_v2 update_cell --cell_uuid %s %s' % (
cell_uuid, command_string)) % args)
LOG.warning("Updating the transport_url or database_connection "
"fields on a running system will NOT result in all "
"nodes immediately using the new values. Use caution "
"when changing these values.")
ret = _updated(name, 'Nova cell', args)
except Exception as e:
ret = _update_failed(name, 'Nova cell')
ret['comment'] += '\nException: %s' % e
return ret
args.update(name=name)
try:
cell_uuid = __salt__['cmd.shell'](
'nova-manage cell_v2 create_cell --name %(name)s '
'--transport-url %(transport_url)s '
'--database_connection %(db_connection)s --verbose' % args)
__salt__['cmd.shell']('nova-manage cell_v2 discover_hosts '
'--cell_uuid %s --verbose' % cell_uuid)
__salt__['cmd.shell']('nova-manage cell_v2 map_instances '
'--cell_uuid %s' % cell_uuid)
ret = _created(name, 'Nova cell', args)
except Exception as e:
ret = _create_failed(name, 'Nova cell')
ret['comment'] += '\nException: %s' % e
return ret
def cell_absent(name, force=False):
"""Ensure cell is absent"""
cell_uuid = __salt__['cmd.shell'](
"nova-manage cell_v2 list_cells | awk '/%s/ {print $4}'" % name)
if not cell_uuid:
return _non_existent(name, 'Nova cell')
try:
__salt__['cmd.shell'](
'nova-manage cell_v2 delete_cell --cell_uuid %s %s' % (
cell_uuid, '--force' if force else ''))
ret = _deleted(name, 'Nova cell')
except Exception as e:
ret = _delete_failed(name, 'Nova cell')
ret['comment'] += '\nException: %s' % e
return ret
def _db_version_update(db, version, human_readable_resource_name):
existing_version = __salt__['cmd.shell'](
'nova-manage %s version 2>/dev/null' % db)
try:
existing_version = int(existing_version)
version = int(version)
except Exception as e:
ret = _update_failed(existing_version,
human_readable_resource_name)
ret['comment'] += ('\nCan not convert existing or requested version '
'to integer, exception: %s' % e)
LOG.error(ret['comment'])
return ret
if existing_version < version:
try:
__salt__['cmd.shell'](
'nova-manage %s sync --version %s' % (db, version))
ret = _updated(existing_version, human_readable_resource_name,
{db: '%s sync --version %s' % (db, version)})
except Exception as e:
ret = _update_failed(existing_version,
human_readable_resource_name)
ret['comment'] += '\nException: %s' % e
return ret
return _no_change(existing_version, human_readable_resource_name)
def api_db_version_present(name=None, version="20"):
"""Ensures that specific api_db version is present"""
return _db_version_update('api_db', version, 'Nova API database version')
def db_version_present(name=None, version="334"):
"""Ensures that specific db version is present"""
return _db_version_update('db', version, 'Nova database version')
def online_data_migrations_present(name=None, api_db_version="20",
db_version="334"):
"""Runs online_data_migrations if databases are of specific versions"""
ret = {'name': 'online_data_migrations', 'changes': {}, 'result': False,
'comment': 'Current nova api_db version != {0} or nova db version '
'!= {1}.'.format(api_db_version, db_version)}
cur_api_db_version = __salt__['cmd.shell'](
'nova-manage api_db version 2>/dev/null')
cur_db_version = __salt__['cmd.shell'](
'nova-manage db version 2>/dev/null')
try:
cur_api_db_version = int(cur_api_db_version)
cur_db_version = int(cur_db_version)
api_db_version = int(api_db_version)
db_version = int(db_version)
except Exception as e:
LOG.error(ret['comment'])
ret['comment'] = ('\nCan not convert existing or requested database '
'versions to integer, exception: %s' % e)
return ret
if cur_api_db_version == api_db_version and cur_db_version == db_version:
try:
__salt__['cmd.shell']('nova-manage db online_data_migrations')
ret['result'] = True
ret['comment'] = ('nova-manage db online_data_migrations was '
                              'executed successfully')
ret['changes']['online_data_migrations'] = (
'online_data_migrations run on nova api_db version {0} and '
'nova db version {1}'.format(api_db_version, db_version))
except Exception as e:
ret['comment'] = (
'Failed to execute online_data_migrations on nova api_db '
'version %s and nova db version %s, exception: %s' % (
api_db_version, db_version, e))
return ret
def _find_failed(name, resource):
return {
'name': name, 'changes': {}, 'result': False,
'comment': 'Failed to find {0}s with name {1}'.format(resource, name)}
def _created(name, resource, changes):
return {
'name': name, 'changes': changes, 'result': True,
'comment': '{0} {1} created'.format(resource, name)}
def _create_failed(name, resource):
return {
'name': name, 'changes': {}, 'result': False,
'comment': '{0} {1} creation failed'.format(resource, name)}
def _no_change(name, resource):
return {
'name': name, 'changes': {}, 'result': True,
'comment': '{0} {1} already is in the desired state'.format(
resource, name)}
def _updated(name, resource, changes):
return {
'name': name, 'changes': changes, 'result': True,
'comment': '{0} {1} was updated'.format(resource, name)}
def _update_failed(name, resource):
return {
'name': name, 'changes': {}, 'result': False,
'comment': '{0} {1} update failed'.format(resource, name)}
def _deleted(name, resource):
return {
'name': name, 'changes': {}, 'result': True,
'comment': '{0} {1} deleted'.format(resource, name)}
def _delete_failed(name, resource):
return {
'name': name, 'changes': {}, 'result': False,
'comment': '{0} {1} deletion failed'.format(resource, name)}
def _non_existent(name, resource):
return {
'name': name, 'changes': {}, 'result': True,
'comment': '{0} {1} does not exist'.format(resource, name)}
| 39.155268
| 84
| 0.626729
|
27ea8174cb81713dd5c70d96704d5a2c63cec32e
| 325
|
py
|
Python
|
server/dev.py
|
Khanable/Photography-Portfolio-Website
|
5019e8316e078dcb672888dd847fdd6b732443a9
|
[
"MIT"
] | null | null | null |
server/dev.py
|
Khanable/Photography-Portfolio-Website
|
5019e8316e078dcb672888dd847fdd6b732443a9
|
[
"MIT"
] | null | null | null |
server/dev.py
|
Khanable/Photography-Portfolio-Website
|
5019e8316e078dcb672888dd847fdd6b732443a9
|
[
"MIT"
] | null | null | null |
from sys import modules
from importlib import import_module
modules['server'] = import_module('src')
from werkzeug.serving import run_simple
from server.app import App
from server.mode import Mode
if __name__=='__main__':
app = App(mode=Mode.Development)
run_simple('localhost', 8000, app, use_reloader=True)
| 27.083333
| 55
| 0.76
|
27ed7774eba9356593529c7a047bb6eafaebca6b
| 6,891
|
py
|
Python
|
src/pyff/fetch.py
|
rhoerbe/pyFF
|
85933ed9cc9f720c9432d5e4c3114895cefd3579
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/pyff/fetch.py
|
rhoerbe/pyFF
|
85933ed9cc9f720c9432d5e4c3114895cefd3579
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
src/pyff/fetch.py
|
rhoerbe/pyFF
|
85933ed9cc9f720c9432d5e4c3114895cefd3579
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
"""
An abstraction layer for metadata fetchers. Supports both synchronous and asynchronous fetchers with cache.
"""
from .logs import get_log
import os
import requests
from .constants import config
from datetime import datetime
from collections import deque
import six
from concurrent import futures
import traceback
from .parse import parse_resource
from itertools import chain
from .exceptions import ResourceException
from .utils import url_get
from copy import deepcopy, copy
if six.PY2:
from UserDict import DictMixin as ResourceManagerBase
elif six.PY3:
from collections import MutableMapping as ResourceManagerBase
requests.packages.urllib3.disable_warnings()
log = get_log(__name__)
| 29.702586
| 108
| 0.565085
|
27effbc79d2bf6543199f4b75da0205988092da4
| 498
|
py
|
Python
|
pychonet/HomeSolarPower.py
|
mochipon/pychonet
|
65ba4189f9a66b6e698646854542cdd506369813
|
[
"MIT"
] | null | null | null |
pychonet/HomeSolarPower.py
|
mochipon/pychonet
|
65ba4189f9a66b6e698646854542cdd506369813
|
[
"MIT"
] | null | null | null |
pychonet/HomeSolarPower.py
|
mochipon/pychonet
|
65ba4189f9a66b6e698646854542cdd506369813
|
[
"MIT"
] | null | null | null |
from pychonet.EchonetInstance import EchonetInstance
| 35.571429
| 79
| 0.728916
|
27f5b22f4011155a67ce267a26bf5d2d27c8298e
| 6,955
|
py
|
Python
|
adlmagics/adlmagics/adlmagics_main.py
|
Azure/Azure-Data-Service-Notebook
|
6bd28587c9fa0a7c1f9113f638b790b1773c5585
|
[
"MIT"
] | 6
|
2018-06-06T08:37:53.000Z
|
2020-06-01T13:13:13.000Z
|
adlmagics/adlmagics/adlmagics_main.py
|
Azure/Azure-Data-Service-Notebook
|
6bd28587c9fa0a7c1f9113f638b790b1773c5585
|
[
"MIT"
] | 30
|
2018-06-08T02:47:18.000Z
|
2018-07-25T07:07:07.000Z
|
adlmagics/adlmagics/adlmagics_main.py
|
Azure/Azure-Data-Service-Notebook
|
6bd28587c9fa0a7c1f9113f638b790b1773c5585
|
[
"MIT"
] | 5
|
2018-06-06T08:37:55.000Z
|
2021-01-07T09:15:15.000Z
|
from IPython.core.magic import Magics, magics_class, line_cell_magic
from sys import stdout
from os import linesep
from os.path import join, expanduser
from adlmagics.version import adlmagics_version
from adlmagics.converters.dataframe_converter import DataFrameConverter
from adlmagics.utils.json_file_persister import JsonFilePersister
from adlmagics.utils.ipshell_result_receiver import IPShellResultReceiver
from adlmagics.presenters.presenter_base import PresenterBase
from adlmagics.presenters.text_presenter import TextPresenter
from adlmagics.presenters.adla_job_presenter import AdlaJobPresenter
from adlmagics.presenters.adla_jobs_presenter import AdlaJobsPresenter
from adlmagics.presenters.adls_files_presenter import AdlsFilesPresenter
from adlmagics.presenters.adls_folders_presenter import AdlsFoldersPresenter
from adlmagics.services.azure_token_service import AzureTokenService
from adlmagics.services.adla_service_sdk_impl import AdlaServiceSdkImpl
from adlmagics.services.adls_service_sdk_impl import AdlsServiceSdkImpl
from adlmagics.services.session_service import SessionService
from adlmagics.services.presenter_factory import PresenterFactory
from adlmagics.magics.session.session_magic_base import SessionMagicBase
from adlmagics.magics.session.session_viewing_magic import SessionViewingMagic
from adlmagics.magics.session.session_item_setting_magic import SessionItemSettingMagic
from adlmagics.magics.azure.azure_magic_base import AzureMagicBase
from adlmagics.magics.azure.azure_login_magic import AzureLoginMagic
from adlmagics.magics.azure.azure_logout_magic import AzureLogoutMagic
from adlmagics.magics.adla.adla_magic_base import AdlaMagicBase
from adlmagics.magics.adla.adla_accounts_listing_magic import AdlaAccountsListingMagic
from adlmagics.magics.adla.adla_job_viewing_magic import AdlaJobViewingMagic
from adlmagics.magics.adla.adla_job_submission_magic import AdlaJobSubmissionMagic
from adlmagics.magics.adla.adla_jobs_listing_magic import AdlaJobsListingMagic
from adlmagics.magics.adls.adls_magic_base import AdlsMagicBase
from adlmagics.magics.adls.adls_accounts_listing_magic import AdlsAccountsListingMagic
from adlmagics.magics.adls.adls_folders_listing_magic import AdlsFoldersListingMagic
from adlmagics.magics.adls.adls_files_listing_magic import AdlsFilesListingMagic
from adlmagics.magics.adls.adls_file_sampling_magic import AdlsFileSamplingMagic
| 48.298611
| 168
| 0.78404
|
27f6676280bfbc46f5ea3961bee24ccfef845e05
| 10,137
|
py
|
Python
|
metadata_service/api/dashboard.py
|
iiAnderson/metaflow-service
|
b42391e5ee2187a93259b944c515522d76b1314e
|
[
"Apache-2.0"
] | null | null | null |
metadata_service/api/dashboard.py
|
iiAnderson/metaflow-service
|
b42391e5ee2187a93259b944c515522d76b1314e
|
[
"Apache-2.0"
] | null | null | null |
metadata_service/api/dashboard.py
|
iiAnderson/metaflow-service
|
b42391e5ee2187a93259b944c515522d76b1314e
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import json
from datetime import datetime, timedelta
from aiohttp import web
from .utils import read_body, get_week_times, get_formatted_time
from ..data.models import RunRow
from ..data.postgres_async_db import AsyncPostgresDB
import logging
| 32.805825
| 111
| 0.56792
|
27f693df0e7ea237223f8c2bc9de9a57a4f98dac
| 838
|
py
|
Python
|
tests/test_report.py
|
whalebot-helmsman/pykt-64
|
ee5e0413cd850876d3abc438480fffea4f7b7517
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_report.py
|
whalebot-helmsman/pykt-64
|
ee5e0413cd850876d3abc438480fffea4f7b7517
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_report.py
|
whalebot-helmsman/pykt-64
|
ee5e0413cd850876d3abc438480fffea4f7b7517
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
from setup_teardown import start_db, stop_db
from nose.tools import *
from pykt import KyotoTycoon, KTException
| 21.487179
| 44
| 0.643198
|
27f6d38ee1079239114141527da38c16b3c99951
| 1,024
|
py
|
Python
|
src/Screen.py
|
D3r3k23/CaveRun
|
27f7b3c518f8646bc506f5d3b774ef6e62faef96
|
[
"MIT"
] | 1
|
2022-02-10T04:42:04.000Z
|
2022-02-10T04:42:04.000Z
|
src/Screen.py
|
D3r3k23/CaveRun
|
27f7b3c518f8646bc506f5d3b774ef6e62faef96
|
[
"MIT"
] | null | null | null |
src/Screen.py
|
D3r3k23/CaveRun
|
27f7b3c518f8646bc506f5d3b774ef6e62faef96
|
[
"MIT"
] | 1
|
2022-01-11T17:11:44.000Z
|
2022-01-11T17:11:44.000Z
|
import Resources
import Colors
import pygame
screen = None
# Base class for drawable objects
# Created from image and coordinates, stores image and rect
| 21.333333
| 90
| 0.640625
|
27f931503927cf87b2047c06d44bfc6dbb23b7c2
| 5,416
|
py
|
Python
|
manga_db/extractor/toonily.py
|
nilfoer/mangadb
|
860d7de310002735631ea26810b4df5b6bc08d7b
|
[
"MIT"
] | 3
|
2021-01-14T16:22:41.000Z
|
2022-02-21T03:31:22.000Z
|
manga_db/extractor/toonily.py
|
nilfoer/mangadb
|
860d7de310002735631ea26810b4df5b6bc08d7b
|
[
"MIT"
] | 13
|
2021-01-14T10:34:19.000Z
|
2021-05-20T08:47:54.000Z
|
manga_db/extractor/toonily.py
|
nilfoer/mangadb
|
860d7de310002735631ea26810b4df5b6bc08d7b
|
[
"MIT"
] | 1
|
2022-02-24T03:10:04.000Z
|
2022-02-24T03:10:04.000Z
|
import re
import datetime
import bs4
from typing import Dict, Tuple, Optional, TYPE_CHECKING, ClassVar, Pattern, cast, Match, Any
from .base import BaseMangaExtractor, MangaExtractorData
from ..constants import STATUS_IDS, CENSOR_IDS
if TYPE_CHECKING:
from ..ext_info import ExternalInfo
| 37.351724
| 111
| 0.605613
|
27fb6ab9dc39790c3dcbcf43be391bd869cc5d49
| 10,965
|
py
|
Python
|
blindbackup/providers/blindfs.py
|
nagylzs/blindbackup
|
fa0c7a6ef42bb5aefec99eff69a3227c8695fdd9
|
[
"Apache-2.0"
] | 1
|
2020-01-26T05:46:14.000Z
|
2020-01-26T05:46:14.000Z
|
blindbackup/providers/blindfs.py
|
nagylzs/blindbackup
|
fa0c7a6ef42bb5aefec99eff69a3227c8695fdd9
|
[
"Apache-2.0"
] | null | null | null |
blindbackup/providers/blindfs.py
|
nagylzs/blindbackup
|
fa0c7a6ef42bb5aefec99eff69a3227c8695fdd9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
import os.path
import threading
from .. import cryptfile
from ..util import *
from ..client import create_client
from ..syncdir import FsProvider, FsListener
| 38.882979
| 121
| 0.529503
|
27fd6c6f828a7e94f81f249d959e7e48fffdae85
| 3,587
|
py
|
Python
|
examples/computer_vision/harris.py
|
parag-hub/arrayfire-python
|
65040c10833506f212f13e5bcc0e49cb20645e6e
|
[
"BSD-3-Clause"
] | 420
|
2015-07-30T00:02:21.000Z
|
2022-03-28T16:52:28.000Z
|
examples/computer_vision/harris.py
|
parag-hub/arrayfire-python
|
65040c10833506f212f13e5bcc0e49cb20645e6e
|
[
"BSD-3-Clause"
] | 198
|
2015-07-29T17:17:36.000Z
|
2022-01-20T18:31:28.000Z
|
examples/computer_vision/harris.py
|
parag-hub/arrayfire-python
|
65040c10833506f212f13e5bcc0e49cb20645e6e
|
[
"BSD-3-Clause"
] | 75
|
2015-07-29T15:17:54.000Z
|
2022-02-24T06:50:23.000Z
|
#!/usr/bin/env python
#######################################################
# Copyright (c) 2018, ArrayFire
# All rights reserved.
#
# This file is distributed under 3-clause BSD license.
# The complete license agreement can be obtained at:
# http://arrayfire.com/licenses/BSD-3-Clause
########################################################
from time import time
import arrayfire as af
import os
import sys
if __name__ == "__main__":
if (len(sys.argv) > 1):
af.set_device(int(sys.argv[1]))
console = (sys.argv[2] == '-') if len(sys.argv) > 2 else False
af.info()
print("** ArrayFire Harris Corner Detector Demo **\n")
harris_demo(console)
| 28.927419
| 85
| 0.606078
|
7e0022ad51ef52a75fd8fa97ecb5ea7bdfaf493d
| 4,376
|
py
|
Python
|
tests/generate_data.py
|
ngounou92/py-glm
|
83081444e2cbba4d94f9e6b85b6be23e0ff600b8
|
[
"BSD-3-Clause"
] | 127
|
2017-09-01T13:54:43.000Z
|
2022-03-12T11:43:32.000Z
|
tests/generate_data.py
|
cscherrer/py-glm
|
d719d29fb5cc71c2cb5e728db36c6230a69292d8
|
[
"BSD-3-Clause"
] | 8
|
2017-09-01T14:00:55.000Z
|
2020-11-09T14:42:50.000Z
|
tests/generate_data.py
|
cscherrer/py-glm
|
d719d29fb5cc71c2cb5e728db36c6230a69292d8
|
[
"BSD-3-Clause"
] | 35
|
2017-09-01T19:23:04.000Z
|
2022-03-22T13:45:10.000Z
|
import numpy as np
from scipy.linalg import sqrtm
from sklearn.preprocessing import StandardScaler
| 41.67619
| 81
| 0.658364
|
7e0285965f79d7e3cf86a7275a5d19452f38b750
| 1,735
|
py
|
Python
|
scripts/http-server.py
|
jrbenito/SonoffDIY-tasmotizer
|
1fe9eb9b3b5630102feaf941bd02173d916e81a5
|
[
"MIT"
] | null | null | null |
scripts/http-server.py
|
jrbenito/SonoffDIY-tasmotizer
|
1fe9eb9b3b5630102feaf941bd02173d916e81a5
|
[
"MIT"
] | 3
|
2020-03-30T14:07:54.000Z
|
2020-03-30T22:59:29.000Z
|
scripts/http-server.py
|
jrbenito/SonoffDIY-tasmotizer
|
1fe9eb9b3b5630102feaf941bd02173d916e81a5
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# encoding: utf-8
"""
fake-registration-server.py
Created by nano on 2018-11-22.
Copyright (c) 2018 VTRUST. All rights reserved.
"""
import tornado.web
import tornado.locks
from tornado.options import define, options, parse_command_line
define("port", default=80, help="run on the given port", type=int)
define("addr", default="192.168.254.1", help="run on the given ip", type=str)
define("debug", default=True, help="run in debug mode")
import os
import signal
signal.signal(signal.SIGINT, exit_cleanly)
from base64 import b64encode
import hashlib
import hmac
import binascii
from time import time
timestamp = lambda : int(time())
def main():
parse_command_line()
app = tornado.web.Application(
[
(r"/", MainHandler),
('/files/(.*)', FilesHandler, {'path': str('../files/')}),
(r".*", tornado.web.RedirectHandler, {"url": "http://" + options.addr + "/", "permanent": False}),
],
debug=options.debug,
)
try:
app.listen(options.port, options.addr)
print("Listening on " + options.addr + ":" + str(options.port))
tornado.ioloop.IOLoop.current().start()
except OSError as err:
print("Could not start server on port " + str(options.port))
if err.errno == 98: # EADDRINUSE
print("Close the process on this port and try again")
else:
print(err)
if __name__ == "__main__":
main()
| 25.144928
| 101
| 0.702594
|
7e03585ae9ededa10d0e3ad01e0e054a8d2b1e4e
| 1,998
|
py
|
Python
|
tests/summary/test_binning_config.py
|
rob-tay/fast-carpenter
|
a8b128ba00b9a6808b2f0de40cefa2a360466897
|
[
"Apache-2.0"
] | null | null | null |
tests/summary/test_binning_config.py
|
rob-tay/fast-carpenter
|
a8b128ba00b9a6808b2f0de40cefa2a360466897
|
[
"Apache-2.0"
] | null | null | null |
tests/summary/test_binning_config.py
|
rob-tay/fast-carpenter
|
a8b128ba00b9a6808b2f0de40cefa2a360466897
|
[
"Apache-2.0"
] | null | null | null |
import numpy as np
import fast_carpenter.summary.binning_config as mgr
from . import dummy_binning_descriptions as binning
| 33.864407
| 116
| 0.68018
|
7e037b73adcc0dc266a78c21e1147b45fea5e505
| 671
|
py
|
Python
|
create_ITIs.py
|
daanvanes/mloc_exp
|
bf6fb94b933f1cb78c60c38f80f03c78e9da3686
|
[
"MIT"
] | 1
|
2019-03-20T15:12:07.000Z
|
2019-03-20T15:12:07.000Z
|
create_ITIs.py
|
daanvanes/mloc_exp
|
bf6fb94b933f1cb78c60c38f80f03c78e9da3686
|
[
"MIT"
] | null | null | null |
create_ITIs.py
|
daanvanes/mloc_exp
|
bf6fb94b933f1cb78c60c38f80f03c78e9da3686
|
[
"MIT"
] | null | null | null |
from __future__ import division
from constants import *
import numpy as np
import os
precueITIs = np.random.exponential(standard_parameters['mean_iti_precue'], standard_parameters['n_targets']) + standard_parameters['min_iti_precue']
np.save('ITIs/precueITIs.npy',precueITIs)
postcueITIs = np.random.exponential(standard_parameters['mean_iti_postcue'], standard_parameters['n_targets']) + standard_parameters['min_iti_postcue']
np.save('ITIs/postcueITIs.npy',postcueITIs)
spITIs = np.round(np.random.exponential(standard_parameters['mean_iti_sp'], standard_parameters['n_targets']) + standard_parameters['min_iti_sp']).astype('int32')
np.save('ITIs/spITIs.npy',spITIs)
| 51.615385
| 162
| 0.81073
|
7e040cb0e0c724ec734deffc7ed5a19d9e7e9d15
| 99
|
py
|
Python
|
venv/lib/python3.7/site-packages/kdlearn/myfunctions.py
|
FillOverFlow/kdlearn
|
1e57895cb10ca903a33e2774986661b9b64d4071
|
[
"MIT"
] | 1
|
2021-01-19T03:35:20.000Z
|
2021-01-19T03:35:20.000Z
|
venv/lib/python3.7/site-packages/kdlearn/myfunctions.py
|
FillOverFlow/kdlearn
|
1e57895cb10ca903a33e2774986661b9b64d4071
|
[
"MIT"
] | null | null | null |
venv/lib/python3.7/site-packages/kdlearn/myfunctions.py
|
FillOverFlow/kdlearn
|
1e57895cb10ca903a33e2774986661b9b64d4071
|
[
"MIT"
] | null | null | null |
'''
PUT FUNCTION HERE !!
Author Davinci
'''
| 9.9
| 29
| 0.626263
|
7e0480f047709048b68affbe1e229fbea8aaa94b
| 4,122
|
py
|
Python
|
Set_ADT/linearset.py
|
jaeheeLee17/DS_and_Algorithms_summary
|
917500dd768eae8cfbb02cf2838d494cb720f1c0
|
[
"MIT"
] | null | null | null |
Set_ADT/linearset.py
|
jaeheeLee17/DS_and_Algorithms_summary
|
917500dd768eae8cfbb02cf2838d494cb720f1c0
|
[
"MIT"
] | null | null | null |
Set_ADT/linearset.py
|
jaeheeLee17/DS_and_Algorithms_summary
|
917500dd768eae8cfbb02cf2838d494cb720f1c0
|
[
"MIT"
] | null | null | null |
# Implementation of the Set ADT container using a Python list.
# An iterator for the Set ADT.
| 31.707692
| 71
| 0.563561
|
7e0968601bb493a7e6ab7c62ca33e94de63a37f6
| 123
|
py
|
Python
|
src/apps/buttons/apps.py
|
GoddessEyes/info_tbot
|
c7c5c818dc0c0c72aa15e6e4a85e7e28b4a7660d
|
[
"MIT"
] | null | null | null |
src/apps/buttons/apps.py
|
GoddessEyes/info_tbot
|
c7c5c818dc0c0c72aa15e6e4a85e7e28b4a7660d
|
[
"MIT"
] | 4
|
2021-03-19T02:42:10.000Z
|
2021-09-22T19:08:09.000Z
|
src/apps/buttons/apps.py
|
GoddessEyes/info_tbot
|
c7c5c818dc0c0c72aa15e6e4a85e7e28b4a7660d
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
| 17.571429
| 33
| 0.731707
|
7e09cc367a70ac9496d060fdad8e3eb6e83f2472
| 141
|
py
|
Python
|
code/tenka1_2019_c_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | 3
|
2019-08-16T16:55:48.000Z
|
2021-04-11T10:21:40.000Z
|
code/tenka1_2019_c_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | null | null | null |
code/tenka1_2019_c_01.py
|
KoyanagiHitoshi/AtCoder
|
731892543769b5df15254e1f32b756190378d292
|
[
"MIT"
] | null | null | null |
input()
S=input()
dot=S.count(".")
ans=dot
count=0
for s in S:
if s=="#":count+=1
else:dot-=1
ans=(min(ans,count+dot))
print(ans)
| 14.1
| 28
| 0.574468
|
7e0a3148033e56abb61f66e7e257ace62456c980
| 2,932
|
py
|
Python
|
app/billing/views.py
|
flaviogf/finance
|
86a74e1eea6b19d7fe8c311eb77394a267e26432
|
[
"MIT"
] | null | null | null |
app/billing/views.py
|
flaviogf/finance
|
86a74e1eea6b19d7fe8c311eb77394a267e26432
|
[
"MIT"
] | null | null | null |
app/billing/views.py
|
flaviogf/finance
|
86a74e1eea6b19d7fe8c311eb77394a267e26432
|
[
"MIT"
] | null | null | null |
from flask import (Blueprint, abort, flash, redirect, render_template, request,
url_for)
from flask_login import current_user, login_required
from app import db
from app.billing.forms import CreateBillingForm
from app.models import Billing
from sqlalchemy import desc
billing = Blueprint('billing', __name__)
| 27.660377
| 96
| 0.664734
|
7e0b8779363fd91f6026918cffc7f561df56bcf8
| 9,120
|
py
|
Python
|
flickipedia/mashup.py
|
rfaulkner/Flickipedia
|
1b53f30be4027901748a09c411d568c7148f4e4b
|
[
"BSD-2-Clause"
] | 1
|
2016-03-11T09:40:19.000Z
|
2016-03-11T09:40:19.000Z
|
flickipedia/mashup.py
|
rfaulkner/Flickipedia
|
1b53f30be4027901748a09c411d568c7148f4e4b
|
[
"BSD-2-Clause"
] | 1
|
2015-02-27T02:23:19.000Z
|
2015-02-27T02:23:19.000Z
|
flickipedia/mashup.py
|
rfaulkner/Flickipedia
|
1b53f30be4027901748a09c411d568c7148f4e4b
|
[
"BSD-2-Clause"
] | null | null | null |
"""
Author: Ryan Faulkner
Date: October 19th, 2014
Container for mashup logic.
"""
import json
import random
from sqlalchemy.orm.exc import UnmappedInstanceError
from flickipedia.redisio import DataIORedis
from flickipedia.model.articles import ArticleModel, ArticleContentModel
from flickipedia.config import log, settings
from flickipedia.model.likes import LikeModel
from flickipedia.model.exclude import ExcludeModel
from flickipedia.model.photos import PhotoModel
from flickipedia.parse import parse_strip_elements, parse_convert_links, \
handle_photo_integrate, format_title_link, add_formatting_generic
def get_article_count():
"""
Fetch total article count
:return: int; total count of articles
"""
DataIORedis().connect()
# Fetch article count from redis (query from DB if not present)
# Refresh according to config for rate
article_count = DataIORedis().read(settings.ARTICLE_COUNT_KEY)
if not article_count \
or random.randint(1, settings.ARTICLE_COUNT_REFRESH_RATE) == 1 \
or article_count < settings.MYSQL_MAX_ROWS:
with ArticleModel() as am:
article_count = am.get_article_count()
DataIORedis().write(settings.ARTICLE_COUNT_KEY, article_count)
return int(article_count)
def get_max_article_id():
"""
Fetch the maximum article ID
:return: int; maximum id from article meta
"""
max_aid = DataIORedis().read(settings.MAX_ARTICLE_ID_KEY)
if not max_aid \
or random.randint(1, settings.ARTICLE_MAXID_REFRESH_RATE) == 1:
with ArticleModel() as am:
max_aid = am.get_max_id()
DataIORedis().write(settings.MAX_ARTICLE_ID_KEY, max_aid)
return max_aid
def get_article_stored_body(article):
"""
Fetch corresponding article object
:param article: str; article name
:return: json, Article; stored page content, corresponding
article model object
"""
with ArticleModel() as am:
article_obj = am.get_article_by_name(article)
try:
with ArticleContentModel() as acm:
body = acm.get_article_content(article_obj._id).markup
except Exception as e:
log.info('Article markup not found: "%s"' % e.message)
body = ''
return body
def get_wiki_content(article):
"""
Retrieve the wiki content from the mediawiki API
:param article: str; article name
:return: Wikipedia; mediawiki api response object
"""
pass
def get_flickr_photos(flickr_json):
"""
    Retrieve Flickr photo content from the Flickr API
    :param flickr_json: dict; Flickr API response json
:return: list; list of Flickr photo json
"""
photos = []
for i in xrange(settings.NUM_PHOTOS_TO_FETCH):
try:
photos.append(
{
'owner': flickr_json['photos']['photo'][i]['owner'],
'photo_id': flickr_json['photos']['photo'][i]['id'],
'farm': flickr_json['photos']['photo'][i]['farm'],
'server': flickr_json['photos']['photo'][i]['server'],
'title': flickr_json['photos']['photo'][i]['title'],
'secret': flickr_json['photos']['photo'][i]['secret'],
},
)
except (IndexError, KeyError) as e:
log.error('No more photos to process for: - "%s"' % (e.message))
log.debug('Photo info: %s' % (str(photos)))
return photos
def manage_article_storage(max_article_id, article_count):
"""
Handle the storage of new articles
:param max_article_id: int; article id
:param article_count: int; total count of articles
:return: bool; success
"""
if article_count >= settings.MYSQL_MAX_ROWS:
if max_article_id:
# TODO - CHANGE THIS be careful, could iterate many times
article_removed = False
attempts = 0
while not article_removed \
or attempts > settings.MAX_RETRIES_FOR_REMOVE:
attempts += 1
article_id = random.randint(0, int(max_article_id))
with ArticleModel() as am:
log.info('Removing article id: ' + str(article_id))
try:
am.delete_article(article_id)
article_removed = True
except UnmappedInstanceError:
continue
else:
log.error('Could not determine a max article id.')
return True
def handle_article_insert(article, wiki_page_id):
"""
Handle insertion of article meta data
:param article_id: int; article id
:return: int, bool; success
"""
with ArticleModel() as am:
if am.insert_article(article, wiki_page_id):
article_obj = am.get_article_by_name(article)
article_id = article_obj._id
success = True
else:
log.error('Couldn\'t insert article: "%s"' % article)
article_id = -1
success = False
return article_id, success
def handle_article_content_insert(article_id, page_content, is_new_article):
"""
Handle the insertion of article content
:param article_id: int; article id
:param page_content: json; page content
:param is_new_article: bool; a new article?
:return: bool; success
"""
with ArticleContentModel() as acm:
if is_new_article:
acm.insert_article(article_id, json.dumps(page_content))
else:
acm.update_article(article_id, json.dumps(page_content))
def prep_page_content(article_id, article, wiki, photos, user_obj):
"""
Prepare the formatted article content
:param article_id: int; article id
:param article: str; article name
:param wiki_resp: wikipedia; mediawiki api response
:param photos: list; list of photo json
:param user_obj: User; user object for request
:return: dict; formatted page response passed to jinja template
"""
html = parse_strip_elements(wiki.html())
html = parse_convert_links(html)
html = add_formatting_generic(html)
photo_ids = process_photos(article_id, photos, user_obj)
html = handle_photo_integrate(photos, html, article)
page_content = {
'title': format_title_link(wiki.title, article),
'content': html,
'section_img_class': settings.SECTION_IMG_CLASS,
'num_photos': len(photos),
'article_id': article_id,
'user_id': user_obj.get_id(),
'photo_ids': photo_ids
}
return page_content
def update_last_access(article_id):
"""
Update article last access
:param article_id: int; article id
:return: bool; success
"""
pass
def order_photos_by_rank(article_id, photos):
""" Reorders photos by score """
# Compute scores
for i in xrange(len(photos)):
# Get Exclusions & Endorsements
with ExcludeModel() as em:
exclusions = em.get_excludes_article_photo(article_id,
photos[i]['photo_id'])
with LikeModel() as lm:
endorsements = lm.get_likes_article_photo(article_id,
photos[i]['photo_id'])
photos[i]['score'] = len(endorsements) - len(exclusions)
# lambda method for sorting by score descending
f = lambda x, y: cmp(-x['score'], -y['score'])
return sorted(photos, f)
def process_photos(article_id, photos, user_obj):
"""
Handles linking photo results with the model and returns a list of
Flickr photo ids to pass to templating
:param article_id: int; article id
:param photos: list of photos
:param user_obj: User; user object for request
:return: List of Flickr photo ids
"""
photo_ids = []
for photo in photos:
# Ensure that each photo is modeled
with PhotoModel() as pm:
photo_obj = pm.get_photo(photo['photo_id'], article_id)
if not photo_obj:
log.info('Processing photo: "%s"' % str(photo))
if pm.insert_photo(photo['photo_id'], article_id):
photo_obj = pm.get_photo(
photo['photo_id'], article_id)
if not photo_obj:
log.error('DB Error: Could not retrieve or '
'insert: "%s"' % str(photo))
continue
                else:
                    log.error('Couldn\'t insert photo: "%s"' % (
                        photo['photo_id']))
                    continue
photo['id'] = photo_obj._id
photo['votes'] = photo_obj.votes
# Retrieve like data
with LikeModel() as lm:
if lm.get_like(article_id, photo_obj._id,
user_obj.get_id()):
photo['like'] = True
else:
photo['like'] = False
photo_ids.append(photo['photo_id'])
return photo_ids
| 35.076923
| 78
| 0.607346
|
7e0be21835c15a9296a6ae0c119d0388d9169b45
| 240
|
py
|
Python
|
docs/examples/slider_dimmer.py
|
SatoshiIwasada/BlueDot
|
e93bc242593d3a3cbfd0ff97f98fcffb0fcd961a
|
[
"MIT"
] | 112
|
2017-03-27T17:23:17.000Z
|
2022-03-13T09:51:43.000Z
|
docs/examples/slider_dimmer.py
|
SatoshiIwasada/BlueDot
|
e93bc242593d3a3cbfd0ff97f98fcffb0fcd961a
|
[
"MIT"
] | 109
|
2017-03-29T11:19:54.000Z
|
2022-02-03T14:18:15.000Z
|
docs/examples/slider_dimmer.py
|
SatoshiIwasada/BlueDot
|
e93bc242593d3a3cbfd0ff97f98fcffb0fcd961a
|
[
"MIT"
] | 40
|
2017-03-30T23:23:27.000Z
|
2022-01-21T17:09:11.000Z
|
from bluedot import BlueDot
from gpiozero import PWMLED
from signal import pause
led = PWMLED(27)
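# The callback wired to bd.when_moved below is elided from this excerpt; a
# minimal sketch, assuming the usual BlueDot slider idiom of mapping the dot's
# y position (-1..1) onto LED brightness (0..1):
def set_brightness(pos):
    brightness = (pos.y + 1) / 2
    led.value = brightness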
bd = BlueDot()
bd.when_moved = set_brightness
pause()
| 17.142857
| 32
| 0.725
|
7e0e399837934c037868f72f1f2ece1fe8884d6e
| 328
|
py
|
Python
|
blog/urls.py
|
Halo-Developers/Halo-Learn
|
4c8f9e395c0145df39fa3333fefa23d02a370688
|
[
"MIT"
] | 1
|
2021-09-23T16:02:51.000Z
|
2021-09-23T16:02:51.000Z
|
blog/urls.py
|
kuyesu/Halo-Learn
|
abd60d45c191297daedd20b3b308a30a78cba9c7
|
[
"MIT"
] | null | null | null |
blog/urls.py
|
kuyesu/Halo-Learn
|
abd60d45c191297daedd20b3b308a30a78cba9c7
|
[
"MIT"
] | 2
|
2021-09-20T09:50:45.000Z
|
2022-02-20T06:42:42.000Z
|
from django.urls import path
from . import views
app_name = 'blog'
urlpatterns = [
path('', views.post_list, name='post_list'),
path('<slug:post>/',views.post_detail,name="post_detail"),
path('comment/reply/', views.reply_page, name="reply"),
path('tag/<slug:tag_slug>/',views.post_list, name='post_tag'),
]
| 27.333333
| 68
| 0.67378
|
fd665b1231aab43a664a3eab839a54a833e10f79
| 3,144
|
py
|
Python
|
web/env/lib/python3.6/site-packages/test/file/test_includer.py
|
Conbrown100/webfortune
|
779026d064498d36ddeba07e06cc744fb335ceb6
|
[
"Apache-2.0"
] | 8
|
2015-07-30T16:19:18.000Z
|
2021-08-10T21:00:47.000Z
|
web/env/lib/python3.6/site-packages/test/file/test_includer.py
|
Conbrown100/webfortune
|
779026d064498d36ddeba07e06cc744fb335ceb6
|
[
"Apache-2.0"
] | 3
|
2015-01-09T13:53:55.000Z
|
2017-06-05T17:39:46.000Z
|
web/env/lib/python3.6/site-packages/test/file/test_includer.py
|
Conbrown100/webfortune
|
779026d064498d36ddeba07e06cc744fb335ceb6
|
[
"Apache-2.0"
] | 6
|
2015-01-09T13:47:15.000Z
|
2020-12-25T14:09:41.000Z
|
import os
from tempfile import TemporaryDirectory
import codecs
import logging
from grizzled.file.includer import *
from grizzled.os import working_directory
from grizzled.text import strip_margin
import pytest
| 29.383178
| 70
| 0.515585
|
fd684e3bf1de0c4b9c2f1d5a15a6a2d42e862075
| 286
|
py
|
Python
|
output/models/nist_data/list_pkg/unsigned_short/schema_instance/nistschema_sv_iv_list_unsigned_short_min_length_2_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 1
|
2021-08-14T17:59:21.000Z
|
2021-08-14T17:59:21.000Z
|
output/models/nist_data/list_pkg/unsigned_short/schema_instance/nistschema_sv_iv_list_unsigned_short_min_length_2_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | 4
|
2020-02-12T21:30:44.000Z
|
2020-04-15T20:06:46.000Z
|
output/models/nist_data/list_pkg/unsigned_short/schema_instance/nistschema_sv_iv_list_unsigned_short_min_length_2_xsd/__init__.py
|
tefra/xsdata-w3c-tests
|
b6b6a4ac4e0ab610e4b50d868510a8b7105b1a5f
|
[
"MIT"
] | null | null | null |
from output.models.nist_data.list_pkg.unsigned_short.schema_instance.nistschema_sv_iv_list_unsigned_short_min_length_2_xsd.nistschema_sv_iv_list_unsigned_short_min_length_2 import NistschemaSvIvListUnsignedShortMinLength2
__all__ = [
"NistschemaSvIvListUnsignedShortMinLength2",
]
| 47.666667
| 221
| 0.905594
|
fd69e06856c7f3a481475985f97cf69bf7d1965f
| 127
|
py
|
Python
|
satchmo/projects/skeleton/localsite/urls.py
|
predatell/satchmo
|
6ced1f845aadec240c7e433c3cbf4caca96e0d92
|
[
"BSD-3-Clause"
] | 1
|
2019-10-08T16:19:59.000Z
|
2019-10-08T16:19:59.000Z
|
satchmo/projects/skeleton/localsite/urls.py
|
predatell/satchmo
|
6ced1f845aadec240c7e433c3cbf4caca96e0d92
|
[
"BSD-3-Clause"
] | null | null | null |
satchmo/projects/skeleton/localsite/urls.py
|
predatell/satchmo
|
6ced1f845aadec240c7e433c3cbf4caca96e0d92
|
[
"BSD-3-Clause"
] | null | null | null |
from django.conf.urls import url
from simple.localsite.views import example
urlpatterns = [
url(r'example/', example),
]
| 15.875
| 42
| 0.732283
|
fd6a627b6084b5a56d9fe3161a2d00c62052ed2a
| 8,850
|
py
|
Python
|
tbconnect/tests/test_views.py
|
praekeltfoundation/healthcheck
|
3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba
|
[
"BSD-3-Clause"
] | null | null | null |
tbconnect/tests/test_views.py
|
praekeltfoundation/healthcheck
|
3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba
|
[
"BSD-3-Clause"
] | 23
|
2020-07-16T15:40:35.000Z
|
2021-12-13T13:59:30.000Z
|
tbconnect/tests/test_views.py
|
praekeltfoundation/healthcheck
|
3f8b3722ea41c2d81c706e0f9a3473ba2cb2f2ba
|
[
"BSD-3-Clause"
] | 1
|
2021-02-24T04:58:40.000Z
|
2021-02-24T04:58:40.000Z
|
from django.test import TestCase
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Permission
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APITestCase
from tbconnect.models import TBCheck, TBTest
from userprofile.models import HealthCheckUserProfile
from userprofile.tests.test_views import BaseEventTestCase
from tbconnect.serializers import TBCheckSerializer
| 37.184874
| 84
| 0.568701
|
fd6abf4d61e22150256649650adbe262b09e0720
| 1,350
|
py
|
Python
|
code/runibm1.py
|
jrod2699/CS159-NLP-Final-Project-
|
76eea6149ab01d5e72232874398458ec9f35227f
|
[
"MIT"
] | null | null | null |
code/runibm1.py
|
jrod2699/CS159-NLP-Final-Project-
|
76eea6149ab01d5e72232874398458ec9f35227f
|
[
"MIT"
] | null | null | null |
code/runibm1.py
|
jrod2699/CS159-NLP-Final-Project-
|
76eea6149ab01d5e72232874398458ec9f35227f
|
[
"MIT"
] | null | null | null |
import nltk
import random
from preprocess import compile_corpus
from nltk.translate import IBMModel1, AlignedSent, Alignment
def get_rand_sent():
'''
Redirect the standard output of the program -- i.e. the random sentences --
and transfer it over to the appropriate file. From there we will take a
look at the sentence pair and include the hand alignment (gold standard)
to proceed with evaluating the IBM model.
'''
i = 0
while i < 20:
        index = random.randint(0, len(corpus) - 1)
try:
# only print out "valid" sentence pairs
# valid = sentence pairs with system-created alignments
print(" ".join(corpus[index].mots), "\t", " ".join(corpus[index].words), "\t", corpus[index].alignment)
i += 1
except:
pass
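# A hypothetical sketch of the missing main() -- its real definition is
# truncated out of this excerpt. It would plausibly build the aligned corpus,
# train IBM Model 1 so each sentence pair gets a system alignment, and then
# sample pairs for hand alignment; compile_corpus's signature is an assumption.
def main():
    global corpus
    corpus = compile_corpus()
    IBMModel1(corpus, 5)  # 5 EM iterations; fills in each AlignedSent.alignment
    get_rand_sent()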
if __name__ == "__main__":
main()
| 31.395349
| 115
| 0.665926
|
fd6ba7ba1979062899ef77a2f2ebee1332127153
| 374
|
py
|
Python
|
makenew_python_app/server/boot.py
|
makenew/python-app
|
5f3c6669efe6e80d356d39afb712d72bf0e69916
|
[
"MIT"
] | 2
|
2021-01-10T05:54:37.000Z
|
2021-01-12T01:24:38.000Z
|
makenew_python_app/server/boot.py
|
makenew/python-app
|
5f3c6669efe6e80d356d39afb712d72bf0e69916
|
[
"MIT"
] | null | null | null |
makenew_python_app/server/boot.py
|
makenew/python-app
|
5f3c6669efe6e80d356d39afb712d72bf0e69916
|
[
"MIT"
] | null | null | null |
from os import environ, path
from .server import Server
from .config import configure
| 26.714286
| 68
| 0.716578
|
fd6c34c3adb6f440619a388c9b66cf0b7a99a5e9
| 1,027
|
py
|
Python
|
checker/backends/pagure.py
|
1dot75cm/repo-checker
|
1ca191efbeaa9f44876546ee59487e8d515cd735
|
[
"MIT"
] | 4
|
2016-01-10T15:58:48.000Z
|
2019-08-10T23:12:31.000Z
|
checker/backends/pagure.py
|
1dot75cm/repo-checker
|
1ca191efbeaa9f44876546ee59487e8d515cd735
|
[
"MIT"
] | 1
|
2021-03-31T18:46:14.000Z
|
2021-03-31T18:46:14.000Z
|
checker/backends/pagure.py
|
1dot75cm/repo-checker
|
1ca191efbeaa9f44876546ee59487e8d515cd735
|
[
"MIT"
] | 5
|
2016-03-22T00:58:33.000Z
|
2017-09-14T12:43:54.000Z
|
# -*- coding: utf-8 -*-
from checker.backends import BaseBackend
from checker import logger
log = logger.getLogger(__name__)
| 27.756757
| 77
| 0.559883
|
fd6d8440e80ddfffc2b7c87e874259a2676fb497
| 2,184
|
py
|
Python
|
vial/plugins/grep/plugin.py
|
solarnz/vial
|
080dd204c6fac49c9541cd179e7842de7cb6f8ee
|
[
"MIT"
] | 5
|
2015-06-27T09:36:26.000Z
|
2018-05-05T02:43:43.000Z
|
vial/plugins/grep/plugin.py
|
solarnz/vial
|
080dd204c6fac49c9541cd179e7842de7cb6f8ee
|
[
"MIT"
] | 4
|
2018-06-07T15:19:33.000Z
|
2020-02-10T12:15:11.000Z
|
vial/plugins/grep/plugin.py
|
solarnz/vial
|
080dd204c6fac49c9541cd179e7842de7cb6f8ee
|
[
"MIT"
] | 2
|
2019-08-30T07:27:05.000Z
|
2020-02-12T08:03:24.000Z
|
import os
import re
from time import time
from vial import vfunc, vim
from vial.fsearch import get_files
from vial.utils import get_projects, redraw
MAX_FILESIZE = 10 * 1024 * 1024
| 24.818182
| 61
| 0.431777
|
fd6ea7420f474f3252a16e6bcdeebb2e566cf6e9
| 3,619
|
py
|
Python
|
tests/test_models.py
|
DynamicGravitySystems/DGP
|
5c0b566b846eb25f1e5ede64b2caaaa6a3352a29
|
[
"Apache-2.0"
] | 7
|
2017-08-15T21:51:40.000Z
|
2020-10-28T00:40:23.000Z
|
tests/test_models.py
|
DynamicGravitySystems/DGP
|
5c0b566b846eb25f1e5ede64b2caaaa6a3352a29
|
[
"Apache-2.0"
] | 63
|
2017-08-11T15:12:03.000Z
|
2020-05-23T19:03:46.000Z
|
tests/test_models.py
|
cbertinato/DGP
|
5bb8a30895365eccdd452970c45e248903fca8af
|
[
"Apache-2.0"
] | 4
|
2018-03-29T21:30:26.000Z
|
2020-10-27T20:15:23.000Z
|
# -*- coding: utf-8 -*-
"""
Unit tests for new Project/Flight data classes, including JSON
serialization/de-serialization
"""
import time
from datetime import datetime
from typing import Tuple
from uuid import uuid4
from pathlib import Path
import pytest
import pandas as pd
from dgp.core import DataType
from dgp.core.models.project import AirborneProject
from dgp.core.hdf5_manager import HDF5Manager
from dgp.core.models.datafile import DataFile
from dgp.core.models.dataset import DataSet
from dgp.core.models import flight
from dgp.core.models.meter import Gravimeter
| 27.210526
| 80
| 0.698812
|
fd6f10a9e5cd95371737b186d651e8e464b2660c
| 389
|
py
|
Python
|
examples/urls.py
|
sayanjap/DynamicForms
|
071707de36d109fe3a17ae5df239240ea5ba707f
|
[
"BSD-3-Clause"
] | 42
|
2018-01-18T14:50:05.000Z
|
2022-03-24T18:34:19.000Z
|
examples/urls.py
|
sayanjap/DynamicForms
|
071707de36d109fe3a17ae5df239240ea5ba707f
|
[
"BSD-3-Clause"
] | 14
|
2018-12-05T21:39:23.000Z
|
2022-02-27T06:43:48.000Z
|
examples/urls.py
|
sayanjap/DynamicForms
|
071707de36d109fe3a17ae5df239240ea5ba707f
|
[
"BSD-3-Clause"
] | 5
|
2018-01-18T16:32:20.000Z
|
2021-06-07T10:15:18.000Z
|
from django.conf.urls import include, url
from rest_framework.documentation import include_docs_urls
from examples.rest import router
from .views import index
urlpatterns = [
url(r'^$', index, name='index'),
url(r'^', include(router.urls)),
url(r'^dynamicforms/', include('dynamicforms.urls')),
url(r'^api-docs/', include_docs_urls(title='Example API documentation')),
]
| 29.923077
| 77
| 0.722365
|
fd6f1c1a3069baecfcb5b723cf12a8c76710a022
| 1,312
|
py
|
Python
|
tests/contract/test_concept.py
|
Informasjonsforvaltning/fdk-harvester-bff
|
21f5d41bbe2506d9c23f0e670e6dee1bfe9f0742
|
[
"Apache-2.0"
] | null | null | null |
tests/contract/test_concept.py
|
Informasjonsforvaltning/fdk-harvester-bff
|
21f5d41bbe2506d9c23f0e670e6dee1bfe9f0742
|
[
"Apache-2.0"
] | 20
|
2020-09-23T10:04:48.000Z
|
2022-03-14T07:47:45.000Z
|
tests/contract/test_concept.py
|
Informasjonsforvaltning/fdk-harvester-bff
|
21f5d41bbe2506d9c23f0e670e6dee1bfe9f0742
|
[
"Apache-2.0"
] | null | null | null |
"""Test cases for concepts."""
from typing import Any
import pytest
import requests
| 42.322581
| 289
| 0.70503
|
fd6fc2f8e9fb0cf4963f53e0dd218bc472fd9daa
| 4,572
|
py
|
Python
|
passmanBackend/vault_backend/models.py
|
sharanvarma0/passman-backend
|
d210fcc43886bd9be40ceaba3411209799cb8476
|
[
"BSD-3-Clause"
] | null | null | null |
passmanBackend/vault_backend/models.py
|
sharanvarma0/passman-backend
|
d210fcc43886bd9be40ceaba3411209799cb8476
|
[
"BSD-3-Clause"
] | null | null | null |
passmanBackend/vault_backend/models.py
|
sharanvarma0/passman-backend
|
d210fcc43886bd9be40ceaba3411209799cb8476
|
[
"BSD-3-Clause"
] | null | null | null |
'''
Mostly these are internal imports related to django and rest_framework.
The os and io imports are for creating files and paths, and for parsing bytes objects, respectively.
'''
from django.db import models
from django.contrib.auth.models import User
from rest_framework.renderers import JSONRenderer
from rest_framework.parsers import JSONParser
from vault_backend.extra_functions import *
import os
import io
'''
The Vault model represents the basic password vault in passman. It stores the directory path, filename and vault_name specified, and is linked to the User model so that only vaults belonging
to the authenticated user are displayed. The Vault model is referenced in several places for creating and updating the records stored in it.
'''
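# A minimal sketch of what the Vault model described above might look like.
# The field names are assumptions -- the real class body is elided from this
# excerpt:
#     class Vault(models.Model):
#         user = models.ForeignKey(User, on_delete=models.CASCADE)
#         vault_name = models.CharField(max_length=255)
#         filename = models.CharField(max_length=255)
#         directory = models.CharField(max_length=1024)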
# Delete Record functionality in vault. Not tested yet; might implement in future.
''' def delete_data(self, sitename, password):
try:
delete_data = {'site_name':sitename, 'password':password}
data = self.get_data()
if self.check_data(delete_data, data):
data.remove(delete_data)
if data:
for dictionary_data in data:
self.add_data(dictionary_data['site_name'], dictionary_data['password'])
return 0
else:
self.create_vault()
return 0
except ValueError:
return 'No Such Value'
'''
| 41.563636
| 227
| 0.649606
|
fd71a2a6d5b1e71ced9722bf68301238887fd3c8
| 95,557
|
py
|
Python
|
DexParse.py
|
liumengdeqq/DexParse
|
769899e26f01700c690ed82c48790d1000efb5f1
|
[
"Apache-2.0"
] | 16
|
2015-11-19T01:51:52.000Z
|
2020-03-10T06:24:28.000Z
|
DexParse.py
|
CvvT/DexParse
|
80c3f4a27e7163536f98584c5e7f7ec35a9451b8
|
[
"Apache-2.0"
] | null | null | null |
DexParse.py
|
CvvT/DexParse
|
80c3f4a27e7163536f98584c5e7f7ec35a9451b8
|
[
"Apache-2.0"
] | 22
|
2015-09-15T02:20:48.000Z
|
2021-06-24T02:55:09.000Z
|
#! /usr/bin/python
# coding=utf-8
import struct
import os
import hashlib
import Instruction
Access_Flag = {'public': 1, 'private': 2, 'protected': 4, 'static': 8, 'final': 0x10,
'synchronized': 0x20, 'volatile': 0x40, 'bridge': 0x40, 'transient': 0x80,
'varargs': 0x80, 'native': 0x100, 'interface': 0x200, 'abstract': 0x400,
'strictfp': 0x800, 'synthetic': 0x1000, 'annotation': 0x2000, 'enum': 0x4000,
'constructor': 0x10000, 'declared_synchronized': 0x20000}
TypeDescriptor = {'void': 'V', 'boolean': 'Z', 'byte': 'B', 'short': 'S', 'char': 'C',
'int': 'I', 'long': 'J', 'float': 'F', 'double': 'D', 'boolean[]': '[Z',
                  'byte[]': '[B', 'short[]': '[S', 'char[]': '[C', 'int[]': '[I',
                  'long[]': '[J', 'float[]': '[F', 'double[]': '[D'}
ShortyDescriptor = {'void': 'V', 'boolean': 'Z', 'byte': 'B', 'short': 'S', 'char': 'C',
'int': 'I', 'long': 'J', 'float': 'F', 'double': 'D'}
ACSII = {'1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8, '9': 9, '0': 0,
'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15}
# ----------------------------------------------------------------------------------------
# alignment: 4bytes
# alignment: 4bytes
# alignment:none
# alignment: 4bytes
# alignment: none
# alignment: none
# alignment: 4 bytes
# alignment: none
# default: 0 create from file 1 create from memory
def jiaguAll(dexfile, outfile):
    method_list = []  # record all methods that need protection
tmp_method = dexfile.getmethodItem("Lcom/cc/test/MainActivity;", "onCreate")
method_list.append({"access": tmp_method["method"].access_flags, "ref": tmp_method["method"].coderef,
"classidx": tmp_method["classidx"], "methodidx": tmp_method["methodidx"]})
tmp_method["method"].access_flags = int(Access_Flag['native'] | Access_Flag['public'])
tmp_method["method"].modified = 1
# change the access flag, make it native
dexfile.makeoffset() # make offset
if os.path.exists(outfile): # if exists, delete it
print("the file is exist, just replace it")
os.remove(outfile)
file = open(outfile, 'wb+')
file.seek(0, 0)
size = len(method_list)
filesize = dexfile.dexheader.file_size # in order to adjust the dex file
dexfile.dexheader.file_size += 16 * size # each injected data need 16 bytes
dexfile.dexmaplist.copy(file)
file.seek(filesize, 0)
print("file size :", filesize, " size : ", size)
for i in range(0, size):
file.write(struct.pack("I", method_list[i]["classidx"]))
file.write(struct.pack("I", method_list[i]["methodidx"]))
file.write(struct.pack("I", method_list[i]["access"]))
file.write(struct.pack("I", method_list[i]["ref"].start))
print("inject data :", method_list[i]["classidx"], method_list[i]["methodidx"])
    # assume that the code ref is not None, otherwise it makes no sense (no need to protect)
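    # DEX header layout note: the adler32 checksum sits at offset 8 and covers
    # everything after itself (offset 12 onward); the SHA-1 signature sits at
    # offset 12 and covers everything after itself (offset 32 onward). That is
    # why the code below seeks to 12 before rewriting the signature and to 8
    # before rewriting the checksum. Hypothetical sketches of the elided
    # helpers, assuming they follow the spec:
    #     def get_file_sha1(f):
    #         f.seek(32)
    #         return hashlib.sha1(f.read()).hexdigest()
    #     def checksum(f, size):
    #         import zlib
    #         f.seek(12)
    #         return zlib.adler32(f.read(size - 12)) & 0xffffffff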
file_sha = get_file_sha1(file)
tmp = bytes(file_sha)
i = 0
file.seek(12)
while i < 40:
num = (ACSII[tmp[i]] << 4) + ACSII[tmp[i+1]]
file.write(struct.pack("B", num))
i += 2
csum = checksum(file, dexfile.dexheader.file_size)
print("checksum:", hex(csum), "file size:", dexfile.dexheader.file_size)
file.seek(8)
file.write(struct.pack("I", csum))
file.close()
if __name__ == '__main__':
dexfile = DexFile("classes.dex")
# jiaguAll(dexfile, "classescp.dex")
# dexfile.printclasscode("Lcom/cc/test/MainActivity;", "onCreate")
# dexfile.printf(3)
# dexfile.addstr("DexParse.java")
# dexfile.addstr("Lcom/cc/test/DexParse.java")
# dexfile.modifystr("A Text From CwT", "A Text From DexParse")
# dexfile.printf()
# note: you need to delete file classescp.dex first, otherwise
# new dex file will append the old one
# dexfile.copytofile("classescp.dex")
| 40.958851
| 124
| 0.551744
|
fd71ae1315e427ea9c9874263b95024d2ffb8696
| 1,852
|
py
|
Python
|
api/myapi/serializers.py
|
UmmuRasul/sbvbn
|
3d4705b9eb5e6bc996028ecc0a8ec43a435ef18b
|
[
"MIT"
] | null | null | null |
api/myapi/serializers.py
|
UmmuRasul/sbvbn
|
3d4705b9eb5e6bc996028ecc0a8ec43a435ef18b
|
[
"MIT"
] | null | null | null |
api/myapi/serializers.py
|
UmmuRasul/sbvbn
|
3d4705b9eb5e6bc996028ecc0a8ec43a435ef18b
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
from api.models import User, UserProfile, Post, News, Video
from datetime import datetime
| 31.389831
| 83
| 0.665227
|
fd71c4f7dcacba2ce5484fe215f8d27faba98441
| 6,603
|
py
|
Python
|
src/morphforgecontrib/simulation/channels/hh_style/neuron/mm_neuron_alphabetabeta.py
|
mikehulluk/morphforge
|
2a95096f144ed4ea487decb735ce66706357d3c7
|
[
"BSD-2-Clause"
] | 1
|
2021-01-21T11:31:59.000Z
|
2021-01-21T11:31:59.000Z
|
src/morphforgecontrib/simulation/channels/hh_style/neuron/mm_neuron_alphabetabeta.py
|
mikehulluk/morphforge
|
2a95096f144ed4ea487decb735ce66706357d3c7
|
[
"BSD-2-Clause"
] | null | null | null |
src/morphforgecontrib/simulation/channels/hh_style/neuron/mm_neuron_alphabetabeta.py
|
mikehulluk/morphforge
|
2a95096f144ed4ea487decb735ce66706357d3c7
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ---------------------------------------------------------------------
# Copyright (c) 2012 Michael Hull.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# - Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------
from ..core import StdChlAlphaBetaBeta
from morphforge.units import qty
from morphforge import units
from hocmodbuilders.mmwriter_alphabetabeta import NEURONChlWriterAlphaBetaBeta
from morphforge.simulation.neuron.hocmodbuilders import HocModUtils
from morphforge.simulation.neuron import NEURONChl_Base
from morphforge.constants.standardtags import StandardTags
from morphforge.simulation.neuron.core.neuronsimulationenvironment import NEURONEnvironment
from morphforge.simulation.neuron.objects.neuronrecordable import NEURONRecordableOnLocation
# Register the channel
NEURONEnvironment.channels.register_plugin(StdChlAlphaBetaBeta, NEURONChl_AlphaBetaBeta)
| 33.015
| 123
| 0.69741
|
fd7312c0409e17edc8a594caad14c3eebd8edb1f
| 5,344
|
py
|
Python
|
cookie.py
|
cppchriscpp/fortune-cookie
|
46e433e1ae06a8ad742b252d642f8620bde9e38b
|
[
"MIT"
] | null | null | null |
cookie.py
|
cppchriscpp/fortune-cookie
|
46e433e1ae06a8ad742b252d642f8620bde9e38b
|
[
"MIT"
] | null | null | null |
cookie.py
|
cppchriscpp/fortune-cookie
|
46e433e1ae06a8ad742b252d642f8620bde9e38b
|
[
"MIT"
] | null | null | null |
import markovify
import re
import nltk
import os
import urllib.request
from shutil import copyfile
# We need a temporary(ish) place to store the data we retrieve.
# If you are running this in a docker container you may want to mount a volume and use it.
# Also be sure to make a symlink between it and the assets directory. See our dockerfile for an example!
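# For example (an assumption, not taken from this repo's actual Dockerfile):
#   RUN mkdir -p /data && ln -sfn /data ./web/assets/data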
datadir = "./web/assets/data"
if 'DATA_DIR' in os.environ:
datadir = os.environ['DATA_DIR']
if not os.path.exists(datadir):
os.mkdir(datadir)
# Basically the example from the markovify documentation that uses parts of speech and stuff to make better sentences
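# A sketch of that class as used below, following the part-of-speech example in
# the markovify README (assuming the definition elided from this excerpt matches
# it): each word is tagged with nltk and the tag is folded into the token, so
# the chain can distinguish e.g. "run" the noun from "run" the verb.
class POSifiedText(markovify.Text):
    def word_split(self, sentence):
        words = re.split(self.word_split_pattern, sentence)
        words = ["::".join(tag) for tag in nltk.pos_tag(words)]
        return words
    def word_join(self, words):
        sentence = " ".join(word.split("::")[0] for word in words)
        return sentence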
# Grab a list of fortunes from Github
if not os.path.exists(datadir+"/cookie.txt"):
urllib.request.urlretrieve("https://raw.githubusercontent.com/ianli/fortune-cookies-galore/master/fortunes.txt", datadir+"/cookie.txt")
# Grab the US constitution raw text
if not os.path.exists(datadir+'/const.txt'):
urllib.request.urlretrieve("https://www.usconstitution.net/const.txt", datadir+"/const.txt")
if not os.path.exists(datadir+'/tweeter.txt'):
urllib.request.urlretrieve("https://raw.githubusercontent.com/ElDeveloper/tweets/master/tweets_text.txt", datadir+"/tweeter.txt")
# Read both files into variables
with open(datadir+"/cookie.txt") as f:
text = f.read()
with open(datadir+'/const.txt') as f:
tswext = f.read()
with open(datadir+"/tweeter.txt") as f:
tweetext = f.read()
# Break up the text to make it more workable
cookie_text_split = text.split("\n")
const_text_split = tswext.split("\n")
tweet_text_split = tweetext.split("\n")
# Some cleanup to remove things in the fortune cookie file that aren't really fortunes.
# (There are some odd facts and quotes in here. This is a bit barbaric, but this is a fun project anyway! No need for perfection...)
# Same thing for the constitution text - this just removes the comment at the top.
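# Hypothetical sketches of the two keep-predicates used just below (their real
# definitions are elided from this excerpt); both return True for lines that
# should survive the cleanup:
#     def excluded(line):
#         return bool(line.strip()) and not line.lstrip().startswith("%")
#     def exwifted(line):
#         return bool(line.strip()) and not line.lstrip().startswith("#")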
# Apply the cleanups from above
cookie_text_split[:] = [x for x in cookie_text_split if excluded(x)]
const_text_split[:] = [x for x in const_text_split if exwifted(x)]
# Merge the text back into one big blob like markovify expects. (There's probably a better way to do this, but again, fun project. Efficiency's not that important...)
cookie_text_model = POSifiedText("\n".join(cookie_text_split))
const_text_model = POSifiedText("\n".join(const_text_split))
tweet_text_model = POSifiedText("\n".join(tweet_text_split))
# Combine them into a terrifying structure
const_and_cookie_model = markovify.combine([cookie_text_model, const_text_model])
tweet_and_cookie_model = markovify.combine([cookie_text_model, tweet_text_model], [4, 1])
everything_model = markovify.combine([cookie_text_model, const_text_model, tweet_text_model], [4, 1, 1])
# Print a couple lines to the terminal to show that everything's working...
print("Examples:")
for i in range(5):
print(const_and_cookie_model.make_short_sentence(240, tries=25))
# Now, open a temporary file and write some javascript surrounding our story.
with open(datadir+"/cookie.js.new", "w+") as file:
# NOTE: I don't escape anything here... with bad seed text it'd be quite possible to inject weird js, etc.
file.write("window.fortuneCookies=[\n")
print("Running cookie")
    # Write 250 lines of junk into the js file. Note that leaving the trailing comma is ok, as javascript doesn't care.
for i in range(250):
file.write("\"" + cookie_text_model.make_short_sentence(240, tries=25) + "\",\n")
# Close it up!
file.write("];")
print("Running const + cookie")
file.write("window.constCookies=[\n")
for i in range(250):
file.write("\"" + const_and_cookie_model.make_short_sentence(240, tries=25) + "\",\n")
file.write("];")
print("Running const only")
file.write("window.constLines=[\n")
for i in range(250):
file.write("\"" + const_text_model.make_short_sentence(240, tries=25) + "\",\n")
file.write("];")
print("Running tweet only")
file.write("window.tweetLines=[\n")
for i in range(250):
file.write("\"" + tweet_text_model.make_short_sentence(240, tries=25) + "\",\n")
file.write("];")
print("Running tweet cookie")
file.write("window.tweetCookie=[\n")
for i in range(250):
file.write("\"" + tweet_and_cookie_model.make_short_sentence(240, tries=25) + "\",\n")
file.write("];")
print("Running everything")
file.write("window.everythingCookie=[\n")
for i in range(250):
file.write("\"" + everything_model.make_short_sentence(240, tries=25) + "\",\n")
file.write("];")
# Finally, copy our temp file over the old one, so clients can start seeing it.
copyfile(datadir+"/cookie.js.new", datadir+"/cookie.js")
| 36.60274
| 165
| 0.698915
|
fd76b6a6e3bed41850763cc3f44afdab15844d51
| 427
|
py
|
Python
|
wsgi_microservice_middleware/__init__.py
|
presalytics/WSGI-Microservice-Middleware
|
1dfcd1121d25569312d7c605d162cb52f38101e3
|
[
"MIT"
] | 1
|
2020-08-13T05:31:01.000Z
|
2020-08-13T05:31:01.000Z
|
wsgi_microservice_middleware/__init__.py
|
presalytics/WSGI-Microservice-Middleware
|
1dfcd1121d25569312d7c605d162cb52f38101e3
|
[
"MIT"
] | null | null | null |
wsgi_microservice_middleware/__init__.py
|
presalytics/WSGI-Microservice-Middleware
|
1dfcd1121d25569312d7c605d162cb52f38101e3
|
[
"MIT"
] | null | null | null |
import environs
env = environs.Env()
env.read_env()
from wsgi_microservice_middleware.cors import CORSMiddleware
from wsgi_microservice_middleware.request_id import (
RequestIdFilter,
RequestIdMiddleware,
current_request_id,
RequestIdJsonLogFormatter
)
__all__ = [
'CORSMiddleware',
'RequestIdFilter',
'RequestIdMiddleware',
'current_request_id',
'RequestIdJsonLogFormatter'
]
| 17.791667
| 60
| 0.744731
|
fd77738934a082ed69675adc328a1ec23a42bd8b
| 686
|
py
|
Python
|
car_manager.py
|
njiang6/turtle_crossing
|
5445ca941bc53002299c60a0587d84f8a111f1be
|
[
"Apache-2.0"
] | 1
|
2021-03-24T02:21:03.000Z
|
2021-03-24T02:21:03.000Z
|
car_manager.py
|
njiang6/turtle_crossing
|
5445ca941bc53002299c60a0587d84f8a111f1be
|
[
"Apache-2.0"
] | null | null | null |
car_manager.py
|
njiang6/turtle_crossing
|
5445ca941bc53002299c60a0587d84f8a111f1be
|
[
"Apache-2.0"
] | null | null | null |
import turtle as t
import random
COLORS = ["red", "orange", "yellow", "green", "blue", "purple"]
STARTING_MOVE_DISTANCE = 5
MOVE_INCREMENT = 1
| 23.655172
| 63
| 0.644315
|
fd78ccdbc7f44ee790bb4e0e5bb66afdadb94039
| 3,329
|
py
|
Python
|
2021/05_2/solution.py
|
budavariam/advent_of_code
|
0903bcbb0df46371b6a340ca2be007dce6470c66
|
[
"MIT"
] | null | null | null |
2021/05_2/solution.py
|
budavariam/advent_of_code
|
0903bcbb0df46371b6a340ca2be007dce6470c66
|
[
"MIT"
] | null | null | null |
2021/05_2/solution.py
|
budavariam/advent_of_code
|
0903bcbb0df46371b6a340ca2be007dce6470c66
|
[
"MIT"
] | 1
|
2022-02-11T13:14:50.000Z
|
2022-02-11T13:14:50.000Z
|
""" Advent of code 2021 day 05 / 2 """
import math
from os import path
import re
from collections import Counter
def solution(data):
""" Solution to the problem """
lines = preprocess(data)
solver = Code(lines)
return solver.solve()
if __name__ == "__main__":
with(open(path.join(path.dirname(__file__), 'input.txt'), 'r')) as input_file:
print(solution(input_file.read()))
| 31.40566
| 82
| 0.393812
|
fd7a3d5f8bd77ce667a1424c233439cb51d4d806
| 2,032
|
py
|
Python
|
examples/plot_tissue_specific_corrections.py
|
imagejan/starfish
|
adf48f4b30cfdf44ac8c9cc78fc469665ce7d594
|
[
"MIT"
] | null | null | null |
examples/plot_tissue_specific_corrections.py
|
imagejan/starfish
|
adf48f4b30cfdf44ac8c9cc78fc469665ce7d594
|
[
"MIT"
] | null | null | null |
examples/plot_tissue_specific_corrections.py
|
imagejan/starfish
|
adf48f4b30cfdf44ac8c9cc78fc469665ce7d594
|
[
"MIT"
] | null | null | null |
"""
Tissue Corrections
==================
"""
###################################################################################################
# .. _tutorial_removing_autoflourescence:
#
# Removing autofluorescence
# =========================
#
# In addition to the bright spots (signal) that we want to detect, microscopy experiments on tissue
# slices often have a non-zero amount of auto-fluorescence from the cell bodies. This can be mitigated
# by "clearing" strategies whereby tissue lipids and proteins are digested, or computationally by
# estimating and subtracting the background values.
#
# We use the same test image from the previous section to demonstrate how this can work.
#
# Clipping
# --------
# The simplest way to remove background is to set a global (linear) cut-off and clip out the
# background values.
import starfish
import starfish.data
from starfish.image import Filter
from starfish.types import Axes
experiment: starfish.Experiment = starfish.data.ISS(use_test_data=True)
field_of_view: starfish.FieldOfView = experiment["fov_001"]
image: starfish.ImageStack = field_of_view.get_image("primary")
###################################################################################################
# Next, create the clip filter. Here we clip at the 97th percentile, which
# optimally separates the spots from the background
clip_97 = Filter.Clip(p_min=97)
clipped: starfish.ImageStack = clip_97.run(image)
###################################################################################################
# plot both images
import matplotlib.pyplot as plt
import xarray as xr
# get the images
orig_plot: xr.DataArray = image.sel({Axes.CH: 0, Axes.ROUND: 0}).xarray.squeeze()
clip_plot: xr.DataArray = clipped.sel({Axes.CH: 0, Axes.ROUND: 0}).xarray.squeeze()
f, (ax1, ax2) = plt.subplots(ncols=2)
ax1.imshow(orig_plot)
ax1.set_title("original")
ax2.imshow(clip_plot)
ax2.set_title("clipped")
###################################################################################################
#
| 35.034483
| 102
| 0.599902
|