import numpy as np
import soundfile as sf
def read_audio(audio_file, config, duration):
    # assuming durations are in seconds: soundfile counts frames, so both the
    # start offset and the length are scaled by the sampling rate
    if duration > config['target_duration']:
        # crop: pick a random start so each read sees a different window
        offset = np.random.randint(duration - config['target_duration'])
        y, sr = sf.read(audio_file,
                        start=offset * config['sampling_rate'],
                        frames=config['target_duration'] * config['sampling_rate'])
    else:
        # pad: fill_value=0 zero-pads clips shorter than the target length
        y, sr = sf.read(audio_file,
                        frames=config['target_duration'] * config['sampling_rate'],
                        fill_value=0)
    return y, sr
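# A minimal usage sketch (hypothetical: 'clip.wav' is a placeholder file, and
# this config schema is simply what read_audio consumes, not a documented one):
if __name__ == '__main__':
    example_config = {'target_duration': 5, 'sampling_rate': 16000}
    y, sr = read_audio('clip.wav', example_config, duration=12)
    print(y.shape, sr)  # ~5 s of audio: (80000,) for a mono 16 kHz file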
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import random
random.seed(67)
import numpy as np
np.random.seed(67)
import pandas as pd
from tpot import TPOTClassifier
import os
def main():
df_train = pd.read_csv(os.getenv('PREPARED_TRAINING'))
df_valid = pd.read_csv(os.getenv('PREPARED_VALIDATING'))
df_test = pd.read_csv(os.getenv('PREPARED_TESTING'))
feature_cols = list(df_train.columns[:-1])
target_col = df_train.columns[-1]
X_train = df_train[feature_cols].values
y_train = df_train[target_col].values
X_valid = df_valid[feature_cols].values
y_valid = df_valid[target_col].values
X_test = df_test[feature_cols].values
prefix = os.getenv('STORING')
tsne_data = np.load(os.path.join(prefix, 'tsne_2d_5p.npz'))
tsne_train = tsne_data['train']
tsne_valid = tsne_data['valid']
tsne_test = tsne_data['test']
# concat features
X_train_concat = np.concatenate([X_train, tsne_train], axis=1)
X_valid_concat = np.concatenate([X_valid, tsne_valid], axis=1)
X_test_concat = np.concatenate([X_test, tsne_test], axis=1)
    tpot = TPOTClassifier(
        max_time_mins=int(os.getenv('TIME_LIMIT_ALL', '1440')),
        max_eval_time_mins=int(os.getenv('TIME_LIMIT_PART', '5')),
        population_size=100,
        scoring='neg_log_loss',  # sklearn's scorer name; plain 'log_loss' is not a valid scoring string
        cv=3,
        verbosity=2,
        random_state=67)
tpot.fit(X_train_concat, y_train)
loss = tpot.score(X_valid_concat, y_valid)
print(loss)
tpot.export(os.path.join(prefix, 'tpot_pipeline.py'))
p_test = tpot.predict_proba(X_test_concat)
df_pred = pd.DataFrame({
'id': df_test['id'],
'probability': p_test[:,1]
})
csv_path = os.getenv('PREDICTING')
    df_pred.to_csv(csv_path, columns=('id', 'probability'), index=False)
print('Saved: {}'.format(csv_path))
if __name__ == '__main__':
main()
|
#!/usr/bin/python3.4
# -*- coding: utf-8 -*-
prenom = "Anthony"
nom = "TASTET"
age = 21
print("Je m'appelle {0} {1} et j'ai {2} ans".format(prenom, nom, age))
|
import json
from django.views.generic.simple import direct_to_template
from django.shortcuts import get_object_or_404
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect, HttpResponse
from django.core.urlresolvers import reverse
from models import Person
from forms import PersonForm
def person_detail(request, template="contact/person_detail.html"):
""" Get default person and renders its detail template. """
person = get_object_or_404(Person, pk=settings.DEFAULT_PERSON_PK)
return direct_to_template(request, template, {'person': person})
@login_required
def person_edit(request, template="contact/person_edit.html"):
"""
    Renders the person edit template with the person form; validates and saves the form.
"""
detail_url = reverse('person_detail')
person = get_object_or_404(Person, pk=settings.DEFAULT_PERSON_PK)
form = PersonForm(request.POST or None,
request.FILES or None,
instance=person)
if request.is_ajax() and request.POST:
return HttpResponse(json.dumps(form.errors or None))
if form.is_valid():
form.save()
return HttpResponseRedirect(reverse('person_detail'))
return direct_to_template(request, template,
{'form': form,
'next': request.GET.get('next', detail_url), })
|
from rest_framework.serializers import (ModelSerializer,
CharField,
HyperlinkedIdentityField,
SerializerMethodField,
ValidationError)
from User.models import UserExtended
from LiveData.models import LiveData
from django.contrib.auth import get_user_model
class LiveDataUpdateSerializer(ModelSerializer):
class Meta:
model = LiveData
fields = [
'speed',
'longitude',
'latitude',
'live_image'
]
class LiveDataDetailSerializer(ModelSerializer):
delete_url = HyperlinkedIdentityField(view_name='livedata:delete', lookup_field = 'pk')
edit_url = HyperlinkedIdentityField(view_name='livedata:update', lookup_field = 'pk')
username_owner = SerializerMethodField()
class Meta:
model = LiveData
fields = [
'pk',
'device',
'speed',
            'longitude',
'latitude',
'live_image',
'delete_url',
'edit_url',
'username_owner'
]
def get_username_owner(self, obj):
return str(obj.device.owner.username)
class LiveDataListSerializer(ModelSerializer):
detail_url = HyperlinkedIdentityField(view_name='livedata:detail', lookup_field = 'pk')
class Meta:
model = LiveData
fields = [
'pk',
'device',
'detail_url'
]
class LiveDataSerializer(ModelSerializer):
class Meta:
model = LiveData
fields = [
'pk',
'device',
'speed',
            'longitude',
'latitude',
'live_image',
]
|
import mne
import numpy as np
import scipy.io
from sklearn import preprocessing
'''Author of all functions found in this file: Alina Weinberger'''
def printShape(file_name, nb_subjects, ext):
files=[]
subjects=range(nb_subjects)
if ext == '.edf':
for i in subjects:
files.append(file_name +str(i)+ '.edf')
for i in subjects:
raw= mne.io.read_raw_edf(files[i]) #load data from edf file
data=raw.get_data()
print(i, data.shape)
if ext == '.npy':
for i in subjects:
files.append(file_name +str(i)+ '.npy')
for i in subjects: #load data from npy file
data=np.load(files[i])
print(i, data.shape)
if ext == '.txt':
for i in subjects:
files.append(file_name +str(i)+ '.txt')
for i in subjects: #load data from text file
data=np.loadtxt(files[i])
print(i, data.shape)
if ext == 'none':
data=file_name
for i in subjects:
print(i, data[i].shape)
def getSmallestItemNb(file_name, nb_subjects, ext):
subjects=range(nb_subjects)
lens=[]
if ext == '.edf':
for i in subjects:
raw=mne.io.read_raw_edf(file_name +str(i) + ext)
data=raw.get_data()
lens.append(np.size(data,1))
elif ext == '.npy':
for i in subjects:
data= np.load(file_name + str(i) + ext)
lens.append(np.size(data,1))
elif ext == '.txt':
for i in subjects:
data= np.loadtxt(file_name + str(i) + ext)
lens.append(np.size(data,1))
return min(lens)
def minfromlist(liste):
mins=[]
for i in range(len(liste)):
mins.append(min(liste[i]))
return min(mins)
def maxfromlist(liste):
maxs=[]
for i in range(len(liste)):
maxs.append(max(liste[i]))
return max(maxs)
def save3Darray(file_name, data_list):
    # `data_list` avoids shadowing the builtin `list`
    array = np.array(data_list)
    np.save(file_name, array)
def loaddmat(file_name, nb_subjects):
files=[]
for i in range(nb_subjects):
mat = scipy.io.loadmat(file_name+str(i)+'.mat')
files.append(mat)
return files
def loaddnpy(file_name, nb_subjects):
files=[]
for i in range(nb_subjects):
npy = np.load(file_name+str(i)+'.npy')
files.append(npy)
return files
def saveNpyfromMat_LA(files, new_file_name):
dictio={0:'S2_unconscious_RawData', 1: 'S3_unconscious_RawData', 2: 'S4_unconscious_RawData', 3: 'S5_unconscious_RawData', 4: 'S6_unconscious_RawData', 5:'S8_unconscious_RawData', 6:'S10_unconscious_RawData'}
for i in dictio:
data= files[i][dictio[i]]
data=data[:,:590000]
np.save(new_file_name +str(i)+'.npy',data)
def saveNpyfromMat_LAw(files, new_file_name ):
#file n*
dictio={ 0:'S2_conscious_RawData', 1: 'S3_conscious_RawData', 2: 'S4_conscious_RawData', 3: 'S5_conscious_RawData', 4: 'S6_conscious_RawData', 5:'S8_conscious_RawData', 6:'S10_conscious_RawData'}
for i in dictio:
data= files[i][dictio[i]]
data=data[:,:690000]
np.save(new_file_name +str(i)+'.npy',data)
def saveNpyfromMat_L(files, new_file_name):
#file n*
dictio={ 0: 'S2_RawData', 1: 'S3_RawData', 2: 'S4_RawData', 3: 'S5_RawData', 4: 'S6_RawData', 5:'S8_RawData', 6:'S10_RawData'}
for i in dictio:
data=files[i][dictio[i]]
data=data[:,:590000]
np.save(new_file_name +str(i)+'.npy',files[i][dictio[i]])
def saveclean(data_list, nb_subjects, new_file_name):
for i in range(nb_subjects):
np.save(new_file_name +str(i)+'.npy', data_list[i])
def appendd(delta, theta, alpha, beta, lowgamma):
liste=[]
liste.append(delta)
liste.append(theta)
liste.append(alpha)
liste.append(beta)
liste.append(lowgamma)
return liste
def minshaperow(liste):
shapes=[]
for i in range(len(liste)):
shapes.append(liste[i].shape[0])
return min(shapes)
def list2array(list_of_lists):
list_of_arrays=[]
for i in range(len(list_of_lists)):
list_of_arrays.append(np.array(list_of_lists[i]))
return list_of_arrays
def list_of_arraysT(list_of_arrays):
list_transposed=[]
for i in range(len(list_of_arrays)):
list_transposed.append(list_of_arrays[i].T)
for i in range(len(list_transposed)):
list_transposed[i]=np.reshape(list_transposed[i], (63,))
return list_transposed
def normalize_list(liste):
reshaped_list=[]
scaled_list=[]
scaled_list_reshaped=[]
scaler = preprocessing.StandardScaler()
for i in range(len(liste)):
reshaped_list.append(liste[i].reshape(-1,1))
for i in range(len(liste)):
scaled_list.append(scaler.fit_transform(reshaped_list[i]))
for i in range(len(liste)):
scaled_list_reshaped.append(scaled_list[i].reshape(58,))
return scaled_list_reshaped
def maxi_distribution(liste):
flat_list=[]
liste2=[]
for i in range(len(liste)):
liste2.append(np.reshape(liste[i], (-1,1)))
flat_list=np.concatenate(liste2)
flat_list=sorted(flat_list)
quant_val= np.quantile(flat_list, [0.95,0.99,0.999])
return flat_list, quant_val
def split_PB(liste, nb_electrodes):
delta=[]
delta.extend(liste[:nb_electrodes])
theta=[]
theta.extend(liste[nb_electrodes:nb_electrodes*2])
alpha=[]
alpha.extend(liste[nb_electrodes*2:nb_electrodes*3])
beta=[]
beta.extend(liste[nb_electrodes*3:nb_electrodes*4])
lowgamma=[]
lowgamma.extend(liste[nb_electrodes*4:nb_electrodes*5])
return delta, theta, alpha, beta, lowgamma
def split_appendd(liste, nb_electrodes):
delta=[]
delta.extend(liste[:nb_electrodes])
theta=[]
theta.extend(liste[nb_electrodes:nb_electrodes*2])
alpha=[]
alpha.extend(liste[nb_electrodes*2:nb_electrodes*3])
beta=[]
beta.extend(liste[nb_electrodes*3:nb_electrodes*4])
lowgamma=[]
lowgamma.extend(liste[nb_electrodes*4:nb_electrodes*5])
listee=[]
listee.append(delta)
listee.append(theta)
listee.append(alpha)
listee.append(beta)
listee.append(lowgamma)
return listee
def rel_diff(liste1, liste2):
    diffs = []
    for i in range(len(liste1)):
        array_diff = np.empty_like(liste1[i])
        for j in range(liste1[i].shape[0]):
            # index by j only: array_diff is the per-subject difference
            # (the original wrote array_diff[i][j], mixing up the two indices)
            array_diff[j] = liste1[i][j] - liste2[i][j]
        diffs.append(array_diff)
    return diffs
|
# coding:utf-8
import click
class State(object):
def __init__(self):
self.verbosity = 0
self.debug = False
pass_state = click.make_pass_decorator(State, ensure=True)
def verbosity_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
state.verbosity = value
return value
return click.option('-v', '--verbose', count=True,
expose_value=False,
help='Enables verbosity.',
callback=callback)(f)
def debug_option(f):
def callback(ctx, param, value):
state = ctx.ensure_object(State)
state.debug = value
return value
return click.option('--debug/--no-debug',
expose_value=False,
help='Enables or disables debug mode.',
callback=callback)(f)
def common_options(f):
f = verbosity_option(f)
f = debug_option(f)
return f
@click.group()
def cli():
pass
@cli.command()
@common_options
@pass_state
def cmd1(state):
click.echo('Verbosity: %s' % state.verbosity)
click.echo('Debug: %s' % state.debug)
if __name__ == '__main__':
cli()
|
from setuptools import setup, find_packages
setup (name = 'py-faster-rcnn',
version = "0.0.16",
package_dir = {'':'.'},
packages = ['','datasets','fast_rcnn','nms','roi_data_layer','rpn','transform', 'utils', 'caffe', 'caffe.proto'],
package_data = { 'nms':['cpu_nms.so', 'gpu_nms.so'],
'utils':['cython_bbox.so'],
'caffe':['_caffe.so'],
},
include_package_data = True
)
|
import typing as typ
from .base import BaseMH
class RegexMH(BaseMH):
regex: str
class Meta:
abstract = True
@classmethod
def _get_regex(cls) -> str:
return cls.regex
class CommandMH(BaseMH):
command: str
args: typ.List[str] = []
must_start_with_command: bool = True
allow_trailing_symbols: bool = False
class Meta:
abstract = True
@classmethod
def _get_regex(cls) -> str:
n_args = len(cls.args)
return "{}/{}{}{}".format(
"^" if cls.must_start_with_command else "",
cls.command.lstrip("/"),
r" [0-9a-zA-Z_]+" * n_args,
"$" if not cls.allow_trailing_symbols else "",
)
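# Worked example (the command "/ban" and its two args are hypothetical, just to
# show the pattern _get_regex assembles with the defaults
# must_start_with_command=True and allow_trailing_symbols=False):
if __name__ == "__main__":
    import re
    pattern = "{}/{}{}{}".format("^", "ban", r" [0-9a-zA-Z_]+" * 2, "$")
    assert pattern == r"^/ban [0-9a-zA-Z_]+ [0-9a-zA-Z_]+$"
    assert re.match(pattern, "/ban alice spamming")       # one word per declared arg
    assert not re.match(pattern, "/ban alice spamming!")  # no trailing symbols allowed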
|
import airflow
from airflow.models import DAG
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import BranchPythonOperator
import pendulum
local_tz = pendulum.timezone("America/Los_Angeles")
args = {
'owner': 'airflow',
'start_date': airflow.utils.dates.days_ago(0),
}
dag = DAG(
dag_id='dag_save_xcoms',
default_args=args,
schedule_interval=None,
catchup=False,
)
def pushXcomData(**context):
date = context['execution_date']
newdate = local_tz.convert(date)
print(newdate.strftime("%Y-%m-%d"))
print(newdate)
return newdate
def pullXcomData(**context):
xcomdata = context['task_instance'].xcom_pull(task_ids='push_xcom')
print(xcomdata)
t1 = PythonOperator(
task_id='push_xcom',
python_callable=pushXcomData,
provide_context=True,
dag=dag
)
t2 = PythonOperator(
task_id='pull_xcom',
python_callable=pullXcomData,
provide_context=True,
dag=dag
)
t3 = DummyOperator(
task_id='complete',
trigger_rule='one_success',
dag=dag
)
t1 >> t2 >> t3
|
import pandas as pd
import os
DEEPCOM = "/home/qiuyuanchen/Onedrive/EMSE-DeepCom/my_test"
CODE2SEQ = "/home/qiuyuanchen/Onedrive/code2seq-master/my_test"
NNGEN = "/home/qiuyuanchen/Onedrive/nngen/my_test"
MERGE = "/home/qiuyuanchen/Onedrive/my_parser/src/main/resources/merge_result"
deepcom_res = os.path.join(DEEPCOM, "results.txt")
code2seq_res = os.path.join(CODE2SEQ, "results.txt")
nngen_res = os.path.join(NNGEN, "results.txt")
merge_res = os.path.join(MERGE, "merge_results.txt")
def read_results(data, input_nlgeval, method_name):
    # map metric names in the nlg-eval output onto the dataframe columns
    metrics = {
        "Bleu_1": "BLEU-1",
        "Bleu_2": "BLEU-2",
        "Bleu_3": "BLEU-3",
        "Bleu_4": "BLEU-4",
        "ROUGE_L": "ROUGE-L",
    }
    with open(input_nlgeval) as f:
        data["Method"].append(method_name)
        for line in f.readlines():
            for prefix, column in metrics.items():
                if line.startswith(prefix):
                    data[column].append(line.split(": ")[1].strip())
    return data
def main():
data = {
"Method": [],
"ROUGE-L": [],
"BLEU-1": [],
"BLEU-2": [],
"BLEU-3": [],
"BLEU-4": [],
}
data = read_results(data, deepcom_res, "DeepCom")
data = read_results(data, code2seq_res, "Code2Seq")
data = read_results(data, nngen_res, "NNgen")
data = read_results(data, merge_res, "Merge")
data = pd.DataFrame(data)
print(data)
excel_path = os.path.join(MERGE, "RQ3-results.xlsx")
data.to_excel(excel_path)
if __name__ == "__main__":
main()
|
# Generated by Django 3.0.7 on 2020-06-19 10:02
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("foodcartapp", "0020_auto_20200619_0959"),
]
operations = [
migrations.RenameField(
model_name="order",
old_name="order_type",
new_name="payment_type",
),
]
|
from flask import Flask
from flask_restful import Api
from flaskext.mysql import MySQL
class ConfigDatabaseA:
_mysql = None
@staticmethod
def getMysql():
return ConfigDatabaseA._mysql
def __init__(self, app):
""" Virtually private constructor. """
        if ConfigDatabaseA._mysql is not None:
raise Exception("This class is already configured!")
else:
ConfigDatabaseA._mysql = MySQL()
# MySQL configurations
app.config['MYSQL_DATABASE_USER'] = 'userA'
app.config['MYSQL_DATABASE_PASSWORD'] = 'userA'
app.config['MYSQL_DATABASE_DB'] = 'base_A'
app.config['MYSQL_DATABASE_HOST'] = 'localhost'
ConfigDatabaseA._mysql.init_app(app)
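# A minimal wiring sketch (hypothetical usage, assuming flask-mysql's connect()
# helper): construct the singleton once at startup, then fetch the shared
# handle anywhere through the static accessor.
if __name__ == "__main__":
    app = Flask(__name__)
    ConfigDatabaseA(app)  # configures the app and creates the MySQL handle
    mysql = ConfigDatabaseA.getMysql()
    conn = mysql.connect()
    cursor = conn.cursor()
    cursor.execute("SELECT 1")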
|
from django.test import TestCase, Client
from .models import Airport, Flight
# Create your tests here.
class ModelsTestCase(TestCase):
    def setUp(self):
        a1 = Airport.objects.create(code="AAA", city="aaa")
        a2 = Airport.objects.create(code="BBB", city="bbb")
        Flight.objects.create(origin=a1, destination=a2, duration=100)
        Flight.objects.create(origin=a1, destination=a2, duration=200)
        Flight.objects.create(origin=a1, destination=a1, duration=-300)
    def test_departure_count(self):
        a = Airport.objects.get(code="AAA")
        self.assertEqual(a.departures.count(), 3)
    def test_index(self):
        c = Client()
        response = c.get("/")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.context["flights"].count(), 3)
|
#!/bin/python3
"""Script to retrieve image files from urls listed in a file."""
from urllib import request, error, parse
from typing import Iterator, Tuple
import os
# Base directory for the downloaded images
IMAGE_DIR = '.'
# Suffix for the downloads subdirectory
DIR_SUFFIX = '_downloads'
# File extensions that are accepted as images
IMAGE_EXTS = ['.jpg', '.png']
# Timeout for network requests
REQ_TIMEOUT = 5
def download(url: str, dest: str):
""" Downloads the resource at url to dest.
:param url: url to the resource
:param dest: destination of the download
:raise URLError if url is not actually a url or the resource is unreachable
:raise OSError if dest already exists
:raise OSError for lack of permissions to create dest
"""
with request.urlopen(url, timeout=REQ_TIMEOUT) as r, open(dest, 'xb') as f:
f.write(r.read())
def is_image(fname: str) -> bool:
"""Determines if fname refers to an image file.
:param fname: string to be considered as name of image file
"""
_, ext = os.path.splitext(fname)
if ext in IMAGE_EXTS:
return True
return False
def lines_with_lineno(file: str) -> Iterator[Tuple[str, int]]:
    """Generates pairs of line and line number from file.
    :param file: text file to be read line by line
    :raise UnicodeDecodeError if file is not unicode compatible
    :raise PermissionError for lack of read permissions on file
    :raise OSError if file does not exist
    """
    with open(file, 'r') as urls:
        for lineno, line in enumerate(urls, start=1):
            yield (line.strip(), lineno)
def setup_path(dirname: str) -> str:
"""Create the destination directory inside of IMAGE_DIR.
:param dirname: the directory to be created
:raise FileNotFoundError if IMAGE_DIR does not exist
:raise PermissionError for lack of write permissions in IMAGE_DIR
"""
if len(IMAGE_DIR) > 0:
path = IMAGE_DIR + os.path.sep + dirname
else:
path = dirname
os.mkdir(path)
return path
def cleanup(path: str):
"""Deletes path if it exists.
:param path: path to folder that will be deleted
:raise PermissionError
"""
if os.path.exists(path):
os.rmdir(path)
def download_images(url_file: str, dest: str) -> int:
"""Downloads images referenced in url_file to dest.
Does not handle Errors regarding url_file.
:param url_file: path to file containing the urls
:param dest: path to destination directory
:return: number of downloaded images
:raise UnicodeDecodeError if url_file is not unicode compatible
:raise PermissionError for lack of read permissions on url_file
:raise OSError if url_file does not exist
"""
n = 0
for (url, lineno) in lines_with_lineno(url_file):
url_path = parse.urlparse(url)[2]
fname = os.path.split(url_path)[1]
if is_image(fname):
fpath = dest + os.path.sep + fname
try:
download(url, fpath)
n += 1
except error.URLError as e:
print('Could not retrieve url from line {}: {}'.format(lineno, e.reason))
except ValueError as e:
print('Could not retrieve url from line {}: {}'.format(lineno, e))
except PermissionError as e:
print('Could not save {} from line {}: {}'.format(url, lineno, e.strerror))
return n
except OSError as e:
print('Could not save {} from line {}: {}'.format(url, lineno, e.strerror))
return n
def main(url_file: str):
"""Downloads all images referenced as urls in url_file.
:param url_file: path to file containing the urls
"""
fname, _ = os.path.splitext(url_file)
dirname = fname + DIR_SUFFIX
n = 0
path = None
try:
path = setup_path(dirname)
except FileNotFoundError:
print('Image base directory {} does not exist. You need to create it first.'.format(IMAGE_DIR))
except FileExistsError as e:
print('Directory {} already exists. Maybe you ran this script on {} before.'.format(e.filename, url_file))
except PermissionError as e:
print('You don\'t have the right permissions to create {}.'.format(e.filename))
if path is None:
return
try:
n = download_images(url_file, path)
print('Downloaded {} images to {}'.format(n, path))
except UnicodeDecodeError as e:
print('Could not read from {}: {}'.format(url_file, e.reason))
except PermissionError as e:
print('Could not read from {}: {}'.format(url_file, e.strerror))
except OSError as e:
print('Could not read from {}: {}'.format(url_file, e.strerror))
if n == 0:
cleanup(path)
if __name__ == '__main__':
from sys import argv
if len(argv) == 2:
main(argv[1])
else:
print('Usage: image_retrieve.py <url_file>')
|
import os
import unittest
from abc import abstractmethod
from pathlib import Path
from typing import TypeVar, Generic, Optional
from patchworkdocker.importers import GitImporter, Importer, FileSystemImporter
from patchworkdocker.tests._common import TestWithTempFiles, EXAMPLE_GIT_REPOSITORY
ImporterType = TypeVar("ImporterType", bound=Importer)
class _TestImporter(Generic[ImporterType], TestWithTempFiles):
"""
Tests for `ImporterType`.
"""
@property
@abstractmethod
def importer(self) -> ImporterType:
"""
Gets an instance of the importer under test.
:return: the importer to test
"""
def setUp(self):
super().setUp()
self._importer: Optional[Importer] = None
self._paths = []
def load(self, origin: str) -> str:
"""
Uses the importer under test and calls loads with the given origin. The returned path is removed on tear down.
:param origin: the origin to load from
:return: the path of where the import has been loaded to
"""
path = self.importer.load(origin)
self.temp_manager._temp_directories.add(path)
return path
class TestGitImporter(_TestImporter[GitImporter]):
"""
Tests for `GitImporter`.
"""
@property
def importer(self) -> ImporterType:
if self._importer is None:
self._importer = GitImporter()
return self._importer
def test_load(self):
path = self.load(EXAMPLE_GIT_REPOSITORY)
self.assertTrue(os.path.exists(os.path.join(path, "a/d.txt")))
def test_load_commit(self):
path = self.load(f"{EXAMPLE_GIT_REPOSITORY}#e22fcb940d5356f8dc57fa99d7a6cb4ecdc04b66")
self.assertTrue(os.path.exists(os.path.join(path, "b.txt")))
def test_load_branch(self):
path = self.load(f"{EXAMPLE_GIT_REPOSITORY}#develop")
self.assertTrue(os.path.exists(os.path.join(path, "develop.txt")))
def test_load_tag(self):
path = self.load(f"{EXAMPLE_GIT_REPOSITORY}#1.0")
self.assertTrue(os.path.exists(os.path.join(path, "b.txt")))
class TestFileSystemImporter(_TestImporter[FileSystemImporter]):
"""
Tests for `FileSystemImporter`.
"""
_EXAMPLE_FILE = "test.txt"
@property
def importer(self) -> ImporterType:
if self._importer is None:
self._importer = FileSystemImporter()
return self._importer
def setUp(self):
super().setUp()
self.test_directory = self.temp_manager.create_temp_directory()
Path(os.path.join(self.test_directory, TestFileSystemImporter._EXAMPLE_FILE)).touch()
def test_load(self):
path = self.load(self.test_directory)
self.assertTrue(os.path.exists(os.path.join(path, TestFileSystemImporter._EXAMPLE_FILE)))
if __name__ == "__main__":
unittest.main()
|
# -*- coding: utf-8 -*-
# _author_ = "qqx"
# date : 4/7/2020
from selenium import webdriver
from selenium.webdriver.common.action_chains import ActionChains  # mouse hover
from PIL import Image,ImageEnhance
import pytesseract
from time import sleep
option = webdriver.ChromeOptions()
option.add_argument('--proxy-server=socks5://192.168.1.42:55551')  # browser proxy configuration
driver = webdriver.Chrome(chrome_options=option)
driver.maximize_window()  # maximize the window
###################### Entry 1: "Email us" on the detail page #############################
def TICKET1():
driver.get('https://www.jjshouse.com/-g105575')
driver.find_element_by_id('login_register_li').click()
sleep(2)
driver.find_element_by_xpath('//*[@id="_email"]').click() # 登录
driver.find_element_by_xpath('//*[@id="_email"]').send_keys('qwe1@tetx.com')
driver.find_element_by_id('_password').send_keys('123456')
driver.find_element_by_class_name('sign-btn').click()
    print('Logged in')
sleep(5)
# driver.find_element_by_xpath('//*[@id="signOrLogin"]/div/h6/em').click() #sign out
# driver.find_element_by_xpath('//*[@id="signOrLogin"]/div/div/a[6]').click()
# print('退出登录')
mouse = driver.find_element_by_link_text('Ask a question') #鼠标悬停才显示Email us
ActionChains(driver).move_to_element(mouse).perform()
sleep(2)
driver.find_element_by_link_text('Email us').click()
sleep(2)
    print('Ticket page')
    handles = driver.window_handles  # get all window handles of the current browser
    driver.switch_to.window(handles[1])  # switch to the new tab (the browser's second tab)
    driver.find_element_by_xpath('//*[@id="form_question"]/ul/li[3]/div/label[3]').click()  # topic
driver.find_element_by_id('comment_title').click() # Question Title
driver.find_element_by_id('comment_title').send_keys('test')
driver.find_element_by_id('comment_content').click() # Question
driver.find_element_by_id('comment_content').send_keys('testTEST123@$%!')
target = driver.find_element_by_class_name("trust")
    driver.execute_script("arguments[0].scrollIntoView();", target)  # scroll the page to the bottom
sleep(2)
    driver.save_screenshot('picture.png')  # capture the whole page
code1 = driver.find_element_by_id('verifyImg')
# print(code1.location)
sleep(2)
    # left = code1.location['x']  # automatic positioning
    # top = code1.location['y']
    # right = code1.size['width'] + left
    # height = code1.size['height'] + top
im = Image.open('picture.png')
    box = (570,340,670,380)  # manually located captcha position
    im.crop(box).save('picture1.png')  # save the cropped captcha
    # img = im.crop((left,top, right, height))
    # img.save('picture1.png')  # the cropped captcha image
sleep(2)
class demo():
def __init__(self, path):
self.image = Image.open(path)
self.image = self.image.convert('L')
def test(self):
threshold = 50
table = []
for i in range(256):
if i < threshold:
table.append(0)
else:
table.append(1)
self.image = self.image.point(table, '1')
self.img_array = self.image.load()
width = self.image.size[0]
height = self.image.size[1]
for i in range(0, 1000):
for x in range(1, width - 1):
for y in range(1, height - 1):
count = 0
if self.img_array[x, y] == self.img_array[x - 1, y + 1]:
count += 1
if self.img_array[x, y] == self.img_array[x, y + 1]:
count += 1
if self.img_array[x, y] == self.img_array[x + 1, y + 1]:
count += 1
if self.img_array[x, y] == self.img_array[x - 1, y]:
count += 1
if self.img_array[x, y] == self.img_array[x + 1, y]:
count += 1
if self.img_array[x, y] == self.img_array[x - 1, y - 1]:
count += 1
if self.img_array[x, y] == self.img_array[x, y - 1]:
count += 1
if self.img_array[x, y] == self.img_array[x + 1, y - 1]:
count += 1
if count <= 2 and count > 1:
self.img_array[x, y] = 1
self.image = self.image.convert('L')
self.image.show()
self.image.save('picture2.png')
final = pytesseract.image_to_string(self.image)
return final
    verifycode = demo('picture1.png').test()  # call once; the original made an extra throwaway call
print(verifycode)
sleep(2)
driver.find_element_by_xpath('//*[@id="genCode"]').click()
driver.find_element_by_xpath('//*[@id="genCode"]').send_keys(verifycode)
driver.find_element_by_id('sbmt_comment').click()
###################### Entry 2: product support from an order awaiting payment #############################
def TICKET2():
driver.get('https://www.jjshouse.fr/account/order.php?order_sn=6401552446')
sleep(2)
driver.find_element_by_xpath('//*[@id="_email"]').click() #登录
driver.find_element_by_xpath('//*[@id="_email"]').send_keys('qwe1@tetx.com')
driver.find_element_by_id('_password').send_keys('123456')
driver.find_element_by_class_name('sign-btn').click()
sleep(2)
    print('Order 6401552446 awaiting payment')
    driver.find_element_by_xpath('//*[@id="items_label_0"]/div[1]/div/div/a').click()
    print('Navigated to product support')
sleep(2)
driver.find_element_by_xpath('//*[@id="my_form"]/div/div[2]/label/img').click()
driver.find_element_by_id('_message').click() # detail message
driver.find_element_by_id('_message').send_keys('ABCabc123!@#$')
driver.find_element_by_id('_phone').click() # phone number
driver.find_element_by_id('_phone').send_keys('12345678')
driver.find_element_by_id('sbmit_frm_btn').click() # submit
sleep(2)
    driver.find_element_by_id('submit1').click()  # confirm
sleep(2)
    ordertext = driver.find_element_by_xpath('/html/body/div[7]/div[2]/div[2]/ul/li[2]').text  # grab the text
    ticketid = ''.join([x for x in ordertext if x.isdigit()])  # extract the digits from the text
    print('ticket id:', ticketid)
sleep(2)
driver.find_element_by_id('signOrLogin').click()
driver.find_element_by_xpath('//*[@id="signOrLogin"]/div/div/a[4]').click() # my ticket
print('跳转my ticket')
sleep(2)
ticketid1 = driver.find_element_by_xpath('/html/body/div[2]/div[6]/div[2]/table/tbody/tr[2]/td[2]/a').text
    if ticketid == ticketid1:
        print('Submission succeeded')
    else:
        print('Submission failed')
if __name__=="__main__":
TICKET2()
|
from django.utils import timezone
def calculate_age(birth_date):
today = timezone.now().date()
return today.year - birth_date.year - ((today.month, today.day) < (birth_date.month, birth_date.day))
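# Usage note: the subtracted comparison is a bool (0 or 1), so one year is
# removed exactly when the birthday hasn't occurred yet this year. The same
# arithmetic with a fixed "today" (arbitrary example dates):
#
#   from datetime import date
#   today, birth = date(2020, 6, 14), date(1990, 6, 15)
#   today.year - birth.year - ((today.month, today.day) < (birth.month, birth.day))
#   # -> 29; with today = date(2020, 6, 15) the result is 30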
|
# KVM-based Discoverable Cloudlet (KD-Cloudlet)
# Copyright (c) 2015 Carnegie Mellon University.
# All Rights Reserved.
#
# THIS SOFTWARE IS PROVIDED "AS IS," WITH NO WARRANTIES WHATSOEVER. CARNEGIE MELLON UNIVERSITY EXPRESSLY DISCLAIMS TO THE FULLEST EXTENT PERMITTEDBY LAW ALL EXPRESS, IMPLIED, AND STATUTORY WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT OF PROPRIETARY RIGHTS.
#
# Released under a modified BSD license, please see license.txt for full terms.
# DM-0002138
#
# KD-Cloudlet includes and/or makes use of the following Third-Party Software subject to their own licenses:
# MiniMongo
# Copyright (c) 2010-2014, Steve Lacy
# All rights reserved. Released under BSD license.
# https://github.com/MiniMongo/minimongo/blob/master/LICENSE
#
# Bootstrap
# Copyright (c) 2011-2015 Twitter, Inc.
# Released under the MIT License
# https://github.com/twbs/bootstrap/blob/master/LICENSE
#
# jQuery JavaScript Library v1.11.0
# http://jquery.com/
# Includes Sizzle.js
# http://sizzlejs.com/
# Copyright 2005, 2014 jQuery Foundation, Inc. and other contributors
# Released under the MIT license
# http://jquery.org/license
#!/usr/bin/env python
#
# Used to copy files.
import shutil
# Used to check file existence.
import os.path
# To create unique IDs from hashes.
import hashlib
# To show progress visually, for hash calculation.
from pycloud.pycloud.utils import progressbar
################################################################################################################
# Exception type used in our system.
################################################################################################################
class DiskImageException(Exception):
def __init__(self, message):
super(DiskImageException, self).__init__(message)
self.message = message
################################################################################################################
# Simple structure to represent a disk image.
################################################################################################################
class DiskImage(object):
# Describes the image types that the class knows how to handle.
# This is not the best way to handle this, as new types will require changes to this class (ie, adding them to this list).
TYPE_RAW = 'raw'
TYPE_QCOW2 = 'qcow2'
SUPPORTED_IMAGE_TYPES = ( TYPE_RAW, TYPE_QCOW2 )
################################################################################################################
# Sets up internal values of the disk image.
# This will NOT create an actual disk image file, it will only create the DiskImage object.
################################################################################################################
def __init__(self, diskImageFilepath):
        # Try to automatically guess the image type from the extension.
diskImageType = DiskImage.getDiskImageType(diskImageFilepath)
if(diskImageType == None):
# If we couldn't get it, throw an error.
raise DiskImageException("Filepath %s contains no extension, so image type can not be obtained." % diskImageFilepath)
# Check that the image type is valid.
if(not DiskImage.isValidDiskImageType(diskImageType)):
raise DiskImageException("Disk image type " + diskImageType + " not supported.")
# Set the basic characteristics of a disk image.
self.filepath = diskImageFilepath
self.type = diskImageType
################################################################################################################
# Returns the type of a given file (basically its extension), or None if it had no extension.
################################################################################################################
@staticmethod
def getDiskImageType(diskImageFilepath):
# This splits the extension and puts it in the second location of the array.
diskImageExtension = os.path.splitext(diskImageFilepath)[1]
# Check if we got an extension.
if(diskImageExtension != ''):
# Remove the '.' before returning the type.
diskImageType = diskImageExtension[1:]
# Check if this extension is a valid type.
if(DiskImage.isValidDiskImageType(diskImageType)):
return diskImageType
else:
return None
else:
return None
################################################################################################################
# Checks if a given filename is a valid disk image.
################################################################################################################
@staticmethod
def isValidDiskImageFilename(diskImageFilepath):
# Check that we support this image type.
diskImageType = DiskImage.getDiskImageType(diskImageFilepath)
isValid = DiskImage.isValidDiskImageType(diskImageType)
return isValid
################################################################################################################
# Checks if a given type is a valid disk type.
################################################################################################################
@staticmethod
def isValidDiskImageType(imageType):
# Check that we support this image type.
isValid = imageType in DiskImage.SUPPORTED_IMAGE_TYPES
return isValid
################################################################################################################
# Copies a disk image file into a new location, and returns a new DiskImage object pointing to that location.
################################################################################################################
def clone(self, cloneDiskImageFilepath):
# Check if the source disk image file exists.
if not os.path.exists(self.filepath):
raise DiskImageException("Source image file does not exist (%s)." % self.filepath)
# Check if the new disk image file already exists.
if os.path.exists(cloneDiskImageFilepath):
# This is an error, as we don't want to overwrite an existing disk image with a source.
raise DiskImageException("Destination image file already exists (%s). Will not overwrite existing image." % cloneDiskImageFilepath)
        # Check if the filepath has a valid extension, and add one if not.
if(not DiskImage.isValidDiskImageFilename(cloneDiskImageFilepath)):
extension = '.' + self.type
cloneDiskImageFilepath = cloneDiskImageFilepath + extension
# Simply copy the file.
print "Copying disk image %s to new disk image %s..." % (self.filepath, cloneDiskImageFilepath)
shutil.copyfile(self.filepath, cloneDiskImageFilepath)
print 'Disk image copied.'
# Create the cloned object.
clonedDiskImage = DiskImage(cloneDiskImageFilepath)
return clonedDiskImage
################################################################################################################
    # Calculates a unique ID for the image file.
################################################################################################################
@staticmethod
def calculateId(imageFilepath):
return DiskImage.__calculateFileHash(imageFilepath)
################################################################################################################
# Calculates the MD5 hash of a file without loading it completely into memory.
################################################################################################################
@staticmethod
def __calculateFileHash(inputFilename, blockSize=2**20):
print "Calculating hash for file %s" % inputFilename
# Create a simple progress bar to show progress of calculated hash.
numIterations = os.path.getsize(inputFilename) / blockSize
progressBar = progressbar.LoopAnimatedProgressBar(end=100, width=80, numberOfIterations=numIterations)
# Loop over the file to calculate the hash incrementally.
currIteration = 0
with open(inputFilename, 'rb') as inputFile:
hashCalculator = hashlib.md5()
while True:
# Update the progress bar, if required.
progressBar.update(currIteration)
currIteration += 1
# Get a data chunk (window) and add it to the hash calculation.
data = inputFile.read(blockSize)
if not data:
break
hashCalculator.update(data)
# Once we finished reading all the file, get the readable hash from the calculator.
print '' # Just to ensure next prints will start in a new line.
hashValue = str(hashCalculator.hexdigest())
print "Hash result for file %s: %s" % (inputFilename, hashValue)
return hashValue
################################################################################################################
# Functions to test the class.
################################################################################################################
################################################################################################################
# Get the command line arguments.
################################################################################################################
import argparse
def get_args():
parser = argparse.ArgumentParser(description='Manage a disk image.')
parser.add_argument('-diskImageFilepath', required=True, action='store', help='The disk image path.')
parser.add_argument('-sourceDiskImageFilepath', required=True, action='store', help='The source disk image path.')
parsedArguments = parser.parse_args()
return parsedArguments
################################################################################################################
# Command line test
################################################################################################################
def testDiskImage():
parsedArguments = get_args()
print 'Starting Disk Image test...'
dm = DiskImage(parsedArguments.diskImageFilepath)
dm.clone(parsedArguments.sourceDiskImageFilepath)
print 'Test finished'
|
#!/usr/bin/env python
#-*-coding:utf-8-*-
# @File:preprocess.py
# @Author: Michael.liu
# @Date:2020/5/8 17:31
# @Desc: this code is ....
import codecs
import os
def character_tagging(input_file_, output_file_):
input_data = codecs.open(input_file_, 'r', 'utf-8')
output_data = codecs.open(output_file_, 'w', 'utf-8')
for line in input_data.readlines():
        # strip leading and trailing whitespace from the line
word_list = line.strip().split()
for word in word_list:
words = word.split("/")
word = words[0]
if len(word) == 1:
if word == '。' or word == '?' or word == '!':
output_data.write("\n")
else:
output_data.write(word + "/S ")
elif len(word) >= 2:
output_data.write(word[0] + "/B ")
for w in word[1: len(word) - 1]:
output_data.write(w + "/M ")
output_data.write(word[len(word) - 1] + "/E ")
output_data.write("\n")
input_data.close()
output_data.close()
# def character_tagging(input_file, output_file):
# input_data = codecs.open(input_file, 'r', 'utf-8')
# output_data = codecs.open(output_file, 'w', 'utf-8')
# for line in input_data.readlines():
# word_list = line.strip().split()
# for word in word_list:
# if len(word) == 1:
# output_data.write(word + "\tS\n")
# else:
# output_data.write(word[0] + "\tB\n")
# for w in word[1:len(word)-1]:
# output_data.write(w + "\tM\n")
# output_data.write(word[len(word)-1] + "\tE\n")
# output_data.write("\n")
# input_data.close()
# output_data.close()
if __name__=="__main__":
print("......start......")
fileName1 = '../../data/chapter4/seg/msr_training.utf8'
fileName2 = '../../data/chapter4/seg/pku_training.utf8'
character_tagging(fileName1, "msr_training_out.csv")
character_tagging(fileName2, "pku_training_out.csv")
print(".....finished.....")
|
#!/usr/bin/env python
import sys
import ebmlite
from .elf import *
from .helpers import *
def pprint(el, values=True, out=sys.stdout, depth=0):
if isinstance(el, ebmlite.core.Document):
if el.size and depth < 3:
print("[0x%X,0x%X) %s (Document, type %s)\n" % (el.offset, el.offset + el.size, str(el.name), el.type))
for i in el:
pprint(i, values, out, depth+1)
else:
if el.size and depth < 3:
print("[0x%X,0x%X) %s (ID: 0x%0X)" % (el.offset, el.offset + el.size, str(el.name), el.id))
if isinstance(el, ebmlite.core.MasterElement):
#print(": (master) %d subelements\n" % len(el.value))
for i in el:
pprint(i, values, out, depth+1)
else:
#print(": (%s)" % el.dtype.__name__)
#if values and not isinstance(el, ebmlite.core.BinaryElement):
# print(" %r\n" % (el.value))
#else:
# print("\n")
pass
def analyze(fp):
if not fp.read(4) == b'\x1A\x45\xDF\xA3':
return
fp.seek(0)
schema = ebmlite.loadSchema('matroska.xml')
doc = schema.load(fp)
pprint(doc)
if __name__ == '__main__':
import sys
with open(sys.argv[1], 'rb') as fp:
analyze(fp)
|
from classes.model.package import Package
from classes.util.tree import Tree
from pathlib import Path
from typing import Any, Dict, IO, List, Optional, Union
import yaml
class YamlParser:
def __init__(self, path: str):
self.path: str = path
self.data: Optional[Dict[str, Any]] = None
def __valid_path(self):
file_path = Path(self.path)
if not file_path.exists():
raise Exception("Unable to find file \"{}\"".format(self.path))
if not file_path.is_file():
raise Exception("Path \"{}\" is not file".format(file_path.resolve()))
    def get_data(self) -> Dict[str, Any]:
        if self.data is None:
            self.__valid_path()
            file_path: Path = Path(self.path)
            with file_path.open("r") as stream:
                self.data = yaml.safe_load(stream)
        return self.data
def get_directories(self) -> List[str]:
data: Dict[str, Any] = self.get_data()
return data["directories"]
def get_packages(self) -> List[Package]:
packages: List[Package] = []
data: Dict[str, Any] = self.get_data()
for name, values in data["packages"].items():
packages.append(Package(name, values))
return packages
def get_value(self, keys: Union[str, List[str]]):
return Tree.get_value(self.get_data(), keys)
def get_repositories(self) -> List[str]:
repositories: List[str] = []
packages: List[Package] = self.get_packages()
for package in packages:
repositories.append(package.repository)
return repositories
def get_token(self) -> Optional[str]:
return self.get_value(["api", "token"])
def get_login(self) -> Optional[str]:
return self.get_value(["api", "login"])
def get_password(self) -> Optional[str]:
return self.get_value(["api", "password"])
def get_url(self) -> Optional[str]:
return self.get_value(["api", "url"])
    def save(self, data):
        self.__valid_path()
        file_path: Path = Path(self.path)
        with file_path.open("w") as stream:
            yaml.dump(data, stream)
|
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import inspect
import time
__CACHE = {}
def cached(ttl=60000):
def _hash_function_signature(func):
hash_object = hashlib.md5(str(inspect.getargspec(func)).encode("utf8") + str(func).encode("utf8"))
return hash_object.hexdigest()
def _now():
return int(round(time.time() * 1000))
def _expired(t):
if t is None or _now() - t > ttl:
return True
else:
return False
def _cached_decorator(func):
def func_wrapper(self, computeOptions, **args):
hash = _hash_function_signature(func)
force = computeOptions.get_boolean_arg("nocached", default=False)
if force or hash not in __CACHE or (hash in __CACHE and _expired(__CACHE[hash]["time"])):
result = func(self, computeOptions, **args)
__CACHE[hash] = {
"time": _now(),
"result": result
}
return __CACHE[hash]["result"]
return func_wrapper
return _cached_decorator
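# A minimal usage sketch. The handler and options classes below are
# hypothetical; the only contract @cached relies on is that the wrapped method
# takes (self, computeOptions, **args) and that computeOptions exposes
# get_boolean_arg(name, default). Note inspect.getargspec was removed in
# Python 3.11, so this legacy module needs an older interpreter (or
# getfullargspec) to run.
if __name__ == "__main__":
    class _Options(object):
        def get_boolean_arg(self, name, default=False):
            return default  # return True for "nocached" to bypass the cache
    class _Handler(object):
        @cached(ttl=5000)  # cache results for five seconds
        def calc(self, computeOptions, **args):
            print("computing...")  # printed on cache misses only
            return 42
    handler, opts = _Handler(), _Options()
    print(handler.calc(opts), handler.calc(opts))  # "computing..." once, then 42 42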
|
import cv2 as cv
img = cv.imread("irene.jpg", cv.IMREAD_UNCHANGED) # opencv에서는 BGR 순서로 색을 numpy에 저장
# mac에서는 keyevent로 움직이는게 불가능한가???
# ten keyless도 고려하면 asdw로 ...
# asdw로 구현하자...
ESC = 27
arrowL = ord('a')
arrowR = ord('d')
arrowU = ord('w')
arrowD = ord('s')
delta = 10
row_num, col_num, _ = img.shape
x_pos = int(1680 / 2 - col_num / 2)
y_pos = int(1050 / 2 - row_num / 2)
cv.namedWindow('irene', cv.WINDOW_NORMAL)
cv.imshow('irene', img)
cv.resizeWindow('irene', col_num, row_num)
cv.moveWindow('irene', x_pos, y_pos)
while True:
    key = cv.waitKey()
    if key == arrowL:
        x_pos -= delta
    elif key == arrowR:
        x_pos += delta
    elif key == arrowU:
        y_pos -= delta
    elif key == arrowD:
        y_pos += delta
    elif key == ESC:
        break
    # move after updating the position so the window reacts to this keypress
    # (the original moved first and lagged one keypress behind)
    cv.moveWindow('irene', x_pos, y_pos)
cv.destroyAllWindows()
|
c = int(input("How much celsuis are you going to convert?"))
ce = c * 1.5
print (ce + 32)
|
import os
import json
import pickle
import random
from pathlib import Path
from .run_cross_val import un_cross_val_training
from .train_fetal import main_train
### Training using prediction has several stages:
# 1. Cross-train with a configuration that does not use predictions
# 2. Run prediction for all cross-trained experiments to get unbiased predictions of all scans
# 3. Move all the predictions into the data folders
# 4. Train with a prediction-using configuration
# 5. Run prediction with the new model
# 6. To apply consecutively: save step 5's results in the data folder, recreate the data file, and rerun prediction
### Base model cross val train ###
# create dummy experiment with wanted config
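# A minimal, self-contained sketch of stages 1-3 above. Every helper here is a
# hypothetical stand-in (the real entry points live in run_cross_val /
# train_fetal and their signatures are not shown in this file):
def cross_val_prediction_sketch(n_folds, data_dir, work_dir, train_fold, predict_fold):
    """train_fold(fold, exp_dir) and predict_fold(fold, exp_dir) are injected
    callables standing in for the project's training/prediction entry points;
    predict_fold is assumed to return the path of the predictions it wrote."""
    import shutil
    data_dir, work_dir = Path(data_dir), Path(work_dir)
    # 1. cross-train one experiment per fold with a non-prediction-using config
    for fold in range(n_folds):
        train_fold(fold, work_dir / 'fold_{}'.format(fold))
    # 2. predict every scan with the model that never trained on it
    pred_paths = [predict_fold(fold, work_dir / 'fold_{}'.format(fold))
                  for fold in range(n_folds)]
    # 3. move the unbiased predictions next to the raw data, so a
    #    prediction-using config (stages 4-6) can pick them up
    for pred in pred_paths:
        shutil.copy(str(pred), str(data_dir / Path(pred).name))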
|
# -*- coding: utf-8 -*-
# Generated by Django 1.10.1 on 2016-11-01 17:42
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('jars', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='sixjars',
name='acc_bal',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='sixjars',
name='edu',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='sixjars',
name='ffa',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='sixjars',
name='giv',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='sixjars',
name='lts',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='sixjars',
name='nec',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='sixjars',
name='ply',
field=models.FloatField(default=0),
),
migrations.AlterField(
model_name='sixjars',
name='user',
field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
migrations.DeleteModel(
name='User',
),
]
|
"""empty message
Revision ID: 88bbc83d3dc4
Revises: aab74117e579
Create Date: 2021-02-27 22:40:53.615375
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '88bbc83d3dc4'
down_revision = 'aab74117e579'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('user_history', 'timestamp',
existing_type=postgresql.TIMESTAMP(),
nullable=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('user_history', 'timestamp',
existing_type=postgresql.TIMESTAMP(),
nullable=True)
# ### end Alembic commands ###
|
"""Update Packages Classes."""
import logging
from .listapplicabledevices import ListApplicableDevices
from .listapplicabledevices import ApplicableDevices
from .upgradepackages import UpgradePackages
from .upgradepackages import UpgradePackage
from .upgradepackage import Upgrades
logging.debug("In the update_packages __init__.py file.")
__all__ = [
"ListApplicableDevices",
"ApplicableDevices",
"UpgradePackages",
"UpgradePackage",
"Upgrades",
]
|
import numpy as np
import matplotlib.pyplot as plt
import configparser
#def phase_filer():
config = configparser.ConfigParser()
config.read('snip-config.ini')
next_config = configparser.ConfigParser()
next_config.read('NeXtRAD.ini')
data_file = config.get('Config','data_file')
update_rate = config.get('Config','update_rate')
outfile = config.get('Config','outfile')
num_pulses = next_config.getint('PulseParameters','NUM_PRIS')  # getint: these are used as counts below
num_range_bins = next_config.getint('PulseParameters','SAMPLES_PER_PRI')
pri = float(next_config.get('PulseParameters','PULSES').split(',')[1])
print(pri)
# for i in range()
# data = np.fromfile(data_file, dtype=np.int16, count=2*num_pulses*num_range_bins)
# IQ = data[0::2] + 1j*data[1::2] #Create complex IQ samples
# IQ_ref = ref_data[0::2] + 1j*ref_data[1::2] #Complex reference and trim away noise samples
# ref = np.pad(IQ_ref,(0, num_range_bins-len(IQ_ref)),'constant')
# pulse_matrix = np.reshape(IQ,(num_pulses,num_range_bins)) # Form a num_pulses x num_range_bin matrix
# temp_matrix = np.zeros((num_pulses,2*num_range_bins-1),dtype=complex)
# for pulse in range(num_pulses):
# temp_matrix[pulse,:] = np.correlate(pulse_matrix[pulse,:], ref, mode='full')
# pulse_matrix[pulse,:] = temp_matrix[pulse,num_range_bins-1::]
# if __name__ == '__main__':
# pri = phase_filer
# print(pri)
|
# -*- coding: utf-8 -*-
import math
import re
import os
import pdb
file_dir = os.path.dirname(os.path.realpath(__file__))
def tf(text):
dict = {}
list_word = text.split()
for word in list_word:
if word in dict:
dict[word] += 1.0
else:
dict[word] = 1.0
    try:
        maxtf = max(dict.values())
    except ValueError:  # empty text
        maxtf = 1
for key in dict.keys():
dict[key] = dict[key]/maxtf
return dict
def tfidf(text):
dict = {}
with open(file_dir + '/idf.txt', 'r', encoding='utf-8') as openfileobject:
for line in openfileobject:
l = line.split('\t')
dict[l[0]] = float(l[1])
t = tf(text)
tf_idf = {}
for word in t:
        d = dict.get(word, 1)  # unseen words fall back to a document frequency of 1
D = 340.0
tf_idf[word] = t[word] * math.log(D / d)
return tf_idf
def top(n, scores):
    # return the n highest-scoring tokens; tokens that fail the character
    # filter or are purely numeric are skipped and do not count toward n
    # (the original decremented the for-loop variable, which has no effect)
    list_key = list(scores.keys())
    list_value = [scores[key] for key in list_key]
    result = {}
    while len(result) < int(n) and list_value:
        a = max(list_value)
        index = list_value.index(a)
        list_value.pop(index)
        k = list_key.pop(index)
        if re.match(u'[0-9A-Za-zăâáắấảẳẩàằầạặậãẵẫđéèẹẻẽíìịỉĩóòọỏõớờơợởỡôồốổỗộụủũùúưứừữửựêếềệểễýĂÂÁẮẤẢẲẨÀẰẦẠẶẬÃẴẪĐÉÈẸẺẼÍÌỊỈĨÓÒỌỎÕỚỜƠỢỞỠÔỒỐỔỖỘỤỦŨÙÚƯỨỪỮỬỰÊẾỀỆỂỄÝ_]+', k):
            if re.match(u'^[0-9]+$', k):
                continue
            result[k] = a
    return result
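# Hypothetical end-to-end usage (the sentence is an arbitrary example, and
# idf.txt with tab-separated "word<TAB>df" lines must sit next to this file):
#
#   scores = tfidf(u'hôm nay trời đẹp hôm nay đi chơi')
#   print(top(3, scores))  # the three highest tf-idf tokens, numbers excluded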
|
import cv2 as cv
# read an image and display it
src = cv.imread('C:/Users/zx/Desktop/jpg1.jpg')  # read the image
cv.namedWindow('input image', cv.WINDOW_AUTOSIZE)
cv.imshow("input image", src)  # display the image through OpenCV's GUI
cv.waitKey(0)  # wait for a keypress, then exit
cv.destroyAllWindows()  # close all windows
|
#!/usr/bin/env python
# encoding: utf-8
# @author: Zhipeng Ye
# @contact: Zhipeng.ye19@xjtlu.edu.cn
# @file: full_chinese.py
# @time: 2020-01-17 17:19
# @desc:
import os
import re
if __name__ == "__main__":
files_name = sorted(os.listdir('/Data_SSD/zhipengye/zhipengye/LM/processed_data/small_dictory_filtered'))
for file_name in files_name:
content_list = []
with open('/Data_SSD/zhipengye/zhipengye/LM/processed_data/small_dictory_filtered/'+file_name, encoding='utf-8') as file:
for line in file:
segments = line.split('\t')
words = segments[0].replace(' ','')
if re.match('^[\u4e00-\u9fa5]{1,}$',words):
content_list.append(line.strip())
with open('/Data_SSD/zhipengye/zhipengye/LM/processed_data/small_full_chinese_filtered/'+file_name, 'a',encoding='utf-8') as file:
file.write('\n'.join(content_list))
print('Program is ok!')
|
amt = int(input("enter amount"))
disc = None
if 100 <= amt <= 1000:
    disc = amt
elif 1001 <= amt <= 2000:
    disc = 0.1 * amt
elif 2001 <= amt <= 3000:
    disc = 0.2 * amt
elif amt >= 3001:
    disc = 0.25 * amt
else:
    print("invalid amount")
if disc is not None:
    print("amount=", amt)
    print("discount=", disc)
    # net pay is the amount minus the discount (the original added it);
    # the original also crashed on amounts below 100, where disc was unset
    print("net pay=", amt - disc)
|
'''
Binary Search
'''
class BinarySearch:
def __init__(self, arr, val):
self.arr = arr
self.val = val
def bubsortArray(self):
for i in range(len(self.arr)):
for j in range(len(self.arr)-1):
if self.arr[i] < self.arr[j]:
self.arr[i], self.arr[j] = self.arr[j], self.arr[i]
return self.arr
def selectsortArray(self):
for i in range(len(self.arr)):
for j in range(i+1, len(self.arr)):
if self.arr[i] > self.arr[j]:
self.arr[j], self.arr[i] = self.arr[i], self.arr[j]
return self.arr
def insertsortArray(self):
for i in range(len(self.arr)):
for j in range(i ,0 , -1):
if self.arr[j] < self.arr[j-1]:
self.arr[j], self.arr[j-1] = self.arr[j-1], self.arr[j]
return self.arr
def binarySearch(self):
# self.arr = self.bubsortArray()
# self.arr = self.selectsortArray()
self.arr = self.insertsortArray()
beg = 0
end = len(self.arr)-1
mid = 0
        while beg <= end:  # <= so a range narrowed to a single element is still checked
mid = (beg + end)//2
if self.arr[mid] == self.val:
return mid+1
elif self.arr[mid] < self.val:
beg = mid+1
else:
end = mid-1
return -1
if __name__ == "__main__":
obj = BinarySearch([4,1,7,3,8,2,9,5,6], 5)
print("Element not present." if obj.binarySearch == -1 else f"Element found at position: {obj.binarySearch()}.")
|
with open("data.in") as filecontent:
    text = filecontent.readline()
for i in text:
    if i == '(':
        print("Adding")
    else:
        print("Subtracting")
|
from django.db import models
class Foo(models.Model):
name = models.CharField(max_length=255)
content = models.TextField()
boolean = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True)
birthday = models.DateField(auto_now_add=True)
decimal = models.DecimalField(max_digits=4, decimal_places=2)
|
#!/usr/bin/env python3
import sys
import re
def get_region(info):
region_list = re.findall(";Func.refGene=(.+?);", info)
if region_list[0] == "splicing":
return region_list[0]
elif region_list[0] == "exonic":
region = re.findall(';ExonicFunc.refGene=(.+?);', info)
return region[0]
def get_af(info, population_in):
if population_in == "EAS":
exac_eas_list = re.findall(';ExAC_EAS=(.+?);', info)
if exac_eas_list[0] != ".":
exac_eas = exac_eas_list[0]
else:
exac_eas = 0
eas_1000g_list = re.findall(';1000g2015aug_eas=(.+?);', info)
if eas_1000g_list[0] != ".":
eas_1000g = eas_1000g_list[0]
else:
eas_1000g = 0
return exac_eas, eas_1000g
elif population_in == "EUR":
exac_af_list_1 = re.findall(';ExAC_NFE=(.+?);', info)
if exac_af_list_1[0] != ".":
exac_nfe = exac_af_list_1[0]
else:
exac_nfe = 0
exac_af_list_2 = re.findall(';ExAC_FIN=(.+?);', info)
if exac_af_list_2[0] != ".":
            exac_fin = exac_af_list_2[0]  # was exac_af_list_1, which double-counted NFE
else:
exac_fin = 0
exac_eur = float(exac_nfe) + float(exac_fin)
eur_1000g_list = re.findall(';1000g2015aug_eur=(.+?);', info)
if eur_1000g_list[0] != ".":
eur_1000g = eur_1000g_list[0]
else:
eur_1000g = 0
return exac_eur, eur_1000g
elif population_in == "AMR":
exac_amr_list = re.findall(';ExAC_AMR=(.+?);', info)
if exac_amr_list[0] != ".":
exac_amr = exac_amr_list[0]
else:
exac_amr = 0
amr_1000g_list = re.findall(';1000g2015aug_amr=(.+?);', info)
if amr_1000g_list[0] != ".":
amr_1000g = amr_1000g_list[0]
else:
amr_1000g = 0
return exac_amr, amr_1000g
elif population_in == "AFR":
exac_afr_list = re.findall(';ExAC_AFR=(.+?);', info)
if exac_afr_list[0] != ".":
exac_afr = exac_afr_list[0]
else:
exac_afr = 0
afr_1000g_list = re.findall(';1000g2015aug_afr=(.+?);', info)
if afr_1000g_list[0] != ".":
afr_1000g = afr_1000g_list[0]
else:
afr_1000g = 0
return exac_afr, afr_1000g
elif population_in == "SAS":
exac_sas_list = re.findall(';ExAC_SAS=(.+?);', info)
if exac_sas_list[0] != ".":
exac_sas = exac_sas_list[0]
else:
exac_sas = 0
sas_1000g_list = re.findall(';1000g2015aug_sas=(.+?);', info)
if sas_1000g_list[0] != ".":
sas_1000g = sas_1000g_list[0]
else:
sas_1000g = 0
return exac_sas, sas_1000g
elif population_in == "ALL":
exac_all_list = re.findall(';ExAC_ALL=(.+?);', info)
if exac_all_list[0] != ".":
exac_all = exac_all_list[0]
else:
exac_all = 0
all_1000g_list = re.findall(';1000g2015aug_all=(.+?);', info)
if all_1000g_list[0] != ".":
all_1000g = all_1000g_list[0]
else:
all_1000g = 0
return exac_all, all_1000g
def get_prediction(info):
gerp_list = re.findall(';GERP\+\+_RS=(.+?);', info)
if gerp_list[0] != ".":
gerp = gerp_list[0]
else:
gerp = 0
phylop_list = re.findall("phyloP20way_mammalian=(.+?);", info)
if phylop_list[0] != ".":
phylop = phylop_list[0]
else:
phylop = 0
vest_list = re.findall(";VEST3_score=(.+?);", info)
if vest_list[0] != ".":
vest = vest_list[0]
else:
vest = 0
cadd_list = re.findall(';CADD_phred=([-+]?\d*\.\d+|\d+);', info)
if cadd_list != []:
cadd = cadd_list[0]
else:
cadd = 0
sift_list = re.findall(';SIFT_pred=([A-Z]);', info)
if sift_list != []:
sift = sift_list[0]
else:
sift = "NA"
polyphen_list = re.findall(';Polyphen2_HVAR_pred=([A-Z]);', info)
if polyphen_list != []:
polyphen = polyphen_list[0]
else:
polyphen = "NA"
return gerp, phylop, vest, cadd, sift, polyphen
in_file = sys.argv[1]
population = sys.argv[2]
if population not in ["ALL", "EAS", "EUR", "AMR", "AFR", "SAS"]:
print("\n***invalid population input***\n")
else:
for line in open(in_file):
#skip header
if line[0] == "#":
continue
data = line.strip().split("\t")
gene_region = get_region(data[7])
exac_pop, pop_1000g = get_af(data[7], population)
gerp, phylop, vest, cadd, sift, polyphen = get_prediction(data[7])
        # keep only rare variants: skip anything at >= 1% frequency in either database
if float(exac_pop) >= 0.01 or float(pop_1000g) >= 0.01:
continue
        # keep the LoF (loss-of-function) variants
        if gene_region in ("splicing", "frameshift_insertion", "frameshift_deletion", "stopgain", "stoploss"):
print(line.strip())
        # use the prediction algorithms to keep the likely gene-damaging missense mutations:
        # require that the nonsynonymous SNV occurs at a conserved nucleotide and has CADD > 15,
        # with at least one of the other algorithms predicting it as deleterious
elif gene_region == "nonsynonymous_SNV":
if float(gerp) > 2.0:
if float(cadd) > 15.0:
if sift == "D" or polyphen == "P" or polyphen == "D":
print(line.strip())
|
# Find if a given number is a power of 3
test_num = raw_input("Enter number under test: ")
test_num = int(test_num)
test_num1 = test_num
rem=100
if test_num < 0:
print "The number is less than 0, so it is not power of 3"
elif test_num == 0:
print "The given number is 0. Please provide a number other than 0"
elif (test_num%3) != 0:
print "Number",test_num1,"is NOT a power of 3"
else:
rem = test_num%3
while test_num>=3 and rem==0:
rem = test_num%3
test_num = test_num/3
print "test_num: "+str(test_num)+"\nrem: "+str(rem)
    # a power of 3 must divide all the way down to exactly 1
    if rem == 0 and test_num == 1:
        print "Number",test_num1,"is a power of 3"
else:
print "Number",test_num1,"is NOT a power of 3"
|
from collections import defaultdict
from functools import partial
from pathlib import Path
import joblib
import numpy as np
import pandas as pd
import sklearn.metrics
from torch.utils.data import DataLoader
from tqdm import tqdm
from ..datasets import VOCDetection
from ..transforms.util import get_transforms
from ..train import VSD_PAD_SIZE
HERE = Path(__file__).parent
DATA_ROOT = HERE.joinpath('../../../data')
DEFAULT_VOC_ROOT = Path('~/Documents/data/voc').expanduser()
def vsd_results_df(results_gz,
root=DEFAULT_VOC_ROOT,
pad_size=VSD_PAD_SIZE,
batch_size=64,
num_workers=32,
results_csv_path=None,
):
"""creates Pandas dataframe from results of training models with Visual Search Difficulty dataset.
The resulting dataframe can be used with searchnets.plot.figures.f1_v_vds_score_df
Parameters
----------
results_gz : str
        path to the .gz file created by running 'searchnets test' from the command line.
root : str
path to root of VOC dataset, as defined for torchvision.VOCDetection.
Default is '~/Documents/data/voc'
pad_size : int
size of padding for images in Visual Search Difficulty dataset,
applied by transform passed to Dataset in searchnets.train.
Default is 500, declared as a constant in searchnets.train.
batch_size : int
Default is 64.
num_workers : int
Default is 32.
results_csv_path : str
Path to use to save dataframe as a csv.
Default is None, in which case no csv is saved.
Returns
-------
df : pandas.DataFrame
Notes
-----
for each image in the 'test' subset of the Visual Search Difficulty dataset, this function computes the
F1 score between the classes present in that image and the (multi-label) predictions of each trained network
that are in the results_gz file. In addition it computes the arithmetic mean of F1 scores
across all models. The individual F1 scores + mean F1 score are added as columns to the returned dataframe.
"""
vsd_split_csv = DATA_ROOT.joinpath('Visual_Search_Difficulty_v1.0/VSD_dataset_split.csv')
vsd_df = pd.read_csv(vsd_split_csv)
vsd_df = vsd_df.drop('Unnamed: 0', axis=1)
    vsd_df_test = vsd_df[vsd_df['split'] == 'test'].copy()  # copy so new columns can be added without SettingWithCopyWarning
results = joblib.load(results_gz)
# model paths to checkpoint with saved model, *** used as keys for dicts in results_gz files ***
# in theory they should be sorted numerically already because they were added to the dictionary in the loop
# and dict keys are insertion-ordered as of 3.6
# but let's be extra paranoid and sort anyway!
model_keys_for_results_gz = sorted(
results['img_names_per_model_dict'].keys(),
key=lambda x: int(x.name.split('_')[-1])
)
model_key_num_map = {}
for model_key in model_keys_for_results_gz:
model_key_num_map[model_key] = f'model_{int(model_key.name.split("_")[-1])}'
# make sure that img names list will be the same for all models
for model_key in model_keys_for_results_gz:
assert vsd_df_test['img'].values.tolist() == results['img_names_per_model_dict'][model_key]
# grab one of them to use to find index for the img from each sample from the Dataset
test_img_names = results['img_names_per_model_dict'][model_keys_for_results_gz[0]]
transform, target_transform = get_transforms(dataset_type='VSD', loss_func='BCE')
# need to make Dataset so we know what ground truth labels are
testset = VOCDetection(root=root,
csv_file=vsd_split_csv,
image_set='trainval',
split='test',
download=True,
transform=transform,
target_transform=target_transform,
return_img_name=True
)
test_loader = DataLoader(testset, batch_size=batch_size,
shuffle=False, num_workers=num_workers,
pin_memory=True)
# also if img names list is the same as that for the dataframe (like we just asserted)
# then we can use the same ind when we index into the new columns we're making
# for the dataframe
new_columns = defaultdict(
# for any new key, default to an array the same length as our dataframe, will be a new column
partial(np.zeros, shape=len(vsd_df_test))
)
pbar = tqdm(test_loader)
n_batch = len(test_loader)
for i, sample in enumerate(pbar):
pbar.set_description(f'batch {i} of {n_batch}')
# don't care about batch_x, just what y should be, and the img name
_, batch_y, batch_img_name = sample
# and we iterate through each sample in the batch
for y, img_name in zip(batch_y, batch_img_name):
y_true = y.cpu().numpy() # convert to numpy array to pass to sklearn.metrics.f1_score
row = test_img_names.index(img_name) # use the image name to get its index from the list
# and get predictions for that image from **all** models!
# (because we need votes from multiple models for an f1 score)
f1_scores_all_models = []
acc_scores_all_models = []
hamming_loss_all_models = []
for model_key, model_num in model_key_num_map.items():
y_pred = results['predictions_per_model_dict'][model_key][row]
f1 = sklearn.metrics.f1_score(y_true, y_pred, average='macro')
new_columns[f'f1_score_{model_num}'][row] = f1
f1_scores_all_models.append(f1) # we'll use to get means after
acc = sklearn.metrics.accuracy_score(y_true, y_pred)
new_columns[f'acc_{model_num}'][row] = acc
acc_scores_all_models.append(acc)
hl = sklearn.metrics.hamming_loss(y_true, y_pred)
new_columns[f'hamming_loss_{model_num}'][row] = hl
hamming_loss_all_models.append(hl)
mean_f1 = np.mean(f1_scores_all_models)
new_columns['mean_f1_score'][row] = mean_f1
mean_acc = np.mean(acc_scores_all_models)
new_columns['mean_acc'][row] = mean_acc
mean_hamming_loss = np.mean(hamming_loss_all_models)
new_columns['mean_hamming_loss'][row] = mean_hamming_loss
for column_name, values in new_columns.items():
vsd_df_test[column_name] = values
if results_csv_path:
vsd_df_test.to_csv(results_csv_path)
return vsd_df_test
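# Example call (sketch): 'results.gz' is a hypothetical path to a file produced
# by `searchnets test`, as the docstring above describes.
#   df = vsd_results_df('results.gz', results_csv_path='vsd_results.csv')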
|
import json
def save_response(response, filename, mydir=None):
    try:
        if mydir is not None:
            filename = f"{mydir}/{filename}"
        with open(filename, 'w') as file:
            json.dump(response.json(), file)
    except FileNotFoundError:
        print(filename + " not found. ")
def save_response_part(response, filename, part):
    try:
        with open(filename, 'w') as file:
            json.dump(response.json()[part], file)
    except FileNotFoundError:
        print(filename + " not found. ")
def read_init(filename, mydir=None):
    try:
        if mydir is not None:
            filename = f"{mydir}/{filename}"
        with open(filename, 'r') as file:
            data = json.load(file)
        return { 'key': data['keys'][0] }
    except FileNotFoundError:
        print(filename + " not found. ")
|
from api.models import *
from api.serializers import *
from django.contrib.auth.models import User
from django.shortcuts import render
from rest_framework import viewsets
from rest_framework.views import APIView
from rest_framework.authentication import SessionAuthentication, BasicAuthentication
from rest_framework.permissions import IsAuthenticated, AllowAny
from rest_framework.response import Response
from django.contrib.auth import authenticate, login, logout
from rest_framework import status
from rest_framework_json_api.views import RelationshipView
from rest_framework_json_api import serializers
class TeamViewSet(viewsets.ModelViewSet):
"""
    API endpoint that allows teams to be viewed.
"""
permission_classes = (AllowAny,)
resource_name = 'teams'
queryset = Team.objects.all()
serializer_class = TeamSerializer
|
from django.conf.urls import url,include
from django.conf.urls.static import static
from django.conf import settings
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^', include('inicio.urls')),
url(r'^selecao/', include('selecao.urls')),
url(r'^downloads/', include('downloads.urls')),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
import hashlib
import binascii
def process_request(request):
password = request.GET["password"]
# BAD: Inbound authentication made by comparison to string literal
if password == "myPa55word":
redirect("login")
hashed_password = load_from_config('hashed_password', CONFIG_FILE)
salt = load_from_config('salt', CONFIG_FILE)
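    # GOOD (sketch): derive a salted hash of the inbound password and compare it
    # in constant time against the hash loaded from configuration. Assumes the
    # stored salt is hex-encoded and `redirect` is defined elsewhere, as above.
    import hmac  # stdlib; local import keeps this sketch self-contained
    dk = hashlib.pbkdf2_hmac('sha256', password.encode('utf8'),
                             binascii.unhexlify(salt), 100000)
    if hmac.compare_digest(binascii.hexlify(dk).decode('ascii'), hashed_password):
        redirect("login")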
|
# -*- coding: utf-8 -*-
"""
Created on Sat Sep 2 07:45:53 2017
@author: my7pro
"""
import redis
r = redis.Redis()
class RQueue(object):
"""An abstract FIFO queue"""
def __init__(self, local_id=None):
if local_id is None:
local_id = r.incr("queue_space")
id_name = "q:%s" %(local_id)
self.id_name = id_name
def push(self, element):
"""Push an element to the tail of the queue"""
id_name = self.id_name
push_element = r.rpush(id_name, element)
def pop(self):
"""Pop an element from the head of the queue"""
id_name = self.id_name
popped_element = r.lpop(id_name)
return popped_element
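# Minimal usage sketch (assumes a Redis server is reachable on localhost):
if __name__ == '__main__':
    q = RQueue()
    q.push('first')
    q.push('second')
    print(q.pop())  # b'first' -- redis-py returns bytes by default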
|
'''
Defines base block of a map.
@author: stevenh47
'''
from google.appengine.ext import db
from util.type_cast import Cast
class MapElement(db.Model):
'''Base block of a map.
'''
width = db.IntegerProperty()
height = db.IntegerProperty()
x = db.IntegerProperty()
y = db.IntegerProperty()
pic = db.BlobProperty()
_children_save = db.StringProperty()
    def __init__(self, width, height, x, y, data, sub_column, sub_row):
        '''Constructor
        Args:
            width: width in pixel
            height: height in pixel
            x: x coordinate of upper left corner in the map
            y: y coordinate of upper left corner in the map
            data: picture data
            sub_column: number of columns of sub elements
            sub_row: number of rows of sub elements
        '''
        # initialize the datastore entity so the property values are tracked
        super(MapElement, self).__init__(width=width, height=height, x=x, y=y, pic=data)
        # children's IDs
        self._children = [[''] * sub_column for _ in range(sub_row)]
def set_child(self, row, column, key='', child=None):
if child:
key = child.key()
self._children[row][column] = key
def get_child(self, row, column):
return self._children[row][column]
def put(self, **kwargs):
self._children_save = Cast.list_to_string(self._children)
super(MapElement, self).put(**kwargs)
@classmethod
def get(cls, keys, **kwargs):
temp = super(MapElement, cls).get(keys, **kwargs)
temp._children = Cast.string_to_list(temp._children_save)
return temp
|
from rest_framework import serializers
from django.contrib.auth import get_user_model
from .models import Product, OrderProduct, Order
User = get_user_model()
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = '__all__'
class ProductSerializer(serializers.ModelSerializer):
class Meta:
model = Product
fields = '__all__'
class OrderProductSerializer(serializers.ModelSerializer):
user = UserSerializer()
product = ProductSerializer()
class Meta:
model = OrderProduct
fields = '__all__'
class OrderSerializer(serializers.ModelSerializer):
products = ProductSerializer(required=False, many=True)
user = UserSerializer(required=False)
class Meta:
model = Order
fields = '__all__'
|
'''
You are climbing a stair case. It takes n steps to reach to the top.
Each time you can either climb 1 or 2 steps. In how many distinct ways
can you climb to the top?
Note: Given n will be a positive integer.
'''
class Solution(object):
def climbStairs(self, n):
"""
:type n: int
:rtype: int
"""
return self.helper(n)
def helper(self, n, memo=None):
if memo is None:
memo = {}
if n == 0 or n == 1:
return 1
# if result at the ith index is not computed
# compute it and assign the value
if n not in memo:
memo[n] = self.helper(n-1, memo) + self.helper(n-2, memo)
return memo[n]
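# Quick check: the counts follow the Fibonacci pattern (1, 2, 3, 5, 8, ...).
if __name__ == '__main__':
    s = Solution()
    print(s.climbStairs(2))  # 2 ways: 1+1, 2
    print(s.climbStairs(5))  # 8 ways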
|
from datetime import timedelta
import os
import redis
REDIS_URL = os.environ.get('REDIS_URL')
redis_pool = redis.from_url(url=REDIS_URL, db=0)
for key in redis_pool.keys('board*'):
print(key)
# redis_pool.expire(key, timedelta(minutes=30))
for key in redis_pool.keys('turn*'):
print(key)
# redis_pool.expire(key, timedelta(minutes=30))
|
# Generated by Django 3.0.5 on 2020-05-07 17:53
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('catalog', '0022_auto_20181028_1731'),
]
operations = [
migrations.RemoveField(
model_name='bookinstance',
name='book',
),
migrations.RemoveField(
model_name='bookinstance',
name='borrower',
),
migrations.RemoveField(
model_name='book',
name='isbn',
),
migrations.RemoveField(
model_name='book',
name='summary',
),
migrations.AddField(
model_name='book',
name='bookcase',
field=models.CharField(default=' ', max_length=3),
),
migrations.AddField(
model_name='book',
name='format',
field=models.CharField(default=' ', max_length=50),
),
migrations.AddField(
model_name='book',
name='other',
field=models.CharField(default=' ', max_length=100),
),
migrations.AddField(
model_name='book',
name='shelf',
field=models.CharField(default=' ', max_length=3),
),
migrations.AlterField(
model_name='book',
name='author',
field=models.CharField(default=' ', max_length=100),
),
migrations.RemoveField(
model_name='book',
name='genre',
),
migrations.AddField(
model_name='book',
name='genre',
field=models.CharField(default=' ', max_length=50),
),
migrations.AlterField(
model_name='book',
name='language',
field=models.CharField(default=' ', max_length=50),
),
migrations.AlterField(
model_name='book',
name='title',
field=models.CharField(default=' ', max_length=50),
),
migrations.DeleteModel(
name='Author',
),
migrations.DeleteModel(
name='BookInstance',
),
migrations.DeleteModel(
name='Genre',
),
migrations.DeleteModel(
name='Language',
),
]
|
from django.core.exceptions import ValidationError
def validate_non_empty_string(value):
try:
value += ''
if len(value) == 0:
raise ValidationError(
('%(value)s must be a non-empty string'),
params={'value': value})
except TypeError:
raise ValidationError(
('%(value)s is not a string'),
params={'value': value})
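# Usage sketch: the validator raises for empty strings and non-strings,
# and returns silently for anything else.
#   validate_non_empty_string('hello')   # passes
#   validate_non_empty_string('')        # ValidationError: must be a non-empty string
#   validate_non_empty_string(42)        # ValidationError: is not a string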
|
import tensorflow as tf
def get_product_type(title):
    # placeholder: should use the model trained by train.py to classify the title;
    # returns a fixed product name for now
    return 'RX 480 8GB'
|
# coding=utf-8
# @Author: wjn
import unittest
from common import HTMLTestRunnerCN
import time
# dir = './case/browser/'
dir = './case/'
# suite = unittest.defaultTestLoader.discover(dir, 'test_*.py')
if __name__ == '__main__':
runner = unittest.TextTestRunner()
# runner.run(suite)
"""
cur_time = time.strftime('%Y-%m-%d %H-%M-%S')
filePath = './reports/{}report.html'.format(cur_time)
fp = open(filePath, 'wb')
runner = HTMLTestRunnerCN.HTMLTestReportCN(
stream=fp,
        title='Automated Test Report',
        description='browser + mini program',
        tester='王佳宁',
        # show each test case's printed output
        verbosity=2
)
"""
    '''
    verbosity:
    =1 (the default): no detailed output; each case prints '.' on success, 'F' on failure, 'E' on error
    =0: no output at all
    =2: print detailed per-case results
    '''
    # run the test suite
# runner.run(suite)
for i in range(2):
suite = unittest.defaultTestLoader.discover(dir, 'test_*.py')
runner.run(suite)
# suite = unittest.defaultTestLoader.discover(dir, 'test_*.py')
time.sleep(3)
    # close the file, otherwise the report cannot be generated
# fp.close()
|
#coding:utf8
from base.IterativeRecommender import IterativeRecommender
import numpy as np
from random import choice,random
from tool import config
import tensorflow as tf
from collections import defaultdict
from tensorflow import set_random_seed
set_random_seed(2)
class CDAE(IterativeRecommender):
def __init__(self,conf,trainingSet=None,testSet=None,fold='[1]'):
super(CDAE, self).__init__(conf,trainingSet,testSet,fold)
def readConfiguration(self):
super(CDAE, self).readConfiguration()
eps = config.LineConfig(self.config['CDAE'])
self.corruption_level = float(eps['-co'])
self.n_hidden = int(eps['-nh'])
self.batch_size = int(eps['-batch_size'])
def initModel(self):
super(CDAE, self).initModel()
self.n_hidden = 128
self.num_items = self.data.getSize(self.recType)
self.num_users = self.data.getSize('user')
self.negative_sp = 5
initializer = tf.contrib.layers.xavier_initializer()
self.X = tf.placeholder(tf.float32, [None, self.num_items])
self.mask_corruption = tf.placeholder(tf.float32, [None, self.num_items])
self.sample = tf.placeholder(tf.float32, [None, self.num_items])
self.U = tf.Variable(initializer([self.num_users, self.n_hidden]))
self.u_idx = tf.placeholder(tf.int32, [None], name="u_idx")
self.U_embed = tf.nn.embedding_lookup(self.U, self.u_idx)
self.weights = {
'encoder': tf.Variable(tf.random_normal([self.num_items, self.n_hidden])),
'decoder': tf.Variable(tf.random_normal([self.n_hidden, self.num_items])),
}
self.biases = {
'encoder': tf.Variable(tf.random_normal([self.n_hidden])),
'decoder': tf.Variable(tf.random_normal([self.num_items])),
}
self.userListen = defaultdict(dict)
for item in self.data.trainingData:
uid = self.data.getId(item['user'], 'user')
tid = self.data.getId(item['track'], 'track')
if tid not in self.userListen[uid]:
self.userListen[uid][tid] = 1
else:
self.userListen[uid][tid] += 1
def encoder(self,x,v):
layer = tf.nn.sigmoid(tf.matmul(x, self.weights['encoder'])+self.biases['encoder']+v)
return layer
def decoder(self,x):
layer = tf.nn.sigmoid(tf.matmul(x, self.weights['decoder'])+self.biases['decoder'])
return layer
def row(self, u):
k = self.userListen[u].keys()
v = self.userListen[u].values()
vec = np.zeros(self.num_items)
for pair in zip(k,v):
iid = pair[0]
vec[iid] = pair[1]
return vec
def next_batch(self):
X = np.zeros((self.batch_size, self.num_items))
uids = []
sample = np.zeros((self.batch_size, self.num_items))
userList = list(self.data.name2id['user'].keys())
itemList = list(self.data.name2id['track'].keys())
for n in range(self.batch_size):
user = choice(userList)
uid = self.data.name2id['user'][user]
uids.append(uid)
vec = self.row(uid)
ratedItems = self.userListen[uid].keys()
values = self.userListen[uid].values()
for iid in ratedItems:
sample[n][iid]=1
for i in range(self.negative_sp*len(ratedItems)):
ng = choice(itemList)
while ng in self.data.userRecord[user]:
ng = choice(itemList)
ng_id = self.data.name2id['track'][ng]
sample[n][ng_id]=1
X[n]=vec
return X, uids, sample
def buildModel(self):
self.corruption_input = tf.multiply(self.X, self.mask_corruption)
self.encoder_op = self.encoder(self.corruption_input, self.U_embed)
self.decoder_op = self.decoder(self.encoder_op)
self.y_pred = tf.multiply(self.sample, self.decoder_op)
y_true = tf.multiply(self.sample, self.corruption_input)
self.y_pred = tf.maximum(1e-6, self.y_pred)
self.loss = -tf.multiply(y_true,tf.log(self.y_pred))-tf.multiply((1-y_true),tf.log(1-self.y_pred))
self.reg_loss = self.regU*(tf.nn.l2_loss(self.weights['encoder'])+tf.nn.l2_loss(self.weights['decoder'])+
tf.nn.l2_loss(self.biases['encoder'])+tf.nn.l2_loss(self.biases['decoder']))
self.reg_loss = self.reg_loss + self.regU*tf.nn.l2_loss(self.U_embed)
self.loss = self.loss + self.reg_loss
self.loss = tf.reduce_mean(self.loss)
optimizer = tf.train.AdamOptimizer(self.lRate).minimize(self.loss)
self.sess = tf.Session()
init = tf.global_variables_initializer()
self.sess.run(init)
for epoch in range(self.maxIter):
mask = np.random.binomial(1, self.corruption_level, (self.batch_size, self.num_items))
batch_xs,users,sample = self.next_batch()
_, loss,y = self.sess.run([optimizer, self.loss, self.y_pred], feed_dict={self.X: batch_xs,self.mask_corruption:mask,self.u_idx:users,self.sample:sample})
print (self.foldInfo,"Epoch:", '%04d' % (epoch + 1),"loss=", "{:.9f}".format(loss))
# self.ranking_performance()
print("Optimization Finished!")
    def predictForRanking(self, u):
        'invoked to rank all the items for the user'
        if self.data.containsUser(u,'user'):
            uid = self.data.name2id['user'][u]
            vec = self.row(uid).reshape((1,len(self.data.TrackRecord)))
            # predictions use the full (uncorrupted) input: feed an all-ones mask
            mask = np.ones_like(vec)
            return self.sess.run(self.decoder_op, feed_dict={self.X:vec,self.mask_corruption:mask,self.u_idx:[uid]})[0]
        else:
            return [self.data.globalMean] * len(self.data.TrackRecord)
|
# _*_coding:utf-8_*_
__author__ = "Jorden Hai"
from modules import models
from modules.db_conn import engine, session
from modules.utils import print_err, json_parser
import json
def syncdb(argvs):
print("Syncing DB....")
    models.Base.metadata.create_all(engine)  # create all table structures
def create_tables(argvs):
if "-f" in argvs:
flowfile = argvs[argvs.index("-f") + 1]
else:
print_err(
"invalid usage, should be:\ncreate_hosts -f <the new hosts file>", quit=True
)
json_file = json_parser(flowfile)
obj_list = []
with open(json_file, encoding="utf-8") as load_f:
load_dict = json.load(load_f)
for key, value in load_dict.items():
print("key:", key, "value:", value)
            obj = models.PathTag(Packet_in_times=key,
                                 Dpid=value['Dpid'],
                                 Cookie=value['Cookie'],
                                 Source_address=value['Source_address'],
                                 Destination_address=value['Destination_address'],
                                 Data_len=value['Data_len'])
obj_list.append(obj)
print(obj_list)
models.Base.metadata.create_all(engine)
for var in obj_list:
session.add(var)
session.commit()
|
greet="Hello i am Aziz."
a=greet.startswith("Hello")
print(a)
a=greet.startswith("h")
print(a)
a=greet.startswith("H")
print(a)
|
from nose.tools import *
def test_foo():
return
|
def sc(strng):
seen = set(strng)
return ''.join(a for a in strng if a.swapcase() in seen)
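# sc keeps only the characters whose opposite-case counterpart also appears
# somewhere in the input, e.g.:
#   sc('aAbB')  -> 'aAbB'   (every letter has its case-swapped pair)
#   sc('Hello') -> ''       (no letter occurs in both cases)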
|
"""
test_sql_tables.py: unit test file for sql_tables.py
"""
import sys
import datetime
import json
import flask
import flask_sqlalchemy
import unittest
from unittest.mock import MagicMock, patch
sys.path.insert(1, '/home/ec2-user/environment/Distribution-System-CS490/backend')
import sql_tables
class testcases_sql_tables(unittest.TestCase):
def setUp(self):
self.result_sys_user_role = {
"id": None,
"name": "test"
}
self.result_sys_user = {
'id': None,
'name_first': 'first',
'name_last': 'last',
'sys_username': 'username',
'email_google': 'google',
'email_fb': 'facebook',
'image_url': 'url',
'phone_number': '1234567890'
}
self.result_zone = {
"id": None,
"name": "test"
}
self.result_shop_category = {
"id": None,
"type": "test"
}
self.result_shop = {
'id': None,
'name': 'Temp Store 1',
'image_url': 'url',
'email': None,
'phone_number': '1234567890',
'category': None,
'zones': [],
'street': 'Street 1',
'city': 'City 1',
'providence': 'Providence 1',
'zip_4': '12345-1234'
}
self.result_company = {
'id': None,
'name': 'test',
'image_url': 'url',
'zones': []
}
self.result_company_product = {
'id': None,
'name': 'C1 Item 1',
'price_buy': 1.0,
'price_sell': 2.0,
'units_per_price': 1,
'price_sell_per_unit': 2.0,
'image_url': 'url',
'description': None
}
self.result_shop_order = {
'id': None,
'price_due': 40614.0,
'price_paid': True,
'memo': "",
'date_delivered': None,
'order_fulfiller': None,
'completed': False,
'shop_order_items': []}
self.result_shop_order_item = {
'quantity_units': 6
}
self.result_order_taker_goal = {
'order_taker': 1,
'month': 12,
'year': 2020,
'goal_value': 10000.00
}
def test_sys_user_role_info(self):
sys_user_role = sql_tables.Sys_user_role("test")
info = sys_user_role.request_sys_user_role_info()
self.assertDictEqual(self.result_sys_user_role, info)
def test_sys_user_info(self):
session = MagicMock()
sys_user = sql_tables.Sys_user(
"first",
"last",
"username",
"password",
"google",
"facebook",
"url",
"1234567890",
1
)
info = sys_user.request_sys_user_info(session)
self.assertEqual(self.result_sys_user['id'], info['id'])
self.assertEqual(self.result_sys_user['name_first'], info['name_first'])
self.assertEqual(self.result_sys_user['name_last'], info['name_last'])
self.assertEqual(self.result_sys_user['sys_username'], info['sys_username'])
self.assertEqual(self.result_sys_user['email_google'], info['email_google'])
self.assertEqual(self.result_sys_user['email_fb'], info['email_fb'])
self.assertEqual(self.result_sys_user['image_url'], info['image_url'])
self.assertEqual(self.result_sys_user['phone_number'], info['phone_number'])
def test_zone_info(self):
zone = sql_tables.Zone("test")
info = zone.request_zone_info()
self.assertDictEqual(self.result_zone, info)
def test_shop_category(self):
shop_category = sql_tables.Shop_category("test")
info = shop_category.request_category_info()
self.assertDictEqual(self.result_shop_category, info)
def test_shop_info(self):
session = MagicMock()
shop = sql_tables.Shop(
"Temp Store 1",
None,
"url",
"1234567890",
None,
"Street 1",
"City 1",
"Providence 1",
"12345-1234"
)
info = shop.request_shop_info(session)
self.assertDictEqual(self.result_shop, info)
def test_shop_zone_info(self):
session = MagicMock()
shop_zone = sql_tables.Shop_zone(1, 1)
info = shop_zone.request_zone_info(session)
def test_company_info(self):
session = MagicMock()
company = sql_tables.Company(
"test",
"url"
)
info = company.request_company_info(session)
self.assertDictEqual(self.result_company, info)
def test_company_zone_info(self):
session = MagicMock()
company_zone = sql_tables.Company_zone(1, 1)
info = company_zone.request_zone_info(session)
def test_company_product_info(self):
session = MagicMock()
company_product = sql_tables.Company_product(
1,
"C1 Item 1",
1.00,
2.00,
1,
0,
"url",
None
)
info = company_product.request_company_product_info(session)
self.assertEqual(self.result_company_product['id'], info['id'])
self.assertEqual(self.result_company_product['name'], info['name'])
self.assertEqual(self.result_company_product['price_buy'], info['price_buy'])
self.assertEqual(self.result_company_product['price_sell'], info['price_sell'])
self.assertEqual(self.result_company_product['units_per_price'], info['units_per_price'])
self.assertEqual(self.result_company_product['price_sell_per_unit'], info['price_sell_per_unit'])
self.assertEqual(self.result_company_product['image_url'], info['image_url'])
self.assertEqual(self.result_company_product['description'], info['description'])
def test_shop_order_info(self):
session = MagicMock()
current_time_utc = datetime.datetime.now(datetime.timezone.utc)
week_forward = current_time_utc + datetime.timedelta(days=7)
shop_order = sql_tables.Shop_order(
1,
40614.00,
True,
"",
current_time_utc,
week_forward,
None,
1,
None,
False
)
info = shop_order.request_shop_order(session)
self.assertEqual(self.result_shop_order['id'], info['id'])
self.assertEqual(self.result_shop_order['price_due'], info['price_due'])
self.assertEqual(self.result_shop_order['price_paid'], info['price_paid'])
self.assertEqual(self.result_shop_order['memo'], info['memo'])
self.assertEqual(self.result_shop_order['date_delivered'], info['date_delivered'])
self.assertEqual(self.result_shop_order['order_fulfiller'], info['order_fulfiller'])
self.assertEqual(self.result_shop_order['completed'], info['completed'])
self.assertEqual(self.result_shop_order['shop_order_items'], info['shop_order_items'])
def test_shop_order_item_info(self):
session = MagicMock()
shop_order_item = sql_tables.Shop_order_item(
1,
2,
6
)
info = shop_order_item.request_shop_order_item_info(session)
self.assertEqual(self.result_shop_order_item['quantity_units'], info['quantity_units'])
def test_order_taker_goal_info(self):
session = MagicMock()
order_taker_goal = sql_tables.Order_taker_goal(
1,
12,
2020,
10000.00
)
info = order_taker_goal.request_order_taker_goal_info()
self.assertDictEqual(self.result_order_taker_goal, info)
def test_bootstraps(self):
session = MagicMock()
sql_tables.database_bootstrap(session)
if __name__ == '__main__':
unittest.main()
|
# Enter script code
keyboard.send_keys("<f6>7")
|
from socket import AF_INET, socket, SOCK_STREAM
from threading import Thread
import tkinter
import sys
client_socket = socket(AF_INET, SOCK_STREAM)
# ---------------------------------------------------------
# Define the destination address - an (ip, port) pair
# ---------------------------------------------------------
HOST = str(sys.argv[1])
USER_NAME = sys.argv[2]
PORT = 33001
BUFSIZ = 1024
# receive messages
def receber_mensagem():
while True:
try:
msg = client_socket.recv(BUFSIZ).decode('utf8')
msg_list.insert(tkinter.END, msg)
except OSError:
break
# send messages
def enviar_mensagem(event=None):
msg = my_msg.get()
    my_msg.set('')  # clear the input field
client_socket.send(bytes(msg, 'utf8'))
if msg == '{quit}':
client_socket.close()
top.quit()
# close the chat window
def fechar_chat(event=None):
my_msg.set('{quit}')
enviar_mensagem()
# send the username to the server
def nome_usuario():
client_socket.send(bytes(USER_NAME, 'utf8'))
top = tkinter.Tk()
top.title('Chat using the TCP Protocol')
top.resizable(width=True, height=True)
messages_frame = tkinter.Frame(top)
my_msg = tkinter.StringVar()
my_msg.set('')
scrollbar = tkinter.Scrollbar(messages_frame)
scrollbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)
msg_list = tkinter.Listbox(messages_frame, height=25, width=70, yscrollcommand=scrollbar.set)
msg_list.pack(side=tkinter.LEFT, fill=tkinter.BOTH)
messages_frame.pack()
entry_field = tkinter.Entry(top, textvariable=my_msg, width=65)
entry_field.bind('<Return>', enviar_mensagem)
entry_field.pack()
entry_field.focus()
send_button = tkinter.Button(top, text='Send', font='Helvetica 10 bold', width=20, command=enviar_mensagem)
send_button.pack()
top.protocol('WM_DELETE_WINDOW', fechar_chat)
client_socket.connect((HOST, PORT))
nome_usuario()
receive_thread = Thread(target=receber_mensagem)
receive_thread.start()
tkinter.mainloop()
|
import random
maxValue = 1000000
dataSize = 100000
# unlabeled data: one random value in [-100, 100] per line
with open('dataWithoutTeacher.txt', 'w') as file:
    for i in range(0, dataSize):
        file.write(str((random.randint(-maxValue, maxValue)) / (maxValue / 100)) + '\n')
    file.write("EOF")
# labeled data: the value plus a 1/0 teacher signal for its sign
with open('dataWithTeacher.txt', 'w') as file:
    for i in range(0, dataSize):
        value = (random.randint(-maxValue, maxValue)) / (maxValue / 100)
        file.write(str(value))
        if value >= 0:
            file.write(' 1\n')
        else:
            file.write(' 0\n')
    file.write("EOF")
|
import marshaltools
# test source and some keys to play with
name, src_id = 'ZTF18abjyjdz', 3329
keys = [
'classification',
'redshift',
'uploaded_spectra.observer',
'autoannotations.username',
'redshift'
]
prog = marshaltools.ProgramList("AMPEL Test", load_sources=True, load_candidates=True)
# get the source (just to see that it's in the candidates)
#src = prog.find_source(name)
#print (src)
out = prog.retrieve_from_src(name, keys)#, default=None, src_dict=None, append_summary=True, include_candidates=True):
print (out)
## pass the source name
#print ("----------------")
#out = prog.retrieve_from_src(name, keys)
#print (out)
## if you've looked in the summary (as we are doing in this example), the summary
## is downloaded at the first call, then it is simply used. Now it should be faster:
#print ("----------------")
#out = prog.retrieve_from_src(name, keys)
#print (out)
## try with some missing key and default argument. You should see a warning there.
#print ("----------------")
#keys2 = keys+['fuffa']
#out = prog.retrieve_from_src(name, keys2, default="merci")
#print (out)
## now try passing the source instead of the name
#print ("----------------")
#src = prog.find_source(name, include_candidates=False)
#out = prog.retrieve_from_src(name, keys, src_dict=src)
#print (out)
|
#!/usr/bin/env python3
"""Model construction functions."""
# Copyright (c) 2021 Mahdi Biparva, mahdi.biparva@gmail.com
# miTorch: Medical Imaging with PyTorch
# Deep Learning Package for 3D medical imaging in PyTorch
# Implemented by Mahdi Biparva, April 2021
# Brain Imaging Lab, Sunnybrook Research Institute (SRI)
import torch
from fvcore.common.registry import Registry
from torch import nn
import utils.logging as logging
logger = logging.get_logger(__name__)
MODEL_REGISTRY = Registry("MODEL")
MODEL_REGISTRY.__doc__ = """
Registry for video model.
The registered object will be called with `obj(cfg)`.
The call should return a `torch.nn.Module` object.
"""
# noinspection PyCallingNonCallable
def build_model(cfg, cur_device):
"""
Builds the video model.
Args:
cfg (configs): configs that contains the hyper-parameters to build the backbone.
cur_device (int): select the GPU id to load the model to its memory
"""
# Construct the model
name = cfg.MODEL.MODEL_NAME
model = MODEL_REGISTRY.get(name)(cfg)
if cfg.USE_GPU:
# Transfer the model to the current GPU device
model = model.cuda(device=cur_device)
else:
logger.info('Using CPU for the network operations')
if torch.cuda.device_count() > 1 and cfg.DP:
print("Let's use", torch.cuda.device_count(), "GPUs!")
model = nn.DataParallel(model)
print(model.device_ids)
elif cfg.DDP:
# Make model replica operate on the current device
model = torch.nn.parallel.DistributedDataParallel(
module=model, device_ids=[cur_device], output_device=cur_device
)
return model
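# Registration sketch: a model becomes selectable via cfg.MODEL.MODEL_NAME once
# it is decorated into MODEL_REGISTRY. 'ToyNet' and cfg.MODEL.NUM_CLASSES below
# are hypothetical, for illustration only; per the registry docstring, the class
# is called as `obj(cfg)` and must return a `torch.nn.Module`.
#
# @MODEL_REGISTRY.register()
# class ToyNet(nn.Module):
#     def __init__(self, cfg):
#         super().__init__()
#         self.head = nn.Linear(16, cfg.MODEL.NUM_CLASSES)
#     def forward(self, x):
#         return self.head(x)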
|
from django.apps import AppConfig
class KrwConfig(AppConfig):
name = 'krw'
|
from licant.modules import module, submodule, module_defimpl
from licant.scripter import scriptq
scriptq.execute("libc/libc.g.py")
scriptq.execute("std/std.g.py")
scriptq.execute("posix/posix.g.py")
scriptq.execute("gxx/debug/debug.g.py")
scriptq.execute("gxx/diag/diag.g.py")
module("gxx.util.c",
srcdir = "gxx",
sources = [
"util/numconvert.c",
"util/hexer.c",
"gstuff/gstuff.c",
"gstuff/autorecv.c",
"impl/trace.cpp",
"util/dstring.c",
"impl/sshell.c",
"impl/panic_abort.c",
"math/util.c",
"debug/delay.c",
],
mdepends=["gxx.printf"]
)
module("gxx.util.cxx",
srcdir = "gxx",
sources = [
"util/string.cpp",
"util/base64.cpp",
"util/hexascii.cpp",
],
)
module("gxx.include",
include_paths = ["."]
)
module("gxx", "posix",
srcdir = "gxx",
sources = [
"datastruct/src/tree.c",
"io/file_unix.cpp",
"io/std.cpp",
"io/src/ostream.cpp",
"osutil/src/posix.cpp",
"path/path.cpp",
],
mdepends = [
"gxx.include",
"gxx.util.c",
"gxx.util.cxx",
"gxx.syslock",
"gxx.dprint",
"gxx.print",
"gxx.trent",
"gxx.inet",
"gxx.log"
],
)
module("gxx", "windows",
srcdir = "gxx",
sources = [
"datastruct/src/tree.c",
"io/file_windows.cpp",
"io/std.cpp",
"io/src/ostream.cpp",
"osutil/src/osutil_windows.cpp",
"path/path.cpp",
],
include_modules = [ submodule("gxx.include"), submodule("gxx.util.c"), submodule("gxx.util.cxx") ],
)
module_defimpl("gxx", "posix")
module("gxx.c_only",
srcdir = "gxx",
sources = [
"datastruct/src/tree.c",
],
include_modules = [ submodule("gxx.include"), submodule("gxx.util.c")],
)
#module("gxx", "windows",
# srcdir = "gxx",
# sources = [
# "io/file_windows.cpp",
# "io/std.cpp",
# "impl/panic_abort.cpp",
#"osutil/src/windows.cpp",
# "path/path.cpp",
# "util/string.cpp",
# "util/base64.cpp",
# "util/hexascii.cpp",
# "util/numconvert.c",
# "util/hexer.c",
# ],
#include_modules = ["gxx.util_sources"],
# include_paths = ["."]
#)
#module("gxx.util",
# srcdir = "gxx",
# sources = [
# "util/string.cpp",
# "util/base64.cpp",
# "util/hexascii.cpp",
# "util/numconvert.c",
# "util/hexer.c",
# ],
#
# include_paths = ["."]
#)
#
#module("gxx.atomic_section", impl="mutex",
# srcdir = "gxx/impl",
# sources = ["atomic_section_mutex.cpp"]
#)
module("gxx.syslock", impl="mutex", default=True,
srcdir = "gxx/impl",
sources = ["syslock_mutex.cpp"]
)
module("gxx.syslock", impl="genos.atomic",
srcdir = "gxx/impl",
sources = ["syslock_genos_atomic.cpp"]
)
module("gxx.panic", impl="abort", default=True,
srcdir = "gxx/impl",
sources = ["panic_abort.c"]
)
module("gxx.serial", "posix", default=True,
srcdir = "gxx",
sources = ["serial/src/impl/unix.cpp", "serial/src/serial.cpp"]
)
module("gxx.serial", "windows",
srcdir = "gxx",
sources = ["serial/src/impl/win.cpp", "serial/src/serial.cpp"]
)
module("gxx.print", impl = "cout", default=True,
sources = ["gxx/print/src/impl/print_cout.cpp"],
)
module("gxx.print", impl = "dprint",
sources = ["gxx/print/src/impl/print_debug.cpp"],
)
module("gxx.log", impl = "posix", default=True,
sources = ["gxx/log/posix_timestamp.cpp", "gxx/log/targets/stdout.cpp"],
)
#module("gxx.log2", "stub",
# sources = ["gxx/log/src/logger_stub.cpp"],
#)
#
#module("gxx.log2", "impl",
# sources = ["gxx/log/src/logger.cpp", "gxx/log/src/synconly.cpp"],
#)
module("gxx.trent",
srcdir = "gxx/trent",
sources = ["json.cpp", "gbson.cpp", "trent.cpp"],
)
#module("gxx.geom",
# sources = ["gxx/geom/geom2.cpp", "gxx/geom/geom3.cpp", "gxx/geom/intersect.cpp", "gxx/geom/topo.cpp"],
#)
module("gxx.cxx_support",
sources = ["compiler/__cxa_pure_virtual.c"],
)
module("gxx.rabbit",
sources = ["gxx/rabbit/crvints.cpp"],
)
module("gxx.inet", "posix", default=True,
srcdir = "gxx/inet/src",
sources = [ "common.cpp", "posix.cpp" ],
)
module("gxx.madgwick",
srcdir = "gxx/math",
sources = [ "madgwick.cpp" ],
)
module("gxx.printf",
sources=["gxx/src/printf_impl.c"]
)
#module("gxx.inet", "windows",
# srcdir = "gxx/inet/src",
# sources = [ "common.cpp", "windows.cpp" ],
#)
|
import cv2
import numpy as np
cap = cv2.VideoCapture('./data/vtest.avi')
ret, first_frame = cap.read()
ret, second_frame = cap.read()
while cap.isOpened():
diff = cv2.absdiff(first_frame, second_frame)
gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
blur = cv2.GaussianBlur(gray, (5, 5), 0)
_, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
dilated = cv2.dilate(thresh, None, iterations=3)
contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
for contour in contours:
(x, y, width, height) = cv2.boundingRect(contour)
if cv2.contourArea(contour) < 700:
continue
cv2.rectangle(first_frame, (x, y), (x + width, y + height), (0, 255, 0), 2)
cv2.putText(first_frame, "Status: {}".format('Movement'), (15, 25), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2)
# cv2.drawContours(first_frame, contours, -1, (0, 255, 0), 2)
cv2.imshow("inter", first_frame)
first_frame = second_frame
    ret, second_frame = cap.read()
    if not ret:
        # stop when the video ends instead of diffing against a None frame
        break
if cv2.waitKey(40) == 27:
break
cv2.destroyAllWindows()
cap.release()
|
# Generated by Django 3.0.3 on 2020-03-30 04:10
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('shorten', '0005_auto_20200330_0358'),
]
operations = [
migrations.AlterField(
model_name='urltable',
name='no_clicks',
field=models.IntegerField(blank=True, null=True),
),
migrations.AlterField(
model_name='urltable',
name='user',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL),
),
]
|
#-*- coding: utf-8 -*-
'''
Created on Jun 14, 2011
FP-Growth FP means frequent pattern
the FP-Growth algorithm needs:
1. FP-tree (class treeNode)
2. header table (use dict)
This finds frequent itemsets similar to apriori but does not
find association rules.
@author: Peter
'''
import os
import re
# tweet text parsing function
def textParse(bigString):
    urlsRemoved = re.sub('(http:[/][/]|www.)([a-z]|[A-Z]|[0-9]|[/.]|[~])*', '', bigString)
    listOfTokens = re.split(r'\W*', urlsRemoved)
    return [tok.lower() for tok in listOfTokens if len(tok) > 2]
# data loading function
def loadDataSet():
    retDict = {}
    dataDir = '/home/shiyanlou/mylab16/twitterdata/'
    files = os.listdir(dataDir)
    # iterate over every file in the data directory
    for f in files:
        fpath = os.path.join(dataDir, f)
        # read and parse the file, then add the token set to retDict
        retDict[frozenset(textParse(open(fpath).read()))] = 1
    return retDict
# FP-tree node class
class treeNode:
    # initialize an FP-tree node
    # args: node name, count, parent node
    def __init__(self, nameValue, numOccur, parentNode):
        self.name = nameValue
        self.count = numOccur
        # link to the next node holding the same item
        self.nodeLink = None
        self.parent = parentNode
        # children nodes
        self.children = {}
    # increment the count
    def inc(self, numOccur):
        self.count += numOccur
    # display the tree as text, for debugging
    def disp(self, ind=1):
        print ' '*ind, self.name, ' ', self.count
        for child in self.children.values():
            child.disp(ind+1)
# update the nodeLink chain so it points at every instance of the item in the tree:
# walk nodeToTest's nodeLink chain to its tail, then append targetNode
def updateHeader(nodeToTest, targetNode):
    while (nodeToTest.nodeLink != None):
        nodeToTest = nodeToTest.nodeLink
    nodeToTest.nodeLink = targetNode
# grow the tree
# args: sorted frequent items, tree node, header table, count to add
def updateTree(items, inTree, headerTable, count):
    # check whether the first item is already a child of this tree node
    if items[0] in inTree.children:
        # if so, just increment the child's count
        inTree.children[items[0]].inc(count)
    else:
        # otherwise create a new node and add it to the FP-tree
        inTree.children[items[0]] = treeNode(items[0], count, inTree)
        # update the header table
        if headerTable[items[0]][1] == None:
            headerTable[items[0]][1] = inTree.children[items[0]]
        else:
            updateHeader(headerTable[items[0]][1], inTree.children[items[0]])
    # recurse on the remaining items so the FP-tree keeps growing
    if len(items) > 1:
        updateTree(items[1::], inTree.children[items[0]], headerTable, count)
# build the FP-tree
# args: dataset, minimum support
def createTree(dataSet, minSup=1):
    headerTable = {}
    # first pass over the dataset
    for trans in dataSet:
        # iterate over every item in each transaction
        for item in trans:
            # tally occurrence counts
            headerTable[item] = headerTable.get(item, 0) + dataSet[trans]
    # drop items that do not meet the minimum support
    for k in headerTable.keys():
        if headerTable[k] < minSup:
            del(headerTable[k])
    freqItemSet = set(headerTable.keys())
    # if no item meets the minimum support, bail out
    if len(freqItemSet) == 0: return None, None
    # expand the header table to hold the count plus a nodeLink pointer
    # to the first occurrence of each item
    for k in headerTable:
        headerTable[k] = [headerTable[k], None]
    # initialize the FP-tree
    retTree = treeNode('Null Set', 1, None)
    # second pass over the dataset
    for tranSet, count in dataSet.items():
        localD = {}
        # only consider the frequent items found in the first pass
        for item in tranSet:
            if item in freqItemSet:
                localD[item] = headerTable[item][0]
        # if the current transaction contains any frequent items
        if len(localD) > 0:
            # sort the frequent items in localD by descending count (see fig. 12-2, p. 227 of the book)
            orderedItems = [v[0] for v in sorted(localD.items(), key=lambda p: p[1], reverse=True)]
            # grow the FP-tree with the sorted frequent items
            updateTree(orderedItems, retTree, headerTable, count)
    # return the FP-tree and the header table
    return retTree, headerTable
# ascend from a leaf node to the root, collecting the names of every node on the path
def ascendTree(leafNode, prefixPath):
    if leafNode.parent != None:
        prefixPath.append(leafNode.name)
        ascendTree(leafNode.parent, prefixPath)
# find the conditional pattern base for a given item
def findPrefixPath(basePat, treeNode):
    condPats = {}
    # walk the item's nodeLink chain
    while treeNode != None:
        prefixPath = []
        # collect the node names met while ascending the tree
        ascendTree(treeNode, prefixPath)
        # add the prefix path to the conditional pattern base dict
        if len(prefixPath) > 1:
            condPats[frozenset(prefixPath[1:])] = treeNode.count
        treeNode = treeNode.nodeLink
    # return the conditional pattern base dict
    return condPats
# recursively mine frequent itemsets from the FP-tree
# args: FP-tree, header table, minimum support, current prefix, output list of frequent itemsets
def mineTree(inTree, headerTable, minSup, preFix, freqItemList):
    # sort the header-table items by occurrence count, ascending
    bigL = [v[0] for v in sorted(headerTable.items(), key=lambda p: p[1])]
    # iterate over the sorted items
    for basePat in bigL:
        # record the new frequent itemset
        newFreqSet = preFix.copy()
        newFreqSet.add(basePat)
        freqItemList.append(newFreqSet)
        # find the conditional pattern base
        condPattBases = findPrefixPath(basePat, headerTable[basePat][1])
        # build the conditional FP-tree
        myCondTree, myHead = createTree(condPattBases, minSup)
        # keep mining as long as the conditional tree is non-empty
        if myHead != None:
            # recursively mine the conditional FP-tree
            mineTree(myCondTree, myHead, minSup, newFreqSet, freqItemList)
initSet = loadDataSet()
minSup = 5
myFPtree, myHeaderTab = createTree(initSet, minSup)
myFreqList = []
mineTree(myFPtree, myHeaderTab, minSup, set([]), myFreqList)
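# Toy-data sanity check (a sketch, independent of the Twitter data run above;
# these are the book's simple example transactions):
simpDat = [['r', 'z', 'h', 'j', 'p'],
           ['z', 'y', 'x', 'w', 'v', 'u', 't', 's'],
           ['z'],
           ['r', 'x', 'n', 'o', 's'],
           ['y', 'r', 'x', 'z', 'q', 't', 'p'],
           ['y', 'z', 'x', 'e', 'q', 's', 't', 'm']]
toySet = dict((frozenset(trans), 1) for trans in simpDat)
toyTree, toyHeader = createTree(toySet, 3)
toyFreq = []
mineTree(toyTree, toyHeader, 3, set([]), toyFreq)
print toyFreq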
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
from django.core.checks import Error, register
@register()
def check_settings(app_configs, **kwargs):
from django.conf import settings
errors = []
if hasattr(settings, 'CHLOROFORM_DOMAIN'):
parsed = urlparse(settings.CHLOROFORM_DOMAIN)
if not parsed.scheme or parsed.path:
errors.append(Error(
                'setting CHLOROFORM_DOMAIN must be a URL with a scheme and without a path',
id='chloroform.E0001',
))
return errors
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
""" 根据二维码位置,计算出目标距离 """
import rospy
import math  # the code below calls math.sqrt, math.acos, math.cos, math.sin
from tf.transformations import *
import smach
import smach_ros
from smach_ros import SimpleActionState
from smach_ros import ServiceState
import threading
import time
# Navigation
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from nav_msgs.msg import Odometry
from geometry_msgs.msg import Twist,Vector3
# Manipulator
from geometry_msgs.msg import Pose
from open_manipulator_msgs.msg import JointPosition
from open_manipulator_msgs.msg import KinematicsPose
from open_manipulator_msgs.srv import SetJointPosition
from open_manipulator_msgs.srv import SetKinematicsPose
# AR Markers
from ar_track_alvar_msgs.msg import AlvarMarker
from ar_track_alvar_msgs.msg import AlvarMarkers
class getPoseOfTheObject(smach.State):
def __init__(self):
smach.State.__init__(self, outcomes=['succeeded', 'aborted'],
output_keys=['output_object_pose'])
self.namespace = rospy.get_param("~robot_name")
self.marker_pose_sub = rospy.Subscriber(self.namespace + '/ar_pose_marker', AlvarMarkers, self.arMarkerMsgCallback)
self.ball_pose=rospy.Subscriber("/ball_pose",Vector3,self.ball_pose_callBack)
self.goal_height = rospy.get_param("~goal_height")
self.ar_marker_pose = False
self.canot_find_times = 0
self.ball_pos=None
def arMarkerMsgCallback(self, ar_marker_pose_msg):
if len(ar_marker_pose_msg.markers) == 0:
if self.ar_marker_pose != False and self.canot_find_times < 3:
self.canot_find_times += 1
else:
self.ar_marker_pose = False
rospy.loginfo("CANNOT FIND AR POSE")
else:
self.ar_marker_pose = AlvarMarker()
self.ar_marker_pose = ar_marker_pose_msg.markers[0]
# rospy.loginfo("FIND AR POSE")
def ball_pose_callBack(self,data):
self.ball_pos=data
def execute(self, userdata):
if self.ar_marker_pose == False:
rospy.logwarn('Failed to get pose of the marker')
return 'aborted'
else:
object_pose = Pose()
object_pose.position.x =self.ar_marker_pose.pose.pose.position.x+ 0.0
object_pose.position.y =self.ar_marker_pose.pose.pose.position.y +0.0
object_pose.position.z = self.goal_height
rospy.loginfo(object_pose.position)
dist = math.sqrt((object_pose.position.x * object_pose.position.x) +
(object_pose.position.y * object_pose.position.y))
if object_pose.position.y > 0:
yaw = math.acos(object_pose.position.x / dist)
else:
yaw = (-1) * math.acos(object_pose.position.x / dist)
roll = 0.0
pitch = 0.0
cy = math.cos(yaw * 0.5)
sy = math.sin(yaw * 0.5)
cr = math.cos(roll * 0.5)
sr = math.sin(roll * 0.5)
cp = math.cos(pitch * 0.5)
sp = math.sin(pitch * 0.5)
object_pose.orientation.w = cy * cr * cp + sy * sr * sp
object_pose.orientation.x = cy * sr * cp - sy * cr * sp
object_pose.orientation.y = cy * cr * sp + sy * sr * cp
object_pose.orientation.z = sy * cr * cp - cy * sr * sp
userdata.output_object_pose = object_pose
rospy.loginfo('Succeeded to get pose of the object')
return 'succeeded'
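# Note: the hand-rolled yaw-to-quaternion math in execute() is what the
# star-imported tf.transformations module already provides; an equivalent
# one-liner sketch (returns [x, y, z, w]):
#   qx, qy, qz, qw = quaternion_from_euler(roll, pitch, yaw)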
|
#! /usr/bin/env python
# Uber's original name was "UberCab". Let's say we have a bunch of bumper stickers left over from those days
# which say "UBERCAB" and we decide to cut these up into their separate letters to make new words.
# So, for example, one sticker would give us the letters "U", "B", "E", "R", "C", "A", "B",
# which we could rearrange into other word[s] (like "car", "bear", etc)
# Challenge:
# Write a function that takes as its input an arbitrary string and as output,
# returns the number of intact "UBERCAB" stickers we would need to cut up to recreate that string.
# For instance:
# ubercab_stickers("car brace") would return "2", since we would need to cut up 2 stickers to provide enough letters to write "car brace"
import sys
UBERCAB = "UberCab"
def ubercab_stickers(phrase):
letter_usage = {}
sticker_count = 0
for letter in phrase:
if letter == " ":
continue
letter = letter.lower()
if letter in UBERCAB.lower():
            if letter in letter_usage:
                letter_usage[letter] += 1
            else:
                letter_usage[letter] = 1
stickers = (letter_usage[letter] / UBERCAB.lower().count(letter))
if letter_usage[letter] % UBERCAB.lower().count(letter) > 0:
stickers += 1
if sticker_count < stickers:
sticker_count = stickers
else:
return -1
return sticker_count
def ubercab_stickers2(phrase):
words = phrase.split(' ')
letter_usage = {}
for word in words:
for letter in word.lower():
if letter in UBERCAB.lower():
                if letter in letter_usage:
                    letter_usage[letter] += 1
                else:
                    letter_usage[letter] = 1
else:
return -1
# letter_usage has the letter usage distribution.
sticker_usage = {}
sticker_count = 0
for letter in letter_usage:
        count = UBERCAB.lower().count(letter)
        # ceiling division: a partially-used sticker still counts as a whole one
        sticker_usage[letter] = (letter_usage[letter] + count - 1) / count
if sticker_count < sticker_usage[letter]:
sticker_count = sticker_usage[letter]
return sticker_count
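# A more compact alternative (sketch): collections.Counter tallies the letters,
# and the answer is the largest per-letter ceiling ratio between what the
# phrase needs and what one sticker supplies.
from collections import Counter
def ubercab_stickers3(phrase):
    supply = Counter(UBERCAB.lower())
    needed = Counter(phrase.lower().replace(" ", ""))
    if not needed:
        return 0
    if any(letter not in supply for letter in needed):
        return -1
    return max((needed[ch] + supply[ch] - 1) // supply[ch] for ch in needed)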
#-------------------------------------------------------------------------------
if len(sys.argv) > 1:
phrase = " ".join(sys.argv[1:])
else:
phrase = "bbb"
print "computing '{}' sticker usage for: {}".format(UBERCAB,phrase)
stickers = ubercab_stickers(phrase)
if stickers > 0:
print "it takes {} sticker(s) to print '{}'".format(stickers, phrase)
else:
print "it can't be done. '{}' has extra letters not found in '{}'".format(phrase, UBERCAB)
|
from settings import *
class CrossHairs(pygame.sprite.Sprite):
def __init__(self):
pygame.sprite.Sprite.__init__(self)
self.image = pygame.image.load(os.path.join(img_folder, "crosshairs.png")).convert()
self.image = pygame.transform.scale(self.image, (30, 30))
self.image.set_colorkey(BLACK)
self.rect = self.image.get_rect()
self.rect.x = 100
self.rect.y = 100
self.pos = vec(0,0)
def update(self):
mouseState = pygame.mouse.get_pressed()
pos = pygame.mouse.get_pos()
self.rect.centerx = pos[0]
self.rect.centery = pos[1]
|
from rest_framework import serializers
from .models import Match
from teams.serializers import TeamsSerializer
class MatchSerializer(serializers.ModelSerializer):
    team1 = serializers.SerializerMethodField()
    team2 = serializers.SerializerMethodField()
    winner = serializers.SerializerMethodField()
def get_team1(self, obj):
if obj.team1 is not None:
return TeamsSerializer(obj.team1).data
return None
def get_team2(self, obj):
if obj.team2 is not None:
return TeamsSerializer(obj.team2).data
return None
def get_winner(self, obj):
if obj.winner is not None:
return TeamsSerializer(obj.winner).data
return None
class Meta:
model = Match
fields = '__all__'
|
import boto3
import hashlib
import uuid
from base64 import b64encode, b64decode
from cfn_random_bytes_provider import RandomBytesProvider
from secrets import handler
kms = boto3.client("kms")
default_length = 8
def test_defaults():
request = Request("Create", "abc")
r = RandomBytesProvider()
r.set_request(request, {})
assert r.is_valid_request()
assert not r.get("ReturnSecret")
assert r.get("KeyAlias") == "alias/aws/ssm"
assert r.get("Description") == ""
assert r.get("Length") == default_length
assert isinstance(r.get("NoEcho"), bool) and r.get("NoEcho")
def test_type_convert():
request = Request("Create", "abc")
request["ResourceProperties"]["Length"] = "62"
request["ResourceProperties"]["ReturnSecret"] = "true"
request["ResourceProperties"]["RefreshOnUpdate"] = "true"
r = RandomBytesProvider()
r.set_request(request, {})
assert r.is_valid_request()
assert r.get("Length") == 62
assert r.get("ReturnSecret")
request["ResourceProperties"]["Length"] = "fouteboole62"
r = RandomBytesProvider()
r.set_request(request, {})
assert not r.is_valid_request()
request["ResourceProperties"]["Length"] = u"62"
request["ResourceProperties"]["ReturnSecret"] = u"true"
r.set_request(request, {})
assert r.is_valid_request()
assert r.get("Length") == 62
assert r.get("ReturnSecret")
def test_create():
# create a test parameter
name = "/test/1-parameter-%s" % uuid.uuid4()
request = Request("Create", name)
request["ResourceProperties"]["ReturnSecret"] = True
request["ResourceProperties"]["Description"] = "A beautiful secret"
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert "PhysicalResourceId" in response
physical_resource_id = response["PhysicalResourceId"]
assert isinstance(physical_resource_id, str)
assert "Data" in response
assert "Secret" in response["Data"]
assert len(b64decode(response["Data"]["Secret"])) == default_length
assert "Arn" in response["Data"]
assert "Hash" in response["Data"]
assert "Version" in response["Data"]
assert "NoEcho" in response
assert response["Data"]["Arn"] == physical_resource_id
assert (
response["Data"]["Hash"]
== hashlib.md5(response["Data"]["Secret"].encode("utf8")).hexdigest()
)
assert response["Data"]["Version"] == 1
assert response["NoEcho"] == True
assert "ParameterName" in response["Data"]
assert response["Data"]["ParameterName"] == name
    # now update the key; RefreshOnUpdate is off, so the secret must stay the same
hash = response["Data"]["Hash"]
request["RequestType"] = "Update"
request["ResourceProperties"]["Length"] = "32"
request["ResourceProperties"]["RefreshOnUpdate"] = False
request["PhysicalResourceId"] = physical_resource_id
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert response["Data"]["Arn"] == physical_resource_id
assert response["Data"]["Version"] == 2
assert response["Data"]["Hash"] == hash
assert len(b64decode(response["Data"]["Secret"])) == default_length
# update the key
hash = response["Data"]["Hash"]
request["RequestType"] = "Update"
request["ResourceProperties"]["RefreshOnUpdate"] = True
request["ResourceProperties"]["Length"] = "32"
request["PhysicalResourceId"] = physical_resource_id
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert response["Data"]["Arn"] == physical_resource_id
assert response["Data"]["Version"] == 3
assert response["Data"]["Hash"] != hash
assert len(b64decode(response["Data"]["Secret"])) == 32
response = handler(request, {})
# delete the parameters
request = Request("Delete", name, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
def test_request_duplicate_create():
    # request a duplicate create
name = "/test/2-parameter-%s" % uuid.uuid4()
request = Request("Create", name)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
physical_resource_id = response["PhysicalResourceId"]
request = Request("Create", name)
response = handler(request, {})
assert response["Status"] == "FAILED", response["Reason"]
request = Request("Delete", name, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
def test_update_name():
# update parameter name
name = "/test/3-parameter-%s" % uuid.uuid4()
request = Request("Create", name)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert "PhysicalResourceId" in response
physical_resource_id = response["PhysicalResourceId"]
name_2 = "%s-2" % name
request = Request("Update", name_2, physical_resource_id)
request["ResourceProperties"]["ReturnSecret"] = True
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert "PhysicalResourceId" in response
assert "Data" in response and "Secret" in response["Data"]
assert "ParameterName" in response["Data"]
assert name_2 == response["Data"]["ParameterName"]
physical_resource_id_2 = response["PhysicalResourceId"]
assert physical_resource_id != physical_resource_id_2
# delete the parameters
request = Request("Delete", name, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
request = Request("Delete", name, physical_resource_id_2)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
def test_update_secret():
name = "k%s" % uuid.uuid4()
request = Request("Create", name)
request["ResourceProperties"]["ReturnSecret"] = True
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert "PhysicalResourceId" in response
physical_resource_id = response["PhysicalResourceId"]
secret_1 = response["Data"]["Secret"]
secure_hash = response["Data"]["Hash"]
assert secure_hash == hashlib.md5(secret_1.encode("utf8")).hexdigest()
assert len(b64decode(response["Data"]["Secret"])) == default_length
name_2 = "k2%s" % name
request = Request("Update", name_2, physical_resource_id)
request["ResourceProperties"]["RefreshOnUpdate"] = True
request["ResourceProperties"]["ReturnSecret"] = True
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
secure_hash_2 = response["Data"]["Hash"]
physical_resource_id_2 = response["PhysicalResourceId"]
assert physical_resource_id != physical_resource_id_2
secret_2 = response["Data"]["Secret"]
assert secret_1 != secret_2
assert secure_hash != secure_hash_2
# delete secrets
request = Request("Delete", name, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
request = Request("Delete", name, physical_resource_id_2)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
def test_request_duplicate_through_update():
    # create two parameters, then update one to the other's name
name = "/test/4-parameter-%s" % uuid.uuid4()
request = Request("Create", name)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
physical_resource_id = response["PhysicalResourceId"]
name_2 = "%s-2" % name
request = Request("Create", name_2)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert "PhysicalResourceId" in response
physical_resource_id_2 = response["PhysicalResourceId"]
request = Request("Update", name, physical_resource_id_2)
response = handler(request, {})
assert response["Status"] == "FAILED", response["Reason"]
# delete the parameters
request = Request("Delete", name, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
request = Request("Delete", name, physical_resource_id_2)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
def test_create_no_return_secret():
# create a test parameter
name = "/test/5-parameter-%s" % uuid.uuid4()
request = Request("Create", name)
request["ResourceProperties"]["ReturnSecret"] = False
request["ResourceProperties"]["NoEcho"] = False
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert "PhysicalResourceId" in response
physical_resource_id = response["PhysicalResourceId"]
assert "Data" in response
assert "Secret" not in response["Data"]
assert "Arn" in response["Data"]
assert "NoEcho" in response and response["NoEcho"] == False
assert response["Data"]["Arn"] == physical_resource_id
# delete the parameters
request = Request("Delete", name, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
def test_no_echo():
# create a test parameter
name = "/test/parameter-%s" % uuid.uuid4()
request = Request("Create", name)
request["ResourceProperties"]["ReturnSecret"] = True
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert "NoEcho" in response
assert response["NoEcho"] == True
physical_resource_id = response["PhysicalResourceId"]
# update NoEcho
request["PhysicalResourceId"] = physical_resource_id
request["ResourceProperties"]["NoEcho"] = False
request["RequestType"] = "Update"
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert "NoEcho" in response
assert response["NoEcho"] == False
# delete NoEcho parameter
request["RequestType"] = "Delete"
request = Request("Delete", name, physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
def test_unchanged_physical_resource_id():
name = "k%s" % uuid.uuid4()
request = Request("Create", name)
request["ResourceProperties"]["ReturnSecret"] = True
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert "PhysicalResourceId" in response
physical_resource_id = response["PhysicalResourceId"]
old_style_physical_resource_id = physical_resource_id.split("/", 2)[0] + "//" + name
request = Request("Update", name, old_style_physical_resource_id)
request["ResourceProperties"]["RefreshOnUpdate"] = True
request["ResourceProperties"]["ReturnSecret"] = True
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
assert old_style_physical_resource_id == response["PhysicalResourceId"]
# delete secrets
request = Request("Delete", name, old_style_physical_resource_id)
response = handler(request, {})
assert response["Status"] == "SUCCESS", response["Reason"]
class Request(dict):
def __init__(self, request_type, name, physical_resource_id=None):
self.update(
{
"RequestType": request_type,
"ResponseURL": "https://httpbin.org/put",
"StackId": "arn:aws:cloudformation:us-west-2:EXAMPLE/stack-name/guid",
"RequestId": "request-%s" % uuid.uuid4(),
"ResourceType": "Custom::RandomBytes",
"LogicalResourceId": "MySecret",
"ResourceProperties": {"Name": name},
}
)
self["PhysicalResourceId"] = (
physical_resource_id
if physical_resource_id is not None
else str(uuid.uuid4())
)
|
def sequenze_parametriche(n, m, k):
    '''
    Given three integers n, m and k, prints all sequences of n positive
    integers whose values are at most m and in which no integer appears
    more than k times.
    For example, for n = 3, m = 2 and k = 2 the sequences to print are:
    [1, 1, 2] [1, 2, 1] [2, 1, 1] [1, 2, 2] [2, 1, 2] [2, 2, 1]
    The algorithm must run in O(n*S(n)) time, where S(n) is the number of
    sequences to print.
    '''
def genera(count = {}, sol = []):
nonlocal interni, foglie # TEST
if len(sol) == n:
foglie += 1 # TEST
print(sol)
else:
interni += 1 # TEST
for i in range(1, m + 1):
if i not in count:
count[i] = 0
if count[i] < k:
count[i] += 1
sol.append(i)
genera()
sol.pop()
count[i] -= 1
foglie = 0 # TEST
interni = 0 # TEST
genera()
return interni, foglie # TEST
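# A small usage sketch (my addition, not part of the original exercise): prints the
# six sequences from the docstring example and the internal/leaf counters used for testing.
if __name__ == '__main__':
    interni, foglie = sequenze_parametriche(3, 2, 2)
    print('internal nodes:', interni, 'leaves:', foglie)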
|
import numpy as np
import pandas as pd
try:
from ..dbutil.temptable_template import temptable_template
except ImportError:
from dbutil.temptable_template import temptable_template
def order_filter(trips, legs, points, trip_link, point_meta, DB, CONFIG):
"""
    Given the dataframes describing trips and the associated points, this function determines whether a point should be
    tagged as out of order. The SQL statement snaps each point to the candidate train route, then determines the spatial
    order and the temporal order of these points. If the two orders disagree too much (measured against the standard
    deviation of the order difference) the point is flagged.
    :param trips: The trips dataframe
    :param legs: The legs dataframe
    :param points: The points dataframe
    :param trip_link: The trip_link dataframe
    :param point_meta: The point_meta dataframe (without the order_filter column)
    :param DB: A database connection wrapper
    :param CONFIG: A parsed config file pointing to the relevant application.ini
    :return: A dataframe containing the itinerary_id, segment_id, point_id, and the outlier flag (boolean)
    """
ORDER_FILTER_CUTOFF = CONFIG.getfloat('params', 'ORDER_FILTER_CUTOFF')
ORDER_FILTER_MIN_STD = CONFIG.getfloat('params', 'ORDER_FILTER_MIN_STD')
legs = legs.reset_index()
# Join all the dfs together to make a super dataframe from which I can get my data!
points_joined = legs.merge(trip_link.reset_index(), on='leg_id')
points_joined = points_joined.merge(point_meta.reset_index(), on='segment_id')
points_joined = points_joined.merge(points.reset_index(), on='point_id')
points_joined = points_joined.merge(trips.reset_index(), on='mot_segment_id', suffixes=('_legs', '_trips'))
sql = DB.get_query('temporaryooo', __file__)
sql = temptable_template(points_joined[['itinerary_id', 'leg_id', 'segment_id', 'point_id', 'route_name', 'agency_id',
'stop_id_start_legs', 'stop_id_end_legs', 'time', 'lat', 'lon']], sql, DB)
sql += DB.get_query('getgeoms', __file__)
# points_joined.to_excel('2.xlsx')
df = DB.postgres2pandas(sql)
# We first define how different the data order is from the time order (i.e. the true order)
df['orderdiff'] = (df['data_order'] - df['time_order']).abs()
# df.sort(columns=['point_id','time_order','data_order']).to_excel('2.xlsx')
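    # The float cast below presumably keeps the groupby aggregation working on an
    # empty frame (object-dtype columns would otherwise break mean/std); this is my
    # reading, not documented in the original.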
if df.empty:
grouped_df = df.astype(float).groupby('leg_id')
else:
grouped_df = df.groupby('leg_id')
# Calculate the stats for this trip, how out of order is the average point?
    itinerary_stats = grouped_df['orderdiff'].agg(orderdiff_mean='mean', orderdiff_std='std').reset_index()
# print itinerary_stats
df = df.merge(itinerary_stats, on='leg_id')
# We don't want the standard deviation to be too small or most points could get flagged as out of order. Make it a
# minimum of ORDER_FILTER_MIN_STD (at the time of coding, this is 2)
    df.loc[df['orderdiff_std'] < ORDER_FILTER_MIN_STD, 'orderdiff_std'] = ORDER_FILTER_MIN_STD
# How many standard deviations out of order is this point? If it's greater than ORDER_FILTER_CUTOFF then flag it
# as an ooo outlier by setting ooo_outlier=True
df['ooo_outlier'] = ((df.orderdiff - df.orderdiff_mean) / df.orderdiff_std) >= ORDER_FILTER_CUTOFF
return df[['itinerary_id', 'segment_id', 'point_id', 'ooo_outlier']]
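# A tiny self-contained illustration of the outlier rule above (my sketch, not part
# of the pipeline). The cutoff (1.5) and minimum std (2) are hypothetical stand-ins
# for the ORDER_FILTER_CUTOFF / ORDER_FILTER_MIN_STD config values.
if __name__ == '__main__':
    demo = pd.DataFrame({'leg_id': [1] * 5,
                         'data_order': [1, 2, 3, 4, 9],
                         'time_order': [1, 2, 3, 4, 5]})
    demo['orderdiff'] = (demo['data_order'] - demo['time_order']).abs()
    stats = demo.groupby('leg_id')['orderdiff'].agg(
        orderdiff_mean='mean', orderdiff_std='std').reset_index()
    demo = demo.merge(stats, on='leg_id')
    demo.loc[demo['orderdiff_std'] < 2, 'orderdiff_std'] = 2
    demo['ooo_outlier'] = ((demo.orderdiff - demo.orderdiff_mean) / demo.orderdiff_std) >= 1.5
    print(demo[['data_order', 'time_order', 'ooo_outlier']])  # only the last point is flagged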
|
def main():
lst = []
for a in range(2,101):
for b in range(2,101):
lst.append(a**b)
return len(set(lst))
if __name__=='__main__':
print(main())
|
# tg
from flask import Blueprint
tg = Blueprint('tg', __name__)
from . import views
|
from PIL import Image, ImageDraw, ImageOps, ImageColor
# Super sample and then downscale to reduce jaggy edges.
multiplier = 2
# Wallpaper Parameters
x = 3440 * multiplier
y = 1440 * multiplier
# Colours
colour1 = ImageColor.getrgb("#1047A9")
colour2 = ImageColor.getrgb("#E20048")
colour3 = ImageColor.getrgb("#DCF900")
background = ImageColor.getrgb("#23262A")
# Load center logo
logo = Image.open("logoSR.png")
# Find center of primary circle
cx = x // 2
cy = y // 2
# Set initial radius
cr1 = int(y/4.3)
# calculate line width and circle width
width = cr1 // 5
cwidth = cr1 // 8
# Calculate space between lines
spacing = width + cwidth
# Extend x line 1/10 further on both sides to ensure lines fully draw
linebuffer = x // 10
# Create initial image
image = Image.new('RGB', (x, y), background)
draw = ImageDraw.Draw(image)
# calculate start and end points from middle line
endy = - linebuffer
endx = cx + (cy-endy)
startx = - linebuffer
starty = cy + (cx-startx)
# draw initial lines
draw.line([(startx, starty), (endx, endy)], colour2, width)
# Draw lines offset by spacing
draw.line([(startx, starty-spacing), (endx, endy-spacing)], colour1, width)
draw.line([(startx, starty+spacing), (endx, endy+spacing)], colour3, width)
# Calculate radius for smaller circles
cr2 = cr1 - cwidth
cr3 = cr2 - cwidth
cr4 = cr3 - cwidth
# draw circles
draw.ellipse((cx-cr1, cy-cr1, cx+cr1, cy+cr1), fill=colour1)
draw.ellipse((cx-cr2, cy-cr2, cx+cr2, cy+cr2), fill=colour2)
draw.ellipse((cx-cr3, cy-cr3, cx+cr3, cy+cr3), fill=background)
# draw.ellipse((cx-cr4, cy-cr4, cx+cr4, cy+cr4), fill=background)
# Calculate and resize logo to fit inside center circle using radius of largest circle.
logowidth, logoheight = logo.size
logo = ImageOps.fit(logo,
                    (cr1 + cwidth, (cr1 + cwidth) * logoheight // logowidth),
                    Image.LANCZOS)
logowidth, logoheight = logo.size
# Passing logo twice since it indicates a mask that will be used to paste
# the image. If you pass a image with transparency, then the alpha channel
# is used as mask.
image.paste(logo, (cx - logowidth // 2, cy - logoheight // 2), logo)
# Scale image down from multiplier
image = ImageOps.fit(image, (x // multiplier, y // multiplier), Image.LANCZOS)
# Save and output image.
image.save("outputSR.png")
|
import subprocess
import shlex
import math
import os
import sys
import stat
import time
import getopt
class Line:
def __init__(self, num, m, b):
self.num = num
self.m = m
self.b = b
self.visible = True
def __str__(self):
return str(self.num) + ": " + str(self.m) + "x " + "+ " + str(self.b)
debug_filename = "debug.txt"
debug_file = open(debug_filename, "w")
def usage():
print("Usage:")
print("-d: debug")
print("-s: silent")
print("-t: timing")
print("-i: input file")
print("-o: output file")
# function: hidden_by_intersect
# takes: 3 Lines, i, j, and k
# returns: True if the intersection of j and k is strictly above i, otherwise False
def hidden_by_intersect(i, j, k):
    return i.m * (j.b - k.b) + i.b * (k.m - j.m) < j.m * (j.b - k.b) + j.b * (k.m - j.m)
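# Derivation note (my annotation): lines j and k cross at x* = (k.b - j.b) / (j.m - k.m).
# Line i lies strictly below that crossing iff i.m * x* + i.b < j.m * x* + j.b; multiplying
# through by (k.m - j.m), which is positive in every call below (k always has the larger
# slope, since the inputs are sorted by slope), gives the division-free test above.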
# function: MergeVisible
# takes: 2 arrays a and b of visible lines
# returns an array of the visible lines
def MergeVisible(a, b):
j = 0
k = 1
l = -2
m = -1
if debug:
debug_file.write("Start MergeVisible\n")
debug_file.write("a: \n")
for line in a:
debug_file.write(str(line) + "\n")
debug_file.write("b: \n")
for line in b:
debug_file.write(str(line) + "\n")
if len(a) == len(b) == 1:
if debug:
debug_file.write("Length a and b are 1\n")
return [a[0], b[0]]
if len(a) == 1:
if debug:
debug_file.write("Length of a is 1\n")
for line in b:
debug_file.write(str(line) + "\n")
if hidden_by_intersect(b[l], a[j], b[m]):
return MergeVisible(a, [b[m]])
else:
c = b.pop(m)
return MergeVisible(a, b) + [c]
if len(b) == 1:
if debug:
debug_file.write("Length of b is 1\n")
if hidden_by_intersect(a[k], a[j], b[m]):
return MergeVisible([a[j]], b)
else:
c = a.pop(j)
return [c] + MergeVisible(a, b)
if debug:
debug_file.write(str(a[j]) + "\n" + str(a[k]) + "\n" + str(b[l]) + "\n" + str(b[m]) + "\n")
if hidden_by_intersect(a[k], a[j], b[m]):
if debug:
debug_file.write("Line " + str(a[k]) + " is hidden by the intersection of lines " + str(a[j]) + " and " + str(b[m]) + "\n")
a[k].visible = False
if hidden_by_intersect(b[l], a[j], b[m]):
if debug:
debug_file.write("Line " + str(b[l]) + " is hidden by the intersection of lines " + str(a[j]) + " and " + str(b[m]) + "\n")
b[l].visible = False
if b[l].visible == a[k].visible == False:
if debug:
debug_file.write("Lines " + str(b[l]) + " and " + str(a[k]) + " are not visible\n")
a.pop(k)
b.pop(l)
return MergeVisible(a, b)
if a[k].visible == False and b[l].visible == True:
a.pop(k)
c = b.pop(m)
return MergeVisible(a, b) + [c]
if b[l].visible == False and a[k].visible == True:
c = a.pop(j)
b.pop(l)
return [c] + MergeVisible(a, b)
if b[l].visible == a[k].visible == True:
if hidden_by_intersect(a[k], a[j], b[l]):
a.pop(k)
c = b.pop(m)
return MergeVisible(a, b) + [c]
elif hidden_by_intersect(b[l], a[k], b[m]):
c = a.pop(j)
b.pop(l)
return [c] + MergeVisible(a, b)
else:
c = a.pop(j)
d = b.pop(m)
return [c] + MergeVisible(a, b) + [d]
# function: Algorithm4
# takes: an array of lines sorted by slope in increasing order
# returns: an array of lines that are visible
def Algorithm4(lines):
if debug:
print(math.floor(len(lines)/2))
print(math.floor(len(lines)/2) + 1)
        print()
if len(lines) <= 2:
return lines
p = Algorithm4(lines[:math.floor(len(lines)/2) + 1])
q = Algorithm4(lines[math.floor(len(lines)/2) + 1:])
return MergeVisible(p, q)
# function: read_line_data
# takes: a file object
# returns: a list of lists of line objects - each list is a problem set
def read_line_data(file):
all_problems = []
for line in file:
problem = []
line_data = eval(line)
slopes = line_data[0]
intercepts = line_data[1]
for num in list(range(len(slopes))):
problem.append(Line(num, slopes[num], intercepts[num]))
all_problems.append(problem)
return all_problems
# function: create_solution_array
# takes: array of visible lines and length of input array
# returns: array of true false for visible and non-visible lines
def create_solution_array(solution, length):
sol = [False] * length
for line in solution:
sol[line.num] = True
return sol
def main():
try:
opts, args = getopt.getopt(sys.argv[1:], 'dstui:o:', ['usage','debug='])
    except getopt.GetoptError:
usage()
sys.exit(2)
input_filename = "test_set.txt"
output_filename = "output.txt"
global debug
debug = False
timing = False
for o, a in opts:
if o == "-d":
debug = True
debug_file = open(debug_filename, 'w')
elif o == "-s":
silent = True
elif o == "-t":
timing = True
elif o == "-i":
input_filename = a
elif o == "-o":
output_filename = a
elif o in ("-u", "--usage"):
usage()
sys.exit(1)
input_file = open(input_filename, 'r')
output_file = open(output_filename, 'w')
problems = read_line_data(input_file)
input_file.close()
for problem in problems:
start = time.time()
sol = Algorithm4(problem)
end = time.time()
if timing:
print(str(end-start))
solutions = create_solution_array(sol, len(problem))
output_file.write(str(solutions).strip('[]') + '\n')
output_file.close()
if __name__ == '__main__':
main()
|
# -*- generated by 1.0.12 -*-
import da
PatternExpr_424 = da.pat.TuplePattern([da.pat.ConstantPattern('CTL_Ready')])
PatternExpr_429 = da.pat.FreePattern('source')
PatternExpr_451 = da.pat.TuplePattern([da.pat.ConstantPattern('CTL_Done'), da.pat.FreePattern('rudata'), da.pat.FreePattern('rugroup_id')])
PatternExpr_460 = da.pat.FreePattern('source')
PatternExpr_689 = da.pat.TuplePattern([da.pat.ConstantPattern('CTL_Start')])
PatternExpr_744 = da.pat.TuplePattern([da.pat.ConstantPattern('CTL_Terminate')])
PatternExpr_766 = da.pat.TuplePattern([da.pat.ConstantPattern('CTL_Stop')])
PatternExpr_694 = da.pat.TuplePattern([da.pat.FreePattern(None), da.pat.TuplePattern([da.pat.FreePattern(None), da.pat.FreePattern(None), da.pat.FreePattern(None)]), da.pat.TuplePattern([da.pat.ConstantPattern('CTL_Start')])])
PatternExpr_749 = da.pat.TuplePattern([da.pat.FreePattern(None), da.pat.TuplePattern([da.pat.FreePattern(None), da.pat.FreePattern(None), da.pat.FreePattern(None)]), da.pat.TuplePattern([da.pat.ConstantPattern('CTL_Terminate')])])
_config_object = {}
import sys
import time
import json
from itertools import chain
class WinResourceUsageData():
'Tracks process time only.'
def start(self):
self.start_cputime = time.process_time()
def end(self):
self.end_cputime = time.process_time()
self.results = {'Total_process_time': (self.end_cputime - self.start_cputime)}
@classmethod
def aggregate(cls, rudata_points):
return {'Total_process_time': sum((p.results['Total_process_time'] for p in rudata_points)), 'Total_processes': len(rudata_points)}
class PosixResourceUsageData():
'Tracks utime, stime, and maxrss.'
def start(self):
self.start_data = resource.getrusage(resource.RUSAGE_SELF)
def end(self):
self.end_data = resource.getrusage(resource.RUSAGE_SELF)
def diff(attr):
return (getattr(self.end_data, attr) - getattr(self.start_data, attr))
self.results = {'Total_user_time': diff('ru_utime'), 'Total_system_time': diff('ru_stime'), 'Total_process_time': (diff('ru_utime') + diff('ru_stime')), 'Total_memory': self.end_data.ru_maxrss}
@classmethod
def aggregate(cls, rudata_points):
def sumof(attr):
return sum((p.results[attr] for p in rudata_points))
aggr_results = {k: sumof(k) for k in ['Total_user_time', 'Total_system_time', 'Total_process_time', 'Total_memory']}
aggr_results['Total_processes'] = len(rudata_points)
return aggr_results
if (sys.platform == 'win32'):
ResourceUsageData = WinResourceUsageData
else:
import resource
ResourceUsageData = PosixResourceUsageData
class Controller(da.DistProcess):
def __init__(self, procimpl, props):
super().__init__(procimpl, props)
self._events.extend([da.pat.EventPattern(da.pat.ReceivedEvent, '_ControllerReceivedEvent_0', PatternExpr_424, sources=[PatternExpr_429], destinations=None, timestamps=None, record_history=None, handlers=[self._Controller_handler_423]), da.pat.EventPattern(da.pat.ReceivedEvent, '_ControllerReceivedEvent_1', PatternExpr_451, sources=[PatternExpr_460], destinations=None, timestamps=None, record_history=None, handlers=[self._Controller_handler_450])])
def setup(self, nprocs, threshold=None, **rest_831):
super().setup(nprocs=nprocs, threshold=threshold, **rest_831)
self._state.nprocs = nprocs
self._state.threshold = threshold
if (self._state.threshold is None):
self._state.threshold = self._state.nprocs
self._state.ps = set()
self._state.done_ps = set()
self._state.readys = 0
self._state.dones = 0
self._state.sent_stop = False
self._state.rudata_points = {}
self._state.ctl_verbose = True
def run(self):
super()._label('_st_label_534', block=False)
_st_label_534 = 0
while (_st_label_534 == 0):
_st_label_534 += 1
if (self._state.readys == self._state.nprocs):
_st_label_534 += 1
else:
super()._label('_st_label_534', block=True)
_st_label_534 -= 1
self.verboutput('Controller starting everyone')
t1 = time.perf_counter()
self.send(('CTL_Start',), to=self._state.ps)
super()._label('_st_label_554', block=False)
_st_label_554 = 0
while (_st_label_554 == 0):
_st_label_554 += 1
if (self._state.dones == self._state.nprocs):
_st_label_554 += 1
else:
super()._label('_st_label_554', block=True)
_st_label_554 -= 1
t2 = time.perf_counter()
self.verboutput('Everyone done')
self.send(('CTL_Terminate',), to=self._state.ps)
jsondata = {}
for (rugroup_id, points) in self._state.rudata_points.items():
if (rugroup_id is None):
continue
jsondata[rugroup_id] = ResourceUsageData.aggregate(points)
allpoints = list(chain(*self._state.rudata_points.values()))
jsondata['All'] = ResourceUsageData.aggregate(allpoints)
jsondata['Wallclock_time'] = (t2 - t1)
jsonoutput = json.dumps(jsondata)
print(('###OUTPUT: ' + jsonoutput))
'\n f = open("stats.txt", "a")\n f.write("Total Processes:" + str(jsondata[\'All\'][\'Total_processes\']) + "\n")\n f.write("Total User Time:" + str(jsondata[\'All\'][\'Total_user_time\']) + "\n")\n f.write("Total System Time:" + str(jsondata[\'All\'][\'Total_system_time\']) + "\n")\n f.write("Total Process Time:" + str(jsondata[\'All\'][\'Total_process_time\']) + "\n")\n f.write("Total Memory:" + str(jsondata[\'All\'][\'Total_memory\']) + "\n")\n f.write("Total Wall Clock Time:" + str(jsondata[\'Wallclock_time\']) + "\n")\n f.write("\n\n")\n f.close()\n '
def verboutput(self, s):
if self._state.ctl_verbose:
self.output(s)
def _Controller_handler_423(self, source):
self._state.ps.add(source)
self._state.readys += 1
self.verboutput('Got Ready from {} ({}/{})'.format(source, self._state.readys, self._state.nprocs))
_Controller_handler_423._labels = None
_Controller_handler_423._notlabels = None
def _Controller_handler_450(self, rudata, rugroup_id, source):
self._state.dones += 1
self._state.done_ps.add(source)
self._state.rudata_points.setdefault(rugroup_id, []).append(rudata)
if (self._state.threshold == self._state.nprocs):
self.verboutput('Got Done from {} ({}/{})'.format(source, self._state.dones, self._state.nprocs))
else:
self.verboutput('Got Done from {} ({}/{}, need {} to stop)'.format(source, self._state.dones, self._state.nprocs, self._state.threshold))
if ((self._state.dones >= self._state.threshold) and (not self._state.sent_stop)):
rest_ps = (self._state.ps - self._state.done_ps)
self.verboutput('Controller stopping everyone')
self.send(('CTL_Stop',), to=rest_ps)
self._state.sent_stop = True
_Controller_handler_450._labels = None
_Controller_handler_450._notlabels = None
class Controllee(da.DistProcess):
def __init__(self, procimpl, props):
super().__init__(procimpl, props)
self._ControlleeReceivedEvent_0 = []
self._ControlleeReceivedEvent_1 = []
self._events.extend([da.pat.EventPattern(da.pat.ReceivedEvent, '_ControlleeReceivedEvent_0', PatternExpr_689, sources=None, destinations=None, timestamps=None, record_history=True, handlers=[]), da.pat.EventPattern(da.pat.ReceivedEvent, '_ControlleeReceivedEvent_1', PatternExpr_744, sources=None, destinations=None, timestamps=None, record_history=True, handlers=[]), da.pat.EventPattern(da.pat.ReceivedEvent, '_ControlleeReceivedEvent_2', PatternExpr_766, sources=None, destinations=None, timestamps=None, record_history=None, handlers=[self._Controllee_handler_765])])
def setup(self, ctl, **rest_831):
super().setup(ctl=ctl, **rest_831)
self._state.ctl = ctl
self._state.rudata = ResourceUsageData()
self._state.ctl_verbose = True
self._state.ctl_done = False
def run(self):
pass
def verboutput(self, s):
if self._state.ctl_verbose:
self.output(s)
def ctl_begin(self):
self.send(('CTL_Ready',), to=self._state.ctl)
super()._label('_st_label_686', block=False)
_st_label_686 = 0
while (_st_label_686 == 0):
_st_label_686 += 1
if PatternExpr_694.match_iter(self._ControlleeReceivedEvent_0, SELF_ID=self._id):
_st_label_686 += 1
else:
super()._label('_st_label_686', block=True)
_st_label_686 -= 1
self._state.rudata.start()
def ctl_end(self):
self._state.ctl_done = True
self._state.rudata.end()
rugroup_id = getattr(self._id, 'ctl_rugroup_id', None)
self.send(('CTL_Done', self._state.rudata, rugroup_id), to=self._state.ctl)
super()._label('_st_label_741', block=False)
_st_label_741 = 0
while (_st_label_741 == 0):
_st_label_741 += 1
if PatternExpr_749.match_iter(self._ControlleeReceivedEvent_1, SELF_ID=self._id):
_st_label_741 += 1
else:
super()._label('_st_label_741', block=True)
_st_label_741 -= 1
self.verboutput('Terminating...')
def _Controllee_handler_765(self):
self.verboutput('Received stop')
if self._state.ctl_done:
return
self.ctl_end()
self.exit()
_Controllee_handler_765._labels = None
_Controllee_handler_765._notlabels = None
def run(func):
'Decorator for Process.run() to call controllee hooks.'
def ctl_run(self):
self.ctl_begin()
func(self)
self.ctl_end()
return ctl_run
def rugroup(rugroup_id):
'Decorator for annotating a process controllee subclass\n with a resource usage group identifier. Results for processes\n in the same group will be aggregated reported together.\n '
def f(proc):
proc.ctl_rugroup_id = rugroup_id
return proc
return f
|
from fastapi import APIRouter, Depends, HTTPException
from sqlalchemy.orm import Session
from app.api import deps
from app import models, schemas, crud
route = APIRouter()
@route.post('/', response_model=schemas.User)
def create_user(user: schemas.UserCreate, db: Session = Depends(deps.get_db)) -> models.User:
return crud.user.create(obj_in=user, db=db)
@route.get('/{user_id}', response_model=schemas.User)
def get_user(user_id: int, db: Session = Depends(deps.get_db)) -> models.User:
user = crud.user.get(db=db, id=user_id)
if not user:
raise HTTPException(status_code=404, detail="User Not Found")
return user
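# Wiring sketch (illustrative, not part of this module): the router is expected to be
# mounted on the application elsewhere, e.g.
#
#   from fastapi import FastAPI
#   app = FastAPI()
#   app.include_router(route, prefix='/users', tags=['users'])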
|
# encoding: utf-8
from tastypie.validation import *
|
# Generated by Django 2.2.12 on 2020-05-06 12:15
from django.db import migrations, models
import upload.models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='ImageBed',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image_path', models.ImageField(upload_to=upload.models.hashed_bed_image_path)),
('origin_name', models.CharField(max_length=20)),
],
),
migrations.CreateModel(
name='ImageTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('count', models.IntegerField(default=0)),
('tag_name', models.CharField(max_length=7)),
],
),
migrations.CreateModel(
name='MyImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('image_path', models.ImageField(upload_to=upload.models.hashed_mine_image_path)),
('origin_name', models.CharField(max_length=20)),
('authority', models.CharField(default='075', max_length=4)),
('tags', models.ManyToManyField(to='upload.ImageTag')),
],
options={
'ordering': ['image_path'],
},
),
]
|
from collections import defaultdict
import sys
# arguments should be the length of the mer and hla types
mer_len = int(sys.argv[1]) #for example 8
peptide_len = int(sys.argv[2]) #for example 15
# Obtain a dictionary of score
score_dict = defaultdict(list) #key is the peptide and values are scores
with open(sys.argv[3], 'r') as f:
for line in f:
if not line.startswith('allele'):
tempLine = line.rstrip('\n').split('\t')
if "netmhcpan" in tempLine[6]:
score_dict[tempLine[5]].append(float(tempLine[14]))
elif len(tempLine) < 9:
score_dict[tempLine[5]].append(float(tempLine[6]))
else:
score_dict[tempLine[5]].append(float(tempLine[8]))
transcript = ""
trans_ = ""
counter = 0
mutant_Map = defaultdict(list) # Key is the transcript name and values are all the possible x-mers
with open(sys.argv[4]) as f: # open A7-A26G.15.txt
for line in f:
seq = ""
if ">" in line:
transcript = line.strip().replace(">", "")
trans_ = transcript.replace("MT.", "").replace("WT.", "")
counter += 1
else:
seq = line.strip()
counter += 1
if counter % 2 == 0:
if "MT." in transcript:
peptideScore = defaultdict()
                # enumerate every mer_len-long window of the peptide
                all_mer_len_peptide = [seq[i:i+mer_len] for i in range(peptide_len - mer_len + 1)]
# print all_mer_len_peptide
# for i in all_mer_len_peptide:
# mutant_Map[trans_].append(i)
for mer_len_peptide in all_mer_len_peptide:
# print score_dict[mer_len_peptide]
if len(score_dict[mer_len_peptide]) != 0:
# print min(score_dict[mer_len_peptide])
peptideScore[mer_len_peptide] = min(score_dict[mer_len_peptide])
# print peptideScore
out = min(peptideScore.items(), key=lambda l: l[1])
out_to_print = [out[0], str(out[1])]
                print('\t'.join(out_to_print))
# print mutant_Map
# peptide = 'TFRHSVVVPHEPPEVGSDC'
# length = 10
# seqs = [peptide[i:i+length] for i in range(length)]
# # print seqs
# dict = defaultdict(list)
# with open("c://Users/tuyen/Documents/github_repo/Neoepitope_Prediction/A7-A26G/HLA-A_03-01/output_IEDB.19.txt", 'r') as f:
# for line in f:
# if not line.startswith('allele'):
# items = line.rstrip('\n').split('\t')
# dict[items[5]].append(float(items[8]))
# print dict
# peptideScore = defaultdict()
# for seq in seqs:
# peptideScore[seq] = min(dict[seq])
# print peptideScore
# print min(peptideScore.items(), key=lambda l: l[1])
# all_scores_flat = [item for sublist in all_scores for item in sublist]
# print all_scores_flat
# print min(all_scores_flat)
|
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
import jinja2
import webapp2
JINJA_ENVIRONMENT = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
extensions=['jinja2.ext.autoescape'],
autoescape=True)
class LoginHandler(webapp2.RequestHandler):
def get(self):
if self.request.get('pass') == 'password':
template_values = {
'data': 'private page',
'more_data': 'bar',
}
template = JINJA_ENVIRONMENT.get_template('templates/page.html')
self.response.write(template.render(template_values))
else:
template = JINJA_ENVIRONMENT.get_template('templates/login.html')
self.response.write(template.render())
class LinkHandler(webapp2.RequestHandler):
def get(self):
template_values = {
'data': 'lorem ipsum',
'more_data': 'bar',
}
template = JINJA_ENVIRONMENT.get_template('templates/page.html')
self.response.write(template.render(template_values))
routes = [
('/link1', LinkHandler),
('/link2', LinkHandler),
('/login', LoginHandler),
]
app = webapp2.WSGIApplication(routes, debug=True)
def handle_404(request, response, exception):
template = JINJA_ENVIRONMENT.get_template('templates/404.html')
response.set_status(404)
response.write(template.render())
app.error_handlers[404] = handle_404
|
# from selenium.common.exceptions import TimeoutException
# from selenium.webdriver.chrome.webdriver import WebDriver
#
# from pages.ldma import ParseLinkBudget
# from pages.base import BasePage
# from rich.traceback import install
# from rich.table import Table
# from rich.panel import Panel
# from rich.align import Align
# from rich.live import Live
# from rich import print
#
# # RICH Implementation for providing beautiful CLI visuals
# install() # Traceback install
# table = Table(title="Parser Status", show_lines=True)
# table.add_column("ROW ID", style="cyan", no_wrap=True, justify="center")
# table.add_column("LINK ID/SIDE CODE", style="green", justify="center")
# table.add_column("STATUS", justify="center", style="green")
#
# panel = Panel(Align.center(table, vertical="middle"), border_style="cyan")
#
#
# class LDMA_Parser(BasePage):
# """ LinkBudget Parser """
#
# def __init__(self, driver: WebDriver) -> None:
# super().__init__(driver)
#
# def parse_link_budget(self, link_codes: list[str], site_codes: list[str]):
# """ Parse the Link Budget """
# if link_codes is not None:
# parse_info = ParseLinkBudget(driver=self.driver, timeout=3)
# parse_info.login_ldma()
# parse_info.make_dir()
# with Live(panel, refresh_per_second=1):
# try:
# for _index, _link_code in enumerate(link_codes):
# parse_info.goto_links()
# parse_info.insert_link_code(_link_code)
# parse_info.select_all_dropdown()
# parse_info.click_search()
# try:
# parse_info.select_found_link_code(_link_code)
# table.add_row(f"{(_index + 1)}", f"{_link_code}", "✅")
# except TimeoutException:
# table.add_row(f"{(_index + 1)}", f"{_link_code}", "❌")
# continue
# # parse_info.export_pdf_file(id) # Export As PDF
# parse_info.export_file(_link_code) # Export As HTML
# # parse_info.export_word_file(id) # Export As DOC
# # parse_info.delete_html_file(id) # Delete the Exported HTML file
# parse_info.logout_ldma()
# self.driver.quit()
# except Exception as error:
# print(error)
# else:
# parse_info = ParseLinkBudget(driver=self.driver, timeout=3)
# parse_info.login_ldma()
# parse_info.make_dir()
#
# with Live(panel, refresh_per_second=1):
# for _index, _site_code in enumerate(site_codes):
# parse_info.goto_links()
# parse_info.select_all_dropdown()
# parse_info.insert_site_code_1(_site_code)
# parse_info.click_search()
# if parse_info.is_available_site_1():
# _link_id = parse_info.get_link_id()
# parse_info.search_lb_with_sitecode(_site_code)
# parse_info.export_file(_link_id)
# table.add_row(f"{(_index + 1)}", f"{_site_code}", "✅")
# continue
# parse_info.clear_site_code_1()
# parse_info.insert_site_code_2(_site_code)
# parse_info.click_search()
# if parse_info.is_available_site_2():
# _link_id = parse_info.get_link_id()
# parse_info.search_lb_with_sitecode(_site_code)
# parse_info.export_file(_link_id)
# table.add_row(f"{(_index + 1)}", f"{_site_code}", "✅")
# continue
# else:
# table.add_row(f"{(_index + 1)}", f"{_site_code}", "❌")
# parse_info.logout_ldma()
# self.driver.quit()
|
# Python backend for go-lite-bot, the small Go bot
# Should really use the fcntl package with the flock() function
# to place locks on the files being read. Setting read/write locks as appropriate
# will allow multiple instances to access the same filesystem for reading and
# writing files. A really extensible solution might access a database
# instead, but that's not necessary for now.
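# A minimal sketch of the shared/exclusive locking described above (illustrative;
# the helpers below currently take LOCK_EX for reads as well):
#
#   with open(path, 'rb') as f:
#       fcntl.flock(f, fcntl.LOCK_SH)   # shared lock: many concurrent readers
#       data = pickle.load(f)
#       fcntl.flock(f, fcntl.LOCK_UN)
#
#   with open(path, 'wb') as f:
#       fcntl.flock(f, fcntl.LOCK_EX)   # exclusive lock: a single writer
#       pickle.dump(data, f, pickle.HIGHEST_PROTOCOL)
#       fcntl.flock(f, fcntl.LOCK_UN)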
# Our game-board abstraction
from board import Node, Empty, Board
# So we can draw the board
from PIL import Image, ImageDraw, ImageFont
from io import BytesIO
from math import ceil
# So we can save the board
import os.path
from os import mkdir
import pickle
# So we can pick random colors
import random
# Events names
import events
# To lock and unlock files
import fcntl
# Things to put our definitions into
from bot import Bot
# Size of the board
default_board_size = 9
# Directory for save files
save_dir = 'games/'
# If it already exists, just passes
if not os.path.isdir(save_dir):
mkdir(save_dir)
# Represents the game state, which can be loaded from a file
def get_board(filename):
if os.path.isfile(save_dir + str(filename) + '.p'):
f = open(save_dir + str(filename) + '.p', 'rb')
fcntl.flock(f, fcntl.LOCK_EX)
try:
out = pickle.load(f)
        except Exception:
print("Error loading!")
return Board(default_board_size)
board = out
else:
board = Board(default_board_size)
f = open(save_dir + str(filename) + '.p', 'wb')
pickle.dump(board, f, pickle.HIGHEST_PROTOCOL)
fcntl.flock(f, fcntl.LOCK_UN)
f.close()
return board
# Helper functions
# Note that these contain White/Black game specific stuff, and so are not rolled into the generic class
def score_str(board):
scores = board.score()
return "Black: " + str(scores["Black"]) + " White: " + str(scores["White"])
def save_board(board, filename):
f = open(save_dir + str(filename) + '.p', 'wb')
fcntl.flock(f, fcntl.LOCK_EX)
try:
pickle.dump(board, f, pickle.HIGHEST_PROTOCOL)
except Exception as ex:
print("Error saving!")
print("exception: " + str(ex))
fcntl.flock(f, fcntl.LOCK_UN)
f.close()
def are_indices(argList, size):
if len(argList) != 3:
return False
try:
i = int(argList[1])
j = int(argList[2])
if (i <= size and j <= size and i >= 0 and j >= 0):
return True
return False
except ValueError:
return False
# Converts from human readable to computer friendly
def convert_move(args):
if (len(args) != 3 and len(args) != 2):
return args
ret = [args[0], 0, 0]
if (len(args) == 3):
j = args[1]
else:
j = args[1][0]
ret[2] = ord(j.lower()) - 97
try:
if (len(args) == 2 and len(args[1]) >= 2):
ret[1] = int(args[1][1:]) - 1
return ret
elif len(args) == 3:
ret[1] = int(args[2]) - 1
return ret
else:
return args
except ValueError:
return args
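# For example (illustrative):
#   convert_move(['b', 'c3'])      -> ['b', 2, 2]
#   convert_move(['w', 'c', '3'])  -> ['w', 2, 2]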
# Turns any shorthand string into a nice named string
def to_name(name):
if name == "Black" or name == "White":
return name
else:
lower = name.lower()
if lower == "w" or lower == "white":
return "White"
if lower == "b" or lower == "black":
return "Black"
if lower == "o" or lower == "empty":
return Empty
else:
return None
def start(bot, update):
bot.sendMessage(chat_id=update.message.chat_id, text="Hey there!")
# Makes a move
def make_move(bot, update, args):
# Load the board
board = get_board(update.message.chat_id)
converted = convert_move(args)
if ((len(args) != 3 and len(args) != 2) or
(not are_indices(converted, board.size))):
print("Got bad input")
return
# The date of the update for our journal
# The update_id is an authoritative ordering like a date
date = update.update_id
# Name our positions
name = to_name(converted[0])
row = converted[1]
col = converted[2]
# We didn't get something that either represented White or Black
if name == None:
return
# Apply the move, noting whether or not to send an image
sendImage = board.addEvent((date, events.move, (name, row, col)))
# Now that we've moved, save the board and send the new image if appropriate
save_board(board, update.message.chat_id)
if (sendImage):
send_board_image(bot, update)
# double_reset nonsense
bot.double_resets[str(update.message.chat_id)] = False
def bmove(bot, update, args):
make_move(bot, update, ["Black"] + args)
def wmove(bot, update, args):
make_move(bot, update, ["White"] + args)
# Undo the last action
def undo (bot, update, args):
# Load the board
board = get_board(update.message.chat_id)
# The date of the update for our journal
# The update_id is an authoritative ordering like a date
date = update.update_id
# Apply the undo
board.addEvent((date, events.undo, None))
    # Now that we've undone the move, save the board and send the new image
save_board(board, update.message.chat_id)
send_board_image(bot, update)
# double_reset nonsense
bot.double_resets[str(update.message.chat_id)] = False
# Sends an image of the game board
def send_board_image(bot, update):
# Load the board
board = get_board(update.message.chat_id)
# Empirically determined, this seems to look fine with the JPG compression that Telegram does
space_width = 60
image_dim = (board.size + 3) * space_width
width = image_dim
height = image_dim
wholesize = width - space_width * 4
img = Image.new("RGB", (width, height), color="hsl(" + str(random.randrange(0,361)) + ", 100%, 80%)")
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("Lato-Regular.ttf", int(0.5 * (wholesize / (board.size - 1))))
def drawBoxAt(x, y, edgelen):
#Outline
linewd = int(board.size / 6) * 2
draw.rectangle([ x, y, x + edgelen, y + edgelen]
, fill = None
, outline = "black")
# Note that the circles are specified from their upper left corner
def drawWhiteAt(x, y, cellwidth):
draw.ellipse([ (x + cellwidth / 10, y + cellwidth / 10)
, (x + 9 * cellwidth / 10, y + 9 * cellwidth / 10) ],
outline = "white", fill = "white")
def drawBlackAt(x, y, cellwidth):
draw.ellipse([ (x + cellwidth / 10, y + cellwidth / 10)
, (x + 9 * cellwidth / 10, y + 9 * cellwidth / 10) ],
outline = "black", fill = "black")
def drawBoardAt(x, y, wholelen, board):
# We'll say the board starts/ends 10% in from each side
# Also, note that there are size - 2 boxes in each dimension to make the correct number
# of crossed spaces
numBoxes = board.size - 1
spacing = space_width
# We need a background to be able to see the white pieces
draw.rectangle([x - spacing * 1.5, y - spacing * 1.5, x + wholelen +
spacing * 1.5, y + wholelen + spacing * 1.5], fill = "burlywood",
outline = "#7B4A12")
for i in range(numBoxes):
for j in range(numBoxes):
drawBoxAt(x + i * spacing, y + j * spacing, spacing)
# Draw the labels
for i in range(board.size):
draw.text( ( x - 0.75 * spacing - font.getsize(str(i + 1))[0] / 2
, y - font.getsize(str(i + 1))[1] / 2 + i * spacing )
, str(i + 1)
, fill = 'black'
, font = font )
draw.text( ( x + wholelen + 0.75 * spacing - font.getsize(str(i+1))[0] / 2
, y - font.getsize(str(i + 1))[1] / 2 + i * spacing )
, str(i + 1)
, fill = 'black'
, font = font )
for i in range(board.size):
draw.text( ( x - font.getsize(chr(i + 97).upper())[0] / 2 + i * spacing
, y - spacing )
, chr(i + 97).upper()
, fill = 'black'
, font = font )
draw.text( ( x - font.getsize(chr(i + 97).upper())[0] / 2 + i * spacing
, y + wholelen + spacing - font.getsize('M')[1] )
, chr(i + 97).upper()
, fill = 'black'
, font = font )
# Now we step over the spaces and draw the pieces if need be
for i in range(board.size):
for j in range(board.size):
if board.get(i,j) == "White":
drawWhiteAt(x - spacing / 2 + j * spacing, y - spacing / 2 + i * spacing, spacing)
elif board.get(i,j) == "Black":
drawBlackAt(x - spacing / 2 + j * spacing, y - spacing / 2 + i * spacing, spacing)
drawBoardAt(space_width * 2, space_width * 2, wholesize, board)
    output = BytesIO()
    img.save(output, 'PNG')
    output.seek(0)  # rewind so the whole PNG is read when sending
bot.sendImage(chat_id = str(update.message.chat_id), photo = output)
# double_reset nonsense
bot.double_resets[str(update.message.chat_id)] = False
# Creates a new game, resizing the board possibly
# Shares the double_reset variable with reset_all
def new_game(bot, update):
# Check our state
double_reset = bot.double_resets[str(update.message.chat_id)]
# Load the board
board = get_board(update.message.chat_id)
# If double_reset is a number
if double_reset != True and double_reset != False:
# Create a new board and save it
# Otherwise just clear it
if board.size != double_reset:
board = Board(double_reset)
else:
board.clear()
save_board(board, update.message.chat_id)
bot.double_resets[str(update.message.chat_id)] = False
send_board_image(bot, update)
def confirm_resize(bot, update, args):
# See if the number input was valid (or no number was input)
# Only allow up to a 19 x 19 board (arbitrarily chosen)
if (len(args) == 0): # Don't change the size
new_size = get_board(update.message.chat_id).size
bot.double_resets[str(update.message.chat_id)] = new_size
elif (len(args) == 1):
try:
new_size = int(args[0])
except:
bot.sendMessage( chat_id=update.message.chat_id
, text="Please provide a valid number for the new board size.")
return
# Check to make sure the number is okay
if new_size not in [7, 9, 13, 17, 19]:
bot.sendMessage( chat_id=update.message.chat_id
, text="Please provide a valid number for the new board size.")
return
# Remember this number by putting it into the dictionary and ask for
# confirmation
bot.double_resets[str(update.message.chat_id)] = new_size
bot.sendMessage( chat_id=update.message.chat_id
, text="Send the confirm_new command to start a new game.")
# The function that exposes the appropriate loading method
def load (bot):
# Start response
bot.addHandler('start', start)
# Move commands
bot.addHandler('move', make_move)
bot.addHandler('bmove', bmove)
bot.addHandler('Bmove', bmove)
bot.addHandler('BMove', bmove)
bot.addHandler('wmove', wmove)
bot.addHandler('Wmove', wmove)
bot.addHandler('WMove', wmove)
bot.addHandler('bmvoe', bmove)
bot.addHandler('Bmvoe', bmove)
bot.addHandler('BMvoe', bmove)
bot.addHandler('wmvoe', wmove)
bot.addHandler('Wmvoe', wmove)
bot.addHandler('WMvoe', wmove)
# And shortcuts!
bot.addHandler('b', bmove)
bot.addHandler('w', wmove)
# Undo
bot.addHandler('undo', undo)
# Send the board image
bot.addHandler("game", send_board_image)
# Making new games
bot.addHandler("new_game", confirm_resize)
bot.addHandler("confirm_new", new_game)
|
'''
Developed by:
Martin Galvan-201614423
Tomas Kavanagh-201615122
'''
from __future__ import division
from pyomo.environ import *
from pyomo.opt import SolverFactory
from matplotlib import pyplot as plt
import sys
import os
f1=[]
f2=[]
##############################################################################
#####################          FUNCTIONS           ##########################
##############################################################################
# FUNCTION: DELETE A MODEL COMPONENT
def delete_component(Model, comp_name):
list_del = [vr for vr in vars(Model)
if comp_name == vr
or vr.startswith(comp_name + '_index')
or vr.startswith(comp_name + '_domain')]
list_del_str = ', '.join(list_del)
print('Deleting model components ({}).'.format(list_del_str))
for kk in list_del:
Model.del_component(kk)
def inv_constraint(Model,i,j):
return sum(Model.x[i,j,k] for k in K)<=inv[i,j]
def dem_constraint(Model,i,k):
return sum(Model.x[i,j,k] for j in J)==dem[i,k]
##############################################################################
tipoPaquete = 2
nodoOrigen = 3
nodoDestino = 4
modelo = ConcreteModel()
I=RangeSet(1,tipoPaquete)
J=RangeSet(1,nodoOrigen)
K=RangeSet(1,nodoDestino)
epsilon = 7000
costo = {(1,1):10,(1,2):9,(1,3):10,(1,4):11,
(2,1):9,(2,2):10,(2,3):11,(2,4):10,
(3,1):11,(3,2):9,(3,3):10,(3,4):10}
delay ={(1,1):12,(1,2):14,(1,3):10,(1,4):11,
(2,1):11,(2,2):8,(2,3):7,(2,4):13,
(3,1):6,(3,2):11,(3,3):4,(3,4):15}
inv = {(1,1):60,(1,2):80,(1,3):50,
(2,1):20,(2,2):20,(2,3):30}
dem = {(1,1):50,(1,2):90,(1,3):40,(1,4):10,
(2,1):10,(2,2):20,(2,3):10,(2,4):30}
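# Epsilon-constraint method: minimize cost f1 while bounding delay f2 <= lim,
# sweeping lim downward; every feasible solve yields one point of the Pareto front.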
step=20 # Changing the step makes the Pareto-front plot gain or lose points and can alter its shape a bit
try:
for lim in reversed(range(0,epsilon+1,step)):
print('Epsilon: ',lim)
modelo.x=Var(I,J,K,domain=PositiveIntegers)
modelo.f1 = Objective(expr = sum(costo[j,k]*modelo.x[i,j,k] for i in I for j in J for k in K), sense = minimize)
modelo.f2 = Constraint(expr = sum(delay[j,k]*modelo.x[i,j,k] for i in I for j in J for k in K)<=lim)
modelo.inv = Constraint(I,J,rule=inv_constraint)
modelo.dem = Constraint(I,K,rule=dem_constraint)
SolverFactory('glpk').solve(modelo)
modelo.display()
f1 = f1 +[value(modelo.f1)]
f2 = f2 +[value(modelo.f2)]
delete_component(modelo, 'x')
delete_component(modelo, 'f1')
delete_component(modelo, 'f2')
delete_component(modelo, 'inv')
delete_component(modelo, 'dem')
except Exception:
    pass # Reaching here means the solution could not be determined, so the sweep is over
finally:
plt.plot(f1,f2,'o-.')
    plt.title('Pareto Optimal Front')
plt.xlabel('F1')
plt.ylabel('F2')
    plt.grid(True)
plt.show()
|
from datetime import datetime
from app import db, login
from config import Config
from flask_login import UserMixin
from werkzeug.security import generate_password_hash, check_password_hash
followers = db.Table('followers',
db.Column('follower_id', db.Integer, db.ForeignKey('user.id')),
db.Column('followed_id', db.Integer, db.ForeignKey('user.id'))
)
likes_table = db.Table('likes_table',
db.Column('liker_id', db.Integer, db.ForeignKey('user.id')),
db.Column('post_id', db.Integer, db.ForeignKey('post.id'))
)
class User(UserMixin, db.Model):
id = db.Column(db.Integer, primary_key=True)
handle = db.Column(db.String(Config.MAX_HANDLE_LEN), index=True, unique=True)
email = db.Column(db.String(Config.MAX_EMAIL_LEN), index=True, unique=True)
password_hash = db.Column(db.String(128))
posts = db.relationship('Post', backref='author', lazy='dynamic')
followed = db.relationship(
'User', secondary=followers,
primaryjoin=(followers.c.follower_id == id),
secondaryjoin=(followers.c.followed_id == id),
backref=db.backref('followers', lazy='dynamic'), lazy='dynamic')
    liked_posts = db.relationship(
        "Post",
        secondary=likes_table,
        back_populates="likes",
        lazy='dynamic')
def __repr__(self):
return f'@{self.handle}'
def set_password(self, password):
self.password_hash = generate_password_hash(password)
def check_password(self, password):
return check_password_hash(self.password_hash, password)
def follow(self, user):
if not self.is_following(user):
self.followed.append(user)
def unfollow(self, user):
if self.is_following(user):
self.followed.remove(user)
def is_following(self, user):
return self.followed.filter(
followers.c.followed_id == user.id).count() > 0
def like(self, post):
if not self.has_liked(post):
self.liked_posts.append(post)
def unlike(self, post):
if self.has_liked(post):
self.liked_posts.remove(post)
def has_liked(self, post):
return self.liked_posts.filter(
likes_table.c.post_id == post.id).count() > 0
def followed_posts(self):
followed = Post.query.join(
followers, (followers.c.followed_id == Post.user_id)).filter(
followers.c.follower_id == self.id)
own = Post.query.filter_by(user_id=self.id)
return followed.union(own).order_by(Post.timestamp.desc())
class Post(UserMixin, db.Model):
# Authorship and record keeping
id = db.Column(db.Integer, primary_key=True)
user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
timestamp = db.Column(db.DateTime, index=True, default=datetime.utcnow)
# Post contents
item1 = db.Column(db.String(Config.MAX_ITEM_LEN))
item2 = db.Column(db.String(Config.MAX_ITEM_LEN))
item3 = db.Column(db.String(Config.MAX_ITEM_LEN))
# The people who liked the post
likes = db.relationship(
"User",
secondary=likes_table,
back_populates="liked_posts")
def __repr__(self):
return f'<Post {self.id} by @{User.query.filter(User.id == self.user_id).first()}>'
@login.user_loader
def load_user(id):
return User.query.get(int(id))
def create_user(handle: str, email: str, password: str):
# Make sure someone isn't already using this handle or email
handle_check = User.query.filter(User.handle == handle).first()
email_check = User.query.filter(User.email == email).first()
if handle_check:
raise ValueError("A user with this handle already exists!")
if email_check:
raise ValueError("A user with this email already exists!")
# Create the user
u = User(handle=handle, email=email)
u.set_password(password)
db.session.add(u)
db.session.commit()
return u
def validate_user(email: str, password: str):
user = User.query.filter_by(email=email).first()
if user is None or not user.check_password(password):
raise ValueError("Invalid email or password.")
return user
def create_post(user: User, items: list):
if len(items) != 3:
raise ValueError("Items must have exactly three elements")
p = Post(item1=items[0], item2=items[1], item3=items[2], author=user)
db.session.add(p)
db.session.commit()
return p
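# Usage sketch (illustrative; assumes an application context and an initialized db):
#
#   u = create_user('alice', 'alice@example.com', 's3cret')
#   p = create_post(u, ['coffee', 'code', 'sleep'])
#   u.like(p)
#   db.session.commit()
#   assert u.has_liked(p)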
|
import boto3
region = 'us-west-2'
info = []
client = boto3.client('ec2', region_name=region)
response = client.describe_network_interfaces()
for security_group in response['NetworkInterfaces']:
for sg in security_group['Groups']:
info.append(sg['GroupId'])
for network_interface in response['NetworkInterfaces']:
info.append(network_interface['NetworkInterfaceId'])
info.sort()
print(info)
# def get_
# def main():
# region = 'us-west-2'
# network_int = set()
# get_network_interfaces(region, network_int)
# network_interfaces = get_network_interfaces(region, network_int)
# for sg in network_interfaces:
# print(sg)
# if __name__== "__main__":
# main()
|
#!/usr/bin/python
import os
import re
import sys
import time
import mmap
import random
import string
import argparse
import threading
from time import sleep
from multiprocessing import Pool, Lock
# echo 3 > /proc/sys/vm/drop_caches
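# Example invocation (illustrative; the script name is hypothetical, the flags
# are defined in the argparse section below):
#   python2 io_benchmark.py -B 1M -f 2G -t 4 -R /mnt/target/testfile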
lock = Lock()
##############################################################################################################################################
def msg(message):
msg = '%s%s' % (message, os.linesep)
with lock:
sys.stdout.write(msg)
##############################################################################################################################################
class File:
def __init__(self, filename, filehandle):
self.filename = filename
self.filehandle = filehandle
##############################################################################################################################################
def formatRate( rate ):
srate = "%.2f B/s" % (rate)
if rate>=1024:
srate = "%.2f KB/s" % (float(rate) / float(1024))
if rate>=1024*1024:
srate = "%.2f MB/s" % (float(rate) / float(1024*1024))
if rate>=1024*1024*1024:
srate = "%.2f GB/s" % (float(rate) / float(1024*1024*1024))
if rate>=1024*1024*1024*1024:
srate = "%.2f TB/s" % (float(rate) / float(1024*1024*1024*1024))
return srate
##############################################################################################################################################
def formatSize( size ):
ssize = "%d Bytes" % (size)
if size>=1024:
ssize = "%d KBytes" % (float(size) / float(1024))
if size>=1024*1024:
ssize = "%d MBytes" % (float(size) / float(1024*1024))
if size>=1024*1024*1024:
ssize = "%d GBytes" % (float(size) / float(1024*1024*1024))
if size>=1024*1024*1024*1024:
ssize = "%d TBytes" % (float(size) / float(1024*1024*1024*1024))
return ssize
##############################################################################################################################################
def formatNum( size ):
ssize = "%d Bytes" % (size)
if size>=1024:
ssize = "%d K" % (float(size) / float(1024))
if size>=1024*1024:
ssize = "%d M" % (float(size) / float(1024*1024))
if size>=1024*1024*1024:
ssize = "%d G" % (float(size) / float(1024*1024*1024))
if size>=1024*1024*1024*1024:
ssize = "%d T" % (float(size) / float(1024*1024*1024*1024))
return ssize
###############################################################################################################################################
class Writer:
def __init__(self, offset_array, buffer, file, writerid, pedantic, sync ):
self.writerid = writerid
self.file = file
self.offsets = offset_array
self.buffer = buffer
self.delay = float(0)
self.pedantic = pedantic
self.sync = sync
def doWrite( self, block_delay=-1.0 ):
#print "Writer #%d - Start writing into file %s" % (self.writerid, self.file.filename)
if self.pedantic:
msg("Writer #%d - Start writing into file %s" % (self.writerid, self.file.filename))
before = 1000000*time.time()
for offset in self.offsets:
#print "Writer #%d - Seeking to %d" % (self.writerid, offset)
if self.pedantic:
msg( "Writer #%d - Seeking to %d and writing %d bytes" % (self.writerid, offset, len(self.buffer)) )
os.lseek(self.file.filehandle, offset, os.SEEK_SET)
os.write(self.file.filehandle, self.buffer)
            if block_delay > -1.0:
time.sleep( block_delay )
if self.sync:
os.fsync( self.file.filehandle )
after = 1000000*time.time()
#print "Writer #%d - Stopped writing into %s" % (self.writerid, self.file.filename)
if self.pedantic:
msg("Writer #%d - Stopped writing into %s" % (self.writerid, self.file.filename))
self.delay = float(after)-float(before)
###############################################################################################################################################
class Reader:
    def __init__(self, offset_array, bsize, destbuf, file, readerid, pedantic):
        self.readerid = readerid
self.file = file
self.offsets = offset_array
self.buffer = destbuf
#self.filename = filename
#self.fh = filehandle
self.delay = float(0)
self.bsize = bsize
self.pedantic = pedantic
def doRead(self):
#print "Reader #%d - Start reading from file %s" % (self.readerid, self.file.filename)
if self.pedantic:
msg( "Reader #%d - Start reading from file %s" % (self.readerid, self.file.filename) )
before = 1000000*time.time()
for offset in self.offsets:
os.lseek(self.file.filehandle, offset, os.SEEK_SET)
            os.read(self.file.filehandle, self.bsize)
after = 1000000*time.time()
#print "Reader #%d - Stopped reading from %s" % (self.readerit, self.file.filename)
if self.pedantic:
msg( "Reader #%d - Stopped reading from %s" % (self.readerit, self.file.filename) )
self.delay = float(after)-float(before)
###############################################################################################################################################
if __name__ == "__main__":
bsize = 256 * 1024
fsize = 1024 * 1024 * 1024
parser = argparse.ArgumentParser(description="I/O benchmark")
parser.add_argument('filename', metavar='TESTFILE', type=str, nargs=1, help='Specify the mandatory filename for test')
parser.add_argument('-B', '--blocksize', type=str, required=False, default='256k', help='Specify block size (format <number> or <number><unit>, <unit> can be "k","K","g","G","t","T", e.g. 1024M, 1048576, 1T, 10G, ...)')
parser.add_argument('-b', '--blockdelay', type=str, required=False, default='-1', help='Specify the delay (in microseconds, or as a string 1s, 100ms, 50us) between two subsequent block writes; helps to reduce the writing speed')
parser.add_argument('-f', '--filesize', type=str, required=False, default='1G', help='Specify file size (format is the same as for blocksize)')
parser.add_argument('-s', '--osync', action="store_true", required=False, default=False, help='Open file with O_SYNC flag')
parser.add_argument('-n', '--sync', action="store_true", required=False, default=False, help='Call the fsync at the end of entire write process')
parser.add_argument('-d', '--odirect', action="store_true", required=False, default=False, help='Open file with O_DIRECT flag')
parser.add_argument('-i', '--iterations', type=int, required=False, default=1, help='Number of times the test must be repeated')
    parser.add_argument('-t', '--numthreads', type=int, required=False, default=1, help='Number of threads; each thread writes its own file unless -S is given')
    parser.add_argument('-S', '--samefile', action="store_true", required=False, default=False, help='When more than one thread is specified, all threads write to the same file')
parser.add_argument('-r', '--remove', action="store_true", required=False, default=False, help='Remove each testfile after each iteration has completed')
parser.add_argument('-R', '--random', action="store_true", required=False, default=False, help='Randomly write each block (default is sequential)')
parser.add_argument('-T', '--csv', action="store_true", required=False, default=False, help='Final output is in CSV colon-separated format')
    parser.add_argument('-P', '--pedantic', action="store_true", required=False, default=False, help='Print verbose information about what the threads do')
    parser.add_argument('-a', '--preallocate', action="store_true", required=False, default=False, help='Invoke the posix_fallocate syscall')
options = parser.parse_args()
if len(options.filename) == 0 or options.filename[0] == "":
print "filename is mandatory and it must not be an empty string"
sys.exit(1)
    m = re.match(r'^([0-9]+)([kKmMgGtT])?$', options.blocksize)
    if not m:
        print "blocksize must have the format <digits>X, where the optional 'X' can be one of: k,K,m,M,g,G,t,T"
        sys.exit(1)
bs_base = m.group(1)
bsize = int(bs_base)
bs_mult = m.group(2)
    if bs_mult:
        if bs_mult in ('k', 'K'):
            bsize = int(bs_base) * 1024
        elif bs_mult in ('m', 'M'):
            bsize = int(bs_base) * 1024 * 1024
        elif bs_mult in ('g', 'G'):
            bsize = int(bs_base) * 1024 * 1024 * 1024
        elif bs_mult in ('t', 'T'):
            bsize = int(bs_base) * 1024 * 1024 * 1024 * 1024
    m = re.match(r'^([0-9]+)([kKmMgGtT])?$', options.filesize)
    if not m:
        print "filesize must have the format <digits>X, where the optional 'X' can be one of: k,K,m,M,g,G,t,T"
        sys.exit(1)
fs_base = m.group(1)
fsize = int(fs_base)
fs_mult = m.group(2)
    if fs_mult:
        if fs_mult in ('k', 'K'):
            fsize = int(fs_base) * 1024
        elif fs_mult in ('m', 'M'):
            fsize = int(fs_base) * 1024 * 1024
        elif fs_mult in ('g', 'G'):
            fsize = int(fs_base) * 1024 * 1024 * 1024
        elif fs_mult in ('t', 'T'):
            fsize = int(fs_base) * 1024 * 1024 * 1024 * 1024
if fsize < bsize:
print "Filesize cannot be smaller than blocksize."
sys.exit(1)
    if not (float(fsize)/float(bsize)).is_integer():
        print "The ratio filesize/blocksize must be an integer."
sys.exit(1)
flags = os.O_RDWR|os.O_CREAT
if options.osync:
flags |= os.O_SYNC
    if options.odirect:
        try:
            flags |= os.O_DIRECT
        except AttributeError:
            pass  # os.O_DIRECT is not available on this platform
offsets = []
for i in range(fsize/bsize):
offsets.append( i * bsize )
if options.random:
random.shuffle(offsets)
offsets_slices = []
for i in range(options.numthreads):
offsets_slices.append( offsets )
if options.samefile and options.numthreads and options.numthreads>1:
offsets_slices = []
slicesize = (float(len(offsets)))/float(options.numthreads)
if slicesize - int(slicesize) > 0:
            print 'Filesize / blocksize is %d, which is not an integer multiple of the number of threads %d. Stop!' % (len(offsets), options.numthreads)
sys.exit(1)
for i in range(options.numthreads):
offsets_slices.append(offsets[int(i*slicesize):int(i*slicesize+slicesize)])
iterations = 1
if options.iterations:
iterations = options.iterations
bdelay = -1.0
if options.blockdelay:
m = re.match(r'^(-?[0-9]+)([smu])?$', options.blockdelay)
if not m:
print "Format for block delay us <num><unit>, where unit can be 's' (for seconds) or 'm' (for milliseconds) or 'u' (for microseconds)"
sys.exit(1)
num = int(m.group(1))
unit = ""
if m.group(2):
unit = m.group(2)
if unit == "" or unit == "s":
bdelay = float(num)
if unit == "u":
bdelay = float(num) / 1000000.0
if unit == "m":
bdelay = float(num) / 1000.0
print "Filling buffer with random characters..."
    m = mmap.mmap( -1, bsize ) # mmap aligns the buffer to 4 KB in memory (hence also to 512 bytes, as required by O_DIRECT)
buffer = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(bsize))
m.write( buffer )
delay = 0
print "Start writing: filesize=%d (%s) blocksize=%d (%s) num-blocks-to-write=%d (%s) iterations=%d" % (fsize, formatSize(fsize),bsize, formatSize(bsize), fsize/bsize, formatNum(fsize/bsize), iterations)
for i in range(iterations):
print "--> Iteration %d" % (i+1)
final_filename = "%s-%d" % (options.filename[0], i)
numfiles = options.numthreads
if options.samefile:
numfiles = 1
files = []
writers = []
threads = []
for i in range(numfiles):
final_filename_t = final_filename + "-th-%d" % i
print " Opening file %s" % (final_filename_t)
fh = os.open(final_filename_t, flags)
        if options.preallocate:
            os.posix_fallocate( fh, 0, fsize )  # note: os.posix_fallocate requires Python 3.3+
file = File( final_filename_t, fh )
files.append(file)
filecounter = 0
for t in range(options.numthreads):
        writer = Writer( offsets_slices[t], m, files[filecounter], t, options.pedantic, options.sync )
writers.append(writer)
t = threading.Thread(name='writer', target=writer.doWrite, args=(bdelay,))
threads.append(t)
t.start()
if not options.samefile:
filecounter = filecounter + 1
for t in threads:
t.join()
for file in files:
print " Closing file %s" % (file.filename)
os.close(file.filehandle)
for writer in writers:
delay += writer.delay
if options.remove:
for file in files:
os.remove(file.filename)
    multiplier = numfiles
if numfiles == 1 and options.numthreads>1:
multiplier = options.numthreads
rate = 1000000*(float(multiplier*fsize*len(files))/delay) * iterations
if not options.csv:
print "\n--- SUMMARY ---"
print " Elapsed time: %.2f ms - Total bytes written: %s - Rate: %s" % (delay/1000, formatSize(fsize * iterations * len(files)), formatRate(rate))
else:
print "SUMMARY:%.2f:%d:%.2f" % (delay/1000, fsize * iterations, rate)
|
# -*- coding: utf-8 -*-
import inspect
import re
class ValidateException(Exception): pass
def validParam(*varargs, **keywords):
    '''Decorator that validates function parameters.'''
varargs = map(_toStardardCondition, varargs)
keywords = dict((k, _toStardardCondition(keywords[k]))
for k in keywords)
def generator(func):
args, varargname, kwname = inspect.getargspec(func)[:3]
dctValidator = _getcallargs(args, varargname, kwname,
varargs, keywords)
# print 'dctValidator', dctValidator
def wrapper(*callvarargs, **callkeywords):
dctCallArgs = _getcallargs(args, varargname, kwname,
callvarargs, callkeywords)
# print 'dctCallArgs', dctCallArgs
k, item = None, None
try:
for k in dctValidator:
if k == varargname:
for item in dctCallArgs[k]:
assert dctValidator[k](item)
elif k == kwname:
for item in dctCallArgs[k].values():
assert dctValidator[k](item)
else:
item = dctCallArgs[k]
assert dctValidator[k](item)
except:
raise ValidateException,\
('%s() parameter validation fails, param: %s, value: %s(%s)'
% (func.func_name, k, item, item.__class__.__name__))
return func(*callvarargs, **callkeywords)
wrapper = _wrapps(wrapper, func)
return wrapper
return generator
def _toStardardCondition(condition):
    '''Convert a check condition in any supported format into a checker function'''
if inspect.isclass(condition):
return lambda x: isinstance(x, condition)
if isinstance(condition, (tuple, list)):
cls, condition = condition[:2]
if condition is None:
return _toStardardCondition(cls)
if cls in (str, unicode) and condition[0] == condition[-1] == '/':
return lambda x: (isinstance(x, cls)
and re.match(condition[1:-1], x) is not None)
return lambda x: isinstance(x, cls) and eval(condition)
return condition
def nullOk(cls, condition=None):
    '''Build a check condition that also accepts None values'''
return lambda x: x is None or _toStardardCondition((cls, condition))(x)
def multiType(*conditions):
    '''Build a check condition that passes if any one of the given conditions passes'''
lstValidator = map(_toStardardCondition, conditions)
def validate(x):
for v in lstValidator:
if v(x):
return True
return validate
def _getcallargs(args, varargname, kwname, varargs, keywords):
    '''Build a dict mapping each parameter name to the value used in the call'''
# print args, varargname, kwname, varargs, keywords
dctArgs = {}
varargs = tuple(varargs)
keywords = dict(keywords)
argcount = len(args)
varcount = len(varargs)
callvarargs = None
    if argcount <= varcount:
        for n, argname in enumerate(args):
            dctArgs[argname] = varargs[n]
        callvarargs = varargs[argcount:]  # varargs[-(varcount-argcount):] mishandles the argcount == varcount case
else:
for n, var in enumerate(varargs):
dctArgs[args[n]] = var
for argname in args[-(argcount-varcount):]:
if argname in keywords:
dctArgs[argname] = keywords.pop(argname)
callvarargs = ()
if varargname is not None:
dctArgs[varargname] = callvarargs
if kwname is not None:
dctArgs[kwname] = keywords
dctArgs.update(keywords)
# print dctArgs
return dctArgs
def _wrapps(wrapper, wrapped):
    '''Copy metadata from the wrapped function onto the wrapper'''
for attr in ('__module__', '__name__', '__doc__'):
setattr(wrapper, attr, getattr(wrapped, attr))
for attr in ('__dict__',):
getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
return wrapper
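# Minimal self-test sketch; the decorated function below is hypothetical and
# only illustrates how validParam is meant to be applied.
if __name__ == '__main__':
    @validParam(int, str, age=(int, '0 < x < 150'))
    def register(uid, name, age):
        return '%s (#%d), age %d' % (name, uid, age)
    print register(1, 'bob', 30)    # passes all checks
    try:
        register(1, 'bob', 200)     # age out of range
    except ValidateException, e:
        print e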
|
#!/usr/bin/env python
from setuptools import setup
setup(name='floatdict',
version='0.1',
packages=['floatdict'])
|
from PIL import Image
import os
imgwidth = {}
# Record the pixel width of every PNG glyph in ./myfont/, keyed by character name
for img in os.listdir('./myfont/'):
    if img.endswith('.png'):
        char, ext = os.path.splitext(img)
        with Image.open("./myfont/" + img) as pic:
            imgwidth[char] = pic.width
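# Optional follow-up sketch: persist the collected widths; the output file
# name is an assumption, not part of the original script.
import json
with open('myfont_widths.json', 'w') as f:
    json.dump(imgwidth, f, indent=2, sort_keys=True)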
|
# -*- coding: utf-8 -*-
import numpy as np
def getactions(u1, u2):
    # Eight speed combinations of the two input speeds (given in RPM)
    actions = np.array([[u2, u2], [u1, u1], [u2, u1], [u1, 0],
                        [u2, 0], [0, u2], [0, u1], [u1, u2]])
    # Convert RPM to radians per second: 1 RPM = 2*pi/60 rad/s
    rpm_to_rad_per_s = (2 * np.pi) / 60
    return actions * rpm_to_rad_per_s
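# Quick sanity check (hypothetical inputs): at 30 and 60 RPM the converted
# speeds are roughly 3.14 and 6.28 rad/s.
if __name__ == '__main__':
    print(getactions(30, 60))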
|
# Licensed to Elasticsearch B.V under one or more agreements.
# Elasticsearch B.V licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information
from .utils import NamespacedClient, SKIP_IN_PATH, query_params, _make_path
class EqlClient(NamespacedClient):
@query_params()
def search(self, index, body, params=None, headers=None):
"""
Returns results matching a query expressed in Event Query Language (EQL)
`<https://www.elastic.co/guide/en/elasticsearch/reference/current/eql-search-api.html>`_
:arg index: The name of the index to scope the operation
:arg body: Eql request body. Use the `query` to limit the query
scope.
"""
for param in (index, body):
if param in SKIP_IN_PATH:
raise ValueError("Empty value passed for a required argument.")
return self.transport.perform_request(
"POST",
_make_path(index, "_eql", "search"),
params=params,
headers=headers,
body=body,
)
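# Usage sketch (hypothetical index name and query; assumes a running
# Elasticsearch cluster with EQL enabled, accessed through the high-level
# client, which exposes this class as `es.eql`):
#
#   from elasticsearch import Elasticsearch
#   es = Elasticsearch()
#   resp = es.eql.search(
#       index="my-index",
#       body={"query": 'process where process.name == "cmd.exe"'},
#   )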
|
import os
import sublime
def truncate(s, l, ellipsis="…"):
"""Truncates string to length `l` if need be and adds `ellipsis`."""
    try:
        _l = len(s)
    except TypeError:  # object has no len()
        return s
if _l > l:
        try:
            return s[:l] + ellipsis
        except TypeError:  # in case s is a byte string
            return s[:l]
return s
def clean_url(url):
"""Remove trailing slash and query string from `url`."""
url = url.split("?")[0]
if url and url[-1] == "/":
return url[:-1]
return url
def absolute_path(filename, view):
"""Get absolute path to `filename` relative to view."""
if os.path.isabs(filename):
return filename
file_path = view.file_name()
if file_path:
return os.path.join(os.path.dirname(file_path), filename)
return None
def get_transfer_indicator(filename, transferred, total, spaces=50):
"""Returns progress indicator for byte stream transfer."""
if not total:
return "{}, ?".format(filename)
transferred = min(transferred, total)
spaces_filled = int(spaces * transferred / total)
return "{}, [{}] {}kB".format(
filename,
"·" * spaces_filled + " " * (spaces - spaces_filled - 1),
transferred // 1024,
)
def prepend_scheme(url):
"""Prepend scheme to URL if necessary."""
if isinstance(url, str) and len(url.split("://")) == 1:
scheme = sublime.load_settings("Requester.sublime-settings").get("scheme", "http")
return scheme + "://" + url
return url
def is_instance(obj, s):
"""Is object an instance of class named `s`?"""
return s in str(type(obj))
def is_auxiliary_view(view):
"""Was view opened by a Requester command? This is useful, e.g., to
avoid resetting `env_file` and `env_string` on these views.
"""
if view.settings().get("requester.response_view", False):
return True
if view.settings().get("requester.test_view", False):
return True
return False
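# Illustrative values for the plain helpers above (`absolute_path` and
# `prepend_scheme` need a live Sublime Text environment, the others do not):
#
#   truncate("abcdefgh", 5)          -> "abcde…"
#   clean_url("http://x.io/a/?q=1")  -> "http://x.io/a"
#   is_instance({}, "dict")          -> True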
|
import tensorflow as tf
from matplotlib import pyplot as plt
fashion = tf.keras.datasets.fashion_mnist
(x_train, y_train),(x_test, y_test) = fashion.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(256, activation='relu'),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(64, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['sparse_categorical_accuracy'])
history=model.fit(x_train, y_train, batch_size=128, epochs=10, validation_data=(x_test, y_test), validation_freq=1)
model.summary()
# Plot accuracy and loss curves for the training and validation sets
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()
plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()
plt.show()
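# Optional follow-up sketch: report final performance on the held-out test set.
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print('test accuracy: %.4f' % test_acc)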
|