content (string, lengths 0 to 1.55M)
"""
Language enumeration. Part of the StoryTechnologies project.
June 12, 2016
<NAME> (<EMAIL>)
"""<import_from_stmt>enum Enum<class_stmt>Language(Enum)# https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes
# https://en.wikipedia.org/wiki/ISO_639-2
<block_start>ENG=1# English
SPA=2# Spanish
DEU=3# German
ITA=4# Italian
FRA=5# French
NLD=6# Dutch
<def_stmt>__str__ self<block_start><return>self.name<block_end><block_end> |
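# Usage sketch (added for illustration, not part of the original module); it assumes
# the Language enum defined above is in scope and only exercises standard Enum behaviour.
print(Language.ENG)        # "ENG", via the __str__ override
print(Language.ENG.value)  # 1
print(Language(3))         # "DEU", lookup by numeric value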
import FWCore.ParameterSet.Config as cms

from L1Trigger.Configuration.L1TReco_cff import *
# Imports for urn construction utility methods
import logging

from datahub.emitter.mce_builder import make_dataset_urn, make_tag_urn
from datahub.emitter.mcp import MetadataChangeProposalWrapper
from datahub.emitter.rest_emitter import DatahubRestEmitter

# Imports for metadata model classes
from datahub.metadata.schema_classes import (
    ChangeTypeClass,
    GlobalTagsClass,
    TagAssociationClass,
)

log = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)

dataset_urn = make_dataset_urn(platform="hive", name="realestate_db.sales", env="PROD")
tag_urn = make_tag_urn("purchase")

event: MetadataChangeProposalWrapper = MetadataChangeProposalWrapper(
    entityType="dataset",
    changeType=ChangeTypeClass.UPSERT,
    entityUrn=dataset_urn,
    aspectName="globalTags",
    aspect=GlobalTagsClass(tags=[TagAssociationClass(tag=tag_urn)]),
)

# Create rest emitter
rest_emitter = DatahubRestEmitter(gms_server="http://localhost:8080")
rest_emitter.emit(event)
log.info(f"Set tags to {tag_urn} for dataset {dataset_urn}")
from __future__ import print_function

from sympy import symbols, Matrix
from galgebra.printer import xpdf, Format


def main():
    Format()

    a = Matrix(2, 2, (1, 2, 3, 4))
    b = Matrix(2, 1, (5, 6))
    c = a * b
    print(a, b, '=', c)

    x, y = symbols('x, y')
    d = Matrix(1, 2, (x ** 3, y ** 3))
    e = Matrix(2, 2, (x ** 2, 2 * x * y, 2 * x * y, y ** 2))
    f = d * e
    print('%', d, e, '=', f)

    # xpdf()
    xpdf(pdfprog=None)
    return


if __name__ == "__main__":
    main()
import inviwopy
from inviwopy.glm import *

v1 = vec3(1, 2, 3)
v2 = size2_t(4, 5)
m1 = mat4(1)
m2 = mat3(0, 1, 0, -1, 0, 0, 0, 0, 2)
v3 = m2 * v1

v4 = vec4(1, 2, 3, 4)
# Each vec4 component is reachable through the xyzw, rgba, and stpq aliases.
w = v4.w
a = v4.a
q = v4.q
z = v4.z
b = v4.b
p = v4.p
y = v4.y
g = v4.g
t = v4.t
x = v4.x
r = v4.r
s = v4.s
import datetime
import os
import random
import sys

sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path

from auto_ml import Predictor
from auto_ml.utils_models import load_ml_model

import dill
from nose.tools import assert_equal, assert_not_equal, with_setup
import numpy as np
from sklearn.model_selection import train_test_split

import utils_testing as utils


def optimize_final_model_regression(model_name=None):
    np.random.seed(0)

    df_boston_train, df_boston_test = utils.get_boston_regression_dataset()

    # We just want to make sure these run, not necessarily make sure that they're super accurate
    # (which takes more time, and is dataset dependent)
    df_boston_train = df_boston_train.sample(frac=0.5)

    column_descriptions = {
        'MEDV': 'output',
        'CHAS': 'categorical'
    }

    ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
    ml_predictor.train(df_boston_train, optimize_final_model=True, model_names=model_name)

    test_score = ml_predictor.score(df_boston_test, df_boston_test.MEDV)

    print('test_score')
    print(test_score)

    # the random seed gets a score of -3.21 on python 3.5
    # There's a ton of noise here, due to small sample sizes
    lower_bound = -3.4
    if model_name == 'DeepLearningRegressor':
        lower_bound = -24
    if model_name == 'LGBMRegressor':
        lower_bound = -16
    if model_name == 'GradientBoostingRegressor':
        lower_bound = -5.1
    if model_name == 'CatBoostRegressor':
        lower_bound = -4.5
    if model_name == 'XGBRegressor':
        lower_bound = -4.8

    assert lower_bound < test_score < -2.75


def getting_single_predictions_regression(model_name=None):
    np.random.seed(0)

    df_boston_train, df_boston_test = utils.get_boston_regression_dataset()

    column_descriptions = {
        'MEDV': 'output',
        'CHAS': 'categorical'
    }

    ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
    ml_predictor.train(df_boston_train, model_names=model_name)

    file_name = ml_predictor.save(str(random.random()))

    saved_ml_pipeline = load_ml_model(file_name)

    os.remove(file_name)
    try:
        keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
        os.remove(keras_file_name)
    except:
        pass

    df_boston_test_dictionaries = df_boston_test.to_dict('records')

    # 1. make sure the accuracy is the same
    predictions = []
    for row in df_boston_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))

    print('predictions')
    print(predictions)
    print('predictions[0]')
    print(predictions[0])
    print('type(predictions)')
    print(type(predictions))

    first_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('first_score')
    print(first_score)

    # Make sure our score is good, but not unreasonably good
    lower_bound = -2.9
    if model_name == 'DeepLearningRegressor':
        lower_bound = -7.8
    if model_name == 'LGBMRegressor':
        lower_bound = -4.95
    if model_name == 'XGBRegressor':
        lower_bound = -3.4
    if model_name == 'CatBoostRegressor':
        lower_bound = -3.7

    assert lower_bound < first_score < -2.7

    # 2. make sure the speed is reasonable (do it a few extra times)
    data_length = len(df_boston_test_dictionaries)
    start_time = datetime.datetime.now()
    for idx in range(1000):
        row_num = idx % data_length
        saved_ml_pipeline.predict(df_boston_test_dictionaries[row_num])
    end_time = datetime.datetime.now()
    duration = end_time - start_time

    print('duration.total_seconds()')
    print(duration.total_seconds())

    # It's very difficult to set a benchmark for speed that will work across all machines.
    # On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
    # That's about 1 millisecond per prediction
    # Assuming we might be running on a test box that's pretty weak, multiply by 3
    # Also make sure we're not running unreasonably quickly
    assert 0.1 < duration.total_seconds() / 1.0 < 60

    # 3. make sure we're not modifying the dictionaries
    # (the score is the same after running a few experiments as it is the first time)
    predictions = []
    for row in df_boston_test_dictionaries:
        predictions.append(saved_ml_pipeline.predict(row))

    second_score = utils.calculate_rmse(df_boston_test.MEDV, predictions)
    print('second_score')
    print(second_score)

    # Make sure our score is good, but not unreasonably good
    assert lower_bound < second_score < -2.7
#*********************************************************************
# content = init Nuke
# version = 0.1.0
# date = 2019-12-01
#
# license = MIT <https://github.com/alexanderrichtertd>
# author = <NAME> <<EMAIL>>
#*********************************************************************
import os
import errno

import nuke

import pipefunc
from tank import Tank


#*********************************************************************
# VARIABLE
TITLE = os.path.splitext(os.path.basename(__file__))[0]
LOG = Tank().log.init(script=TITLE)

PROJECT_DATA = Tank().data_project
RESOLUTION = (' ').join([str(PROJECT_DATA['resolution'][0]),
                         str(PROJECT_DATA['resolution'][1]),
                         PROJECT_DATA['name'].replace(' ', '')])


#*********************************************************************
# FOLDER CREATION
def create_write_dir():
    file_name = nuke.filename(nuke.thisNode())
    file_path = os.path.dirname(file_name)
    os_path = nuke.callbacks.filenameFilter(file_path)

    # cope with the directory existing already by ignoring that exception
    try:
        os.makedirs(os_path)
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise


def add_plugin_paths():
    # ADD all IMG paths
    for img in os.getenv('IMG_PATH').split(';'):
        for img_sub in pipefunc.get_deep_folder_list(path=img, add_path=True):
            nuke.pluginAddPath(img_sub)

    # ADD sub software paths
    for paths in os.getenv('SOFTWARE_SUB_PATH').split(';'):
        nuke.pluginAddPath(paths)


#*********************************************************************
# PIPELINE
Tank().init_software()
add_plugin_paths()

try:
    from scripts import write_node
except:
    LOG.warning('FAILED loading write_node')

# LOAD paths
try:
    for paths in os.getenv('SOFTWARE_SUB_PATH').split(';'):
        nuke.pluginAddPath(paths)
except:
    LOG.warning('FAILED loading SOFTWARE_SUB_PATH')

print('SETTINGS')

# RESOLUTION *********************************************************************
try:
    nuke.addFormat(RESOLUTION)
    nuke.knobDefault('Root.format', PROJECT_DATA['name'].replace(' ', ''))
    print(' {} ON - {}'.format(chr(254), RESOLUTION))
except:
    LOG.error(' OFF - {}'.format(RESOLUTION), exc_info=True)
    print(' {} OFF - {}'.format(chr(254), RESOLUTION))

# FPS *********************************************************************
try:
    nuke.knobDefault("Root.fps", str(PROJECT_DATA['fps']))
    print(' {} ON - {} fps'.format(chr(254), PROJECT_DATA['fps']))
except:
    LOG.error(' OFF - {} fps'.format(PROJECT_DATA['fps']), exc_info=True)
    print(' {} OFF - {} fps'.format(chr(254), PROJECT_DATA['fps']))

# createFolder *********************************************************************
try:
    nuke.addBeforeRender(create_write_dir)
    print(' {} ON - create_write_dir (before render)'.format(chr(254)))
except:
    LOG.error(' OFF - create_write_dir (before render)'.format(chr(254)), exc_info=True)
    print(' {} OFF - create_write_dir (before render)'.format(chr(254)))

print('')
from torch import Tensor, nn

from ...base import VisionModule


class ClassificationModule(VisionModule):
    """Base Classification Module class"""

    def __init__(
        self,
        encoder: nn.Module,
        head: nn.Module,
        in_channels: int = 3,
        n_classes: int = 1000,
        **kwargs,
    ):
        super().__init__()
        self.encoder = encoder(in_channels=in_channels, **kwargs)
        self.head = head(self.encoder.widths[-1], n_classes)
        self.initialize()

    def initialize(self):
        pass

    def forward(self, x: Tensor) -> Tensor:
        x = self.encoder(x)
        x = self.head(x)
        return x
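# A self-contained sketch (not from the original repository) of the same encoder/head
# composition pattern, using plain nn.Module stand-ins so it runs without the package's
# VisionModule base class or a real backbone; TinyEncoder and its sizes are assumptions.
import torch
from torch import nn


class TinyEncoder(nn.Module):
    def __init__(self, in_channels=3):
        super().__init__()
        self.widths = [8]  # mimics the `widths` attribute the head construction relies on
        self.conv = nn.Conv2d(in_channels, self.widths[-1], kernel_size=3, padding=1)
        self.pool = nn.AdaptiveAvgPool2d(1)

    def forward(self, x):
        return self.pool(self.conv(x)).flatten(1)


encoder = TinyEncoder()
head = nn.Linear(encoder.widths[-1], 10)
logits = head(encoder(torch.randn(2, 3, 32, 32)))
print(logits.shape)  # torch.Size([2, 10])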
from elasticsearch import TransportError
from sanic import Blueprint
from sanic.request import Request
from sanic.response import HTTPResponse, json

from ..connections import get_client

rest_bp = Blueprint('rest')


def format_es_exception(e: TransportError):
    return json({"status_code": e.status_code,
                 "error": e.error,
                 "info": e.info})


@rest_bp.route('/query', methods=['POST'])
async def close_index(request: Request) -> HTTPResponse:
    client = get_client(request)
    body = request.json['body']
    method = request.json['method']
    path = request.json['path']
    try:
        resp = await client.transport.perform_request(method, path, body=body)
    except TransportError as e:
        return format_es_exception(e)
    return json(resp)
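# Hypothetical client-side call (not part of the module above) illustrating the JSON
# payload shape the /query handler reads; the host, port, and blueprint mount point
# are assumptions.
import requests

payload = {
    "method": "GET",
    "path": "/_cat/indices?format=json",
    "body": None,
}
resp = requests.post("http://localhost:8000/query", json=payload)
print(resp.json())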
from datetime import datetime

from pytz import timezone, utc
from pytest import mark

from pyexchange.utils import convert_datetime_to_utc


def test_converting_none_returns_none():
    assert convert_datetime_to_utc(None) is None


def test_converting_non_tz_aware_date_returns_tz_aware():
    utc_time = datetime(year=2014, month=1, day=1, hour=1, minute=1, second=1)
    assert utc_time.tzinfo is None
    assert convert_datetime_to_utc(utc_time) == datetime(year=2014, month=1, day=1,
                                                         hour=1, minute=1, second=1,
                                                         tzinfo=utc)


def test_converting_tz_aware_date_returns_tz_aware_date():
    # US/Pacific timezone is UTC-07:00 (In April we are in DST)
    # We use localize() because according to the pytz documentation, using the tzinfo
    # argument of the standard datetime constructors does not work for timezones with DST.
    pacific_time = timezone("US/Pacific").localize(
        datetime(year=2014, month=4, day=1, hour=1, minute=0, second=0))
    utc_time = utc.localize(datetime(year=2014, month=4, day=1, hour=8, minute=0, second=0))

    assert convert_datetime_to_utc(pacific_time) == utc_time
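# A small standalone illustration (added, not from the test file) of the pytz caveat the
# comment above refers to: attaching a pytz zone via tzinfo= picks the zone's base (LMT)
# offset, while localize() applies the correct DST-aware offset.
from datetime import datetime
from pytz import timezone

pacific = timezone("US/Pacific")
naive = datetime(2014, 4, 1, 1, 0, 0)

print(naive.replace(tzinfo=pacific).utcoffset())  # -1 day, 16:07:00  (LMT, wrong)
print(pacific.localize(naive).utcoffset())        # -1 day, 17:00:00  (PDT, correct)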
class register:
    plugin_dict = {}
    plugin_name = []

    @classmethod
    def register(cls, plugin_name):
        def wrapper(plugin):
            cls.plugin_dict[plugin_name] = plugin
            return plugin
        return wrapper
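# Usage sketch (added for illustration, assuming the register class above is in scope):
# the classmethod returns a decorator that records the decorated plugin under the given
# name and hands it back unchanged.
@register.register("echo")
class EchoPlugin:
    def run(self, text):
        return text


print(register.plugin_dict)                       # {'echo': <class '...EchoPlugin'>}
print(register.plugin_dict["echo"]().run("hi"))   # "hi"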
"""
Sliding Window Matching
=======================
Find recurring patterns in neural signals using Sliding Window Matching.
This tutorial primarily covers the :func:`~.sliding_window_matching` function.
"""<line_sep>###################################################################################################
# Overview
# --------
#
# Non-periodic or non-sinusoidal properties can be difficult to assess in frequency domain
# methods. To try and address this, the sliding window matching (SWM) algorithm has been
# proposed for detecting and measuring recurring, but unknown, patterns in time series data.
# Patterns of interest may be transient events, and/or the waveform shape of neural oscillations.
#
# In this example, we will explore applying the SWM algorithm to some LFP data.
#
# The SWM approach tries to find recurring patterns (or motifs) in the data, using sliding
# windows. An iterative process randomly samples windows, and compares each to the average
# window. The goal is to find a selection of windows that look maximally like the average
# window, at which point the occurrences of the window have been detected, and the average
# window pattern can be examined.
#
# The sliding window matching algorithm is described in
# `Gips et al, 2017 <https://doi.org/10.1016/j.jneumeth.2016.11.001>`_
#
###################################################################################################
# sphinx_gallery_thumbnail_number = 2
import numpy as np

# Import the sliding window matching function
from neurodsp.rhythm import sliding_window_matching

# Import utilities for loading and plotting data
from neurodsp.utils.download import load_ndsp_data
from neurodsp.plts.rhythm import plot_swm_pattern
from neurodsp.plts.time_series import plot_time_series
from neurodsp.utils import set_random_seed, create_times
from neurodsp.utils.norm import normalize_sig

###################################################################################################
# Set random seed, for reproducibility
set_random_seed(0)

###################################################################################################
# Load neural signal
# ------------------
#
# First, we will load a segment of ECoG data, as an example time series.
#
###################################################################################################
# Download, if needed, and load example data files
sig = load_ndsp_data('sample_data_1.npy', folder='data')
sig = normalize_sig(sig, mean=0, variance=1)

# Set sampling rate, and create a times vector for plotting
fs = 1000
times = create_times(len(sig) / fs, fs)

###################################################################################################
#
# Next, we can visualize this data segment. As we can see this segment of data has
# some prominent bursts of oscillations, in this case, in the beta frequency.
#
###################################################################################################
# Plot example signal
plot_time_series(times, sig)

###################################################################################################
# Apply sliding window matching
# -----------------------------
#
# The beta oscillation in our data segment looks like it might have some non-sinusoidal
# properties. We can investigate this with sliding window matching.
#
# Sliding window matching can be applied with the
# :func:`~.sliding_window_matching` function.
#
###################################################################################################
# Data Preprocessing
# ~~~~~~~~~~~~~~~~~~
#
# Typically, the input signal does not have to be filtered into a band of interest to use SWM.
#
# If the goal is to characterize non-sinusoidal rhythms, you typically won't want to
# apply a filter that will smooth out the features of interest.
#
# However, if the goal is to characterize higher frequency activity, it can be useful to
# apply a highpass filter, so that the method does not converge on a lower frequency motif.
#
# In our case, the beta rhythm of interest is the most prominent, low frequency, feature of the
# data, so we won't apply a filter.
#
###################################################################################################
# Algorithm Settings
# ~~~~~~~~~~~~~~~~~~
#
# The SWM algorithm has some algorithm specific settings that need to be applied, including:
#
# - `win_len` : the length of the window, defined in seconds
# - `win_spacing` : the minimum distance between windows, also defined in seconds
#
# The length of the window influences the patterns that are extracted from the data.
# Typically, you want to set the window length to match the expected timescale of the
# patterns under study.
#
# For our purposes, we will define the window length to be about 1 cycle of a beta oscillation,
# which should help the algorithm to find the waveform shape of the neural oscillation.
#
###################################################################################################
# Define window length & minimum window spacing, both in seconds
win_len = .055
win_spacing = .055

###################################################################################################
# Apply the sliding window matching algorithm to the time series
windows, window_starts = sliding_window_matching(sig, fs, win_len, win_spacing, var_thresh=.5)

###################################################################################################
# Examine the Results
# ~~~~~~~~~~~~~~~~~~~
#
# What we got back from the SWM function are the extracted windows and the list of
# indices in the data indicating where each window starts.
#
# In order to visualize the resulting pattern, we can use
# :func:`~.plot_swm_pattern`.
#
###################################################################################################
# Compute the average window
avg_window = np.mean(windows, 0)

# Plot the discovered pattern
plot_swm_pattern(avg_window)

###################################################################################################
#
# The above average pattern looks to capture a beta rhythm, and in it we can see some
# of the waveform shape of the extracted rhythm.
#
###################################################################################################
# Concluding Notes
# ~~~~~~~~~~~~~~~~
#
# One thing to keep in mind is that the SWM algorithm includes a random element of sampling
# and comparing the windows - meaning it is not deterministic. Because of this, results
# can change with different random seeds.
#
# To explore this, go back and change the random seed, and see how the output changes.
#
# You can also set the number of iterations that the algorithm sweeps through. Increasing
# the number of iterations, and using longer data segments, can help improve the robustness
# of the algorithm results.
#
from __future__ import print_function

import numba.unittest_support as unittest
from numba import compiler, ir, objmode
import numpy as np


class TestIR(unittest.TestCase):

    def test_IRScope(self):
        filename = "<?>"
        top = ir.Scope(parent=None, loc=ir.Loc(filename=filename, line=1))
        local = ir.Scope(parent=top, loc=ir.Loc(filename=filename, line=2))

        apple = local.define('apple', loc=ir.Loc(filename=filename, line=3))
        self.assertIs(local.get('apple'), apple)
        self.assertEqual(len(local.localvars), 1)

        orange = top.define('orange', loc=ir.Loc(filename=filename, line=4))
        self.assertEqual(len(local.localvars), 1)
        self.assertEqual(len(top.localvars), 1)
        self.assertIs(top.get('orange'), orange)
        self.assertIs(local.get('orange'), orange)

        more_orange = local.define('orange', loc=ir.Loc(filename=filename, line=5))
        self.assertIs(top.get('orange'), orange)
        self.assertIsNot(local.get('orange'), orange)
        self.assertIs(local.get('orange'), more_orange)

        try:
            local.define('orange', loc=ir.Loc(filename=filename, line=5))
        except ir.RedefinedError:
            pass
        else:
            self.fail("Expecting an %s" % ir.RedefinedError)


class CheckEquality(unittest.TestCase):

    var_a = ir.Var(None, 'a', ir.unknown_loc)
    var_b = ir.Var(None, 'b', ir.unknown_loc)
    var_c = ir.Var(None, 'c', ir.unknown_loc)
    var_d = ir.Var(None, 'd', ir.unknown_loc)
    var_e = ir.Var(None, 'e', ir.unknown_loc)
    loc1 = ir.Loc('mock', 1, 0)
    loc2 = ir.Loc('mock', 2, 0)
    loc3 = ir.Loc('mock', 3, 0)

    def check(self, base, same=[], different=[]):
        for s in same:
            self.assertTrue(base == s)
        for d in different:
            self.assertTrue(base != d)


class TestIRMeta(CheckEquality):
    """
    Tests IR node meta, like Loc and Scope
    """

    def test_loc(self):
        a = ir.Loc('file', 1, 0)
        b = ir.Loc('file', 1, 0)
        c = ir.Loc('pile', 1, 0)
        d = ir.Loc('file', 2, 0)
        e = ir.Loc('file', 1, 1)
        self.check(a, same=[b, ], different=[c, d, e])

        f = ir.Loc('file', 1, 0, maybe_decorator=False)
        g = ir.Loc('file', 1, 0, maybe_decorator=True)
        self.check(a, same=[f, g])

    def test_scope(self):
        parent1 = ir.Scope(None, self.loc1)
        parent2 = ir.Scope(None, self.loc1)
        parent3 = ir.Scope(None, self.loc2)
        self.check(parent1, same=[parent2, parent3, ])

        a = ir.Scope(parent1, self.loc1)
        b = ir.Scope(parent1, self.loc1)
        c = ir.Scope(parent1, self.loc2)
        d = ir.Scope(parent3, self.loc1)
        self.check(a, same=[b, c, d])

        # parent1 and parent2 are equal, so children referring to either parent
        # should be equal
        e = ir.Scope(parent2, self.loc1)
        self.check(a, same=[e, ])


class TestIRNodes(CheckEquality):
    """
    Tests IR nodes
    """

    def test_terminator(self):
        # terminator base class inst should always be equal
        t1 = ir.Terminator()
        t2 = ir.Terminator()
        self.check(t1, same=[t2])

    def test_jump(self):
        a = ir.Jump(1, self.loc1)
        b = ir.Jump(1, self.loc1)
        c = ir.Jump(1, self.loc2)
        d = ir.Jump(2, self.loc1)
        self.check(a, same=[b, c], different=[d])

    def test_return(self):
        a = ir.Return(self.var_a, self.loc1)
        b = ir.Return(self.var_a, self.loc1)
        c = ir.Return(self.var_a, self.loc2)
        d = ir.Return(self.var_b, self.loc1)
        self.check(a, same=[b, c], different=[d])

    def test_raise(self):
        a = ir.Raise(self.var_a, self.loc1)
        b = ir.Raise(self.var_a, self.loc1)
        c = ir.Raise(self.var_a, self.loc2)
        d = ir.Raise(self.var_b, self.loc1)
        self.check(a, same=[b, c], different=[d])

    def test_staticraise(self):
        a = ir.StaticRaise(AssertionError, None, self.loc1)
        b = ir.StaticRaise(AssertionError, None, self.loc1)
        c = ir.StaticRaise(AssertionError, None, self.loc2)
        e = ir.StaticRaise(AssertionError, ("str",), self.loc1)
        d = ir.StaticRaise(RuntimeError, None, self.loc1)
        self.check(a, same=[b, c], different=[d, e])

    def test_branch(self):
        a = ir.Branch(self.var_a, 1, 2, self.loc1)
        b = ir.Branch(self.var_a, 1, 2, self.loc1)
        c = ir.Branch(self.var_a, 1, 2, self.loc2)
        d = ir.Branch(self.var_b, 1, 2, self.loc1)
        e = ir.Branch(self.var_a, 2, 2, self.loc1)
        f = ir.Branch(self.var_a, 1, 3, self.loc1)
        self.check(a, same=[b, c], different=[d, e, f])

    def test_expr(self):
        a = ir.Expr('some_op', self.loc1)
        b = ir.Expr('some_op', self.loc1)
        c = ir.Expr('some_op', self.loc2)
        d = ir.Expr('some_other_op', self.loc1)
        self.check(a, same=[b, c], different=[d])

    def test_setitem(self):
        a = ir.SetItem(self.var_a, self.var_b, self.var_c, self.loc1)
        b = ir.SetItem(self.var_a, self.var_b, self.var_c, self.loc1)
        c = ir.SetItem(self.var_a, self.var_b, self.var_c, self.loc2)
        d = ir.SetItem(self.var_d, self.var_b, self.var_c, self.loc1)
        e = ir.SetItem(self.var_a, self.var_d, self.var_c, self.loc1)
        f = ir.SetItem(self.var_a, self.var_b, self.var_d, self.loc1)
        self.check(a, same=[b, c], different=[d, e, f])

    def test_staticsetitem(self):
        a = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_c, self.loc1)
        b = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_c, self.loc1)
        c = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_c, self.loc2)
        d = ir.StaticSetItem(self.var_d, 1, self.var_b, self.var_c, self.loc1)
        e = ir.StaticSetItem(self.var_a, 2, self.var_b, self.var_c, self.loc1)
        f = ir.StaticSetItem(self.var_a, 1, self.var_d, self.var_c, self.loc1)
        g = ir.StaticSetItem(self.var_a, 1, self.var_b, self.var_d, self.loc1)
        self.check(a, same=[b, c], different=[d, e, f, g])

    def test_delitem(self):
        a = ir.DelItem(self.var_a, self.var_b, self.loc1)
        b = ir.DelItem(self.var_a, self.var_b, self.loc1)
        c = ir.DelItem(self.var_a, self.var_b, self.loc2)
        d = ir.DelItem(self.var_c, self.var_b, self.loc1)
        e = ir.DelItem(self.var_a, self.var_c, self.loc1)
        self.check(a, same=[b, c], different=[d, e])

    def test_del(self):
        a = ir.Del(self.var_a.name, self.loc1)
        b = ir.Del(self.var_a.name, self.loc1)
        c = ir.Del(self.var_a.name, self.loc2)
        d = ir.Del(self.var_b.name, self.loc1)
        self.check(a, same=[b, c], different=[d])

    def test_setattr(self):
        a = ir.SetAttr(self.var_a, 'foo', self.var_b, self.loc1)
        b = ir.SetAttr(self.var_a, 'foo', self.var_b, self.loc1)
        c = ir.SetAttr(self.var_a, 'foo', self.var_b, self.loc2)
        d = ir.SetAttr(self.var_c, 'foo', self.var_b, self.loc1)
        e = ir.SetAttr(self.var_a, 'bar', self.var_b, self.loc1)
        f = ir.SetAttr(self.var_a, 'foo', self.var_c, self.loc1)
        self.check(a, same=[b, c], different=[d, e, f])

    def test_delattr(self):
        a = ir.DelAttr(self.var_a, 'foo', self.loc1)
        b = ir.DelAttr(self.var_a, 'foo', self.loc1)
        c = ir.DelAttr(self.var_a, 'foo', self.loc2)
        d = ir.DelAttr(self.var_c, 'foo', self.loc1)
        e = ir.DelAttr(self.var_a, 'bar', self.loc1)
        self.check(a, same=[b, c], different=[d, e])

    def test_assign(self):
        a = ir.Assign(self.var_a, self.var_b, self.loc1)
        b = ir.Assign(self.var_a, self.var_b, self.loc1)
        c = ir.Assign(self.var_a, self.var_b, self.loc2)
        d = ir.Assign(self.var_c, self.var_b, self.loc1)
        e = ir.Assign(self.var_a, self.var_c, self.loc1)
        self.check(a, same=[b, c], different=[d, e])

    def test_print(self):
        a = ir.Print((self.var_a,), self.var_b, self.loc1)
        b = ir.Print((self.var_a,), self.var_b, self.loc1)
        c = ir.Print((self.var_a,), self.var_b, self.loc2)
        d = ir.Print((self.var_c,), self.var_b, self.loc1)
        e = ir.Print((self.var_a,), self.var_c, self.loc1)
        self.check(a, same=[b, c], different=[d, e])

    def test_storemap(self):
        a = ir.StoreMap(self.var_a, self.var_b, self.var_c, self.loc1)
        b = ir.StoreMap(self.var_a, self.var_b, self.var_c, self.loc1)
        c = ir.StoreMap(self.var_a, self.var_b, self.var_c, self.loc2)
        d = ir.StoreMap(self.var_d, self.var_b, self.var_c, self.loc1)
        e = ir.StoreMap(self.var_a, self.var_d, self.var_c, self.loc1)
        f = ir.StoreMap(self.var_a, self.var_b, self.var_d, self.loc1)
        self.check(a, same=[b, c], different=[d, e, f])

    def test_yield(self):
        a = ir.Yield(self.var_a, self.loc1, 0)
        b = ir.Yield(self.var_a, self.loc1, 0)
        c = ir.Yield(self.var_a, self.loc2, 0)
        d = ir.Yield(self.var_b, self.loc1, 0)
        e = ir.Yield(self.var_a, self.loc1, 1)
        self.check(a, same=[b, c], different=[d, e])

    def test_enterwith(self):
        a = ir.EnterWith(self.var_a, 0, 1, self.loc1)
        b = ir.EnterWith(self.var_a, 0, 1, self.loc1)
        c = ir.EnterWith(self.var_a, 0, 1, self.loc2)
        d = ir.EnterWith(self.var_b, 0, 1, self.loc1)
        e = ir.EnterWith(self.var_a, 1, 1, self.loc1)
        f = ir.EnterWith(self.var_a, 0, 2, self.loc1)
        self.check(a, same=[b, c], different=[d, e, f])

    def test_arg(self):
        a = ir.Arg('foo', 0, self.loc1)
        b = ir.Arg('foo', 0, self.loc1)
        c = ir.Arg('foo', 0, self.loc2)
        d = ir.Arg('bar', 0, self.loc1)
        e = ir.Arg('foo', 1, self.loc1)
        self.check(a, same=[b, c], different=[d, e])

    def test_const(self):
        a = ir.Const(1, self.loc1)
        b = ir.Const(1, self.loc1)
        c = ir.Const(1, self.loc2)
        d = ir.Const(2, self.loc1)
        self.check(a, same=[b, c], different=[d])

    def test_global(self):
        a = ir.Global('foo', 0, self.loc1)
        b = ir.Global('foo', 0, self.loc1)
        c = ir.Global('foo', 0, self.loc2)
        d = ir.Global('bar', 0, self.loc1)
        e = ir.Global('foo', 1, self.loc1)
        self.check(a, same=[b, c], different=[d, e])

    def test_var(self):
        a = ir.Var(None, 'foo', self.loc1)
        b = ir.Var(None, 'foo', self.loc1)
        c = ir.Var(None, 'foo', self.loc2)
        d = ir.Var(ir.Scope(None, ir.unknown_loc), 'foo', self.loc1)
        e = ir.Var(None, 'bar', self.loc1)
        self.check(a, same=[b, c, d], different=[e])

    def test_intrinsic(self):
        a = ir.Intrinsic('foo', 'bar', (0,), self.loc1)
        b = ir.Intrinsic('foo', 'bar', (0,), self.loc1)
        c = ir.Intrinsic('foo', 'bar', (0,), self.loc2)
        d = ir.Intrinsic('baz', 'bar', (0,), self.loc1)
        e = ir.Intrinsic('foo', 'baz', (0,), self.loc1)
        f = ir.Intrinsic('foo', 'bar', (1,), self.loc1)
        self.check(a, same=[b, c], different=[d, e, f])

    def test_undefinedtype(self):
        a = ir.UndefinedType()
        b = ir.UndefinedType()
        self.check(a, same=[b])

    def test_loop(self):
        a = ir.Loop(1, 3)
        b = ir.Loop(1, 3)
        c = ir.Loop(2, 3)
        d = ir.Loop(1, 4)
        self.check(a, same=[b], different=[c, d])

    def test_with(self):
        a = ir.With(1, 3)
        b = ir.With(1, 3)
        c = ir.With(2, 3)
        d = ir.With(1, 4)
        self.check(a, same=[b], different=[c, d])


# used later
_GLOBAL = 1234


class TestIRCompounds(CheckEquality):
    """
    Tests IR concepts that have state
    """

    def test_varmap(self):
        a = ir.VarMap()
        a.define(self.var_a, 'foo')
        a.define(self.var_b, 'bar')

        b = ir.VarMap()
        b.define(self.var_a, 'foo')
        b.define(self.var_b, 'bar')

        c = ir.VarMap()
        c.define(self.var_a, 'foo')
        c.define(self.var_c, 'bar')

        self.check(a, same=[b], different=[c])

    def test_block(self):
        def gen_block():
            parent = ir.Scope(None, self.loc1)
            tmp = ir.Block(parent, self.loc2)
            assign1 = ir.Assign(self.var_a, self.var_b, self.loc3)
            assign2 = ir.Assign(self.var_a, self.var_c, self.loc3)
            assign3 = ir.Assign(self.var_c, self.var_b, self.loc3)
            tmp.append(assign1)
            tmp.append(assign2)
            tmp.append(assign3)
            return tmp

        a = gen_block()
        b = gen_block()
        c = gen_block().append(ir.Assign(self.var_a, self.var_b, self.loc3))

        self.check(a, same=[b], different=[c])

    def test_functionir(self):
        # this creates a function full of all sorts of things to ensure the IR
        # is pretty involved, it then compares two instances of the compiled
        # function IR to check the IR is the same invariant of objects, and then
        # a tiny mutation is made to the IR in the second function and detection
        # of this change is checked.
        def gen():
            _FREEVAR = 0xCAFE

            def foo(a, b, c=12, d=1j, e=None):
                f = a + b
                a += _FREEVAR
                g = np.zeros(c, dtype=np.complex64)
                h = f + g
                i = 1j / d
                if np.abs(i) > 0:
                    k = h / i
                    l = np.arange(1, c + 1)
                    with objmode():
                        print(e, k)
                    m = np.sqrt(l - g)
                    if np.abs(m[0]) < 1:
                        n = 0
                        for o in range(a):
                            n += 0
                            if np.abs(n) < 3:
                                break
                        n += m[2]
                    p = g / l
                    q = []
                    for r in range(len(p)):
                        q.append(p[r])
                        if r > 4 + 1:
                            with objmode(s='intp', t='complex128'):
                                s = 123
                                t = 5
                            if s > 122:
                                t += s
                        t += q[0] + _GLOBAL
                return f + o + r + t + r + a + n

            return foo

        x = gen()
        y = gen()
        x_ir = compiler.run_frontend(x)
        y_ir = compiler.run_frontend(y)

        self.assertTrue(x_ir.equal_ir(y_ir))

        def check_diffstr(string, pointing_at=[]):
            lines = string.splitlines()
            for item in pointing_at:
                for l in lines:
                    if l.startswith('->'):
                        if item in l:
                            break
                else:
                    raise AssertionError("Could not find %s " % item)

        self.assertIn("IR is considered equivalent", x_ir.diff_str(y_ir))

        # minor mutation, simply switch branch targets on last branch
        for label in reversed(list(y_ir.blocks.keys())):
            blk = y_ir.blocks[label]
            if isinstance(blk.body[-1], ir.Branch):
                ref = blk.body[-1]
                ref.truebr, ref.falsebr = ref.falsebr, ref.truebr
                break

        check_diffstr(x_ir.diff_str(y_ir), ['branch'])

        z = gen()
        self.assertFalse(x_ir.equal_ir(y_ir))

        z_ir = compiler.run_frontend(z)

        change_set = set()
        for label in reversed(list(z_ir.blocks.keys())):
            blk = z_ir.blocks[label]
            ref = blk.body[:-1]
            idx = None
            for i in range(len(ref)):
                # look for two adjacent Del
                if (isinstance(ref[i], ir.Del) and
                        isinstance(ref[i + 1], ir.Del)):
                    idx = i
                    break
            if idx is not None:
                b = blk.body
                change_set.add(str(b[idx + 1]))
                change_set.add(str(b[idx]))
                b[idx], b[idx + 1] = b[idx + 1], b[idx]
                break

        self.assertFalse(x_ir.equal_ir(z_ir))
        self.assertEqual(len(change_set), 2)
        for item in change_set:
            self.assertTrue(item.startswith('del '))
        check_diffstr(x_ir.diff_str(z_ir), change_set)

        def foo(a, b):
            c = a * 2
            d = c + b
            e = np.sqrt(d)
            return e

        def bar(a, b):  # same as foo
            c = a * 2
            d = c + b
            e = np.sqrt(d)
            return e

        def baz(a, b):
            c = a * 2
            d = b + c
            e = np.sqrt(d + 1)
            return e

        foo_ir = compiler.run_frontend(foo)
        bar_ir = compiler.run_frontend(bar)
        self.assertTrue(foo_ir.equal_ir(bar_ir))
        self.assertIn("IR is considered equivalent", foo_ir.diff_str(bar_ir))

        baz_ir = compiler.run_frontend(baz)
        self.assertFalse(foo_ir.equal_ir(baz_ir))
        tmp = foo_ir.diff_str(baz_ir)
        self.assertIn("Other block contains more statements", tmp)
        check_diffstr(tmp, ["c + b", "b + c"])


if __name__ == '__main__':
    unittest.main()
import dateutil.parser
import pytest

from testsuite.plugins import mockserver
from testsuite.utils import json_util

NOW = dateutil.parser.parse('2019-09-19-13:04:00.000000')

MOCKSERVER_INFO = mockserver.MockserverInfo(
    'localhost', 123, 'http://localhost:123/', None,
)
MOCKSERVER_SSL_INFO = mockserver.MockserverInfo(
    'localhost', 456, 'https://localhost:456/',
    mockserver.SslInfo('/some_dir/cert.cert', '/some_dir/cert.key'),
)


@pytest.mark.parametrize(
    'json_input,expected_result',
    [
        (
            # simple list
            [{'some_date': {'$dateDiff': 0}}, 'regular_element'],  # json_input
            [{'some_date': NOW}, 'regular_element'],               # expected_result
        ),
        (
            # simple dict
            {
                # json_input
                'some_date': {'$dateDiff': 0},
                'regular_key': 'regular_value',
            },
            {'some_date': NOW, 'regular_key': 'regular_value'},    # expected_result
        ),
        (
            # nested list and dict
            {
                # json_input
                'regular_root_key': 'regular_root_value',
                'root_date': {'$dateDiff': 0},
                'parent_key': {
                    'nested_date': {'$dateDiff': 0},
                    'nested_list': [
                        'regular_element1',
                        {'$dateDiff': 0},
                        {'$dateDiff': 0},
                        'regular_element2',
                    ],
                },
            },
            {
                # expected_result
                'regular_root_key': 'regular_root_value',
                'root_date': NOW,
                'parent_key': {
                    'nested_date': NOW,
                    'nested_list': [
                        'regular_element1',
                        NOW,
                        NOW,
                        'regular_element2',
                    ],
                },
            },
        ),
    ],
)
def test_substitute_now(json_input, expected_result):
    result = json_util.substitute(json_input, now=NOW)
    assert result == expected_result


@pytest.mark.parametrize(
    'json_input,expected_result',
    [
        (
            ({'client_url': {'$mockserver': '/path'}}),
            ({'client_url': 'http://localhost:123/path'}),
        ),
        (
            ({'client_url': {'$mockserver': '/path', '$schema': False}}),
            ({'client_url': 'localhost:123/path'}),
        ),
    ],
)
def test_substitute_mockserver(json_input, expected_result):
    result = json_util.substitute(json_input, mockserver=MOCKSERVER_INFO)
    assert result == expected_result


@pytest.mark.parametrize(
    'json_input,expected_result',
    [
        (
            ({'client_url': {'$mockserver_https': '/path'}}),
            ({'client_url': 'https://localhost:456/path'}),
        ),
        (
            ({'client_url': {'$mockserver_https': '/path', '$schema': False}}),
            ({'client_url': 'localhost:456/path'}),
        ),
    ],
)
def test_substitute_mockserver_https(json_input, expected_result):
    result = json_util.substitute(
        json_input, mockserver_https=MOCKSERVER_SSL_INFO,
    )
    assert result == expected_result
from datetime import datetime
from typing import Any, List, Optional, Sequence, Tuple, cast
from uuid import UUID

from eventsourcing.domain import Aggregate
from eventsourcing.examples.searchabletimestamps.persistence import (
    SearchableTimestampsRecorder,
)
from eventsourcing.persistence import ApplicationRecorder, StoredEvent
from eventsourcing.postgres import (
    Factory,
    PostgresApplicationRecorder,
    PostgresConnection,
    PostgresCursor,
    PostgresDatastore,
)


class SearchableTimestampsApplicationRecorder(
    SearchableTimestampsRecorder, PostgresApplicationRecorder
):
    def __init__(
        self,
        datastore: PostgresDatastore,
        events_table_name: str = "stored_events",
        event_timestamps_table_name: str = "event_timestamps",
    ):
        self.check_table_name_length(event_timestamps_table_name, datastore.schema)
        self.event_timestamps_table_name = event_timestamps_table_name
        super().__init__(datastore, events_table_name)
        self.insert_event_timestamp_statement = (
            f"INSERT INTO {self.event_timestamps_table_name} VALUES ($1, $2, $3)"
        )
        self.insert_event_timestamp_statement_name = (
            f"insert_{event_timestamps_table_name}".replace(".", "_")
        )
        self.select_event_timestamp_statement = (
            f"SELECT originator_version FROM {self.event_timestamps_table_name} WHERE "
            f"originator_id = $1 AND "
            f"timestamp <= $2 "
            "ORDER BY originator_version DESC "
            "LIMIT 1"
        )
        self.select_event_timestamp_statement_name = (
            f"select_{event_timestamps_table_name}".replace(".", "_")
        )

    def construct_create_table_statements(self) -> List[str]:
        statements = super().construct_create_table_statements()
        statements.append(
            "CREATE TABLE IF NOT EXISTS "
            f"{self.event_timestamps_table_name} ("
            "originator_id uuid NOT NULL, "
            "timestamp timestamp with time zone, "
            "originator_version bigint NOT NULL, "
            "PRIMARY KEY "
            "(originator_id, timestamp))"
        )
        return statements

    def _prepare_insert_events(self, conn: PostgresConnection) -> None:
        super()._prepare_insert_events(conn)
        self._prepare(
            conn,
            self.insert_event_timestamp_statement_name,
            self.insert_event_timestamp_statement,
        )

    def _insert_events(
        self,
        c: PostgresCursor,
        stored_events: List[StoredEvent],
        **kwargs: Any,
    ) -> Optional[Sequence[int]]:
        notification_ids = super()._insert_events(c, stored_events, **kwargs)

        # Insert event timestamps.
        event_timestamps_data = cast(
            List[Tuple[UUID, datetime, int]], kwargs.get("event_timestamps_data")
        )
        for event_timestamp_data in event_timestamps_data:
            statement_alias = self.statement_name_aliases[
                self.insert_event_timestamp_statement_name
            ]
            c.execute(f"EXECUTE {statement_alias}(%s, %s, %s)", event_timestamp_data)
        return notification_ids

    def get_version_at_timestamp(
        self, originator_id: UUID, timestamp: datetime
    ) -> Optional[int]:
        with self.datastore.get_connection() as conn:
            self._prepare(
                conn,
                self.select_event_timestamp_statement_name,
                self.select_event_timestamp_statement,
            )
            with conn.transaction(commit=False) as curs:
                statement_alias = self.statement_name_aliases[
                    self.select_event_timestamp_statement_name
                ]
                curs.execute(
                    f"EXECUTE {statement_alias}(%s, %s)", [originator_id, timestamp]
                )
                for row in curs.fetchall():
                    return row["originator_version"]
                else:
                    return Aggregate.INITIAL_VERSION - 1


class SearchableTimestampsInfrastructureFactory(Factory):
    def application_recorder(self) -> ApplicationRecorder:
        prefix = (self.datastore.schema + ".") if self.datastore.schema else ""
        prefix += self.env.name.lower() or "stored"
        events_table_name = prefix + "_events"
        event_timestamps_table_name = prefix + "_timestamps"
        recorder = SearchableTimestampsApplicationRecorder(
            datastore=self.datastore,
            events_table_name=events_table_name,
            event_timestamps_table_name=event_timestamps_table_name,
        )
        recorder.create_table()
        return recorder


del Factory
from app.blogging import bp
from datetime import datetime
from flask import flash, redirect, url_for
from flask_login import current_user


@bp.before_request
def protect():
    '''
    Registers new function to Flask-Blogging Blueprint that protects
    updates to make them only viewable by paid subscribers.
    '''
    if current_user.is_authenticated:
        if datetime.today() <= current_user.expiration:
            return None
        else:
            flash('You must have a paid-up subscription \
to view updates.', 'warning')
            return redirect(url_for('main.support'))
    else:
        flash('Please login to view updates.', 'warning')
        return redirect(url_for('auth.login'))
def extractKaedesan721TumblrCom(item):
    '''
    Parser for 'kaedesan721.tumblr.com'
    '''
    bad_tags = [
        'FanArt',
        "htr asks",
        'Spanish translations',
        'htr anime',
        'my thoughts',
        'Cats',
        'answered',
        'ask meme',
        'relay convos',
        'translation related post',
        'nightmare fuel',
        'htr manga',
        'memes',
        'htrweek',
        'Video Games',
        'Animation',
        'replies',
        'jazz',
        'Music',
    ]
    if any([bad in item['tags'] for bad in bad_tags]):
        return None

    vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
    if not (chp or vol) or "preview" in item['title'].lower():
        return None

    if "my translations" in item['tags']:
        tagmap = [
            ('<NAME>', '<NAME>', 'translated'),
            ('<NAME>', '<NAME>', 'translated'),
            ('PRC', 'PRC', 'translated'),
            ('Loiterous', 'Loiterous', 'oel'),
        ]
        for tagname, name, tl_type in tagmap:
            if tagname in item['tags']:
                return buildReleaseMessageWithType(item, name, vol, chp, frag=frag,
                                                   postfix=postfix, tl_type=tl_type)

    return False
"""
The Sims 4 Community Library is licensed under the Creative Commons Attribution 4.0 International public license (CC BY 4.0).
https://creativecommons.org/licenses/by/4.0/
https://creativecommons.org/licenses/by/4.0/legalcode
Copyright (c) COLONOLNUTTY
"""<import_stmt>os<import_from_stmt>sims4.commands Command CommandType CheatOutput<import_from_stmt>sims4communitylib.utils.common_time_utils CommonTimeUtils<import_from_stmt>typing Any Callable<line_sep>ON_RTD=os.environ.get('READTHEDOCS' <none>)<eq>'True'<if_stmt><not>ON_RTD<block_start><import_from_stmt>scheduling Timeline<import_from_stmt>alarms AlarmHandle<import_from_stmt>date_and_time DateAndTime TimeSpan<block_end><else_stmt># noinspection PyMissingOrEmptyDocstring
<block_start><class_stmt>AlarmHandle<block_start><def_stmt>cancel self<block_start><pass><block_end><block_end># noinspection PyMissingOrEmptyDocstring
<class_stmt>DateAndTime<block_start><pass><block_end># noinspection PyMissingOrEmptyDocstring
<class_stmt>TimeSpan<block_start><pass><block_end># noinspection PyMissingOrEmptyDocstring
<class_stmt>Timeline<block_start><pass><block_end><block_end><class_stmt>CommonAlarmHandle(AlarmHandle)<block_start>"""A custom alarm handle that keeps track of when it is slated to trigger for the first time."""<def_stmt>__init__ self owner:Any on_alarm_triggered_callback:Callable[['CommonAlarmHandle'] <none>] timeline:Timeline when:DateAndTime should_repeat:bool=<false> time_until_repeat:TimeSpan=<none> accurate_repeat:bool=<true> persist_across_zone_loads:bool=<false><block_start>self.started_at_date_and_time=when<line_sep>super().__init__(owner on_alarm_triggered_callback timeline when repeating=should_repeat repeat_interval=time_until_repeat accurate_repeat=accurate_repeat cross_zone=persist_across_zone_loads)<block_end><block_end><if_stmt><not>ON_RTD<block_start>@Command('s4clib.print_current_time' command_type=CommandType.Live)<def_stmt>_s4clib_print_current_time _connection:int=<none><block_start>output=CheatOutput(_connection)<line_sep>output('Current time')<line_sep>output('Hour {} Minute {}'.format(CommonTimeUtils.get_current_date_and_time().hour() CommonTimeUtils.get_current_date_and_time().minute()))<line_sep>output('Abs Hour {} Abs Minute {}'.format(CommonTimeUtils.get_current_date_and_time().absolute_hours() CommonTimeUtils.get_current_date_and_time().absolute_minutes()))<block_end><block_end> |
import sys

import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # NOQA
import seaborn  # NOQA

from spherecluster import sample_vMF

plt.ion()

n_clusters = 3
mus = np.random.randn(3, n_clusters)
mus, r = np.linalg.qr(mus, mode='reduced')
kappas = [15, 15, 15]
num_points_per_class = 250

Xs = []
for nn in range(n_clusters):
    new_X = sample_vMF(mus[nn], kappas[nn], num_points_per_class)
    Xs.append(new_X.T)

fig = plt.figure(figsize=(8, 6))
ax = fig.add_subplot(1, 1, 1, aspect='equal', projection='3d',
                     adjustable='box-forced',
                     xlim=[-1.1, 1.1], ylim=[-1.1, 1.1], zlim=[-1.1, 1.1])

colors = ['b', 'r', 'g']
for nn in range(n_clusters):
    ax.scatter(Xs[nn][0, :], Xs[nn][1, :], Xs[nn][2, :], c=colors[nn])

ax.set_aspect('equal')
plt.axis('off')
plt.show()


def r_input(val=None):
    val = val or ''
    if sys.version_info[0] >= 3:
        return eval(input(val))
    return raw_input(val)


r_input()
from django.contrib import admin, messages
from django.shortcuts import render
from django.utils.translation import gettext_lazy as _

from inline_actions.actions import DefaultActionsMixin, ViewAction
from inline_actions.admin import InlineActionsMixin, InlineActionsModelAdminMixin

from . import forms
from .models import Article, Author, AuthorProxy


class UnPublishActionsMixin(object):
    def get_inline_actions(self, request, obj=None):
        actions = super(UnPublishActionsMixin, self).get_inline_actions(request, obj)
        if obj:
            if obj.status == Article.DRAFT:
                actions.append('publish')
            elif obj.status == Article.PUBLISHED:
                actions.append('unpublish')
        return actions

    def publish(self, request, obj, parent_obj=None):
        obj.status = Article.PUBLISHED
        obj.save()
        messages.info(request, _("Article published."))

    publish.short_description = _("Publish")  # type: ignore

    def unpublish(self, request, obj, parent_obj=None):
        obj.status = Article.DRAFT
        obj.save()
        messages.info(request, _("Article unpublished."))

    unpublish.short_description = _("Unpublish")  # type: ignore


class TogglePublishActionsMixin(object):
    def get_inline_actions(self, request, obj=None):
        actions = super(TogglePublishActionsMixin, self).get_inline_actions(
            request=request, obj=obj
        )
        actions.append('toggle_publish')
        return actions

    def toggle_publish(self, request, obj, parent_obj=None):
        if obj.status == Article.DRAFT:
            obj.status = Article.PUBLISHED
        else:
            obj.status = Article.DRAFT

        obj.save()
        status = 'unpublished' if obj.status == Article.DRAFT else 'published'
        messages.info(request, _("Article {}.".format(status)))

    def get_toggle_publish_label(self, obj):
        label = 'publish' if obj.status == Article.DRAFT else 'unpublish'
        return 'Toggle {}'.format(label)

    def get_toggle_publish_css(self, obj):
        return 'button object-tools' if obj.status == Article.DRAFT else 'default'


class ChangeTitleActionsMixin(object):
    def get_inline_actions(self, request, obj=None):
        actions = super(ChangeTitleActionsMixin, self).get_inline_actions(request, obj)
        actions.append('change_title')
        return actions

    def change_title(self, request, obj, parent_obj=None):
        # explicitly check whether the submit button has been pressed
        if '_save' in request.POST:
            form = forms.ChangeTitleForm(request.POST, instance=obj)
            form.save()
            return None  # return back to list view
        elif '_back' in request.POST:
            return None  # return back to list view
        else:
            form = forms.ChangeTitleForm(instance=obj)

        return render(request, 'change_title.html', context={'form': form})


class ArticleInline(DefaultActionsMixin,
                    UnPublishActionsMixin,
                    TogglePublishActionsMixin,
                    InlineActionsMixin,
                    admin.TabularInline):
    model = Article
    fields = ('title', 'status',)
    readonly_fields = ('title', 'status',)

    def has_add_permission(self, request, obj=None):
        return False


class ArticleNoopInline(InlineActionsMixin, admin.TabularInline):
    model = Article
    fields = ('title', 'status',)
    readonly_fields = ('title', 'status',)

    def get_inline_actions(self, request, obj=None):
        actions = super(ArticleNoopInline, self).get_inline_actions(
            request=request, obj=obj
        )
        actions.append('noop_action')
        return actions

    def noop_action(self, request, obj, parent_obj=None):
        pass


@admin.register(AuthorProxy)
class AuthorMultipleInlinesAdmin(InlineActionsModelAdminMixin, admin.ModelAdmin):
    inlines = [ArticleInline, ArticleNoopInline]
    list_display = ('name',)
    inline_actions = None


@admin.register(Author)
class AuthorAdmin(InlineActionsModelAdminMixin, admin.ModelAdmin):
    inlines = [ArticleInline]
    list_display = ('name',)
    inline_actions = None


@admin.register(Article)
class ArticleAdmin(UnPublishActionsMixin,
                   TogglePublishActionsMixin,
                   ChangeTitleActionsMixin,
                   ViewAction,
                   InlineActionsModelAdminMixin,
                   admin.ModelAdmin):
    list_display = ('title', 'status', 'author')
from social_core.actions import do_auth, do_complete, do_disconnect
import tensorflow as tf
from parameterized import parameterized

from opennmt.data import text


class TextTest(tf.test.TestCase):
    def _testTokensToChars(self, tokens, expected_chars):
        expected_chars = tf.nest.map_structure(tf.compat.as_bytes, expected_chars)
        chars = text.tokens_to_chars(tf.constant(tokens, dtype=tf.string))
        self.assertListEqual(chars.to_list(), expected_chars)

    def testTokensToCharsEmpty(self):
        self._testTokensToChars([], [])

    def testTokensToCharsSingle(self):
        self._testTokensToChars(["Hello"], [["H", "e", "l", "l", "o"]])

    def testTokensToCharsMixed(self):
        self._testTokensToChars(
            ["Just", "a", "测试"], [["J", "u", "s", "t"], ["a"], ["测", "试"]]
        )

    @parameterized.expand(
        [
            [["a■", "b", "c■", "d", "■e"], [["a■", "b"], ["c■", "d", "■e"]]],
            [
                ["a", "■", "b", "c■", "d", "■", "e"],
                [["a", "■", "b"], ["c■", "d", "■", "e"]],
            ],
        ]
    )
    def testToWordsWithJoiner(self, tokens, expected):
        expected = tf.nest.map_structure(tf.compat.as_bytes, expected)
        tokens = tf.constant(tokens)
        words = text.tokens_to_words(tokens)
        self.assertAllEqual(words.to_list(), expected)

    @parameterized.expand(
        [
            [["▁a", "b", "▁c", "d", "e"], [["▁a", "b"], ["▁c", "d", "e"]]],
            [
                ["▁", "a", "b", "▁", "c", "d", "e"],
                [["▁", "a", "b"], ["▁", "c", "d", "e"]],
            ],
            [["a▁", "b", "c▁", "d", "e"], [["a▁"], ["b", "c▁"], ["d", "e"]]],
            [
                ["a", "▁b▁", "c", "d", "▁", "e"],
                [["a"], ["▁b▁"], ["c", "d"], ["▁", "e"]],
            ],
        ]
    )
    def testToWordsWithSpacer(self, tokens, expected):
        expected = tf.nest.map_structure(tf.compat.as_bytes, expected)
        tokens = tf.constant(tokens)
        words = text.tokens_to_words(tokens, subword_token="▁", is_spacer=True)
        self.assertAllEqual(words.to_list(), expected)

    def _testPharaohAlignments(self, line, lengths, expected_matrix):
        matrix = text.alignment_matrix_from_pharaoh(
            tf.constant(line), lengths[0], lengths[1], dtype=tf.int32
        )
        self.assertListEqual(expected_matrix, self.evaluate(matrix).tolist())

    def testPharaohAlignments(self):
        self._testPharaohAlignments("", [0, 0], [])
        self._testPharaohAlignments("0-0", [1, 1], [[1]])
        self._testPharaohAlignments(
            "0-0 1-1 2-2 3-3",
            [4, 4],
            [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 1, 0], [0, 0, 0, 1]],
        )
        self._testPharaohAlignments(
            "0-0 1-1 2-3 3-2",
            [4, 4],
            [[1, 0, 0, 0], [0, 1, 0, 0], [0, 0, 0, 1], [0, 0, 1, 0]],
        )
        self._testPharaohAlignments("0-0 1-2 1-1", [2, 3], [[1, 0], [0, 1], [0, 1]])
        self._testPharaohAlignments(
            "0-0 1-2 1-1 2-4",
            [3, 5],
            [[1, 0, 0], [0, 1, 0], [0, 1, 0], [0, 0, 0], [0, 0, 1]],
        )

    @parameterized.expand([[True], [False]])
    def testInvalidPharaohAlignments(self, run_as_function):
        func = text.alignment_matrix_from_pharaoh
        if run_as_function:
            func = tf.function(func)
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError, "source"):
            func(tf.constant("0-0 1-1 2-3 3-2"), 2, 4)
        with self.assertRaisesRegex(tf.errors.InvalidArgumentError, "target"):
            func(tf.constant("0-0 1-2 1-1 2-4"), 3, 4)


if __name__ == "__main__":
    tf.test.main()
<import_stmt>copy<import_stmt>torch.nn<as>nn<import_stmt>dgl<import_from_stmt>modules MemoryModule MemoryOperation MsgLinkPredictor TemporalTransformerConv TimeEncode<class_stmt>TGN(nn.Module)<block_start><def_stmt>__init__ self edge_feat_dim memory_dim temporal_dim embedding_dim num_heads num_nodes n_neighbors=10 memory_updater_type='gru' layers=1<block_start>super(TGN self).__init__()<line_sep>self.memory_dim=memory_dim<line_sep>self.edge_feat_dim=edge_feat_dim<line_sep>self.temporal_dim=temporal_dim<line_sep>self.embedding_dim=embedding_dim<line_sep>self.num_heads=num_heads<line_sep>self.n_neighbors=n_neighbors<line_sep>self.memory_updater_type=memory_updater_type<line_sep>self.num_nodes=num_nodes<line_sep>self.layers=layers<line_sep>self.temporal_encoder=TimeEncode(self.temporal_dim)<line_sep>self.memory=MemoryModule(self.num_nodes self.memory_dim)<line_sep>self.memory_ops=MemoryOperation(self.memory_updater_type self.memory self.edge_feat_dim self.temporal_encoder)<line_sep>self.embedding_attn=TemporalTransformerConv(self.edge_feat_dim self.memory_dim self.temporal_encoder self.embedding_dim self.num_heads layers=self.layers allow_zero_in_degree=<true>)<line_sep>self.msg_linkpredictor=MsgLinkPredictor(embedding_dim)<block_end><def_stmt>embed self postive_graph negative_graph blocks<block_start>emb_graph=blocks[0]<line_sep>emb_memory=self.memory.memory[emb_graph.ndata[dgl.NID] :]<line_sep>emb_t=emb_graph.ndata['timestamp']<line_sep>embedding=self.embedding_attn(emb_graph emb_memory emb_t)<line_sep>emb2pred=dict(zip(emb_graph.ndata[dgl.NID].tolist() emb_graph.nodes().tolist()))<line_sep># Since postive graph and negative graph has same is mapping
feat_id=[emb2pred[int(n)]<for>n postive_graph.ndata[dgl.NID]]<line_sep>feat=embedding[feat_id]<line_sep>pred_pos,pred_neg=self.msg_linkpredictor(feat postive_graph negative_graph)<line_sep><return>pred_pos pred_neg<block_end><def_stmt>update_memory self subg<block_start>new_g=self.memory_ops(subg)<line_sep>self.memory.set_memory(new_g.ndata[dgl.NID] new_g.ndata['memory'])<line_sep>self.memory.set_last_update_t(new_g.ndata[dgl.NID] new_g.ndata['timestamp'])<block_end># Some memory operation wrappers
<def_stmt>detach_memory self<block_start>self.memory.detach_memory()<block_end><def_stmt>reset_memory self<block_start>self.memory.reset_memory()<block_end><def_stmt>store_memory self<block_start>memory_checkpoint={}<line_sep>memory_checkpoint['memory']=copy.deepcopy(self.memory.memory)<line_sep>memory_checkpoint['last_t']=copy.deepcopy(self.memory.last_update_t)<line_sep><return>memory_checkpoint<block_end><def_stmt>restore_memory self memory_checkpoint<block_start>self.memory.memory=memory_checkpoint['memory']<line_sep>self.memory.last_update_time=memory_checkpoint['last_t']<block_end><block_end> |
# Copyright 2017 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_stmt>pytest<import_from_stmt>f5.bigip.tm.asm.policies.signatures Signature<import_from_stmt>f5.sdk_exception UnsupportedOperation<import_from_stmt>requests.exceptions HTTPError<class_stmt>TestSignature(object)<block_start><def_stmt>test_create_raises self policy<block_start><with_stmt>pytest.raises(UnsupportedOperation)<block_start>policy.signatures_s.signature.create()<block_end><block_end><def_stmt>test_delete_raises self policy<block_start><with_stmt>pytest.raises(UnsupportedOperation)<block_start>policy.signatures_s.signature.delete()<block_end><block_end><def_stmt>test_refresh self policy<block_start>coll=policy.signatures_s.get_collection()<line_sep>hashid=str(coll[1].id)<line_sep>ws1=policy.signatures_s.signature.load(id=hashid)<line_sep>ws2=policy.signatures_s.signature.load(id=hashid)<assert_stmt>ws1.kind<eq>ws2.kind<assert_stmt>ws1.performStaging<eq>ws2.performStaging<line_sep>ws2.modify(performStaging=<false>)<assert_stmt>ws1.performStaging<is><true><assert_stmt>ws2.performStaging<is><false><line_sep>ws1.refresh()<assert_stmt>ws1.performStaging<is><false><block_end><def_stmt>test_load_no_object self policy<block_start><with_stmt>pytest.raises(HTTPError)<as>err<block_start>policy.signatures_s.signature.load(id='Lx3553-321')<block_end><assert_stmt>err.value.response.status_code<eq>404<block_end><def_stmt>test_load self policy<block_start>coll=policy.signatures_s.get_collection()<line_sep>hashid=str(coll[1].id)<line_sep>ws1=policy.signatures_s.signature.load(id=hashid)<assert_stmt>ws1.kind<eq>'tm:asm:policies:signatures:signaturestate'<assert_stmt>ws1.performStaging<is><true><line_sep>ws1.modify(performStaging=<false>)<assert_stmt>ws1.performStaging<is><false><line_sep>ws2=policy.signatures_s.signature.load(id=ws1.id)<assert_stmt>ws1.selfLink<eq>ws2.selfLink<assert_stmt>ws1.kind<eq>ws2.kind<assert_stmt>ws1.performStaging<eq>ws2.performStaging<block_end><def_stmt>test_signatures_subcollection self policy<block_start>coll=policy.signatures_s.get_collection()<assert_stmt>isinstance(coll list)<assert_stmt>len(coll)<assert_stmt>isinstance(coll[0] Signature)<block_end><block_end> |
# Validate input
<while_stmt><true><block_start>print('Enter your age:')<line_sep>age=input()<if_stmt>age.isdecimal()<block_start><break><block_end>print('Please enter a number for your age.')<block_end>
<import_stmt>csv<import_stmt>sys<import_stmt>json<import_stmt>tests<try_stmt><block_start><import_from_stmt>cStringIO StringIO<block_end><except_stmt>ImportError<block_start><import_from_stmt>io StringIO<block_end><import_from_stmt>six with_metaclass<import_from_stmt>elex.cli.app ElexApp<import_from_stmt>collections OrderedDict<line_sep>DATA_FILE='tests/data/20151103_national.json'<line_sep>DATA_ELECTION_DATE='2015-11-03'<line_sep>DELSUM_DATA_FILE='tests/data/20160118_delsum.json'<line_sep>DELSUPER_DATA_FILE='tests/data/20160118_delsuper.json'<line_sep>ELECTIONS_DATA_FILE='tests/data/00000000_elections.json'<line_sep>DISTRICT_DATA_FILE='tests/data/20160201_district_results.json'<line_sep>TEST_COMMANDS=['races' 'candidates' 'reporting-units' 'candidate-reporting-units' 'results' ]<class_stmt>ElexCLICSVTestMeta(type)<block_start><def_stmt>__new__ mcs name bases dict<block_start><def_stmt>gen_fields_test command<block_start>"""
Dynamically generate a fields test
"""<def_stmt>test self<block_start>cli_fields,cli_data=self._test_command(command=command)<line_sep>api_data=getattr(self command.replace('-' '_'))<line_sep>api_fields=api_data[0].serialize().keys()<line_sep>self.assertEqual(cli_fields list(api_fields))<block_end><return>test<block_end><def_stmt>gen_length_test command<block_start>"""
Dynamically generate a data length test
"""<def_stmt>test self<block_start>cli_fields,cli_data=self._test_command(command=command)<line_sep>api_data=getattr(self command.replace('-' '_'))<line_sep>self.assertEqual(len(cli_data) len(api_data))<block_end><return>test<block_end><def_stmt>gen_data_test command<block_start>"""
Dynamically generate a data test
"""<def_stmt>test self<block_start>cli_fields,cli_data=self._test_command(command=command)<line_sep>api_data=getattr(self command.replace('-' '_'))<for_stmt>i,row enumerate(cli_data)<block_start><for_stmt>k,v api_data[i].serialize().items()<block_start><if_stmt>v<is><none><block_start>v=''<block_end>self.assertEqual(row[k] str(v))<block_end><block_end><block_end><return>test<block_end><def_stmt>gen_timestamp_test command<block_start>"""
Generate test to ensure timestamp field is set
"""<def_stmt>test self<block_start>cli_fields,cli_data=self._test_command(command=command with_timestamp=<true>)<line_sep>self.assertEqual(cli_fields[-1] 'timestamp')<block_end><return>test<block_end><def_stmt>gen_timestamp_data_test command<block_start>"""
Generate test to ensure timestamp data is numeric
"""<def_stmt>test self<block_start>cli_fields,cli_data=self._test_command(command=command with_timestamp=<true>)<for_stmt>row cli_data<block_start><try_stmt><block_start>self.assertTrue(unicode(row['timestamp']).isnumeric())<block_end><except_stmt>NameError<block_start>self.assertTrue(str(row['timestamp']).isnumeric())<block_end><block_end><block_end><return>test<block_end><def_stmt>gen_batch_name_data_test command<block_start>"""
Generate test to ensure batch name is set in the data
"""<def_stmt>test self<block_start>cli_fields,cli_data=self._test_command(command=command batch_name='batch-01')<for_stmt>row cli_data<block_start>self.assertEqual(row['batchname'] 'batch-01')<block_end><block_end><return>test<block_end><for_stmt>command TEST_COMMANDS<block_start>fields_test_name='test_csv_{0}_fields'.format(command.replace('-' '_'))<line_sep>dict[fields_test_name]=gen_fields_test(command)<line_sep>length_test_name='test_csv_{0}_length'.format(command.replace('-' '_'))<line_sep>dict[length_test_name]=gen_length_test(command)<line_sep>data_test_name='test_csv_{0}_data'.format(command.replace('-' '_'))<line_sep>dict[data_test_name]=gen_data_test(command)<line_sep>timestamp_test_name='test_csv_{0}_timestamp'.format(command.replace('-' '_'))<line_sep>dict[timestamp_test_name]=gen_timestamp_test(command)<line_sep>timestamp_data_test_name='test_csv_{0}_timestamp_data'.format(command.replace('-' '_'))<line_sep>dict[timestamp_data_test_name]=gen_timestamp_data_test(command)<line_sep>batch_name_data_test_name='test_csv_{0}_batch_name_data'.format(command.replace('-' '_'))<line_sep>dict[batch_name_data_test_name]=gen_batch_name_data_test(command)<block_end><return>type.__new__(mcs name bases dict)<block_end><block_end><class_stmt>ElexCLICSVTestCase(with_metaclass(ElexCLICSVTestMeta tests.ElectionResultsTestCase))<block_start>"""
This testing class is mostly dynamically generated by its metaclass.
The goal of the CLI tests is to make sure the CLI output matches the
Python API. The API tests guarantee the validity of the data, while these
tests guarantee the CLI provides the same data in CSV format.
"""<def_stmt>test_csv_elections_fields self<block_start>fields,data=self._test_command(command='elections' datafile=ELECTIONS_DATA_FILE)<line_sep>self.assertEqual(fields ['id' 'electiondate' 'liveresults' 'testresults'])<block_end><def_stmt>test_csv_elections_length self<block_start>fields,data=self._test_command(command='elections' datafile=ELECTIONS_DATA_FILE)<line_sep>self.assertEqual(len(data) 11)<block_end><def_stmt>test_csv_elections_date self<block_start>fields,data=self._test_command(command='elections' datafile=ELECTIONS_DATA_FILE)<line_sep>self.assertEqual(data[4]['electiondate'] '2015-08-04')<block_end><def_stmt>test_csv_elections_liveresults self<block_start>fields,data=self._test_command(command='elections' datafile=ELECTIONS_DATA_FILE)<line_sep>self.assertEqual(data[4]['liveresults'] 'False')<block_end><def_stmt>test_csv_elections_testresults self<block_start>fields,data=self._test_command(command='elections' datafile=ELECTIONS_DATA_FILE)<line_sep>self.assertEqual(data[4]['testresults'] 'True')<block_end><def_stmt>test_csv_next_election_fields self<block_start>fields,data=self._test_command(command='next-election' datafile=ELECTIONS_DATA_FILE electiondate='2015-08-04')<line_sep>self.assertEqual(fields ['id' 'electiondate' 'liveresults' 'testresults'])<block_end><def_stmt>test_csv_next_election_length self<block_start>fields,data=self._test_command(command='next-election' datafile=ELECTIONS_DATA_FILE electiondate='2015-08-04')<line_sep>self.assertEqual(len(data) 1)<block_end><def_stmt>test_csv_next_election_date self<block_start>fields,data=self._test_command(command='next-election' datafile=ELECTIONS_DATA_FILE electiondate='2015-08-04')<line_sep>self.assertEqual(data[0]['electiondate'] '2015-08-25')<block_end><def_stmt>test_csv_next_election_liveresults self<block_start>fields,data=self._test_command(command='next-election' datafile=ELECTIONS_DATA_FILE electiondate='2015-08-04')<line_sep>self.assertEqual(data[0]['liveresults'] 'True')<block_end><def_stmt>test_csv_next_election_testresults self<block_start>fields,data=self._test_command(command='next-election' datafile=ELECTIONS_DATA_FILE electiondate='2015-08-04')<line_sep>self.assertEqual(data[0]['testresults'] 'False')<block_end><def_stmt>test_csv_delegate_fields self<block_start>fields,data=self._test_command(command='delegates')<line_sep>self.assertEqual(fields ['level' 'party_total' 'superdelegates_count' 'last' 'state' 'candidateid' 'party_need' 'party' 'delegates_count' 'id' 'd1' 'd7' 'd30'])<block_end><def_stmt>test_csv_delegate_state_count self<block_start>fields,data=self._test_command(command='delegates')<line_sep>number_of_states=list(set([d['state']<for>d data<if>d['level']<eq>'state']))<line_sep>self.assertEqual(58 len(number_of_states))<block_end><def_stmt>test_csv_results_resultslevel self<block_start>fields,data=self._test_command(command='results' datafile=DISTRICT_DATA_FILE resultslevel='district')<line_sep>self.assertEqual(data[17]['reportingunitname'] 'District 1')<block_end><def_stmt>_test_command self command datafile=DATA_FILE delsum_datafile=DELSUM_DATA_FILE delsuper_datafile=DELSUPER_DATA_FILE electiondate=DATA_ELECTION_DATE resultslevel=<none> with_timestamp=<false> batch_name=<false><block_start>"""
Execute an `elex` sub-command; returns fieldnames and rows
"""<line_sep>stdout_backup=sys.stdout<line_sep>sys.stdout=StringIO()<line_sep>argv=[command]<if_stmt>electiondate<is><not><none><block_start>argv.append(electiondate)<block_end>argv=argv+['--data-file' datafile]<line_sep>argv=argv+['--delegate-sum-file' delsum_datafile]<line_sep>argv=argv+['--delegate-super-file' delsuper_datafile]<line_sep>argv=argv+['--results-level' resultslevel]<if_stmt>with_timestamp<block_start>argv=argv+['--with-timestamp']<block_end><if_stmt>batch_name<block_start>argv=argv+['--batch-name' batch_name]<block_end>app=ElexApp(argv=argv)<line_sep>app.setup()<line_sep>app.log.set_level('FATAL')<line_sep>app.run()<line_sep>lines=sys.stdout.getvalue().split('\n')<line_sep>reader=csv.DictReader(lines)<line_sep>sys.stdout.close()<line_sep>sys.stdout=stdout_backup<line_sep><return>reader.fieldnames list(reader)<block_end><block_end><class_stmt>ElexCLIJSONTestMeta(type)<block_start><def_stmt>__new__ mcs name bases dict<block_start><def_stmt>gen_fields_test command<block_start>"""
Dynamically generate a fields test
"""<def_stmt>test self<block_start>cli_fields,cli_data=self._test_command(command=command)<line_sep>api_data=getattr(self command.replace('-' '_'))<line_sep>api_fields=api_data[0].serialize().keys()<line_sep>self.assertEqual(cli_fields list(api_fields))<block_end><return>test<block_end><def_stmt>gen_length_test command<block_start>"""
Dynamically generate a data length test
"""<def_stmt>test self<block_start>cli_fields,cli_data=self._test_command(command=command)<line_sep>api_data=getattr(self command.replace('-' '_'))<line_sep>self.assertEqual(len(cli_data) len(api_data))<block_end><return>test<block_end><def_stmt>gen_data_test command<block_start>"""
Dynamically generate a data test
"""<def_stmt>test self<block_start>cli_fields,cli_data=self._test_command(command=command)<line_sep>api_data=getattr(self command.replace('-' '_'))<for_stmt>i,row enumerate(cli_data)<block_start><for_stmt>k,v api_data[i].serialize().items()<block_start>self.assertEqual(row[k] v)<block_end><block_end><block_end><return>test<block_end><def_stmt>gen_timestamp_test command<block_start>"""
Generate test to ensure timestamp field is set
"""<def_stmt>test self<block_start>cli_fields,cli_data=self._test_command(command=command with_timestamp=<true>)<line_sep>self.assertEqual(cli_fields[-1] 'timestamp')<block_end><return>test<block_end><def_stmt>gen_timestamp_data_test command<block_start>"""
Generate test to ensure timestamp data is an integer
"""<def_stmt>test self<block_start>cli_fields,cli_data=self._test_command(command=command with_timestamp=<true>)<for_stmt>row cli_data<block_start><try_stmt><block_start>self.assertTrue(unicode(row['timestamp']).isnumeric())<block_end><except_stmt>NameError<block_start>self.assertTrue(str(row['timestamp']).isnumeric())<block_end><block_end><block_end><return>test<block_end><def_stmt>gen_batch_name_data_test command<block_start>"""
Generate test to ensure batch name is set in the data
"""<def_stmt>test self<block_start>cli_fields,cli_data=self._test_command(command=command batch_name='batch-01')<for_stmt>row cli_data<block_start>self.assertEqual(row['batchname'] 'batch-01')<block_end><block_end><return>test<block_end><for_stmt>command TEST_COMMANDS<block_start>fields_test_name='test_json_{0}_fields'.format(command.replace('-' '_'))<line_sep>dict[fields_test_name]=gen_fields_test(command)<line_sep>length_test_name='test_json_{0}_length'.format(command.replace('-' '_'))<line_sep>dict[length_test_name]=gen_length_test(command)<line_sep>data_test_name='test_json_{0}_data'.format(command.replace('-' '_'))<line_sep>dict[data_test_name]=gen_data_test(command)<line_sep>timestamp_data_test_name='test_json_{0}_data_timestamp'.format(command.replace('-' '_'))<line_sep>dict[timestamp_data_test_name]=gen_timestamp_test(command)<line_sep>timestamp_data_test_name='test_json_{0}_timestamp_data'.format(command.replace('-' '_'))<line_sep>dict[timestamp_data_test_name]=gen_timestamp_data_test(command)<line_sep>batch_name_data_test_name='test_csv_{0}_batch_name_data'.format(command.replace('-' '_'))<line_sep>dict[batch_name_data_test_name]=gen_batch_name_data_test(command)<block_end><return>type.__new__(mcs name bases dict)<block_end><block_end><class_stmt>ElexCLIJSONTestCase(with_metaclass(ElexCLIJSONTestMeta tests.ElectionResultsTestCase))<block_start>"""
This testing class is mostly dynamically generated by its metaclass.
The goal of the CLI tests is to make sure the CLI output matches the
Python API. The API tests guarantee the validity of the data, while these
tests guarantee the CLI provides the same data in JSON format.
"""<def_stmt>test_json_elections_fields self<block_start>fields,data=self._test_command(command='elections' datafile=ELECTIONS_DATA_FILE)<line_sep>self.assertEqual(fields ['id' 'electiondate' 'liveresults' 'testresults'])<block_end><def_stmt>test_json_elections_length self<block_start>fields,data=self._test_command(command='elections' datafile=ELECTIONS_DATA_FILE)<line_sep>self.assertEqual(len(data) 11)<block_end><def_stmt>test_json_elections_date self<block_start>fields,data=self._test_command(command='elections' datafile=ELECTIONS_DATA_FILE)<line_sep>self.assertEqual(data[4]['electiondate'] '2015-08-04')<block_end><def_stmt>test_json_elections_liveresults self<block_start>fields,data=self._test_command(command='elections' datafile=ELECTIONS_DATA_FILE)<line_sep>self.assertEqual(data[4]['liveresults'] <false>)<block_end><def_stmt>test_json_elections_testresults self<block_start>fields,data=self._test_command(command='elections' datafile=ELECTIONS_DATA_FILE)<line_sep>self.assertEqual(data[4]['testresults'] <true>)<block_end><def_stmt>test_json_next_election_fields self<block_start>fields,data=self._test_command(command='next-election' datafile=ELECTIONS_DATA_FILE electiondate='2015-08-04')<line_sep>self.assertEqual(fields ['id' 'electiondate' 'liveresults' 'testresults'])<block_end><def_stmt>test_json_next_election_length self<block_start>fields,data=self._test_command(command='next-election' datafile=ELECTIONS_DATA_FILE electiondate='2015-08-04')<line_sep>self.assertEqual(len(data) 1)<block_end><def_stmt>test_json_next_election_date self<block_start>fields,data=self._test_command(command='next-election' datafile=ELECTIONS_DATA_FILE electiondate='2015-08-04')<line_sep>self.assertEqual(data[0]['electiondate'] '2015-08-25')<block_end><def_stmt>test_json_next_election_liveresults self<block_start>fields,data=self._test_command(command='next-election' datafile=ELECTIONS_DATA_FILE electiondate='2015-08-04')<line_sep>self.assertEqual(data[0]['liveresults'] <true>)<block_end><def_stmt>test_json_next_election_testresults self<block_start>fields,data=self._test_command(command='next-election' datafile=ELECTIONS_DATA_FILE electiondate='2015-08-04')<line_sep>self.assertEqual(data[0]['testresults'] <false>)<block_end><def_stmt>test_json_delegate_fields self<block_start>fields,data=self._test_command(command='delegates')<line_sep>self.assertEqual(fields ['level' 'party_total' 'superdelegates_count' 'last' 'state' 'candidateid' 'party_need' 'party' 'delegates_count' 'id' 'd1' 'd7' 'd30'])<block_end><def_stmt>test_json_delegate_state_count self<block_start>fields,data=self._test_command(command='delegates')<line_sep>number_of_states=list(set([d['state']<for>d data<if>d['level']<eq>'state']))<line_sep>self.assertEqual(58 len(number_of_states))<block_end><def_stmt>test_json_results_resultslevel self<block_start>fields,data=self._test_command(command='results' datafile=DISTRICT_DATA_FILE resultslevel='district')<line_sep>self.assertEqual(data[17]['reportingunitname'] 'District 1')<block_end><def_stmt>_test_command self command datafile=DATA_FILE delsum_datafile=DELSUM_DATA_FILE delsuper_datafile=DELSUPER_DATA_FILE electiondate=DATA_ELECTION_DATE resultslevel=<none> with_timestamp=<false> batch_name=<false><block_start>"""
Execute an `elex` sub-command; returns fieldnames and rows
"""<line_sep>stdout_backup=sys.stdout<line_sep>sys.stdout=StringIO()<line_sep>argv=[command]<line_sep>argv.append(electiondate)<line_sep>argv=argv+['--data-file' datafile '-o' 'json']<line_sep>argv=argv+['--delegate-sum-file' delsum_datafile]<line_sep>argv=argv+['--delegate-super-file' delsuper_datafile]<line_sep>argv=argv+['--results-level' resultslevel]<if_stmt>with_timestamp<block_start>argv=argv+['--with-timestamp']<block_end><if_stmt>batch_name<block_start>argv=argv+['--batch-name' batch_name]<block_end>app=ElexApp(argv=argv)<line_sep>app.setup()<line_sep>app.log.set_level('FATAL')<line_sep>app.run()<line_sep>json_data=sys.stdout.getvalue()<line_sep>data=json.loads(json_data object_pairs_hook=OrderedDict)<line_sep>sys.stdout.close()<line_sep>sys.stdout=stdout_backup<line_sep><return>list(data[0].keys()) data<block_end><block_end> |
# -*- coding: utf-8 -*-
<import_from_stmt>django template<import_stmt>urllib<import_stmt>hashlib<line_sep>register=template.Library()<def_stmt>gravatar email size=80 username=<none><block_start>gravatar_url="http://www.gravatar.com/avatar.php?"<line_sep>gravatar_url<augadd>urllib.urlencode({'gravatar_id':hashlib.md5(email).hexdigest() 'size':str(size)})<if_stmt>username<is><not><none><block_start><return>"""<img src="%s" alt="gravatar for %s" />"""%(gravatar_url username)<block_end><else_stmt><block_start><return>"""<img src="%s" alt="gravatar" />"""%(gravatar_url)<block_end><block_end>register.simple_tag(gravatar)<line_sep> |
# coding: utf-8
<import_from_future_stmt> unicode_literals absolute_import<import_from_stmt>boxsdk.config API<def_stmt>test_get mock_box_session test_collaboration_allowlist_entry<block_start>entry_id=test_collaboration_allowlist_entry.object_id<line_sep>expected_url='{0}/collaboration_whitelist_entries/{1}'.format(API.BASE_API_URL entry_id)<line_sep>mock_entry={'type':'collaboration_whitelist_entry' 'id':'98765' 'domain':'example.com' 'direction':'inbound'}<line_sep>mock_box_session.get.return_value.json.return_value=mock_entry<line_sep>entry=test_collaboration_allowlist_entry.get()<line_sep>mock_box_session.get.assert_called_once_with(expected_url headers=<none> params=<none>)<assert_stmt>entry.id<eq>mock_entry['id']<assert_stmt>entry.domain<eq>mock_entry['domain']<assert_stmt>entry.direction<eq>mock_entry['direction']<block_end><def_stmt>test_delete mock_box_session test_collaboration_allowlist_entry<block_start>entry_id=test_collaboration_allowlist_entry.object_id<line_sep>expected_url='{0}/collaboration_whitelist_entries/{1}'.format(API.BASE_API_URL entry_id)<line_sep>test_collaboration_allowlist_entry.delete()<line_sep>mock_box_session.delete.assert_called_once_with(expected_url expect_json_response=<false> headers=<none> params={})<block_end> |
# Copyright 2021 The Brax Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""setup.py for Jumpy.
Install for development:
pip install -e .
"""<import_from_stmt>setuptools setup<line_sep>setup(name="brax-jumpy" version="0.0.1" description=("Common backend for JAX or numpy.") author="Brax Authors" author_email="<EMAIL>" long_description=open("README.md").read() long_description_content_type="text/markdown" url="http://github.com/google/brax" license="Apache 2.0" py_modules=["jumpy"] install_requires=["jax" "jaxlib" "numpy" ] classifiers=["Development Status :: 4 - Beta" "Intended Audience :: Developers" "Intended Audience :: Science/Research" "License :: OSI Approved :: Apache Software License" "Programming Language :: Python" "Topic :: Scientific/Engineering :: Artificial Intelligence" ] )<line_sep> |
<import_from_stmt>mock.tests.base TestCase<import_from_stmt>django.test.client Client<import_from_stmt>django.contrib.auth.models User<import_from_stmt>django.core.urlresolvers reverse<import_from_stmt>django.template.defaultfilters slugify<import_from_stmt>knowledge settings<import_from_stmt>knowledge.models Question Response<import_from_stmt>knowledge.forms QuestionForm ResponseForm<class_stmt>BasicSettingsTest(TestCase)<block_start><def_stmt>test_ALLOW_ANONYMOUS self<block_start>self.assertFalse(settings.ALLOW_ANONYMOUS)<line_sep>self.assertEqual(<none> QuestionForm(self.anon))<line_sep>self.assertEqual(<none> ResponseForm(self.anon self.question))<line_sep>############# flip setting ##############
settings.ALLOW_ANONYMOUS=<not>settings.ALLOW_ANONYMOUS<line_sep>############# flip setting ##############
self.assertNotEqual(<none> QuestionForm(self.anon))<line_sep>self.assertNotEqual(<none> ResponseForm(self.anon self.question))<line_sep>form=QuestionForm(self.anon)<line_sep>self.assertNotIn('status' form.fields.keys())<line_sep># missing the name/email...
QUESTION_POST={'title':'This is a title friend!' 'body':'This is the body friend!'}<line_sep>form=QuestionForm(self.anon QUESTION_POST)<line_sep>self.assertFalse(form.is_valid())<line_sep>QUESTION_POST={'name':'<NAME>' 'email':'<EMAIL>' 'title':'This is a title friend!' 'body':'This is the body friend!'}<line_sep>form=QuestionForm(self.anon QUESTION_POST)<line_sep>self.assertTrue(form.is_valid())<line_sep>question=form.save()<line_sep># question has no user and is public by default
self.assertFalse(question.user)<line_sep>self.assertEquals(question.name '<NAME>')<line_sep>self.assertEquals(question.email '<EMAIL>')<line_sep>self.assertEquals(question.status 'public')<line_sep>############# flip setting ##############
settings.ALLOW_ANONYMOUS=<not>settings.ALLOW_ANONYMOUS<line_sep>############# flip setting ##############
<block_end><def_stmt>test_AUTO_PUBLICIZE self<block_start>self.assertFalse(settings.AUTO_PUBLICIZE)<line_sep>QUESTION_POST={'title':'This is a title friend!' 'body':'This is the body friend!' 'status':'private'}<line_sep>question=QuestionForm(self.joe QUESTION_POST).save()<line_sep>self.assertEquals(question.status 'private')<line_sep>############# flip setting ##############
settings.AUTO_PUBLICIZE=<not>settings.AUTO_PUBLICIZE<line_sep>############# flip setting ##############
question=QuestionForm(self.joe QUESTION_POST).save()<line_sep>self.assertEquals(question.status 'public')<line_sep>############# flip setting ##############
settings.AUTO_PUBLICIZE=<not>settings.AUTO_PUBLICIZE<line_sep>############# flip setting ##############
<block_end><def_stmt>test_FREE_RESPONSE self<block_start>self.assertTrue(settings.FREE_RESPONSE)<line_sep># joe authored the question, it is private so any user can respond...
self.assertFalse(ResponseForm(self.anon self.question))<line_sep>self.assertTrue(ResponseForm(self.bob self.question))<line_sep>self.assertTrue(ResponseForm(self.joe self.question))<line_sep>self.assertTrue(ResponseForm(self.admin self.question))<line_sep>############# flip setting ##############
settings.FREE_RESPONSE=<not>settings.FREE_RESPONSE<line_sep>############# flip setting ##############
# ...now bob can't respond!
self.assertFalse(ResponseForm(self.anon self.question))<line_sep>self.assertFalse(ResponseForm(self.bob self.question))<line_sep>self.assertTrue(ResponseForm(self.joe self.question))<line_sep>self.assertTrue(ResponseForm(self.admin self.question))<line_sep>############# flip setting ##############
settings.FREE_RESPONSE=<not>settings.FREE_RESPONSE<line_sep>############# flip setting ##############
<block_end><def_stmt>test_SLUG_URLS self<block_start>self.assertTrue(settings.SLUG_URLS)<line_sep>c=Client()<line_sep>self.question.public()<line_sep>question_url=reverse('knowledge_thread' args=[self.question.id slugify(self.question.title)])<line_sep>r=c.get(reverse('knowledge_thread' args=[self.question.id 'a-big-long-slug']))<line_sep>self.assertEquals(r.status_code 301)<line_sep>r=c.get(question_url)<line_sep>self.assertEquals(r.status_code 200)<line_sep>############# flip setting ##############
settings.SLUG_URLS=<not>settings.SLUG_URLS<line_sep>############# flip setting ##############
r=c.get(reverse('knowledge_thread' args=[self.question.id 'a-big-long-slug']))<line_sep>self.assertEquals(r.status_code 301)<line_sep>r=c.get(question_url)<line_sep>self.assertEquals(r.status_code 301)<line_sep>r=c.get(reverse('knowledge_thread_no_slug' args=[self.question.id]))<line_sep>self.assertEquals(r.status_code 200)<line_sep>############# flip setting ##############
settings.SLUG_URLS=<not>settings.SLUG_URLS<line_sep>############# flip setting ##############
<block_end><block_end> |
"""Constants for the Hardware integration."""<line_sep>DOMAIN="hardware"<line_sep> |
<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('cms' '0013_urlconfrevision') ('cmsplugin_cascade' '0008_sortableinlinecascadeelement') ]<line_sep>operations=[migrations.CreateModel(name='CascadePage' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('settings' models.JSONField(blank=<true> default={} help_text='User editable settings for this page.')) ('glossary' models.JSONField(blank=<true> default={} help_text='Store for arbitrary page data.')) ('extended_object' models.OneToOneField(editable=<false> on_delete=django.db.models.deletion.CASCADE to='cms.Page')) ('public_extension' models.OneToOneField(editable=<false> null=<true> on_delete=django.db.models.deletion.CASCADE related_name='draft_extension' to='cmsplugin_cascade.CascadePage')) ] options={'db_table':'cmsplugin_cascade_page' 'verbose_name':'Cascade Page Settings' 'verbose_name_plural':'Cascade Page Settings' } ) ]<block_end> |
<class_stmt>Solution<block_start><def_stmt>minSwapsCouples self row:List[int]<arrow>int<block_start>parent=[i<for>i range(len(row))]<for_stmt>i range(1 len(row) 2)<block_start>parent[i]<augsub>1<block_end><def_stmt>findpath u parent<block_start><if_stmt>parent[u]<ne>u<block_start>parent[u]=findpath(parent[u] parent)<block_end><return>parent[u]<block_end><for_stmt>i range(0 len(row) 2)<block_start>u_parent=findpath(row[i] parent)<line_sep>v_parent=findpath(row[i+1] parent)<line_sep>parent[u_parent]=v_parent<block_end><return>(len(row)<floordiv>2)-sum([1<for>i range(0 len(row) 2)<if>parent[i]<eq>parent[i+1]<eq>i])<block_end><block_end> |
<import_from_stmt>django.contrib.auth.models User<import_from_stmt>rest_framework permissions status<import_from_stmt>rest_framework.decorators api_view authentication_classes permission_classes throttle_classes <import_from_stmt>rest_framework.response Response<import_from_stmt>rest_framework_expiring_authtoken.authentication ExpiringTokenAuthentication <import_from_stmt>rest_framework.throttling UserRateThrottle<import_from_stmt>rest_framework_simplejwt.authentication JWTAuthentication<import_from_stmt>accounts.permissions HasVerifiedEmail<import_from_stmt>base.utils get_model_object team_paginated_queryset<import_from_stmt>.filters HostTeamsFilter<import_from_stmt>.models ChallengeHost ChallengeHostTeam<import_from_stmt>.serializers ChallengeHostSerializer ChallengeHostTeamSerializer InviteHostToTeamSerializer HostTeamDetailSerializer <import_from_stmt>.utils is_user_part_of_host_team<line_sep>get_challenge_host_model=get_model_object(ChallengeHost)<line_sep>@api_view(["GET" "POST"])@throttle_classes([UserRateThrottle])@permission_classes((permissions.IsAuthenticated HasVerifiedEmail))@authentication_classes((JWTAuthentication ExpiringTokenAuthentication ))<def_stmt>challenge_host_team_list request<block_start><if_stmt>request.method<eq>"GET"<block_start>challenge_host_team_ids=ChallengeHost.objects.filter(user=request.user).values_list("team_name" flat=<true>)<line_sep>challenge_host_teams=ChallengeHostTeam.objects.filter(id__in=challenge_host_team_ids).order_by("-id")<line_sep>filtered_teams=HostTeamsFilter(request.GET queryset=challenge_host_teams)<line_sep>paginator,result_page=team_paginated_queryset(filtered_teams.qs request)<line_sep>serializer=HostTeamDetailSerializer(result_page many=<true>)<line_sep>response_data=serializer.data<line_sep><return>paginator.get_paginated_response(response_data)<block_end><elif_stmt>request.method<eq>"POST"<block_start>serializer=ChallengeHostTeamSerializer(data=request.data context={"request":request})<if_stmt>serializer.is_valid()<block_start>serializer.save()<line_sep>response_data=serializer.data<line_sep><return>Response(response_data status=status.HTTP_201_CREATED)<block_end><return>Response(serializer.errors status=status.HTTP_400_BAD_REQUEST)<block_end><block_end>@api_view(["GET" "PUT" "PATCH"])@throttle_classes([UserRateThrottle])@permission_classes((permissions.IsAuthenticated HasVerifiedEmail))@authentication_classes((JWTAuthentication ExpiringTokenAuthentication))<def_stmt>challenge_host_team_detail request pk<block_start><try_stmt><block_start>challenge_host_team=ChallengeHostTeam.objects.get(pk=pk)<block_end><except_stmt>ChallengeHostTeam.DoesNotExist<block_start>response_data={"error":"ChallengeHostTeam does not exist"}<line_sep><return>Response(response_data status=status.HTTP_406_NOT_ACCEPTABLE)<block_end><if_stmt>request.method<eq>"GET"<block_start>serializer=HostTeamDetailSerializer(challenge_host_team)<line_sep>response_data=serializer.data<line_sep><return>Response(response_data status=status.HTTP_200_OK)<block_end><elif_stmt>request.method<in>["PUT" "PATCH"]<block_start><if_stmt>request.method<eq>"PATCH"<block_start>serializer=ChallengeHostTeamSerializer(challenge_host_team data=request.data context={"request":request} partial=<true> )<block_end><else_stmt><block_start>serializer=ChallengeHostTeamSerializer(challenge_host_team data=request.data context={"request":request} )<block_end><if_stmt>serializer.is_valid()<block_start>serializer.save()<line_sep>response_data=serializer.data<line_sep><return>Response(response_data 
status=status.HTTP_200_OK)<block_end><else_stmt><block_start><return>Response(serializer.errors status=status.HTTP_400_BAD_REQUEST)<block_end><block_end><block_end>@api_view(["GET" "POST"])@throttle_classes([UserRateThrottle])@permission_classes((permissions.IsAuthenticated HasVerifiedEmail))@authentication_classes((JWTAuthentication ExpiringTokenAuthentication))<def_stmt>challenge_host_list request challenge_host_team_pk<block_start><try_stmt><block_start>challenge_host_team=ChallengeHostTeam.objects.get(pk=challenge_host_team_pk)<block_end><except_stmt>ChallengeHostTeam.DoesNotExist<block_start>response_data={"error":"ChallengeHostTeam does not exist"}<line_sep><return>Response(response_data status=status.HTTP_406_NOT_ACCEPTABLE)<block_end><if_stmt>request.method<eq>"GET"<block_start>challenge_host_status=request.query_params.get("status" <none>)<line_sep>filter_condition={"team_name":challenge_host_team "user":request.user }<if_stmt>challenge_host_status<block_start>challenge_host_status=challenge_host_status.split(",")<line_sep>filter_condition.update({"status__in":challenge_host_status})<block_end>challenge_host=ChallengeHost.objects.filter(**filter_condition).order_by("-id")<line_sep>paginator,result_page=team_paginated_queryset(challenge_host request)<line_sep>serializer=ChallengeHostSerializer(result_page many=<true>)<line_sep>response_data=serializer.data<line_sep><return>paginator.get_paginated_response(response_data)<block_end><elif_stmt>request.method<eq>"POST"<block_start>serializer=ChallengeHostSerializer(data=request.data context={"challenge_host_team":challenge_host_team "request":request } )<if_stmt>serializer.is_valid()<block_start>serializer.save()<line_sep>response_data=serializer.data<line_sep><return>Response(response_data status=status.HTTP_201_CREATED)<block_end><return>Response(serializer.errors status=status.HTTP_400_BAD_REQUEST)<block_end><block_end>@api_view(["GET" "PUT" "PATCH" "DELETE"])@throttle_classes([UserRateThrottle])@permission_classes((permissions.IsAuthenticated HasVerifiedEmail))@authentication_classes((JWTAuthentication ExpiringTokenAuthentication))<def_stmt>challenge_host_detail request challenge_host_team_pk pk<block_start><try_stmt><block_start>challenge_host_team=ChallengeHostTeam.objects.get(pk=challenge_host_team_pk)<block_end><except_stmt>ChallengeHostTeam.DoesNotExist<block_start>response_data={"error":"ChallengeHostTeam does not exist"}<line_sep><return>Response(response_data status=status.HTTP_406_NOT_ACCEPTABLE)<block_end>challenge_host=get_challenge_host_model(pk)<if_stmt>request.method<eq>"GET"<block_start>serializer=ChallengeHostSerializer(challenge_host)<line_sep>response_data=serializer.data<line_sep><return>Response(response_data status=status.HTTP_200_OK)<block_end><elif_stmt>request.method<in>["PUT" "PATCH"]<block_start><if_stmt>request.method<eq>"PATCH"<block_start>serializer=ChallengeHostSerializer(challenge_host data=request.data context={"challenge_host_team":challenge_host_team "request":request } partial=<true> )<block_end><else_stmt><block_start>serializer=ChallengeHostSerializer(challenge_host data=request.data context={"challenge_host_team":challenge_host_team "request":request } )<block_end><if_stmt>serializer.is_valid()<block_start>serializer.save()<line_sep>response_data=serializer.data<line_sep><return>Response(response_data status=status.HTTP_200_OK)<block_end><else_stmt><block_start><return>Response(serializer.errors 
status=status.HTTP_400_BAD_REQUEST)<block_end><block_end><elif_stmt>request.method<eq>"DELETE"<block_start>challenge_host.delete()<line_sep><return>Response(status=status.HTTP_204_NO_CONTENT)<block_end><block_end>@api_view(["POST"])@throttle_classes([UserRateThrottle])@permission_classes((permissions.IsAuthenticated HasVerifiedEmail))@authentication_classes((JWTAuthentication ExpiringTokenAuthentication))<def_stmt>create_challenge_host_team request<block_start>serializer=ChallengeHostTeamSerializer(data=request.data context={"request":request})<if_stmt>serializer.is_valid()<block_start>serializer.save()<line_sep>response_data=serializer.data<line_sep>challenge_host_team=serializer.instance<line_sep>challenge_host=ChallengeHost(user=request.user status=ChallengeHost.SELF permissions=ChallengeHost.ADMIN team_name=challenge_host_team )<line_sep>challenge_host.save()<line_sep><return>Response(response_data status=status.HTTP_201_CREATED)<block_end><return>Response(serializer.errors status=status.HTTP_400_BAD_REQUEST)<block_end>@api_view(["DELETE"])@throttle_classes([UserRateThrottle])@permission_classes((permissions.IsAuthenticated HasVerifiedEmail))@authentication_classes((JWTAuthentication ExpiringTokenAuthentication))<def_stmt>remove_self_from_challenge_host_team request challenge_host_team_pk<block_start>"""
A user can remove himself from the challenge host team.
"""<try_stmt><block_start>ChallengeHostTeam.objects.get(pk=challenge_host_team_pk)<block_end><except_stmt>ChallengeHostTeam.DoesNotExist<block_start>response_data={"error":"ChallengeHostTeam does not exist"}<line_sep><return>Response(response_data status=status.HTTP_406_NOT_ACCEPTABLE)<block_end><try_stmt><block_start>challenge_host=ChallengeHost.objects.filter(user=request.user.id team_name__pk=challenge_host_team_pk)<line_sep>challenge_host.delete()<line_sep><return>Response(status=status.HTTP_204_NO_CONTENT)<block_end><except_stmt># noqa E722
<block_start>response_data={"error":"Sorry, you do not belong to this team."}<line_sep><return>Response(response_data status=status.HTTP_401_UNAUTHORIZED)<block_end><block_end>@api_view(["POST"])@throttle_classes([UserRateThrottle])@permission_classes((permissions.IsAuthenticated HasVerifiedEmail))@authentication_classes((JWTAuthentication ExpiringTokenAuthentication))<def_stmt>invite_host_to_team request pk<block_start><try_stmt><block_start>challenge_host_team=ChallengeHostTeam.objects.get(pk=pk)<block_end><except_stmt>ChallengeHostTeam.DoesNotExist<block_start>response_data={"error":"Host Team does not exist"}<line_sep><return>Response(response_data status=status.HTTP_406_NOT_ACCEPTABLE)<block_end>email=request.data.get("email")<try_stmt><block_start>user=User.objects.get(email=email)<block_end><except_stmt>User.DoesNotExist<block_start>response_data={"error":"User does not exist with this email address!"}<line_sep><return>Response(response_data status=status.HTTP_406_NOT_ACCEPTABLE)<block_end># Check if the user requesting this API is part of host team
<if_stmt><not>is_user_part_of_host_team(request.user challenge_host_team)<block_start>response_data={"error":"You are not a member of this team!"}<line_sep><return>Response(response_data status=status.HTTP_400_BAD_REQUEST)<block_end>host=ChallengeHost.objects.filter(team_name=challenge_host_team user=user)<if_stmt>host.exists()<block_start>response_data={"error":"User is already part of the team!"}<line_sep><return>Response(response_data status=status.HTTP_406_NOT_ACCEPTABLE)<block_end>serializer=InviteHostToTeamSerializer(data=request.data context={"challenge_host_team":challenge_host_team "request":request } )<if_stmt>serializer.is_valid()<block_start>serializer.save()<line_sep>response_data={"message":"User has been added successfully to the host team"}<line_sep><return>Response(response_data status=status.HTTP_202_ACCEPTED)<block_end><return>Response(serializer.errors status=status.HTTP_400_BAD_REQUEST)<block_end> |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
<import_from_stmt>typing Union<def_stmt>separate_by_count total_length:int part_count:int<arrow>(int int)<block_start>"""
Calculates how many full parts of size part_count fit into total_length, and the remaining count
For example: separate_by_count(total_length=196582, part_count=10000) returns (19, 6582) -> 19*10000 + 6582=196582
:param total_length:
:param part_count:
:return: Returns the total_parts and last part count
"""<line_sep>total_parts=total_length<floordiv>part_count<line_sep>last_part=total_length-(part_count<times>total_parts)<line_sep><return>total_parts last_part<block_end><def_stmt>separate_items_by_count item_list:Union[list tuple] part_count:int<arrow>list<block_start><if_stmt><not>item_list<block_start><return>[]<block_end>total_parts,_=separate_by_count(len(item_list) part_count)<line_sep>result_list=[]<for_stmt>i range(total_parts)<block_start>result_list.append(item_list[part_count<times>i:part_count<times>(i+1)])<block_end><if_stmt>len(item_list)%part_count<ne>0<block_start>result_list.append(item_list[total_parts<times>part_count:])<block_end><return>result_list<block_end> |
<import_stmt>subprocess os<line_sep>win_binary_path='UE4Binaries/{project_name}/WindowsNoEditor/{project_name}.exe'<line_sep>linux_binary_path='./UE4Binaries/{project_name}/LinuxNoEditor/{project_name}/Binaries/Linux/{project_name}'<line_sep>mac_binary_path='./UE4Binaries/{project_name}/MacNoEditor/{project_name}.app'<line_sep>project_names=['RealisticRendering' 'ArchinteriorsVol2Scene1' 'ArchinteriorsVol2Scene2' 'ArchinteriorsVol2Scene3' 'UrbanCity' 'Matinee' 'PhotorealisticCharacter']<line_sep>binaries=[]<line_sep>binaries<augadd>[linux_binary_path.format(project_name=v)<for>v project_names]<line_sep>binaries<augadd>[win_binary_path.format(project_name=v)<for>v project_names]<line_sep>binaries<augadd>[mac_binary_path.format(project_name=v)<for>v project_names]<if_stmt>__name__<eq>'__main__'<block_start><if_stmt><not>os.path.isdir('output')<block_start>os.mkdir('output')<block_end><for_stmt>binary_path binaries<block_start>project_name=os.path.basename(binary_path).split('.')[0]<line_sep>output_folder=os.path.join('output' project_name)<if_stmt><not>os.path.isfile(binary_path)<and><not>os.path.isdir(binary_path)<block_start>print('Can not find binary "%s", skip'%binary_path)<line_sep><continue><block_end>print('Testing %s ..., output will be saved to "%s"'%(binary_path output_folder))<line_sep>subprocess.call(['python' 'examples/commands_demo.py' binary_path '--output' output_folder])<block_end><block_end> |
<import_stmt>iota_client<line_sep>client=iota_client.Client()<line_sep>print(client.get_output("a22cba0667c922cbb1f8bdcaf970b2a881ccd6e88e2fcce50374de2aac7c37720000"))<line_sep> |
# -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=invalid-name,no-member
"""Migrate some legacy process attributes.
Attribute keys that are renamed:
* `_sealed` -> `sealed`
Attribute keys that are removed entirely:
* `_finished`
* `_failed`
* `_aborted`
* `_do_abort`
Finally, after these first migrations, any remaining process nodes that still do not have a sealed attribute will have
it set to `True`. Excluding the nodes that have a `process_state` attribute of one of the active states `created`,
`running` or `waiting`, because those are actual valid active processes that are not yet sealed.
This is identical to migration e734dd5e50d7
Revision ID: django_0040
Revises: django_0039
"""<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<line_sep>revision='django_0040'<line_sep>down_revision='django_0039'<line_sep>branch_labels=<none><line_sep>depends_on=<none><def_stmt>upgrade <block_start>"""Migrations for the upgrade."""<line_sep>conn=op.get_bind()<line_sep>statement=sa.text("""
UPDATE db_dbnode
SET attributes = jsonb_set(attributes, '{"sealed"}', attributes->'_sealed')
WHERE attributes ? '_sealed' AND node_type LIKE 'process.%';
-- Copy `_sealed` -> `sealed`
UPDATE db_dbnode SET attributes = attributes - '_sealed'
WHERE attributes ? '_sealed' AND node_type LIKE 'process.%';
-- Delete `_sealed`
UPDATE db_dbnode SET attributes = attributes - '_finished'
WHERE attributes ? '_finished' AND node_type LIKE 'process.%';
-- Delete `_finished`
UPDATE db_dbnode SET attributes = attributes - '_failed'
WHERE attributes ? '_failed' AND node_type LIKE 'process.%';
-- Delete `_failed`
UPDATE db_dbnode SET attributes = attributes - '_aborted'
WHERE attributes ? '_aborted' AND node_type LIKE 'process.%';
-- Delete `_aborted`
UPDATE db_dbnode SET attributes = attributes - '_do_abort'
WHERE attributes ? '_do_abort' AND node_type LIKE 'process.%';
-- Delete `_do_abort`
UPDATE db_dbnode
SET attributes = jsonb_set(attributes, '{"sealed"}', to_jsonb(True))
WHERE
node_type LIKE 'process.%' AND
NOT (attributes ? 'sealed') AND
attributes->>'process_state' NOT IN ('created', 'running', 'waiting');
-- Set `sealed=True` for process nodes that do not yet have a `sealed` attribute AND are not in an active state
""")<line_sep>conn.execute(statement)<block_end><def_stmt>downgrade <block_start>"""Migrations for the downgrade."""<line_sep><raise>NotImplementedError('Downgrade of django_0040.')<block_end> |
# stub to allow changing the map without having to alter gta_model.sc
<import_stmt>os<line_sep>mapPath='map.npz'<def_stmt>setLocalMap module relpath<block_start><global>mapPath<line_sep>base=os.path.dirname(module)<line_sep>mapPath=os.path.join(base relpath)<block_end> |
<import_from_stmt>django.test TestCase<import_from_stmt>django.utils timezone<import_from_stmt>.models Episode<class_stmt>PodCastsTests(TestCase)<block_start><def_stmt>setUp self<block_start>self.episode=Episode.objects.create(title="My Awesome Podcast Episode" description="Look mom, I made it!" pub_date=timezone.now() link="https://myawesomeshow.com" image="https://image.myawesomeshow.com" podcast_name="My Python Podcast" guid="de194720-7b4c-49e2-a05f-432436d3fetr" )<block_end><def_stmt>test_episode_content self<block_start>self.assertEqual(self.episode.description "Look mom, I made it!")<line_sep>self.assertEqual(self.episode.link "https://myawesomeshow.com")<line_sep>self.assertEqual(self.episode.guid "de194720-7b4c-49e2-a05f-432436d3fetr")<block_end><def_stmt>test_episode_str_representation self<block_start>self.assertEqual(str(self.episode) "My Python Podcast: My Awesome Podcast Episode")<block_end><block_end> |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""add granularity to charts where missing
Revision ID: <KEY>
Revises: <PASSWORD>
Create Date: 2021-02-04 09:34:13.608891
"""<line_sep># revision identifiers, used by Alembic.
revision="<KEY>"<line_sep>down_revision="4<PASSWORD>"<import_stmt>json<import_from_stmt>alembic op<import_from_stmt>sqlalchemy and_ Boolean Column Integer String Text<import_from_stmt>sqlalchemy.ext.declarative declarative_base<import_from_stmt>superset db<line_sep>Base=declarative_base()<class_stmt>Slice(Base)<block_start>__tablename__="slices"<line_sep>id=Column(Integer primary_key=<true>)<line_sep>params=Column(Text)<line_sep>datasource_id=Column(Integer)<line_sep>datasource_type=Column(String(200))<block_end><class_stmt>SqlaTable(Base)<block_start>__tablename__="tables"<line_sep>id=Column(Integer primary_key=<true>)<line_sep>main_dttm_col=Column(String(250))<block_end><class_stmt>TableColumn(Base)<block_start>__tablename__="table_columns"<line_sep>id=Column(Integer primary_key=<true>)<line_sep>table_id=Column(Integer)<line_sep>is_dttm=Column(Boolean)<line_sep>column_name=Column(String(255))<block_end><def_stmt>upgrade <block_start>"""
Adds the granularity param to charts without it populated. This is required for
time range filtering to work properly. Uses the following approach:
- Find all charts without a granularity or granularity_sqla param.
- Get the dataset that backs the chart.
- If the dataset has the main dttm column set, use it.
- Otherwise, find all the dttm columns in the dataset and use the first one (this
matches the behavior of Explore view on the frontend)
- If no dttm columns exist in the dataset, don't change the chart.
"""<line_sep>bind=op.get_bind()<line_sep>session=db.Session(bind=bind)<line_sep>slices_changed=0<for_stmt>slc (session.query(Slice).filter(and_(Slice.datasource_type<eq>"table" Slice.params.notlike('%"granularity%'))).all())<block_start><try_stmt><block_start>params=json.loads(slc.params)<if_stmt>"granularity"<in>params<or>"granularity_sqla"<in>params<block_start><continue><block_end>table=session.query(SqlaTable).get(slc.datasource_id)<if_stmt><not>table<block_start><continue><block_end><if_stmt>table.main_dttm_col<block_start>params["granularity"]=table.main_dttm_col<line_sep>slc.params=json.dumps(params sort_keys=<true>)<line_sep>print(f"Set granularity for slice {slc.id} to {table.main_dttm_col}")<line_sep>slices_changed<augadd>1<line_sep><continue><block_end>table_columns=(session.query(TableColumn).filter(TableColumn.table_id<eq>table.id).filter(TableColumn.is_dttm<eq><true>).all())<if_stmt>len(table_columns)<block_start>params["granularity"]=table_columns[0].column_name<line_sep>slc.params=json.dumps(params sort_keys=<true>)<line_sep>print(f"Set granularity for slice {slc.id} to {table_columns[0].column_name}")<line_sep>slices_changed<augadd>1<block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<line_sep>print(f"Parsing params for slice {slc.id} failed.")<line_sep><pass><block_end><block_end>print(f"{slices_changed} slices altered")<line_sep>session.commit()<line_sep>session.close()<block_end><def_stmt>downgrade <block_start>"""
It's impossible to downgrade this migration.
"""<line_sep><pass><block_end> |
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>matplotlib.pyplot<as>plt<import_stmt>pandas<as>pd<import_from_stmt>qf_lib.common.enums.orientation Orientation<import_from_stmt>qf_lib.plotting.charts.bar_chart BarChart<import_from_stmt>qf_lib.plotting.decorators.data_element_decorator DataElementDecorator<import_from_stmt>qf_lib.plotting.helpers.index_translator IndexTranslator<line_sep>index=['constant' 'b' 'c' 'd']<line_sep># index = [0, 4, 5, 6]
labels_to_locations_dict={'constant':0 'b':4 'c':5 'd':6}<line_sep>colors=['orange']+['forestgreen']<times>3<def_stmt>main # using automatic mapping between labels and locations
<block_start>bar_chart2=BarChart(orientation=Orientation.Horizontal index_translator=IndexTranslator() thickness=1.0 color=colors align='center')<line_sep>bar_chart2.add_decorator(DataElementDecorator(pd.Series(data=[1 2 3 4] index=index)))<line_sep>bar_chart2.add_decorator(DataElementDecorator(pd.Series(data=[3 1 2 4] index=index)))<line_sep>bar_chart2.plot()<line_sep># using custom mapping between labels and locations
bar_chart=BarChart(orientation=Orientation.Horizontal index_translator=IndexTranslator(labels_to_locations_dict) thickness=1.0 color=colors align='center')<line_sep>bar_chart.add_decorator(DataElementDecorator(pd.Series(data=[1 2 3 4] index=index)))<line_sep>bar_chart.add_decorator(DataElementDecorator(pd.Series(data=[3 1 2 4] index=index)))<line_sep>bar_chart.plot()<line_sep>plt.show(block=<true>)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
<import_stmt>logging<import_from_stmt>http cookiejar<as>http_cookiejar<import_from_stmt>http.cookiejar http2time# type: ignore
<import_from_stmt>typing Any# noqa
<import_from_stmt>typing Dict# noqa
<import_from_stmt>urllib.parse parse_qs<import_from_stmt>urllib.parse urlsplit<import_from_stmt>urllib.parse urlunsplit<import_from_stmt>oic.exception UnSupported<import_from_stmt>oic.oauth2.exception TimeFormatError<import_from_stmt>oic.utils.sanitize sanitize<line_sep>logger=logging.getLogger(__name__)<line_sep>__author__="roland"<line_sep>URL_ENCODED="application/x-www-form-urlencoded"<line_sep>JSON_ENCODED="application/json"<line_sep>DEFAULT_POST_CONTENT_TYPE=URL_ENCODED<line_sep>PAIRS={"port":"port_specified" "domain":"domain_specified" "path":"path_specified" }<line_sep>ATTRS={"version":<none> "name":"" "value":<none> "port":<none> "port_specified":<false> "domain":"" "domain_specified":<false> "domain_initial_dot":<false> "path":"" "path_specified":<false> "secure":<false> "expires":<none> "discard":<true> "comment":<none> "comment_url":<none> "rest":"" "rfc2109":<true> }<line_sep># type: Dict[str, Any]
<def_stmt>get_or_post uri method req content_type=DEFAULT_POST_CONTENT_TYPE accept=<none> **kwargs<block_start>"""
Construct HTTP request.
:param uri:
:param method:
:param req:
:param content_type:
:param accept:
:param kwargs:
:return:
"""<if_stmt>method<in>["GET" "DELETE"]<block_start><if_stmt>req.keys()<block_start>_req=req.copy()<line_sep>comp=urlsplit(str(uri))<if_stmt>comp.query<block_start>_req.update(parse_qs(comp.query))<block_end>_query=str(_req.to_urlencoded())<line_sep>path=urlunsplit((comp.scheme comp.netloc comp.path _query comp.fragment))<block_end><else_stmt><block_start>path=uri<block_end>body=<none><block_end><elif_stmt>method<in>["POST" "PUT"]<block_start>path=uri<if_stmt>content_type<eq>URL_ENCODED<block_start>body=req.to_urlencoded()<block_end><elif_stmt>content_type<eq>JSON_ENCODED<block_start>body=req.to_json()<block_end><else_stmt><block_start><raise>UnSupported("Unsupported content type: '%s'"%content_type)<block_end>header_ext={"Content-Type":content_type}<if_stmt>accept<block_start>header_ext={"Accept":accept}<block_end><if_stmt>"headers"<in>kwargs.keys()<block_start>kwargs["headers"].update(header_ext)<block_end><else_stmt><block_start>kwargs["headers"]=header_ext<block_end><block_end><else_stmt><block_start><raise>UnSupported("Unsupported HTTP method: '%s'"%method)<block_end><return>path body kwargs<block_end><def_stmt>set_cookie cookiejar kaka<block_start>"""
Place a cookie (a http_cookiejar.Cookie built from a set-cookie header line) in the cookie jar.
Always choose the shortest expires time.
:param cookiejar:
:param kaka: Cookie
"""<line_sep># default rfc2109=False
# max-age, httponly
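# (kaka is typically a http.cookies.SimpleCookie parsed from a Set-Cookie header;
# each morsel below is one cookie taken from that header.)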
<for_stmt>cookie_name,morsel kaka.items()<block_start>std_attr=ATTRS.copy()<line_sep>std_attr["name"]=cookie_name<line_sep>_tmp=morsel.coded_value<if_stmt>_tmp.startswith('"')<and>_tmp.endswith('"')<block_start>std_attr["value"]=_tmp[1:-1]<block_end><else_stmt><block_start>std_attr["value"]=_tmp<block_end>std_attr["version"]=0<line_sep>attr=""<line_sep># copy attributes that have values
<try_stmt><block_start><for_stmt>attr morsel.keys()<block_start><if_stmt>attr<in>ATTRS<block_start><if_stmt>morsel[attr]<block_start><if_stmt>attr<eq>"expires"<block_start>std_attr[attr]=http2time(morsel[attr])<block_end><else_stmt><block_start>std_attr[attr]=morsel[attr]<block_end><block_end><block_end><elif_stmt>attr<eq>"max-age"<block_start><if_stmt>morsel[attr]<block_start>std_attr["expires"]=http2time(morsel[attr])<block_end><block_end><block_end><block_end><except_stmt>TimeFormatError# Ignore cookie
<block_start>logger.info("Time format error on %s parameter in received cookie"%(sanitize(attr) ))<line_sep><continue><block_end><for_stmt>att,spec PAIRS.items()<block_start><if_stmt>std_attr[att]<block_start>std_attr[spec]=<true><block_end><block_end><if_stmt>std_attr["domain"]<and>std_attr["domain"].startswith(".")<block_start>std_attr["domain_initial_dot"]=<true><block_end><if_stmt>morsel["max-age"]<eq>0<block_start><try_stmt><block_start>cookiejar.clear(domain=std_attr["domain"] path=std_attr["path"] name=std_attr["name"] )<block_end><except_stmt>ValueError<block_start><pass><block_end><block_end><else_stmt># Fix for Microsoft cookie error
<block_start><if_stmt>"version"<in>std_attr<block_start><try_stmt><block_start>std_attr["version"]=std_attr["version"].split(",")[0]<block_end><except_stmt>(TypeError AttributeError)<block_start><pass><block_end><block_end>new_cookie=http_cookiejar.Cookie(**std_attr)# type: ignore
cookiejar.set_cookie(new_cookie)<block_end><block_end><block_end><def_stmt>match_to_ val vlist<block_start><if_stmt>isinstance(vlist str)<block_start><if_stmt>vlist.startswith(val)<block_start><return><true><block_end><block_end><else_stmt><block_start><for_stmt>v vlist<block_start><if_stmt>v.startswith(val)<block_start><return><true><block_end><block_end><block_end><return><false><block_end><def_stmt>verify_header reqresp body_type<block_start>logger.debug("resp.headers: %s"%(sanitize(reqresp.headers) ))<line_sep>logger.debug("resp.txt: %s"%(sanitize(reqresp.text) ))<if_stmt>body_type<eq>""<block_start>_ctype=reqresp.headers["content-type"]<if_stmt>match_to_("application/json" _ctype)<block_start>body_type="json"<block_end><elif_stmt>match_to_("application/jwt" _ctype)<block_start>body_type="jwt"<block_end><elif_stmt>match_to_(URL_ENCODED _ctype)<block_start>body_type="urlencoded"<block_end><else_stmt><block_start>body_type="txt"# reasonable default ??
<block_end><block_end><elif_stmt>body_type<eq>"json"<block_start><if_stmt><not>match_to_("application/json" reqresp.headers["content-type"])<block_start><if_stmt>match_to_("application/jwt" reqresp.headers["content-type"])<block_start>body_type="jwt"<block_end><else_stmt><block_start><raise>ValueError("content-type: %s"%(reqresp.headers["content-type"] ))<block_end><block_end><block_end><elif_stmt>body_type<eq>"jwt"<block_start><if_stmt><not>match_to_("application/jwt" reqresp.headers["content-type"])<block_start><raise>ValueError("Wrong content-type in header, got: {} expected "<concat>"'application/jwt'".format(reqresp.headers["content-type"]))<block_end><block_end><elif_stmt>body_type<eq>"urlencoded"<block_start><if_stmt><not>match_to_(DEFAULT_POST_CONTENT_TYPE reqresp.headers["content-type"])<block_start><if_stmt><not>match_to_("text/plain" reqresp.headers["content-type"])<block_start><raise>ValueError("Wrong content-type")<block_end><block_end><block_end><else_stmt><block_start><raise>ValueError("Unknown return format: %s"%body_type)<block_end><return>body_type<block_end> |
<import_stmt>decimal<import_stmt>hashlib<import_stmt>json<import_stmt>requests<import_stmt>tempfile<import_stmt>uuid<import_stmt>os<import_from_stmt>tqdm tqdm<import_from_stmt>requests_toolbelt MultipartEncoder MultipartEncoderMonitor<def_stmt>sha256_for_file f buf_size=65536<block_start>pos=f.tell()<line_sep>dgst=hashlib.sha256()<while_stmt><true><block_start>data=f.read(buf_size)<if_stmt><not>data<block_start><break><block_end>dgst.update(data)<block_end>size=f.tell()-pos<line_sep>f.seek(pos)<line_sep><return>size dgst.hexdigest()<block_end>namespace="default"<line_sep>fission_url=os.environ["FISSION_URL"]<def_stmt>post rel_url data<block_start>response=requests.post("%s%s"%(fission_url rel_url) data=json.dumps(data) headers={"Content-Type":"application/json"})<line_sep># print("POST", rel_url)
# print(response, response.text)
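# 404 and 409 are returned to the caller as (status_code, None) instead of raising,
# so "not found" / "already exists" can be handled as non-fatal cases.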
<if_stmt>response.status_code<in>[404 409]<block_start><return>response.status_code <none><block_end><if_stmt>response.status_code<eq>500<block_start><raise>Exception(response.text)<block_end><return>response.status_code response.json()<block_end><def_stmt>get rel_url params=<none><block_start>response=requests.get("%s%s"%(fission_url rel_url) params=params)<if_stmt>response.status_code<eq>404<block_start><return>response.status_code <none><block_end><if_stmt>response.status_code<eq>500<block_start><raise>Exception(response.text)<block_end><return>response.status_code response.json()<block_end><def_stmt>format_bytes count<block_start>label_ix=0<line_sep>labels=["B" "KiB" "MiB" "GiB"]<while_stmt>label_ix<l>len(labels)<and>count/1024.<g>1<block_start>count=count/1024.<line_sep>label_ix<augadd>1<block_end>count=decimal.Decimal(count)<line_sep>count=count.to_integral()<if>count<eq>count.to_integral()<else>round(count.normalize() 2)<line_sep><return>"%s %s"%(count labels[label_ix])<block_end><def_stmt>lazily_define_package environment file<block_start>filesize,archive_sha256=sha256_for_file(file)<line_sep>base_archive_url="%s/proxy/storage/v1/archive"%fission_url<line_sep>status_code,response=get("/v2/packages/%s"%archive_sha256)<if_stmt>status_code<eq>200<block_start>print("Already uploaded" flush=<true>)<line_sep><return>archive_sha256 response<block_end>progress=tqdm(total=filesize desc="Uploading" unit="B" unit_scale=<true> unit_divisor=1024 leave=<true>)<line_sep>last_bytes_read=0<def_stmt>update_progress monitor# Your callback function
<block_start><nonlocal>last_bytes_read<line_sep>progress.update(monitor.bytes_read-last_bytes_read)<line_sep>last_bytes_read=monitor.bytes_read<block_end>e=MultipartEncoder(fields={'uploadfile':('uploaded' file 'text/plain')})<line_sep>m=MultipartEncoderMonitor(e update_progress)<line_sep>archive_response=requests.post(base_archive_url data=m headers={"X-File-Size":str(filesize) 'Content-Type':m.content_type})<line_sep>archive_id=archive_response.json()['id']<line_sep>print(" done" flush=<true>)<line_sep>archive_url="%s?id=%s"%(base_archive_url archive_id)<line_sep>package={"metadata":{"name":archive_sha256 "namespace":namespace } "spec":{"environment":environment "deployment":{"type":"url" "url":archive_url "checksum":{"type":"sha256" "sum":archive_sha256 } } } "status":{"buildstatus":"succeeded" } }<line_sep><return>archive_sha256 post("/v2/packages" package)[1]<block_end><def_stmt>lazily_define_function environment f<block_start>archive_sha256,package_ref=lazily_define_package(environment f)<line_sep>print("Registering ..." end='' flush=<true>)<line_sep>function_name=archive_sha256[:8]<line_sep>status_code,response=get("/v2/functions/%s"%function_name)<if_stmt>status_code<eq>200<block_start><return>function_name<block_end>status_code,r=post("/v2/functions" {"metadata":{"name":function_name "namespace":namespace } "spec":{"environment":environment "package":{"functionName":function_name "packageref":package_ref } } })<if_stmt>status_code<eq>409<or>status_code<eq>201<block_start>print(" done" flush=<true>)<line_sep><return>function_name<block_end>print(" error" flush=<true>)<line_sep><raise>Exception(r.text)<block_end><def_stmt>lazily_define_trigger2 function_name http_method host relativeurl<block_start>trigger_name="%s-%s-%s"%(host.replace('.' '-') relativeurl.replace(':.*' '').replace('{' '').replace('}' '').replace('/' '-') http_method.lower())<line_sep>status_code,response=get("/v2/triggers/http/%s"%trigger_name)<if_stmt>status_code<eq>200<block_start><return><block_end>status_code,r=post("/v2/triggers/http" {"metadata":{"name":trigger_name "namespace":namespace } "spec":{"host":host "relativeurl":relativeurl "method":http_method "functionref":{"Type":"name" "Name":function_name } } })<if_stmt>status_code<eq>409<or>status_code<eq>201<block_start><return><block_end><raise>Exception(r.text)<block_end><def_stmt>publish environment_name f<block_start>environment={"namespace":namespace "name":environment_name }<line_sep>function_name=lazily_define_function(environment f)<line_sep>host="%s.tfi.gcp.tesserai.com"%function_name<line_sep>lazily_define_trigger2(function_name "POST" host "/{path-info:.*}")<line_sep>lazily_define_trigger2(function_name "GET" host "/{path-info:.*}")<line_sep>lazily_define_trigger2(function_name "GET" host "/")<line_sep><return>"http://%s"%host<block_end> |
# -*- coding: utf-8 -*-
""" Auto Encoder Example.
Using an auto encoder on MNIST handwritten digits.
References:
<NAME>, <NAME>, <NAME>, and <NAME>. "Gradient-based
learning applied to document recognition." Proceedings of the IEEE,
86(11):2278-2324, November 1998.
Links:
[MNIST Dataset] http://yann.lecun.com/exdb/mnist/
"""<import_from_future_stmt> division print_function absolute_import<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_stmt>tflearn<line_sep># Data loading and preprocessing
<import_stmt>tflearn.datasets.mnist<as>mnist<line_sep>X,Y,testX,testY=mnist.load_data(one_hot=<true>)<line_sep># Building the encoder
encoder=tflearn.input_data(shape=[<none> 784])<line_sep>encoder=tflearn.fully_connected(encoder 256)<line_sep>encoder=tflearn.fully_connected(encoder 64)<line_sep># Building the decoder
decoder=tflearn.fully_connected(encoder 256)<line_sep>decoder=tflearn.fully_connected(decoder 784 activation='sigmoid')<line_sep># Regression, with mean square error
net=tflearn.regression(decoder optimizer='adam' learning_rate=0.001 loss='mean_square' metric=<none>)<line_sep># Training the auto encoder
model=tflearn.DNN(net tensorboard_verbose=0)<line_sep>model.fit(X X n_epoch=20 validation_set=(testX testX) run_id="auto_encoder" batch_size=256)<line_sep># Encoding X[0] for test
print("\nTest encoding of X[0]:")<line_sep># New model, re-using the same session, for weights sharing
encoding_model=tflearn.DNN(encoder session=model.session)<line_sep>print(encoding_model.predict([X[0]]))<line_sep># Testing the image reconstruction on new data (test set)
print("\nVisualizing results after being encoded and decoded:")<line_sep>testX=tflearn.data_utils.shuffle(testX)[0]<line_sep># Applying encode and decode over test set
encode_decode=model.predict(testX)<line_sep># Compare original images with their reconstructions
f,a=plt.subplots(2 10 figsize=(10 2))<for_stmt>i range(10)<block_start>temp=[[ii ii ii]<for>ii list(testX[i])]<line_sep>a[0][i].imshow(np.reshape(temp (28 28 3)))<line_sep>temp=[[ii ii ii]<for>ii list(encode_decode[i])]<line_sep>a[1][i].imshow(np.reshape(temp (28 28 3)))<block_end>f.show()<line_sep>plt.draw()<line_sep>plt.waitforbuttonpress()<line_sep> |
<import_from_future_stmt> annotations<line_sep>__all__=("executor" )<import_stmt>inspect<import_stmt>sys<import_from_stmt>asyncio get_running_loop<import_from_stmt>concurrent.futures Executor<import_from_stmt>functools partial wraps<import_from_stmt>typing Awaitable Callable TypeVar overload<import_from_stmt>asphalt.core Context<if_stmt>sys.version_info<ge>(3 10)<block_start><import_from_stmt>typing Concatenate ParamSpec<block_end><else_stmt><block_start><import_from_stmt>typing_extensions Concatenate ParamSpec<block_end>T_Retval=TypeVar("T_Retval")<line_sep>P=ParamSpec("P")<line_sep>@overload<def_stmt>executor func_or_executor:Executor|str <arrow>Callable[[Callable[Concatenate[Context P] T_Retval]] Callable[Concatenate[Context P] T_Retval|Awaitable[T_Retval]] ]<block_start><ellipsis><block_end>@overload<def_stmt>executor func_or_executor:Callable[Concatenate[Context P] T_Retval]<arrow>Callable[Concatenate[Context P] T_Retval|Awaitable[T_Retval]]<block_start><ellipsis><block_end><def_stmt>executor func_or_executor:Executor|str|Callable[Concatenate[Context P] T_Retval]<arrow>(Callable[[Callable[Concatenate[Context P] T_Retval]] Callable[Concatenate[Context P] T_Retval|Awaitable[T_Retval]] ]|Callable[Concatenate[Context P] T_Retval|Awaitable[T_Retval]])<block_start>"""
Decorate a function to run in an executor.
If no executor (or ``None``) is given, the current event loop's default executor is
used. Otherwise, the argument must be a PEP 3148 compliant thread pool executor or
the name of an :class:`~concurrent.futures.Executor` instance.
If a decorated callable is called in a worker thread, the executor argument is
ignored and the wrapped function is called directly.
Callables wrapped with this decorator must be used with ``await`` when called in the
event loop thread.
Example use with the default executor (``None``)::
@executor
def this_runs_in_threadpool(ctx):
return do_something_cpu_intensive()
async def request_handler(ctx):
result = await this_runs_in_threadpool(ctx)
With a named :class:`~concurrent.futures.Executor` resource::
@executor('special_ops')
def this_runs_in_threadpool(ctx):
return do_something_cpu_intensive()
async def request_handler(ctx):
result = await this_runs_in_threadpool(ctx)
:param func_or_executor: either a callable (when used as a decorator), an executor
instance or the name of an :class:`~concurrent.futures.Executor` resource
"""<def_stmt>outer func:Callable[Concatenate[Context P] T_Retval]<arrow>Callable[Concatenate[Context P] T_Retval|Awaitable[T_Retval]]<block_start><def_stmt>wrapper ctx:Context *args:P.args **kwargs:P.kwargs<arrow>T_Retval|Awaitable[T_Retval]<block_start><try_stmt><block_start>loop=get_running_loop()<block_end><except_stmt>RuntimeError# Event loop not available -- we're in a worker thread
<block_start><return>func(ctx *args **kwargs)<block_end># Resolve the executor resource name to an Executor instance
_executor:Executor|<none><if_stmt>isinstance(executor str)<block_start>_executor=ctx.require_resource(Executor executor)<block_end><else_stmt><block_start>_executor=executor<block_end>callback=partial(func ctx *args **kwargs)<line_sep><return>loop.run_in_executor(_executor callback)<block_end><assert_stmt><not>inspect.iscoroutinefunction(func) "Cannot wrap coroutine functions to be run in an executor"<line_sep><return>wraps(func)(wrapper)<block_end>executor:Executor|str|<none>=<none><if_stmt>isinstance(func_or_executor (str Executor))<block_start>executor=func_or_executor<line_sep><return>outer<block_end><else_stmt><block_start><return>outer(func_or_executor)<block_end><block_end> |
"""
pyexcel_xlsx
~~~~~~~~~~~~~~~~~~~
The lower level xlsx file format handler using openpyxl
:copyright: (c) 2015-2019 by Onni Software Ltd & its contributors
:license: New BSD License
"""<import_from_stmt>pyexcel_io.io get_data<as>read_data<import_from_stmt>pyexcel_io.io isstream<import_from_stmt>pyexcel_io.io save_data<as>write_data<import_from_stmt>pyexcel_io.plugins IOPluginInfoChainV2<line_sep>__FILE_TYPE__="xlsx"<line_sep>IOPluginInfoChainV2(__name__).add_a_reader(relative_plugin_class_path="xlsxr.XLSXBook" locations=["file" "memory"] file_types=[__FILE_TYPE__ "xlsm"] stream_type="binary" ).add_a_reader(relative_plugin_class_path="xlsxr.XLSXBookInContent" locations=["content"] file_types=[__FILE_TYPE__ "xlsm"] stream_type="binary" ).add_a_writer(relative_plugin_class_path="xlsxw.XLSXWriter" locations=["file" "memory"] file_types=[__FILE_TYPE__ "xlsm"] stream_type="binary" )<def_stmt>save_data afile data file_type=<none> **keywords<block_start>"""standalone module function for writing module supported file type"""<if_stmt>isstream(afile)<and>file_type<is><none><block_start>file_type=__FILE_TYPE__<block_end>write_data(afile data file_type=file_type **keywords)<block_end><def_stmt>get_data afile file_type=<none> **keywords<block_start>"""standalone module function for reading module supported file type"""<if_stmt>isstream(afile)<and>file_type<is><none><block_start>file_type=__FILE_TYPE__<block_end><return>read_data(afile file_type=file_type **keywords)<block_end> |
<import_from_stmt>.dynamic_graph_temporal_signal *<import_from_stmt>.dynamic_graph_temporal_signal_batch *<import_from_stmt>.static_graph_temporal_signal *<import_from_stmt>.static_graph_temporal_signal_batch *<import_from_stmt>.dynamic_graph_static_signal *<import_from_stmt>.dynamic_graph_static_signal_batch *<import_from_stmt>.train_test_split *<line_sep> |
# grid relative
<import_from_stmt>.. BaseModel<import_from_stmt>.. db<class_stmt>Group(BaseModel)<block_start>__tablename__="group"<line_sep>id=db.Column(db.Integer() primary_key=<true> autoincrement=<true>)<line_sep>name=db.Column(db.String(255))<def_stmt>__str__ self<block_start><return>f"<Group id: {self.id}, name: {self.name}>"<block_end><block_end> |
# Authored by : gusdn3477
# Co-authored by : -
# Link : http://boj.kr/8adc986ae26b461eadd65abdff3cfba9
<import_stmt>sys<def_stmt>input <block_start><return>sys.stdin.readline().rstrip()<block_end>N=int(input())<line_sep>book={}<for_stmt>i range(N)<block_start>name=input()<if_stmt>name<not><in>book<block_start>book[name]=1<block_end><else_stmt><block_start>book[name]<augadd>1<block_end><block_end>book=list(book.items())<line_sep>book.sort(key=<lambda>x:(-x[1] x[0]))<line_sep>print(book[0][0])<line_sep> |
<import_stmt>csv<import_stmt>sys<import_from_stmt>pathlib Path<import_from_stmt>abc abstractmethod<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>tqdm tqdm<import_stmt>common.tf_utils<as>tf_utils<import_stmt>metrics.manager<as>metric_manager<import_from_stmt>common.model_loader Ckpt<import_from_stmt>common.utils format_text<import_from_stmt>common.utils get_logger<import_from_stmt>helper.base AudioBase<import_from_stmt>metrics.summaries BaseSummaries<import_from_stmt>metrics.summaries Summaries<class_stmt>Evaluator(object)<block_start><def_stmt>__init__ self model session args dataset dataset_name name<block_start>self.log=get_logger(name)<line_sep>self.model=model<line_sep>self.session=session<line_sep>self.args=args<line_sep>self.dataset=dataset<line_sep>self.dataset_name=dataset_name<if_stmt>Path(self.args.checkpoint_path).is_dir()<block_start>latest_checkpoint=tf.train.latest_checkpoint(self.args.checkpoint_path)<if_stmt>latest_checkpoint<is><not><none><block_start>self.args.checkpoint_path=latest_checkpoint<block_end>self.log.info(f"Get latest checkpoint and update to it: {self.args.checkpoint_path}")<block_end>self.watch_path=self._build_watch_path()<line_sep>self.session.run(tf.global_variables_initializer())<line_sep>self.session.run(tf.local_variables_initializer())<line_sep>self.ckpt_loader=Ckpt(session=session include_scopes=args.checkpoint_include_scopes exclude_scopes=args.checkpoint_exclude_scopes ignore_missing_vars=args.ignore_missing_vars use_ema=self.args.use_ema ema_decay=self.args.ema_decay )<block_end>@abstractmethod<def_stmt>setup_metric_manager self<block_start><raise>NotImplementedError<block_end>@abstractmethod<def_stmt>setup_metric_ops self<block_start><raise>NotImplementedError<block_end>@abstractmethod<def_stmt>build_non_tensor_data_from_eval_dict self eval_dict **kwargs<block_start><raise>NotImplementedError<block_end>@abstractmethod<def_stmt>setup_dataset_iterator self<block_start><raise>NotImplementedError<block_end><def_stmt>_build_watch_path self<block_start><if_stmt>Path(self.args.checkpoint_path).is_dir()<block_start><return>Path(self.args.checkpoint_path)<block_end><else_stmt><block_start><return>Path(self.args.checkpoint_path).parent<block_end><block_end><def_stmt>build_evaluation_step self checkpoint_path<block_start><if_stmt>"-"<in>checkpoint_path<and>checkpoint_path.split("-")[-1].isdigit()<block_start><return>int(checkpoint_path.split("-")[-1])<block_end><else_stmt><block_start><return>0<block_end><block_end><def_stmt>build_checkpoint_paths self checkpoint_path<block_start>checkpoint_glob=Path(checkpoint_path+"*")<line_sep>checkpoint_path=Path(checkpoint_path)<line_sep><return>checkpoint_glob checkpoint_path<block_end><def_stmt>build_miscellaneous_path self name<block_start>target_dir=self.watch_path/"miscellaneous"/self.dataset_name/name<if_stmt><not>target_dir.exists()<block_start>target_dir.mkdir(parents=<true>)<block_end><return>target_dir<block_end><def_stmt>setup_best_keeper self<block_start>metric_with_modes=self.metric_manager.get_best_keep_metric_with_modes()<line_sep>self.log.debug(metric_with_modes)<line_sep>self.best_keeper=tf_utils.BestKeeper(metric_with_modes self.dataset_name self.watch_path self.log )<block_end><def_stmt>evaluate_once self checkpoint_path<block_start>self.log.info("Evaluation 
started")<line_sep>self.setup_dataset_iterator()<line_sep>self.ckpt_loader.load(checkpoint_path)<line_sep>step=self.build_evaluation_step(checkpoint_path)<line_sep>checkpoint_glob,checkpoint_path=self.build_checkpoint_paths(checkpoint_path)<line_sep>self.session.run(tf.local_variables_initializer())<line_sep>eval_metric_dict=self.run_evaluation(step is_training=<false>)<line_sep>best_keep_metric_dict=self.metric_manager.filter_best_keep_metric(eval_metric_dict)<line_sep>is_keep,metrics_keep=self.best_keeper.monitor(self.dataset_name best_keep_metric_dict)<if_stmt>self.args.save_best_keeper<block_start>meta_info={"step":step "model_size":self.model.total_params }<line_sep>self.best_keeper.remove_old_best(self.dataset_name metrics_keep)<line_sep>self.best_keeper.save_best(self.dataset_name metrics_keep checkpoint_glob)<line_sep>self.best_keeper.remove_temp_dir()<line_sep>self.best_keeper.save_scores(self.dataset_name metrics_keep best_keep_metric_dict meta_info)<block_end>self.metric_manager.write_evaluation_summaries(step=step collection_keys=[BaseSummaries.KEY_TYPES.DEFAULT])<line_sep>self.metric_manager.log_metrics(step=step)<line_sep>self.log.info("Evaluation finished")<if_stmt>step<ge>self.args.max_step_from_restore<block_start>self.log.info("Evaluation stopped")<line_sep>sys.exit()<block_end><block_end><def_stmt>build_train_directory self<block_start><if_stmt>Path(self.args.checkpoint_path).is_dir()<block_start><return>str(self.args.checkpoint_path)<block_end><else_stmt><block_start><return>str(Path(self.args.checkpoint_path).parent)<block_end><block_end>@staticmethod<def_stmt>add_arguments parser<block_start>g=parser.add_argument_group("(Evaluator) arguments")<line_sep>g.add_argument("--valid_type" default="loop" type=str choices=["loop" "once"])<line_sep>g.add_argument("--max_outputs" default=5 type=int)<line_sep>g.add_argument("--maximum_num_labels_for_metric" default=10 type=int help="Maximum number of labels for using class-specific metrics(e.g. 
precision/recall/f1score)")<line_sep>g.add_argument("--no-save_best_keeper" dest="save_best_keeper" action="store_false")<line_sep>g.add_argument("--save_best_keeper" dest="save_best_keeper" action="store_true")<line_sep>g.set_defaults(save_best_keeper=<true>)<line_sep>g.add_argument("--no-flatten_output" dest="flatten_output" action="store_false")<line_sep>g.add_argument("--flatten_output" dest="flatten_output" action="store_true")<line_sep>g.set_defaults(flatten_output=<false>)<line_sep>g.add_argument("--max_step_from_restore" default=1e20 type=int)<block_end><block_end><class_stmt>SingleLabelAudioEvaluator(Evaluator AudioBase)<block_start><def_stmt>__init__ self model session args dataset dataset_name<block_start>super().__init__(model session args dataset dataset_name "SingleLabelAudioEvaluator")<line_sep>self.setup_dataset_related_attr()<line_sep>self.setup_metric_manager()<line_sep>self.setup_metric_ops()<line_sep>self.setup_best_keeper()<block_end><def_stmt>setup_dataset_related_attr self<block_start><assert_stmt>len(self.dataset.label_names)<eq>self.args.num_classes<line_sep>self.use_class_metrics=len(self.dataset.label_names)<l>self.args.maximum_num_labels_for_metric<block_end><def_stmt>setup_metric_manager self<block_start>self.metric_manager=metric_manager.AudioMetricManager(is_training=<false> use_class_metrics=self.use_class_metrics exclude_metric_names=self.args.exclude_metric_names summary=Summaries(session=self.session train_dir=self.build_train_directory() is_training=<false> base_name=self.dataset.dataset_split_name max_summary_outputs=self.args.max_summary_outputs ) )<block_end><def_stmt>setup_metric_ops self<block_start>losses=self.build_basic_loss_ops()<line_sep>self.metric_tf_op=self.metric_manager.build_metric_ops({"dataset_split_name":self.dataset_name "label_names":self.dataset.label_names "losses":losses "learning_rate":<none> "wavs":self.model.audio_original })<block_end><def_stmt>build_non_tensor_data_from_eval_dict self eval_dict **kwargs<block_start><return>{"dataset_split_name":self.dataset.dataset_split_name "label_names":self.dataset.label_names "predictions_onehot":eval_dict["predictions_onehot"] "labels_onehot":eval_dict["labels_onehot"] }<block_end><def_stmt>setup_dataset_iterator self<block_start>self.dataset.setup_iterator(self.session self.dataset.placeholders self.dataset.data )<block_end><block_end> |
#
#
# Copyright (C) 2010-2021 ARM Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_from_stmt>sympy.ntheory factorint<import_stmt>numpy<as>np<import_from_stmt>sympy.combinatorics Permutation<import_stmt>io<import_stmt>math<import_from_stmt>config.strtools *<import_stmt>itertools<import_stmt>struct<import_stmt>config.formats<line_sep># Conversion of double to fixed point values
#
# Negating 0x8000 gives 0x8000 again in C (int16 overflow),
# so a multiplication by it gives the wrong sign for the result
# of the multiplication, except if DSPE instructions with saturation are used
# to compute the negate (in which case we should get 0x7FFF).
#
# So for Cortex-M without the DSP extension, we should try to use 0x8001 instead.
# This is done but not yet tested.
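# For illustration (values follow directly from the helpers below):
# to_q15(0.5, True) returns "0x4000" and to_q15(-0.5, True) returns "0xC000"
# (two's complement packed as an unsigned hex string).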
<def_stmt>to_q63 v dspe<block_start>r=int(round(v<times>2<power>63))<if_stmt>(r<g>0x07FFFFFFFFFFFFFFF)<block_start>r=0x07FFFFFFFFFFFFFFF<block_end><if_stmt>(r<l>-0x08000000000000000)<block_start><if_stmt>dspe<block_start>r=-0x08000000000000000<block_end><else_stmt><block_start>r=-0x07FFFFFFFFFFFFFFF<block_end><block_end><return>("0x%s"%format(struct.unpack('<Q' struct.pack('<q' r))[0] '016X'))<block_end><def_stmt>to_q31 v dspe<block_start>r=int(round(v<times>2<power>31))<if_stmt>(r<g>0x07FFFFFFF)<block_start>r=0x07FFFFFFF<block_end><if_stmt>(r<l>-0x080000000)<block_start><if_stmt>dspe<block_start>r=-0x080000000<block_end><else_stmt><block_start>r=-0x07FFFFFFF<block_end><block_end><return>("0x%s"%format(struct.unpack('<I' struct.pack('<i' r))[0] '08X'))<block_end><def_stmt>to_q15 v dspe<block_start>r=int(round(v<times>2<power>15))<if_stmt>(r<g>0x07FFF)<block_start>r=0x07FFF<block_end><if_stmt>(r<l>-0x08000)<block_start><if_stmt>dspe<block_start>r=-0x08000<block_end><else_stmt><block_start>r=-0x07FFF<block_end><block_end><return>("0x%s"%format(struct.unpack('<H' struct.pack('<h' r))[0] '04X'))<block_end><def_stmt>to_q7 v dspe<block_start>r=int(round(v<times>2<power>7))<if_stmt>(r<g>0x07F)<block_start>r=0x07F<block_end><if_stmt>(r<l>-0x080)#
<block_start><if_stmt>dspe<block_start>r=-0x080<block_end><else_stmt><block_start>r=-0x07F<block_end><block_end><return>("0x%s"%format(struct.unpack('<B' struct.pack('<b' r))[0] '02X'))<block_end>Q7=1<line_sep>Q15=2<line_sep>Q31=3<line_sep>F16=4<line_sep>F32=5<line_sep>F64=6<line_sep># In the final C++ code, we have a loop for a given radix.
# The input list here does not have its factors grouped yet.
# The list needs to be transformed into a list of pairs,
# each pair being (radix, exponent).
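# For example: groupFactors([2, 2, 2, 4]) returns [2, 3, 4, 1]
# (three radix-2 stages followed by one radix-4 stage).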
<def_stmt>groupFactors factors<block_start>n=0<line_sep>current=-1<line_sep>result=[]<for_stmt>f factors<block_start><if_stmt>f<ne>current<block_start><if_stmt>current<ne>-1<block_start>result=result+[current n]<block_end>current=f<line_sep>n=1<block_end><else_stmt><block_start>n=n+1<block_end><block_end>result=result+[current n]<line_sep><return>(result)<block_end># Compute the grouped factors for the FFT length originaln
# where the only possible radices are those in the primitiveFactors list.
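# For example, with primitiveFactors [2, 4, 8] and originaln 128 this returns
# ([2, 1, 8, 2], [2, 8, 8], [64, 8, 1]).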
<def_stmt>getFactors primitiveFactors originaln<block_start>factors=[]<line_sep>length=[]<line_sep>primitiveFactors.sort(reverse=<true>)<line_sep>n=originaln<while_stmt>(n<g>1)<and>primitiveFactors<block_start><if_stmt>(n%primitiveFactors[0]<eq>0)<block_start>factors.append(primitiveFactors[0])<line_sep>n=n<floordiv>primitiveFactors[0]<block_end><else_stmt><block_start>primitiveFactors=primitiveFactors[1:]<block_end><block_end># When lowest factors are at the beginning (like 2)
# we use a special implementation of the loopcore template
# which removes some cycles.
# So, we will get (for instance) 2x8x8x8 instead of 8x8x8x2
factors.reverse()<for_stmt>f factors<block_start>originaln=originaln<floordiv>f<line_sep>length.append(originaln)<block_end>groupedfactors=groupFactors(factors)<line_sep><return>(groupedfactors factors length)<block_end># Apply the radix decomposition to compute the input -> output permutation
# computed by the FFT.
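# For example: radixReverse([2, 4], 8) returns [0, 4, 1, 5, 2, 6, 3, 7],
# the digit-reversed ordering for a radix-2 x radix-4 decomposition.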
<def_stmt>radixReverse f n<block_start>a=np.array(range(0 n)).reshape(f)<line_sep>r=list(range(0 len(f)))<line_sep>r.reverse()<line_sep>r=tuple(r)<line_sep>a=np.transpose(a r)<line_sep><return>(a.reshape(n))<block_end><def_stmt>radixPermutation factors n<block_start>a=radixReverse(factors n)<line_sep>tps=[]<line_sep>vectorizable=<true><for_stmt>c Permutation.from_sequence(a).cyclic_form<block_start><if_stmt>(len(c)<g>2)<block_start>vectorizable=<false><block_end><for_stmt>i range(len(c)-1 0 -1)# 2 because those are indexes in an array of complex numbers but
# with a real type.
<block_start>tps.append([2<times>c[i] 2<times>c[i-1]])<block_end><block_end><return>(np.array(tps dtype=int).flatten() vectorizable)<block_end># CFFT Twiddle table
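# Entry pairs (r[2k], r[2k+1]) are (cos(2*pi*k/n), -sin(2*pi*k/n)),
# i.e. the real and imaginary parts of exp(-2j*pi*k/n) for k = 0..n-1.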
<def_stmt>cfft_twiddle n<block_start>a=2.0<times>math.pi<times>np.linspace(0 n num=n endpoint=<false>)/n<line_sep>c=np.cos(-a)<line_sep>s=np.sin(-a)<line_sep>r=np.empty((c.size+s.size ) dtype=c.dtype)<line_sep>r[0::2]=c<line_sep>r[1::2]=s<line_sep><return>(r)<block_end># RFFT twiddle for the merge and split steps.
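# The interleaved entries are the real and imaginary parts of
# -1j*exp(-2j*pi*k/n) for k = 0..n/2-1, used when merging/splitting the half-length CFFT.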
<def_stmt>rfft_twiddle n<block_start>a=2.0j<times>math.pi<times>np.linspace(0 n<floordiv>2 num=n<floordiv>2 endpoint=<false>)/n<line_sep>z=-1.0j<times>np.exp(-a)<line_sep>r=z.view(dtype=np.float64)<line_sep><return>(r)<block_end># Compute the twiddle tables
<def_stmt>twiddle transform n<block_start><if_stmt>transform<eq>"CFFT"<block_start><return>(cfft_twiddle(n))<block_end><if_stmt>transform<eq>"RFFT"<block_start><return>(rfft_twiddle(n))<block_end><return>(<none>)<block_end>NB_ELEMS_PER_LINE=3<line_sep># Generate C array content for a given datatype
<def_stmt>printFloat64Array f n<block_start>nb=0<for_stmt>s n<block_start>print("%.20f, "%s end="" file=f)<line_sep>nb=nb+1<if_stmt>nb<eq>NB_ELEMS_PER_LINE<block_start>nb=0<line_sep>print("" file=f)<block_end><block_end><block_end><def_stmt>printFloat32Array f n<block_start>nb=0<for_stmt>s n<block_start>print("%.20ff, "%s end="" file=f)<line_sep>nb=nb+1<if_stmt>nb<eq>NB_ELEMS_PER_LINE<block_start>nb=0<line_sep>print("" file=f)<block_end><block_end><block_end><def_stmt>printFloat16Array f n<block_start>nb=0<for_stmt>s n<block_start>print("%.8ff16, "%s end="" file=f)<line_sep>nb=nb+1<if_stmt>nb<eq>NB_ELEMS_PER_LINE<block_start>nb=0<line_sep>print("" file=f)<block_end><block_end><block_end><def_stmt>printQ31Array f mode n<block_start>DSPE=<false><if_stmt>mode<eq>"DSP"<block_start>DSPE=<true><block_end>nb=0<for_stmt>s n<block_start>print(to_q31(s DSPE)+", " end="" file=f)<line_sep>nb=nb+1<if_stmt>nb<eq>NB_ELEMS_PER_LINE<block_start>nb=0<line_sep>print("" file=f)<block_end><block_end><block_end><def_stmt>printQ15Array f mode n<block_start>DSPE=<false><if_stmt>mode<eq>"DSP"<block_start>DSPE=<true><block_end>nb=0<for_stmt>s n<block_start>print(to_q15(s DSPE)+", " end="" file=f)<line_sep>nb=nb+1<if_stmt>nb<eq>NB_ELEMS_PER_LINE<block_start>nb=0<line_sep>print("" file=f)<block_end><block_end><block_end><def_stmt>printQ7Array f mode n<block_start>DSPE=<false><if_stmt>mode<eq>"DSP"<block_start>DSPE=<true><block_end>nb=0<for_stmt>s n<block_start>print(to_q7(s DSPE)+", " end="" file=f)<line_sep>nb=nb+1<if_stmt>nb<eq>NB_ELEMS_PER_LINE<block_start>nb=0<line_sep>print("" file=f)<block_end><block_end><block_end># Print a C array
# Using the type, dspe mode, and name.
# (dspe mode determines whether 0x8000 must be generated as 8000 or 8001
# to avoid sign issues when multiplying with the twiddles)
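# For example: printArray(f, "Q15", "DSP", "twiddle1", np.array([0.5, -0.5])) writes
# "__ALIGNED(8) const Q15 twiddle1[NB_TWIDDLE1]={" followed by "0x4000, 0xC000, };" to f.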
<def_stmt>printArray f ctype mode name a<block_start>nbSamples=len(a)<line_sep>define="NB_"+name.upper()<line_sep>n=a.reshape(len(a))<line_sep>print("__ALIGNED(8) const %s %s[%s]={"%(ctype name define) file=f)<if_stmt>ctype<eq>"float64_t"<block_start>printFloat64Array(f n)<block_end><if_stmt>ctype<eq>"float32_t"<block_start>printFloat32Array(f n)<block_end><if_stmt>ctype<eq>"float16_t"<block_start>printFloat16Array(f n)<block_end><if_stmt>ctype<eq>"Q31"<block_start>printQ31Array(f mode n)<block_end><if_stmt>ctype<eq>"Q15"<block_start>printQ15Array(f mode n)<block_end><if_stmt>ctype<eq>"Q7"<block_start>printQ7Array(f mode n)<block_end>print("};" file=f)<block_end># Convert a float value to a given datatype.
<def_stmt>convertToDatatype r ctype mode<block_start>DSPE=<false><if_stmt>mode<eq>"DSP"<block_start>DSPE=<true><block_end><if_stmt>ctype<eq>"float64_t"<block_start>result="%.20f"%r<block_end><if_stmt>ctype<eq>"float32_t"<block_start>result="%.20ff"%r<block_end><if_stmt>ctype<eq>"float16_t"<block_start>result="%.20ff16"%r<block_end><if_stmt>ctype<eq>"Q31"<block_start>result="Q31(%s)"%to_q31(r DSPE)<block_end><if_stmt>ctype<eq>"Q15"<block_start>result="Q15(%s)"%to_q15(r DSPE)<block_end><if_stmt>ctype<eq>"Q7"<block_start>result="Q7(%s)"%to_q7(r DSPE)<block_end><return>(result)<block_end><def_stmt>printArrayHeader f ctype name nbSamples<block_start>define="NB_"+name.upper()<line_sep>print("#define %s %d"%(define nbSamples) file=f)<line_sep>print("extern __ALIGNED(8) const %s %s[%s];\n"%(ctype name define) file=f)<block_end># Print UINT arrays for permutations.
<def_stmt>printUInt32Array f name a<block_start>nbSamples=len(a)<line_sep>define="NB_"+name.upper()<line_sep>n=a.reshape(len(a))<line_sep>print("__ALIGNED(8) const uint32_t %s[%s]={"%(name define) file=f)<line_sep>nb=0<for_stmt>s n<block_start>print("%d, "%s end="" file=f)<line_sep>nb=nb+1<if_stmt>nb<eq>NB_ELEMS_PER_LINE<block_start>nb=0<line_sep>print("" file=f)<block_end><block_end>print("};" file=f)<block_end><def_stmt>printUInt16Array f name a<block_start>nbSamples=len(a)<line_sep>define="NB_"+name.upper()<line_sep>n=a.reshape(len(a))<line_sep>print("__ALIGNED(8) const uint16_t %s[%s]={"%(name define) file=f)<line_sep>nb=0<for_stmt>s n<block_start>print("%d, "%s end="" file=f)<line_sep>nb=nb+1<if_stmt>nb<eq>NB_ELEMS_PER_LINE<block_start>nb=0<line_sep>print("" file=f)<block_end><block_end>print("};" file=f)<block_end><def_stmt>printUInt32ArrayHeader f name a<block_start>nbSamples=len(a)<line_sep>define="NB_"+name.upper()<line_sep>n=a.reshape(len(a))<line_sep>print("#define %s %d"%(define nbSamples) file=f)<line_sep>print("extern __ALIGNED(8) const uint32_t %s[%s];\n"%(name define) file=f)<block_end><def_stmt>printUInt16ArrayHeader f name a<block_start>nbSamples=len(a)<line_sep>define="NB_"+name.upper()<line_sep>n=a.reshape(len(a))<line_sep>print("#define %s %d"%(define nbSamples) file=f)<line_sep>print("extern __ALIGNED(8) const uint16_t %s[%s];\n"%(name define) file=f)<block_end><def_stmt>getCtype t<block_start><if_stmt>t<eq>'f64'<block_start><return>("float64_t")<block_end><if_stmt>t<eq>'f32'<block_start><return>("float32_t")<block_end><if_stmt>t<eq>'f16'<block_start><return>("float16_t")<block_end><if_stmt>t<eq>'q31'<block_start><return>("Q31")<block_end><if_stmt>t<eq>'q15'<block_start><return>("Q15")<block_end><if_stmt>t<eq>'q7'<block_start><return>("Q7")<block_end><return>("void")<block_end># Configuration structures for CFFT and RFFT
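# These % templates are filled in by Perm.writeFactorDesc and by
# Config.writeConfigHeader / writeConfigCode below.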
cfftconfig="""cfftconfig<%s> config%d={
.normalization=%s,
.nbPerms=%s,
.perms=perm%d,
.nbTwiddle=%s,
.twiddle=twiddle%d,
.nbGroupedFactors=%d,
.nbFactors=%d,
.factors=factors%d,
.lengths=lengths%d,
.format=%d,
.reversalVectorizable=%d
};"""<line_sep>rfftconfig="""rfftconfig<%s> config%d={
.nbTwiddle=%s,
.twiddle=twiddle%d
};"""<line_sep>fftconfigHeader="""extern %sconfig<%s> config%d;"""<line_sep>fftFactorArray="""const uint16_t factors%d[%d]=%s;\n"""<line_sep>fftLengthArray="""const uint16_t lengths%d[%d]=%s;\n"""<line_sep># Descriptino of a permutation
<class_stmt>Perm<block_start>PermID=0<line_sep># Grouped factors and factors.
<def_stmt>getFactors core nb datatype<block_start>_groupedFactors,_factors,_lens=getFactors(core.radix(datatype nb) nb)<line_sep><return>(_factors)<block_end><def_stmt>__init__ self core nb datatype<block_start>Perm.PermID=Perm.PermID+1<line_sep>self._nb=nb<line_sep>self._id=Perm.PermID<line_sep>self._radixUsed=set([])<line_sep>self._groupedFactors,self._factors,self._lens=getFactors(core.radix(datatype nb) nb)<line_sep>self._perms=<none><line_sep>self._core=core<line_sep>self._isvectorizable=<false><block_end><def_stmt>permutations self<block_start>_permFactors=list(itertools.chain(*[self._core.getPermFactor(x)<for>x self._factors]))<line_sep>#print(_permFactors)
self._perms,self._isvectorizable=radixPermutation(_permFactors[::-1] self._nb)<block_end>@property<def_stmt>isVectorizable self<block_start><return>(self._isvectorizable)<block_end>@property<def_stmt>permID self<block_start><return>(self._id)<block_end>@property<def_stmt>perms self<block_start><if_stmt>self._perms<is><not><none><block_start><return>(self._perms)<block_end><else_stmt><block_start>self.permutations()<line_sep><return>(self._perms)<block_end><block_end>@property<def_stmt>factors self<block_start><return>(self._factors)<block_end>@property<def_stmt>nbGroupedFactors self<block_start><return>(int(len(self._groupedFactors)/2))<block_end>@property<def_stmt>nbFactors self<block_start><return>(len(self._factors))<block_end><def_stmt>writePermHeader self h<block_start>printUInt16ArrayHeader(h "perm%d"%self.permID self.perms)<block_end><def_stmt>writePermCode self c<block_start>printUInt16Array(c "perm%d"%self.permID self.perms)<block_end><def_stmt>writeFactorDesc self c<block_start>radixList="{%s}"%joinStr([str(x)<for>x self._groupedFactors])<line_sep>lengthList="{%s}"%joinStr([str(x)<for>x self._lens])<line_sep>print(fftFactorArray%(self.permID 2<times>self.nbGroupedFactors radixList) file=c)<line_sep>print(fftLengthArray%(self.permID len(self._lens) lengthList) file=c)<line_sep><block_end><block_end><class_stmt>Twiddle<block_start>TwiddleId=0<def_stmt>__init__ self transform nb datatype mode<block_start>Twiddle.TwiddleId=Twiddle.TwiddleId+1<line_sep>self._id=Twiddle.TwiddleId<line_sep>self._datatype=datatype<line_sep>self._nb=nb<line_sep>self._twiddle=<none><line_sep>self._transform=transform<line_sep>self._mode=mode<block_end>@property<def_stmt>twiddleID self<block_start><return>(self._id)<block_end>@property<def_stmt>datatype self<block_start><return>(self._datatype)<block_end>@property<def_stmt>samples self<block_start><if_stmt>self._twiddle<is><none><block_start>self._twiddle=twiddle(self._transform self._nb)<block_end><return>(self._twiddle)<block_end>@property<def_stmt>nbSamples self<block_start><return>(self._nb)<block_end>@property<def_stmt>nbTwiddles self<block_start><if_stmt>self._transform<eq>"RFFT"<block_start><return>(self._nb<floordiv>2)<block_end><else_stmt><block_start><return>(self._nb)<block_end><block_end><def_stmt>writeTwidHeader self h<block_start>ctype=getCtype(self.datatype)<line_sep># Twiddle is a complex array so 2*nbSamples must be used
printArrayHeader(h ctype "twiddle%d"%self.twiddleID 2<times>self.nbTwiddles)<block_end><def_stmt>writeTwidCode self c<block_start>ctype=getCtype(self.datatype)<line_sep>printArray(c ctype self._mode "twiddle%d"%self.twiddleID self.samples)<block_end><block_end><class_stmt>Config<block_start>ConfigID=0<def_stmt>__init__ self transform twiddle perms coreMode<block_start>Config.ConfigID=Config.ConfigID+1<line_sep>self._id=Config.ConfigID<line_sep>self._twiddle=twiddle<line_sep>self._perms=perms<line_sep>self._transform=transform<line_sep>self._coreMode=coreMode<block_end>@property<def_stmt>transform self<block_start><return>(self._transform)<block_end>@property<def_stmt>configID self<block_start><return>(self._id)<block_end>@property<def_stmt>perms self<block_start><return>(self._perms)<block_end>@property<def_stmt>twiddle self<block_start><return>(self._twiddle)<block_end>@property<def_stmt>nbSamples self<block_start><return>(self.twiddle.nbSamples)<block_end><def_stmt>writeConfigHeader self c<block_start>ctype=getCtype(self.twiddle.datatype)<line_sep>print(fftconfigHeader%(self.transform.lower() ctype self.configID) file=c)<block_end><def_stmt>writeConfigCode self c<block_start>ctype=getCtype(self.twiddle.datatype)<line_sep>twiddleLen="NB_"+("twiddle%d"%self.twiddle.twiddleID).upper()<if_stmt>self.transform<eq>"RFFT"<block_start>print(rfftconfig%(ctype self.configID twiddleLen self.twiddle.twiddleID) file=c)<block_end><else_stmt><block_start>normfactor=1.0/self.twiddle.nbSamples<line_sep>normFactorStr=convertToDatatype(normfactor ctype self._coreMode)<line_sep>permsLen="NB_"+("perm%d"%self.perms.permID).upper()<line_sep>outputFormat=0<line_sep>#print(self.twiddle.datatype)
#print(self.twiddle.nbSamples)
#print(self.perms.factors)
# For fixed point, each stage will change the output format.
# We need to compute the final format of the FFT
# and record it in the initialization structure
# so that the user can easily know how to recover the
# input format (q31, q15). It is encoded as a shift value.
# The shift is what must be applied to recover the input format,
# but applying this shift will saturate the result in general.
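# For example, if formats.py assigned a growth of 2 bits per radix-4 stage
# (an illustrative assumption), a 64-point q15 CFFT decomposed as 4x4x4
# would end up with outputFormat = 6.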
<if_stmt>self.twiddle.datatype<eq>"q15"<or>self.twiddle.datatype<eq>"q31"<block_start><for_stmt>f self.perms.factors#print(f,self.twiddle.datatype,self._coreMode)
# The file "formats.py" is decribing the format of each radix
# and is used to compute the format of the FFT based
# on the decomposition of its length.
#
# Currently (since there is no vector version for fixed point)
# this does not take into account the format change that may
# be implied by vectorization, in case it differs
# from the scalar version.
<block_start>formatForSize=config.formats.formats[f][self._coreMode]<line_sep>outputFormat<augadd>formatForSize[self.twiddle.datatype]<block_end><block_end>vectorizable=0<if_stmt>self.perms.isVectorizable<block_start>vectorizable=1<block_end>print(cfftconfig%(ctype self.configID normFactorStr permsLen self.perms.permID twiddleLen self.twiddle.twiddleID self.perms.nbGroupedFactors self.perms.nbFactors self.perms.permID self.perms.permID outputFormat vectorizable) file=c)<block_end><block_end><block_end> |
<import_stmt>pytest<import_stmt>tensorflow<as>tf<import_from_stmt>nncf.tensorflow.tensor TFNNCFTensor<import_from_stmt>nncf.tensorflow.pruning.tensor_processor TFNNCFPruningTensorProcessor<line_sep>@pytest.mark.parametrize('device' ("CPU" 'GPU'))<def_stmt>test_create_tensor device<block_start><if_stmt><not>tf.config.list_physical_devices('GPU')<block_start><if_stmt>device<eq>'GPU'<block_start>pytest.skip('There are no available CUDA devices')<block_end><block_end>shape=[1 3 10 100]<line_sep>tensor=TFNNCFPruningTensorProcessor.ones(shape device)<assert_stmt>tf.is_tensor(tensor.tensor)<assert_stmt>tensor.tensor.device.split('/')[-1].split(':')[1]<eq>device<assert_stmt>list(tensor.tensor.shape)<eq>shape<block_end><def_stmt>test_repeat <block_start>tensor_data=[0. 1.]<line_sep>repeats=5<line_sep>tensor=TFNNCFTensor(tf.Variable(tensor_data))<line_sep>repeated_tensor=TFNNCFPruningTensorProcessor.repeat(tensor repeats=repeats)<line_sep>ref_repeated=[]<for_stmt>val tensor_data<block_start><for_stmt>_ range(repeats)<block_start>ref_repeated.append(val)<block_end><block_end><assert_stmt>tf.reduce_all(repeated_tensor.tensor<eq>tf.Variable(ref_repeated))<block_end><def_stmt>test_concat <block_start>tensor_data=[0. 1.]<line_sep>tensors=[TFNNCFTensor(tf.Variable(tensor_data))<for>_ range(3)]<line_sep>concatenated_tensor=TFNNCFPruningTensorProcessor.concatenate(tensors axis=0)<assert_stmt>tf.reduce_all(concatenated_tensor.tensor<eq>tf.Variable(tensor_data<times>3))<block_end>@pytest.mark.parametrize('all_close' [<false> <true>])<def_stmt>test_assert_all_close all_close<block_start>tensor_data=[0. 1.]<line_sep>tensors=[TFNNCFTensor(tf.Variable(tensor_data))<for>_ range(3)]<if_stmt><not>all_close<block_start>tensors.append(TFNNCFTensor(tf.Variable(tensor_data[::-1])))<with_stmt>pytest.raises(tf.errors.InvalidArgumentError)<block_start>TFNNCFPruningTensorProcessor.assert_allclose(tensors)<block_end><block_end><else_stmt><block_start>TFNNCFPruningTensorProcessor.assert_allclose(tensors)<block_end><block_end>@pytest.mark.parametrize('all_close' [<false> <true>])<def_stmt>test_elementwise_mask_propagation all_close<block_start>tensor_data=[0. 1.]<line_sep>tensors=[TFNNCFTensor(tf.Variable(tensor_data))<for>_ range(3)]<if_stmt><not>all_close<block_start>tensors.append(TFNNCFTensor(tf.Variable(tensor_data[::-1])))<with_stmt>pytest.raises(tf.errors.InvalidArgumentError)<block_start>TFNNCFPruningTensorProcessor.elementwise_mask_propagation(tensors)<block_end><block_end><else_stmt><block_start>result=TFNNCFPruningTensorProcessor.elementwise_mask_propagation(tensors)<for_stmt>t tensors<block_start>tf.debugging.assert_near(result.tensor t.tensor)<block_end><block_end><block_end> |
<import_stmt>logging<line_sep>logger1=logging.getLogger('package1.module1')<line_sep>logger2=logging.getLogger('package1.module2')<line_sep>logging.basicConfig(level=logging.WARNING)<line_sep>logger1.warning('This is a warning message')<line_sep>logger2.warning('This is a another warning message')<line_sep> |
"""
Transitional module for moving to the w3lib library.
For new code, always import from w3lib.html instead of this module
"""<import_stmt>warnings<import_from_stmt>scrapy.exceptions ScrapyDeprecationWarning<import_from_stmt>w3lib.html *<line_sep>warnings.warn("Module `scrapy.utils.markup` is deprecated. "<concat>"Please import from `w3lib.html` instead." ScrapyDeprecationWarning stacklevel=2)<line_sep> |
<import_stmt>KratosMultiphysics<import_stmt>KratosMultiphysics.FemToDemApplication.MainFemDem<as>MainFemDem<import_stmt>KratosMultiphysics.FemToDemApplication<as>KratosFemDem<import_stmt>KratosMultiphysics.DEMApplication<as>DEM<import_stmt>KratosMultiphysics.DemStructuresCouplingApplication<as>DEM_Structures<line_sep># Python script created to modify the existing one due to the coupling of the DEM app in 2D
<class_stmt>FEM_for_coupling_Solution(MainFemDem.FEM_Solution)<block_start><def_stmt>Info self<block_start>print("FEM part of the FEMDEM application")<block_end><def_stmt>Initialize self#### INITIALIZE ####
# Add variables (always before importing the model part)
<block_start>self.solver.AddVariables()<line_sep># For remeshing purposes
self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.NODAL_STRESS_VECTOR)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_AREA)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(DEM.DEM_NODAL_AREA)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.NODAL_H)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.EQUIVALENT_NODAL_STRESS)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.EQUIVALENT_NODAL_STRESS_GRADIENT)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.NODAL_DAMAGE)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.EQUIVALENT_STRESS_VM)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(KratosFemDem.DISPLACEMENT_INCREMENT)<line_sep># For the DE-FE contact model
self.main_model_part.AddNodalSolutionStepVariable(DEM.DEM_PRESSURE)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(KratosMultiphysics.TOTAL_FORCES)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(DEM.DELTA_DISPLACEMENT)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(DEM.CONTACT_FORCES)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(DEM.ELASTIC_FORCES)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(DEM.TANGENTIAL_ELASTIC_FORCES)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(DEM.SHEAR_STRESS)<line_sep># For the Substepping
self.main_model_part.AddNodalSolutionStepVariable(DEM_Structures.BACKUP_LAST_STRUCTURAL_VELOCITY)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(DEM_Structures.BACKUP_LAST_STRUCTURAL_DISPLACEMENT)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(DEM_Structures.SMOOTHED_STRUCTURAL_VELOCITY)<line_sep>self.main_model_part.AddNodalSolutionStepVariable(DEM.CONTACT_IMPULSE)<line_sep># Read model_part (note: the buffer_size is set here) (restart is read here)
self.solver.ImportModelPart()<line_sep># Add dofs (always after importing the model part)
<if_stmt>((self.main_model_part.ProcessInfo).Has(KratosMultiphysics.IS_RESTARTED))<block_start><if_stmt>(self.main_model_part.ProcessInfo[KratosMultiphysics.IS_RESTARTED]<eq><false>)<block_start>self.solver.AddDofs()<block_end><block_end><else_stmt><block_start>self.solver.AddDofs()<block_end># Add materials (assign material to model_parts if Materials.json exists)
self.AddMaterials()<line_sep># Add processes
self.model_processes=self.AddProcesses()<line_sep>self.model_processes.ExecuteInitialize()<line_sep># Print model_part and properties
<if_stmt>(self.echo_level<g>1)<block_start>print("")<line_sep>print(self.main_model_part)<for_stmt>properties self.main_model_part.Properties<block_start>print(properties)<block_end><block_end>#### START SOLUTION ####
self.computing_model_part=self.solver.GetComputingModelPart()<if_stmt>(self.ProjectParameters["solver_settings"]["strategy_type"].GetString()<eq>"arc_length")<block_start>neighbour_elemental_finder=KratosMultiphysics.FindElementalNeighboursProcess(self.main_model_part 2 5)<line_sep>neighbour_elemental_finder.Execute()<line_sep>self.InitializeIntegrationPointsVariables()<line_sep>self.model_processes.ExecuteBeforeSolutionLoop()<line_sep>self.model_processes.ExecuteInitializeSolutionStep()<line_sep>self.using_arc_length=<true><block_end><else_stmt><block_start>self.using_arc_length=<false><block_end>## Sets strategies, builders, linear solvers, schemes and solving info, and fills the buffer
self.solver.Initialize()<line_sep>#self.solver.InitializeStrategy()
self.solver.SetEchoLevel(self.echo_level)<line_sep># Initialize GiD I/O (gid outputs, file_lists)
self.SetGraphicalOutput()<line_sep>self.GraphicalOutputExecuteInitialize()<line_sep>print(" ")<line_sep>print("=================================================")<line_sep>print(" - Kratos FemDem Application Calculation Start - ")<line_sep>print("=================================================")<line_sep>self.model_processes.ExecuteBeforeSolutionLoop()<line_sep>self.GraphicalOutputExecuteBeforeSolutionLoop()<line_sep># Set time settings
self.step=self.main_model_part.ProcessInfo[KratosMultiphysics.STEP]<line_sep>self.time=self.main_model_part.ProcessInfo[KratosMultiphysics.TIME]<line_sep>self.end_time=self.ProjectParameters["problem_data"]["end_time"].GetDouble()<line_sep>self.delta_time=self.ComputeDeltaTime()<block_end>#============================================================================================================================
<def_stmt>ComputeDeltaTime self<block_start><if_stmt>self.ProjectParameters["problem_data"].Has("time_step")<block_start><return>self.ProjectParameters["problem_data"]["time_step"].GetDouble()<block_end><elif_stmt>self.ProjectParameters["problem_data"].Has("variable_time_steps")<block_start>current_time=self.main_model_part.ProcessInfo[KratosMultiphysics.TIME]<for_stmt>key self.ProjectParameters["problem_data"]["variable_time_steps"].keys()<block_start>interval_settings=self.ProjectParameters["problem_data"]["variable_time_steps"][key]<line_sep>interval=KratosMultiphysics.IntervalUtility(interval_settings)<line_sep># Getting the time step of the interval
<if_stmt>interval.IsInInterval(current_time)<block_start><return>interval_settings["time_step"].GetDouble()<block_end># If we arrive here we raise an error because the intervals are not well defined
<raise>Exception("::[MechanicalSolver]:: Time stepping not well defined!")<block_end><block_end><else_stmt><block_start><raise>Exception("::[MechanicalSolver]:: Time stepping not defined!")<block_end><block_end>#============================================================================================================================
<def_stmt>InitializeIntegrationPointsVariables self<block_start>utils=KratosMultiphysics.VariableUtils()<line_sep>elements=self.main_model_part.Elements<line_sep>self.domain_size=self.main_model_part.ProcessInfo[KratosMultiphysics.DOMAIN_SIZE]<line_sep>nodes=self.main_model_part.Nodes<line_sep>utils.SetNonHistoricalVariable(KratosFemDem.GENERATE_DEM <false> elements)<line_sep>utils.SetNonHistoricalVariable(KratosFemDem.STRESS_THRESHOLD 0.0 elements)<line_sep>utils.SetNonHistoricalVariable(KratosFemDem.DAMAGE_ELEMENT 0.0 elements)<line_sep>utils.SetNonHistoricalVariable(KratosFemDem.PRESSURE_EXPANDED 0 elements)<line_sep>utils.SetNonHistoricalVariable(KratosFemDem.IS_SKIN 0 elements)<line_sep>utils.SetNonHistoricalVariable(KratosFemDem.SMOOTHING 0 elements)<line_sep>utils.SetNonHistoricalVariable(KratosFemDem.RECOMPUTE_NEIGHBOURS <true> elements)<if_stmt>self.domain_size<eq>3<block_start>utils.SetNonHistoricalVariable(KratosFemDem.VOLUME_COUNTED <false> elements)<line_sep>utils.SetNonHistoricalVariable(KratosFemDem.STRESS_VECTOR [0.0 0.0 0.0 0.0 0.0 0.0] elements)<line_sep>utils.SetNonHistoricalVariable(KratosFemDem.STRAIN_VECTOR [0.0 0.0 0.0 0.0 0.0 0.0] elements)<line_sep>utils.SetNonHistoricalVariable(KratosFemDem.STRESS_VECTOR_INTEGRATED [0.0 0.0 0.0 0.0 0.0 0.0] elements)<block_end><else_stmt># 2D
<block_start>utils.SetNonHistoricalVariable(KratosFemDem.STRESS_VECTOR [0.0 0.0 0.0] elements)<line_sep>utils.SetNonHistoricalVariable(KratosFemDem.STRAIN_VECTOR [0.0 0.0 0.0] elements)<line_sep>utils.SetNonHistoricalVariable(KratosFemDem.STRESS_VECTOR_INTEGRATED [0.0 0.0 0.0] elements)<block_end># if self.PressureLoad:
# utils.SetNonHistoricalVariable(KratosFemDem.PRESSURE_ID, 0, nodes)
<block_end><block_end> |
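# Added illustrative ProjectParameters fragment for ComputeDeltaTime above (assumption: the
# "interval" key follows the usual KratosMultiphysics.IntervalUtility convention, which may
# differ between Kratos versions):
# "problem_data": {
#     "end_time": 1.0,
#     "variable_time_steps": {
#         "ramp_up": {"interval": [0.0, 0.1], "time_step": 1.0e-4},
#         "rest":    {"interval": [0.1, "End"], "time_step": 1.0e-3}
#     }
# }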
<import_from_stmt>asynctest TestCase<as>AsyncTestCase<import_stmt>json<import_from_stmt>...storage.error StorageNotFoundError<import_from_stmt>..util bytes_to_b58<import_from_stmt>..key_type KeyType<import_from_stmt>...core.in_memory InMemoryProfile<import_from_stmt>...storage.in_memory InMemoryStorage<import_from_stmt>..key_pair KeyPairStorageManager KEY_PAIR_STORAGE_TYPE<class_stmt>TestKeyPairStorageManager(AsyncTestCase)<block_start>test_public_key=b"somepublickeybytes"<line_sep>test_secret=b"verysecretkey"<async_keyword><def_stmt>setUp self<block_start>self.profile=InMemoryProfile.test_profile()<line_sep>self.store=InMemoryStorage(self.profile)<line_sep>self.key_pair_mgr=KeyPairStorageManager(self.store)<block_end><async_keyword><def_stmt>test_create_key_pair self<block_start><await>self.key_pair_mgr.store_key_pair(public_key=self.test_public_key secret_key=self.test_secret key_type=KeyType.ED25519 )<line_sep>verkey=bytes_to_b58(self.test_public_key)<line_sep>record=<await>self.store.find_record(KEY_PAIR_STORAGE_TYPE {"verkey":verkey})<assert_stmt>record<line_sep>value=json.loads(record.value)<assert_stmt>record.tags<eq>{"verkey":verkey "key_type":KeyType.ED25519.key_type}<assert_stmt>value["verkey"]<eq>verkey<assert_stmt>value["secret_key"]<eq>bytes_to_b58(self.test_secret)<assert_stmt>value["metadata"]<eq>{}<assert_stmt>value["key_type"]<eq>KeyType.ED25519.key_type<block_end><async_keyword><def_stmt>test_get_key_pair self<block_start><await>self.key_pair_mgr.store_key_pair(public_key=self.test_public_key secret_key=self.test_secret key_type=KeyType.ED25519 )<line_sep>verkey=bytes_to_b58(self.test_public_key)<line_sep>key_pair=<await>self.key_pair_mgr.get_key_pair(verkey)<assert_stmt>key_pair["verkey"]<eq>verkey<assert_stmt>key_pair["secret_key"]<eq>bytes_to_b58(self.test_secret)<assert_stmt>key_pair["metadata"]<eq>{}<assert_stmt>key_pair["key_type"]<eq>KeyType.ED25519.key_type<block_end><async_keyword><def_stmt>test_get_key_pair_x_not_found self<block_start><with_stmt>self.assertRaises(StorageNotFoundError)<block_start><await>self.key_pair_mgr.get_key_pair("not_existing_verkey")<block_end><block_end><async_keyword><def_stmt>test_delete_key_pair self<block_start><await>self.key_pair_mgr.store_key_pair(public_key=self.test_public_key secret_key=self.test_secret key_type=KeyType.ED25519 )<line_sep>verkey=bytes_to_b58(self.test_public_key)<line_sep>record=<await>self.store.find_record(KEY_PAIR_STORAGE_TYPE {"verkey":verkey})<assert_stmt>record<line_sep><await>self.key_pair_mgr.delete_key_pair(verkey)<line_sep># should be deleted now
<with_stmt>self.assertRaises(StorageNotFoundError)<block_start><await>self.key_pair_mgr.delete_key_pair(verkey)<block_end><block_end><async_keyword><def_stmt>test_delete_key_pair_x_not_found self<block_start><with_stmt>self.assertRaises(StorageNotFoundError)<block_start><await>self.key_pair_mgr.delete_key_pair("non_existing_verkey")<block_end><block_end><async_keyword><def_stmt>test_update_key_pair_metadata self<block_start><await>self.key_pair_mgr.store_key_pair(public_key=self.test_public_key secret_key=self.test_secret key_type=KeyType.ED25519 metadata={"some":"data"} )<line_sep>verkey=bytes_to_b58(self.test_public_key)<line_sep>record=<await>self.store.find_record(KEY_PAIR_STORAGE_TYPE {"verkey":verkey})<assert_stmt>record<line_sep>value=json.loads(record.value)<assert_stmt>value["metadata"]<eq>{"some":"data"}<line_sep><await>self.key_pair_mgr.update_key_pair_metadata(verkey {"some_other":"data"})<line_sep>record=<await>self.store.find_record(KEY_PAIR_STORAGE_TYPE {"verkey":verkey})<assert_stmt>record<line_sep>value=json.loads(record.value)<assert_stmt>value["metadata"]<eq>{"some_other":"data"}<block_end><async_keyword><def_stmt>test_update_key_pair_metadata_x_not_found self<block_start><with_stmt>self.assertRaises(StorageNotFoundError)<block_start><await>self.key_pair_mgr.update_key_pair_metadata("non_existing_verkey" {})<block_end><block_end><block_end> |
'''
Problem:-
Given a string s, find the longest palindromic substring in s.
You may assume that the maximum length of s is 1000.
Example 1:
Input: "babad"
Output: "bab"
Note: "aba" is also a valid answer.
'''<class_stmt>Solution<block_start><def_stmt>longestPalindrome self s:str<arrow>str<block_start>res=""<line_sep>resLen=0<for_stmt>i range(len(s))# odd length
<block_start>l,r=i i<while_stmt>l<ge>0<and>r<l>len(s)<and>s[l]<eq>s[r]<block_start><if_stmt>(r-l+1)<g>resLen<block_start>res=s[l:r+1]<line_sep>resLen=r-l+1<block_end>l<augsub>1<line_sep>r<augadd>1<block_end># even length
l,r=i i+1<while_stmt>l<ge>0<and>r<l>len(s)<and>s[l]<eq>s[r]<block_start><if_stmt>(r-l+1)<g>resLen<block_start>res=s[l:r+1]<line_sep>resLen=r-l+1<block_end>l<augsub>1<line_sep>r<augadd>1<block_end><block_end><return>res<block_end><block_end> |
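# Added illustrative smoke test (not part of the original solution): exercises the
# expand-around-center approach above on the example inputs.
<if_stmt>__name__<eq>"__main__"<block_start><assert_stmt>Solution().longestPalindrome("babad")<in>("bab" "aba")<line_sep><assert_stmt>Solution().longestPalindrome("cbbd")<eq>"bb"<line_sep>print("longestPalindrome examples passed")<block_end>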
<import_from_stmt>.save_views save_3d_views<import_from_stmt>.panels plot_panels<import_from_stmt>._default_params params_inflatedless_lateral_medial_ventral params_flatmap_lateral_medial params_occipital_triple_view params_inflated_dorsal_lateral_medial_ventral <line_sep>__all__=["save_3d_views" "plot_panels" "params_flatmap_lateral_medial" "params_occipital_triple_view" "params_inflatedless_lateral_medial_ventral" "params_inflated_dorsal_lateral_medial_ventral" ]<line_sep> |
<import_stmt>const<def_stmt>corpora2idx sents ind2idx<block_start><return>[[ind2idx[w]<if>w<in>ind2idx<else>const.UNK<for>w s]<for>s sents]<block_end> |
<import_from_stmt>flask Flask render_template session redirect url_for<line_sep>app=Flask(__name__)<line_sep>app.config['SECRET_KEY']='<PASSWORD>'<line_sep>@app.route('/')<def_stmt>index <block_start><return>render_template('index.html')<block_end>@app.route('/set-background/<mode>')<def_stmt>set_background mode<block_start>session['mode']=mode<line_sep><return>redirect(url_for('index'))<block_end>@app.route('/drop-session')<def_stmt>drop_session <block_start>session.pop('mode' <none>)<line_sep><return>redirect(url_for('index'))<block_end> |
# Copyright 2017-2019 typed_python Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>typed_python.compiler.type_wrappers.wrapper Wrapper<import_from_stmt>typed_python.compiler.typed_expression TypedExpression<import_from_stmt>typed_python._types refTo<import_stmt>typed_python.compiler.native_ast<as>native_ast<import_stmt>typed_python.compiler<line_sep>typeWrapper=<lambda>t:typed_python.compiler.python_object_representation.typedPythonTypeToTypeWrapper(t)<class_stmt>RefToObjectWrapper(Wrapper)<block_start>is_pod=<true><line_sep>is_empty=<false><line_sep>is_pass_by_ref=<false><def_stmt>__init__ self<block_start>super().__init__(refTo)<block_end><def_stmt>getNativeLayoutType self<block_start><return>native_ast.Type.Void()<block_end>@Wrapper.unwrapOneOfAndValue<def_stmt>convert_call self context expr args kwargs<block_start><if_stmt>len(args)<ne>1<or>kwargs<block_start><return>super().convert_call(context expr args kwargs)<block_end><return>args[0].expr_type.convert_refTo(context args[0])<block_end><block_end><class_stmt>RefToWrapper(Wrapper)<block_start>is_pod=<true><line_sep>is_empty=<false><line_sep>is_pass_by_ref=<false><def_stmt>__init__ self t<block_start>super().__init__(t)<line_sep>self.layoutType=typeWrapper(t.ElementType).getNativeLayoutType().pointer()<block_end><def_stmt>underlyingTypeWrapper self<block_start><return>typeWrapper(self.typeRepresentation.ElementType)<block_end><def_stmt>getNativeLayoutType self<block_start><return>self.layoutType<block_end><def_stmt>convert_assign self context target toStore<block_start><assert_stmt>target.isReference<line_sep>context.pushEffect(target.expr.store(toStore.nonref_expr))<block_end><def_stmt>convert_copy_initialize self context target toStore<block_start><assert_stmt>target.isReference<line_sep>context.pushEffect(target.expr.store(toStore.nonref_expr))<block_end><def_stmt>deref self instance<block_start><return>TypedExpression(instance.context instance.nonref_expr typeWrapper(self.typeRepresentation.ElementType) <true>)<block_end><def_stmt>convert_destroy self context instance<block_start><pass><block_end><def_stmt>_can_convert_to_type self targetType conversionLevel<block_start><return>self.underlyingTypeWrapper._can_convert_to_type(targetType conversionLevel)<block_end><def_stmt>convert_to_type_with_target self context instance targetVal conversionLevel mayThrowOnFailure=<false><block_start><return>self.deref(instance).convert_to_type_with_target(targetVal conversionLevel)<block_end><def_stmt>convert_bin_op self context left op right inplace<block_start><return>self.deref(left).convert_bin_op(op right inplace)<block_end><def_stmt>convert_unary_op self context left op<block_start><return>self.deref(left).convert_unary_op(op)<block_end><def_stmt>convert_attribute self context instance attr<block_start><return>self.deref(instance).convert_attribute(attr)<block_end><def_stmt>convert_getitem self context instance key<block_start><return>self.deref(instance).convert_getitem(key)<block_end><def_stmt>convert_setitem self context instance key val<block_start><return>self.deref(instance).convert_setitem(key val)<block_end><def_stmt>convert_method_call self context instance methodname args kwargs<block_start><return>self.deref(instance).convert_method_call(methodname args kwargs)<block_end><def_stmt>convert_set_attribute self context instance attribute value<block_start><return>self.deref(instance).convert_set_attribute(attribute value)<block_end><def_stmt>convert_hash self context expr<block_start><return>self.deref(expr).convert_hash()<block_end><def_stmt>convert_call self context expr args 
kwargs<block_start>self.deref(expr).convert_call(args kwargs)<block_end><def_stmt>convert_len self context expr<block_start>self.deref(expr).convert_len()<block_end><def_stmt>convert_abs self context expr<block_start>self.deref(expr).convert_abs()<block_end><def_stmt>convert_repr self context expr<block_start>self.deref(expr).convert_repr()<block_end><def_stmt>convert_builtin self f context expr a1=<none><block_start>self.deref(expr).convert_builtin(a1)<block_end><def_stmt>convert_comparison self context l op r<block_start>self.deref(l).convert_comparison(op r)<block_end><def_stmt>convert_bin_op_reverse self context r op l inplace<block_start>self.deref(r).convert_bin_op_reverse(op l inplace)<block_end><block_end> |
<import_stmt>os<import_stmt>salt.utils.platform<import_from_stmt>tests.support.mock patch<import_from_stmt>tests.support.unit TestCase skipIf<try_stmt><block_start><import_stmt>salt.utils.win_system<as>win_system<block_end><except_stmt>Exception<as>exc# pylint: disable=broad-except
<block_start>win_system=exc<block_end><class_stmt>WinSystemImportTestCase(TestCase)<block_start>"""
Simply importing should not raise an error, especially on Linux
"""<def_stmt>test_import self<block_start><if_stmt>isinstance(win_system Exception)<block_start><raise>Exception("Importing win_system caused traceback: {}".format(win_system))<block_end><block_end><block_end>@skipIf(<not>salt.utils.platform.is_windows() "Only test on Windows systems")<class_stmt>WinSystemTestCase(TestCase)<block_start>"""
Test cases for salt.utils.win_system
"""<def_stmt>test_get_computer_name self<block_start>"""
Should return the computer name
"""<with_stmt>patch("win32api.GetComputerNameEx" return_value="FAKENAME")<block_start>self.assertEqual(win_system.get_computer_name() "FAKENAME")<block_end><block_end><def_stmt>test_get_computer_name_fail self<block_start>"""
If it fails, it returns False
"""<with_stmt>patch("win32api.GetComputerNameEx" return_value=<none>)<block_start>self.assertFalse(win_system.get_computer_name())<block_end><block_end><def_stmt>test_get_pending_computer_name self<block_start>"""
Will return the pending computer name if one is pending
"""<line_sep>expected="PendingName"<line_sep>patch_value={"vdata":expected}<with_stmt>patch("salt.utils.win_reg.read_value" return_value=patch_value)<block_start>result=win_system.get_pending_computer_name()<line_sep>self.assertEqual(expected result)<block_end><block_end><def_stmt>test_get_pending_computer_name_none self<block_start>"""
Will return the None if the pending computer is the current name
"""<line_sep>patch_value={"vdata":os.environ.get("COMPUTERNAME")}<with_stmt>patch("salt.utils.win_reg.read_value" return_value=patch_value)<block_start>self.assertIsNone(win_system.get_pending_computer_name())<block_end><block_end><def_stmt>test_get_pending_computer_name_false self<block_start>"""
Will return False if there is no pending computer name
"""<with_stmt>patch("salt.utils.win_reg.read_value" return_value=<false>)<block_start>self.assertIsNone(win_system.get_pending_computer_name())<block_end><block_end><def_stmt>test_get_pending_component_servicing self<block_start>"""
If none of the keys exist, should return False
"""<with_stmt>patch("salt.utils.win_reg.key_exists" return_value=<false>)<block_start>self.assertFalse(win_system.get_pending_component_servicing())<block_end><block_end><def_stmt>test_get_pending_component_servicing_true_1 self<block_start>"""
If the RebootPending key exists, should return True
"""<with_stmt>patch("salt.utils.win_reg.key_exists" side_effect=[<true>])<block_start>self.assertTrue(win_system.get_pending_component_servicing())<block_end><block_end><def_stmt>test_get_pending_component_servicing_true_2 self<block_start>"""
If the RebootInProgress key exists, should return True
"""<with_stmt>patch("salt.utils.win_reg.key_exists" side_effect=[<false> <true>])<block_start>self.assertTrue(win_system.get_pending_component_servicing())<block_end><block_end><def_stmt>test_get_pending_component_servicing_true_3 self<block_start>"""
If the PackagesPending key exists, should return True
"""<with_stmt>patch("salt.utils.win_reg.key_exists" side_effect=[<false> <false> <true>])<block_start>self.assertTrue(win_system.get_pending_component_servicing())<block_end><block_end><def_stmt>test_get_pending_domain_join self<block_start>"""
If none of the keys exist, should return False
"""<with_stmt>patch("salt.utils.win_reg.key_exists" return_value=<false>)<block_start>self.assertFalse(win_system.get_pending_domain_join())<block_end><block_end><def_stmt>test_get_pending_domain_join_true_1 self<block_start>"""
If the AvoidSpnSet key exists, should return True
"""<with_stmt>patch("salt.utils.win_reg.key_exists" side_effect=[<true>])<block_start>self.assertTrue(win_system.get_pending_domain_join())<block_end><block_end><def_stmt>test_get_pending_domain_join_true_2 self<block_start>"""
If the JoinDomain key exists, should return True
"""<with_stmt>patch("salt.utils.win_reg.key_exists" side_effect=[<false> <true>])<block_start>self.assertTrue(win_system.get_pending_domain_join())<block_end><block_end><def_stmt>test_get_pending_file_rename_false_1 self<block_start>"""
If none of the value names exist, should return False
"""<line_sep>patched_return={"success":<false>}<with_stmt>patch("salt.utils.win_reg.read_value" return_value=patched_return)<block_start>self.assertFalse(win_system.get_pending_file_rename())<block_end><block_end><def_stmt>test_get_pending_file_rename_false_2 self<block_start>"""
If one of the value names exists but is not set, should return False
"""<line_sep>patched_return={"success":<true> "vdata":"(value not set)"}<with_stmt>patch("salt.utils.win_reg.read_value" return_value=patched_return)<block_start>self.assertFalse(win_system.get_pending_file_rename())<block_end><block_end><def_stmt>test_get_pending_file_rename_true_1 self<block_start>"""
If one of the value names exists and is set, should return True
"""<line_sep>patched_return={"success":<true> "vdata":"some value"}<with_stmt>patch("salt.utils.win_reg.read_value" return_value=patched_return)<block_start>self.assertTrue(win_system.get_pending_file_rename())<block_end><block_end><def_stmt>test_get_pending_servermanager_false_1 self<block_start>"""
If the CurrentRebootAttempts value name does not exist, should return
False
"""<line_sep>patched_return={"success":<false>}<with_stmt>patch("salt.utils.win_reg.read_value" return_value=patched_return)<block_start>self.assertFalse(win_system.get_pending_servermanager())<block_end><block_end><def_stmt>test_get_pending_servermanager_false_2 self<block_start>"""
If the CurrentRebootAttempts value name exists but is not an integer,
should return False
"""<line_sep>patched_return={"success":<true> "vdata":"(value not set)"}<with_stmt>patch("salt.utils.win_reg.read_value" return_value=patched_return)<block_start>self.assertFalse(win_system.get_pending_file_rename())<block_end><block_end><def_stmt>test_get_pending_servermanager_true self<block_start>"""
If the CurrentRebootAttempts value name exists and is an integer,
should return True
"""<line_sep>patched_return={"success":<true> "vdata":1}<with_stmt>patch("salt.utils.win_reg.read_value" return_value=patched_return)<block_start>self.assertTrue(win_system.get_pending_file_rename())<block_end><block_end><def_stmt>test_get_pending_dvd_reboot self<block_start>"""
If the DVDRebootSignal value name does not exist, should return False
"""<with_stmt>patch("salt.utils.win_reg.value_exists" return_value=<false>)<block_start>self.assertFalse(win_system.get_pending_dvd_reboot())<block_end><block_end><def_stmt>test_get_pending_dvd_reboot_true self<block_start>"""
If the DVDRebootSignal value name exists, should return True
"""<with_stmt>patch("salt.utils.win_reg.value_exists" return_value=<true>)<block_start>self.assertTrue(win_system.get_pending_dvd_reboot())<block_end><block_end><def_stmt>test_get_pending_update self<block_start>"""
If none of the keys exist and there are no subkeys, should return False
"""<with_stmt>patch("salt.utils.win_reg.key_exists" return_value=<false>) patch("salt.utils.win_reg.list_keys" return_value=[])<block_start>self.assertFalse(win_system.get_pending_update())<block_end><block_end><def_stmt>test_get_pending_update_true_1 self<block_start>"""
If the RebootRequired key exists, should return True
"""<with_stmt>patch("salt.utils.win_reg.key_exists" side_effect=[<true>])<block_start>self.assertTrue(win_system.get_pending_update())<block_end><block_end><def_stmt>test_get_pending_update_true_2 self<block_start>"""
If the PostRebootReporting key exists, should return True
"""<with_stmt>patch("salt.utils.win_reg.key_exists" side_effect=[<false> <true>])<block_start>self.assertTrue(win_system.get_pending_update())<block_end><block_end><def_stmt>test_get_reboot_required_witnessed_false_1 self<block_start>"""
The ``Reboot Required`` value name does not exist, should return False
"""<line_sep>patched_data={"vdata":<none>}<with_stmt>patch("salt.utils.win_reg.read_value" return_value=patched_data)<block_start>self.assertFalse(win_system.get_reboot_required_witnessed())<block_end><block_end><def_stmt>test_get_reboot_required_witnessed_false_2 self<block_start>"""
The ``Reboot required`` value name is set to 0, should return False
"""<line_sep>patched_data={"vdata":0}<with_stmt>patch("salt.utils.win_reg.read_value" return_value=patched_data)<block_start>self.assertFalse(win_system.get_reboot_required_witnessed())<block_end><block_end><def_stmt>test_get_reboot_required_witnessed_true self<block_start>"""
The ``Reboot required`` value name is set to 1, should return True
"""<line_sep>patched_data={"vdata":1}<with_stmt>patch("salt.utils.win_reg.read_value" return_value=patched_data)<block_start>self.assertTrue(win_system.get_reboot_required_witnessed())<block_end><block_end><def_stmt>test_set_reboot_required_witnessed self<block_start>"""
The call to ``set_value`` should return True and should be called with
the specified parameters
"""<with_stmt>patch("salt.utils.win_reg.set_value" return_value=<true>)<as>sv<block_start>self.assertTrue(win_system.set_reboot_required_witnessed())<line_sep>sv.assert_called_once_with(hive="HKLM" key=win_system.MINION_VOLATILE_KEY volatile=<true> vname=win_system.REBOOT_REQUIRED_NAME vdata=1 vtype="REG_DWORD" )<block_end><block_end><def_stmt>test_get_pending_update_exe_volatile_false_1 self<block_start>"""
If UpdateExeVolatile value name is 0, should return False
"""<line_sep>patched_data={"success":<true> "vdata":0}<with_stmt>patch("salt.utils.win_reg.read_value" return_value=patched_data)<block_start>self.assertFalse(win_system.get_pending_update_exe_volatile())<block_end><block_end><def_stmt>test_get_pending_update_exe_volatile_false_2 self<block_start>"""
If UpdateExeVolatile value name is not present, should return False
"""<line_sep>patched_data={"success":<false>}<with_stmt>patch("salt.utils.win_reg.read_value" return_value=patched_data)<block_start>self.assertFalse(win_system.get_pending_update_exe_volatile())<block_end><block_end><def_stmt>test_get_pending_update_exe_volatile_true_1 self<block_start>"""
If UpdateExeVolatile value name is not 0, should return True
"""<line_sep>patched_data={"success":<true> "vdata":1}<with_stmt>patch("salt.utils.win_reg.read_value" return_value=patched_data)<block_start>self.assertTrue(win_system.get_pending_update_exe_volatile())<block_end><block_end><def_stmt>test_get_pending_reboot self<block_start>"""
If all functions return Falsy data, should return False
"""<with_stmt>patch("salt.utils.win_system.get_pending_update" return_value=<false>) patch("salt.utils.win_update.needs_reboot" return_value=<false>) patch("salt.utils.win_system.get_pending_update_exe_volatile" return_value=<false>) patch("salt.utils.win_system.get_pending_file_rename" return_value=<false>) patch("salt.utils.win_system.get_pending_servermanager" return_value=<false>) patch("salt.utils.win_system.get_pending_component_servicing" return_value=<false>) patch("salt.utils.win_system.get_pending_dvd_reboot" return_value=<false>) patch("salt.utils.win_system.get_reboot_required_witnessed" return_value=<false>) patch("salt.utils.win_system.get_pending_computer_name" return_value=<none>) patch("salt.utils.win_system.get_pending_domain_join" return_value=<false>)<block_start>self.assertFalse(win_system.get_pending_reboot())<block_end><block_end><def_stmt>test_get_pending_reboot_true_1 self<block_start>"""
If any of the boolean-returning functions returns True, should return True
"""<with_stmt>patch("salt.utils.win_system.get_pending_update" return_value=<false>) patch("salt.utils.win_update.needs_reboot" return_value=<false>) patch("salt.utils.win_system.get_pending_update_exe_volatile" return_value=<false>) patch("salt.utils.win_system.get_pending_file_rename" return_value=<false>) patch("salt.utils.win_system.get_pending_servermanager" return_value=<false>) patch("salt.utils.win_system.get_pending_component_servicing" return_value=<false>) patch("salt.utils.win_system.get_pending_dvd_reboot" return_value=<false>) patch("salt.utils.win_system.get_reboot_required_witnessed" return_value=<false>) patch("salt.utils.win_system.get_pending_computer_name" return_value=<none>) patch("salt.utils.win_system.get_pending_domain_join" return_value=<true>)<block_start>self.assertTrue(win_system.get_pending_reboot())<block_end><block_end><def_stmt>test_get_pending_reboot_true_2 self<block_start>"""
If a computer name is returned, should return True
"""<with_stmt>patch("salt.utils.win_system.get_pending_update" return_value=<false>) patch("salt.utils.win_update.needs_reboot" return_value=<false>) patch("salt.utils.win_system.get_pending_update_exe_volatile" return_value=<false>) patch("salt.utils.win_system.get_pending_file_rename" return_value=<false>) patch("salt.utils.win_system.get_pending_servermanager" return_value=<false>) patch("salt.utils.win_system.get_pending_component_servicing" return_value=<false>) patch("salt.utils.win_system.get_pending_dvd_reboot" return_value=<false>) patch("salt.utils.win_system.get_reboot_required_witnessed" return_value=<false>) patch("salt.utils.win_system.get_pending_computer_name" return_value="pending name" )<block_start>self.assertTrue(win_system.get_pending_reboot())<block_end><block_end><def_stmt>test_get_pending_reboot_details self<block_start>"""
All items False should return a dictionary with all items False
"""<with_stmt>patch("salt.utils.win_system.get_pending_update" return_value=<false>) patch("salt.utils.win_update.needs_reboot" return_value=<false>) patch("salt.utils.win_system.get_pending_update_exe_volatile" return_value=<false>) patch("salt.utils.win_system.get_pending_file_rename" return_value=<false>) patch("salt.utils.win_system.get_pending_servermanager" return_value=<false>) patch("salt.utils.win_system.get_pending_component_servicing" return_value=<false>) patch("salt.utils.win_system.get_pending_dvd_reboot" return_value=<false>) patch("salt.utils.win_system.get_reboot_required_witnessed" return_value=<false>) patch("salt.utils.win_system.get_pending_computer_name" return_value=<none>) patch("salt.utils.win_system.get_pending_domain_join" return_value=<false>)<block_start>expected={"Pending Component Servicing":<false> "Pending Computer Rename":<false> "Pending DVD Reboot":<false> "Pending File Rename":<false> "Pending Join Domain":<false> "Pending ServerManager":<false> "Pending Update":<false> "Pending Windows Update":<false> "Reboot Required Witnessed":<false> "Volatile Update Exe":<false> }<line_sep>result=win_system.get_pending_reboot_details()<line_sep>self.assertDictEqual(expected result)<block_end><block_end><def_stmt>test_get_pending_reboot_details_true self<block_start>"""
All items True should return a dictionary with all items True
"""<with_stmt>patch("salt.utils.win_system.get_pending_update" return_value=<true>) patch("salt.utils.win_update.needs_reboot" return_value=<true>) patch("salt.utils.win_system.get_pending_update_exe_volatile" return_value=<true>) patch("salt.utils.win_system.get_pending_file_rename" return_value=<true>) patch("salt.utils.win_system.get_pending_servermanager" return_value=<true>) patch("salt.utils.win_system.get_pending_component_servicing" return_value=<true>) patch("salt.utils.win_system.get_pending_dvd_reboot" return_value=<true>) patch("salt.utils.win_system.get_reboot_required_witnessed" return_value=<true>) patch("salt.utils.win_system.get_pending_computer_name" return_value="pending name" ) patch("salt.utils.win_system.get_pending_domain_join" return_value=<true>)<block_start>expected={"Pending Component Servicing":<true> "Pending Computer Rename":<true> "Pending DVD Reboot":<true> "Pending File Rename":<true> "Pending Join Domain":<true> "Pending ServerManager":<true> "Pending Update":<true> "Pending Windows Update":<true> "Reboot Required Witnessed":<true> "Volatile Update Exe":<true> }<line_sep>result=win_system.get_pending_reboot_details()<line_sep>self.assertDictEqual(expected result)<block_end><block_end><block_end> |
expected_output={"ospf-statistics-information":{"ospf-statistics":{"dbds-retransmit":"203656" "dbds-retransmit-5seconds":"0" "flood-queue-depth":"0" "lsas-acknowledged":"225554974" "lsas-acknowledged-5seconds":"0" "lsas-flooded":"66582263" "lsas-flooded-5seconds":"0" "lsas-high-prio-flooded":"375568998" "lsas-high-prio-flooded-5seconds":"0" "lsas-nbr-transmit":"3423982" "lsas-nbr-transmit-5seconds":"0" "lsas-requested":"3517" "lsas-requested-5seconds":"0" "lsas-retransmit":"8064643" "lsas-retransmit-5seconds":"0" "ospf-errors":{"subnet-mismatch-error":"12"} "packet-statistics":[{"ospf-packet-type":"Hello" "packets-received":"5703920" "packets-received-5seconds":"3" "packets-sent":"6202169" "packets-sent-5seconds":"0"} {"ospf-packet-type":"DbD" "packets-received":"185459" "packets-received-5seconds":"0" "packets-sent":"212983" "packets-sent-5seconds":"0"} {"ospf-packet-type":"LSReq" "packets-received":"208" "packets-received-5seconds":"0" "packets-sent":"214" "packets-sent-5seconds":"0"} {"ospf-packet-type":"LSUpdate" "packets-received":"16742100" "packets-received-5seconds":"0" "packets-sent":"15671465" "packets-sent-5seconds":"0"} {"ospf-packet-type":"LSAck" "packets-received":"2964236" "packets-received-5seconds":"0" "packets-sent":"5229203" "packets-sent-5seconds":"0"}] "total-database-summaries":"0" "total-linkstate-request":"0" "total-retransmits":"0"}}}<line_sep> |
<import_stmt>numpy<as>np<import_stmt>itertools<import_from_stmt>.contrib compress_filter smooth residual_model<import_from_stmt>.contrib reduce_interferences<def_stmt>expectation_maximization y x iterations=2 verbose=0 eps=<none><block_start>r"""Expectation maximization algorithm, for refining source separation
estimates.
This algorithm improves source separation results by enforcing
multichannel consistency for the estimates. This usually means better
perceptual quality in terms of spatial artifacts.
The implementation follows the details presented in [1]_, taking
inspiration from the original EM algorithm proposed in [2]_ and its
weighted refinement proposed in [3]_, [4]_.
It works by iteratively:
* Re-estimate source parameters (power spectral densities and spatial
covariance matrices) through :func:`get_local_gaussian_model`.
* Separate again the mixture with the new parameters, by first computing
the new modelled mixture covariance matrices with :func:`get_mix_model`,
then preparing the Wiener filters through :func:`wiener_gain` and applying
them with :func:`apply_filter`.
References
----------
.. [1] <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and
<NAME> and <NAME>, "Improving music source separation based
on deep neural networks through data augmentation and network
blending." 2017 IEEE International Conference on Acoustics, Speech
and Signal Processing (ICASSP). IEEE, 2017.
.. [2] <NAME> and <NAME> and R.Gribonval. "Under-determined
reverberant audio source separation using a full-rank spatial
covariance model." IEEE Transactions on Audio, Speech, and Language
Processing 18.7 (2010): 1830-1840.
.. [3] <NAME> and <NAME> and <NAME>. "Multichannel audio source
separation with deep neural networks." IEEE/ACM Transactions on Audio,
Speech, and Language Processing 24.9 (2016): 1652-1664.
.. [4] <NAME> and <NAME> and <NAME>. "Multichannel music
separation with deep neural networks." 2016 24th European Signal
Processing Conference (EUSIPCO). IEEE, 2016.
.. [5] <NAME> and <NAME> and <NAME> "Kernel additive models for
source separation." IEEE Transactions on Signal Processing
62.16 (2014): 4298-4310.
Parameters
----------
y: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
initial estimates for the sources
x: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
complex STFT of the mixture signal
iterations: int [scalar]
number of iterations for the EM algorithm.
verbose: boolean
display some information if True
eps: float or None [scalar]
The epsilon value to use for regularization and filters.
If None, the default will use the epsilon of np.real(x) dtype.
Returns
-------
y: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
estimated sources after iterations
v: np.ndarray [shape=(nb_frames, nb_bins, nb_sources)]
estimated power spectral densities
R: np.ndarray [shape=(nb_bins, nb_channels, nb_channels, nb_sources)]
estimated spatial covariance matrices
Note
-----
* You need an initial estimate for the sources to apply this
algorithm. This is precisely what the :func:`wiener` function does.
* This algorithm *is not* an implementation of the "exact" EM
proposed in [1]_. In particular, it does not compute the posterior
covariance matrices in the same (exact) way. Instead, it uses the
simplified approximate scheme initially proposed in [5]_ and further
refined in [3]_, [4]_, which boils down to just taking the empirical
covariance of the recent source estimates, followed by a weighted
average for the update of the spatial covariance matrix. It has been
empirically demonstrated that this simplified algorithm is more
robust for music separation.
Warning
-------
It is *very* important to make sure `x.dtype` is `np.complex`
if you want double precision, because this function will **not**
do such conversion for you from `np.complex64`, in case you want the
smaller RAM usage on purpose.
It is almost always better in terms of quality to have double
precision, by e.g. calling :func:`expectation_maximization`
with ``x.astype(np.complex)``.
This is notably needed if you let common deep learning frameworks like
PyTorch or TensorFlow do the STFT, because this usually happens in
single precision.
"""<line_sep># to avoid dividing by zero
<if_stmt>eps<is><none><block_start>eps=np.finfo(np.real(x[0]).dtype).eps<block_end># dimensions
(nb_frames nb_bins nb_channels)=x.shape<line_sep>nb_sources=y.shape[-1]<line_sep># allocate the spatial covariance matrices and PSD
R=np.zeros((nb_bins nb_channels nb_channels nb_sources) x.dtype)<line_sep>v=np.zeros((nb_frames nb_bins nb_sources))<if_stmt>verbose<block_start>print('Number of iterations: ' iterations)<block_end>regularization=np.sqrt(eps)<times>(np.tile(np.eye(nb_channels dtype=np.complex64) (1 nb_bins 1 1)))<for_stmt>it range(iterations)# constructing the mixture covariance matrix. Doing it with a loop
# to avoid ever storing the whole 6D tensor in RAM
<block_start><if_stmt>verbose<block_start>print('EM, iteration %d'%(it+1))<block_end><for_stmt>j range(nb_sources)# update the spectrogram model for source j
<block_start>v[<ellipsis> j],R[<ellipsis> j]=get_local_gaussian_model(y[<ellipsis> j] eps)<block_end><for_stmt>t range(nb_frames)<block_start>Cxx=get_mix_model(v[<none> t <ellipsis>] R)<line_sep>Cxx<augadd>regularization<line_sep>inv_Cxx=_invert(Cxx eps)<line_sep># separate the sources
<for_stmt>j range(nb_sources)<block_start>W_j=wiener_gain(v[<none> t <ellipsis> j] R[<ellipsis> j] inv_Cxx)<line_sep>y[t <ellipsis> j]=apply_filter(x[<none> t <ellipsis>] W_j)[0]<block_end><block_end><block_end><return>y v R<block_end><def_stmt>wiener v x iterations=1 use_softmask=<true> eps=<none><block_start>"""Wiener-based separation for multichannel audio.
The method uses the (possibly multichannel) spectrograms `v` of the
sources to separate the (complex) Short Term Fourier Transform `x` of the
mix. Separation is done in a sequential way by:
* Getting an initial estimate. This can be done in two ways: either by
directly using the spectrograms with the mixture phase, or
by using :func:`softmask`.
* Refining these initial estimates through a call to
:func:`expectation_maximization`.
This implementation also allows specifying the epsilon value used for
regularization. It is based on [1]_, [2]_, [3]_, [4]_.
References
----------
.. [1] <NAME> and <NAME> and <NAME> and <NAME> and <NAME> and
<NAME> and <NAME>, "Improving music source separation based
on deep neural networks through data augmentation and network
blending." 2017 IEEE International Conference on Acoustics, Speech
and Signal Processing (ICASSP). IEEE, 2017.
.. [2] <NAME> and <NAME> and <NAME>. "Multichannel audio source
separation with deep neural networks." IEEE/ACM Transactions on Audio,
Speech, and Language Processing 24.9 (2016): 1652-1664.
.. [3] <NAME> and <NAME> and <NAME>. "Multichannel music
separation with deep neural networks." 2016 24th European Signal
Processing Conference (EUSIPCO). IEEE, 2016.
.. [4] <NAME> and <NAME> and <NAME> "Kernel additive models for
source separation." IEEE Transactions on Signal Processing
62.16 (2014): 4298-4310.
Parameters
----------
v: np.ndarray [shape=(nb_frames, nb_bins, {1,nb_channels}, nb_sources)]
spectrograms of the sources. This is a nonnegative tensor that is
usually the output of the actual separation method of the user. The
spectrograms may be mono, but they need to be 4-dimensional in all
cases.
x: np.ndarray [complex, shape=(nb_frames, nb_bins, nb_channels)]
STFT of the mixture signal.
iterations: int [scalar]
number of iterations for the EM algorithm
use_softmask: boolean
* if `False`, then the mixture phase will directly be used with the
spectrogram as initial estimates.
* if `True`, a softmasking strategy will be used as described in
:func:`softmask`.
eps: {None, float}
Epsilon value to use for computing the separations. This is used
whenever division with a model energy is performed, i.e. when
softmasking and when iterating the EM.
It can be understood as the energy of the additional white noise
that is taken out when separating.
If `None`, the default value is taken as `np.finfo(np.real(x[0])).eps`.
Returns
-------
y: np.ndarray
[complex, shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
STFT of estimated sources
Note
----
* Be careful that you need *magnitude spectrogram estimates* for the
case `softmask==False`.
* We recommend using `softmask=False` only if your spectrogram model is
pretty good, e.g. when it is the output of a deep neural net. If it
is not so great, opt for an initial softmasking strategy.
* The epsilon value will have a huge impact on performance. If it's large,
only the parts of the signal with a significant energy will be kept in
the sources. This epsilon then directly controls the energy of the
reconstruction error.
Warning
-------
As in :func:`expectation_maximization`, we recommend converting the
mixture `x` to double precision `np.complex` *before* calling
:func:`wiener`.
"""<if_stmt>use_softmask<block_start>y=softmask(v x eps=eps)<block_end><else_stmt><block_start>y=v<times>np.exp(1j<times>np.angle(x[<ellipsis> <none>]))<block_end><if_stmt><not>iterations<block_start><return>y<block_end># we need to refine the estimates. Scales down the estimates for
# numerical stability
max_abs=max(1 np.abs(x).max()/10.)<line_sep>x<augdiv>max_abs<line_sep>y=expectation_maximization(y/max_abs x iterations eps=eps)[0]<line_sep><return>y<times>max_abs<block_end><def_stmt>softmask v x logit=<none> eps=<none><block_start>"""Separates a mixture with a ratio mask, using the provided sources
spectrogram estimates. Additionally allows compressing the mask with
a logit function for soft binarization.
The filter does *not* take multichannel correlations into account.
The masking strategy can be traced back to the work of <NAME> in the
case of *power* spectrograms [1]_. In the case of *fractional* spectrograms
like magnitude, this filter is often referred to as a "ratio mask", and
has been shown to be the optimal separation procedure under alpha-stable
assumptions [2]_.
References
----------
.. [1] <NAME>,"Extrapolation, Inerpolation, and Smoothing of Stationary
Time Series." 1949.
.. [2] <NAME> and <NAME>. "Generalized Wiener filtering with
fractional power spectrograms." 2015 IEEE International Conference on
Acoustics, Speech and Signal Processing (ICASSP). IEEE, 2015.
Parameters
----------
v: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_sources)]
spectrograms of the sources
x: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
mixture signal
logit: {None, float between 0 and 1}
enable a compression of the filter. If not None, it is the threshold
value for the logit function: a softmask above this threshold is
brought closer to 1, and a softmask below is brought closer to 0.
Returns
-------
ndarray, shape=(nb_frames, nb_bins, nb_channels, nb_sources)
estimated sources
"""<line_sep># to avoid dividing by zero
<if_stmt>eps<is><none><block_start>eps=np.finfo(np.real(x[0]).dtype).eps<block_end>total_energy=np.sum(v axis=-1 keepdims=<true>)<line_sep>filter=v/(eps+total_energy.astype(x.dtype))<if_stmt>logit<is><not><none><block_start>filter=compress_filter(filter eps thresh=logit multichannel=<false>)<block_end><return>filter<times>x[<ellipsis> <none>]<block_end><def_stmt>_invert M eps<block_start>"""
Invert matrices, with special fast handling of the 1x1 and 2x2 cases.
Will generate errors if the matrices are singular: the user must handle this
through their own regularization schemes.
Parameters
----------
M: np.ndarray [shape=(..., nb_channels, nb_channels)]
matrices to invert: must be square along the last two dimensions
eps: [scalar]
regularization parameter to use only in the case of matrices
bigger than 2x2
Returns
-------
invM: np.ndarray, [shape=M.shape]
inverses of M
"""<line_sep>nb_channels=M.shape[-1]<if_stmt>nb_channels<eq>1# scalar case
<block_start>invM=1.0/(M+eps)<block_end><elif_stmt>nb_channels<eq>2# two channels case: analytical expression
<block_start>det=(M[<ellipsis> 0 0]<times>M[<ellipsis> 1 1]-M[<ellipsis> 0 1]<times>M[<ellipsis> 1 0])<line_sep>invDet=1.0/(det)<line_sep>invM=np.empty_like(M)<line_sep>invM[<ellipsis> 0 0]=invDet<times>M[<ellipsis> 1 1]<line_sep>invM[<ellipsis> 1 0]=-invDet<times>M[<ellipsis> 1 0]<line_sep>invM[<ellipsis> 0 1]=-invDet<times>M[<ellipsis> 0 1]<line_sep>invM[<ellipsis> 1 1]=invDet<times>M[<ellipsis> 0 0]<block_end><else_stmt># general case : no use of analytical expression (slow!)
<block_start>invM=np.linalg.pinv(M eps)<block_end><return>invM<block_end><def_stmt>wiener_gain v_j R_j inv_Cxx<block_start>"""
Compute the wiener gain for separating one source, given all parameters.
It is the matrix applied to the mix to get the posterior mean of the source
as in [1]_
References
----------
.. [1] <NAME> and <NAME> and R.Gribonval. "Under-determined
reverberant audio source separation using a full-rank spatial
covariance model." IEEE Transactions on Audio, Speech, and Language
Processing 18.7 (2010): 1830-1840.
Parameters
----------
v_j: np.ndarray [shape=(nb_frames, nb_bins)]
power spectral density of the target source.
R_j: np.ndarray [shape=(nb_bins, nb_channels, nb_channels)]
spatial covariance matrix of the target source
inv_Cxx: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
inverse of the mixture covariance matrices
Returns
-------
G: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
wiener filtering matrices, to apply to the mix, e.g. through
:func:`apply_filter` to get the target source estimate.
"""<line_sep>(_ nb_channels)=R_j.shape[:2]<line_sep># computes multichannel Wiener gain as v_j R_j inv_Cxx
G=np.zeros_like(inv_Cxx)<for_stmt>(i1 i2 i3) itertools.product(*(range(nb_channels) )<times>3)<block_start>G[<ellipsis> i1 i2]<augadd>(R_j[<none> : i1 i3]<times>inv_Cxx[<ellipsis> i3 i2])<block_end>G<augmul>v_j[<ellipsis> <none> <none>]<line_sep><return>G<block_end><def_stmt>apply_filter x W<block_start>"""
Applies a filter on the mixture. Just corresponds to a matrix
multiplication.
Parameters
----------
x: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
STFT of the signal on which to apply the filter.
W: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
filtering matrices, as returned, e.g. by :func:`wiener_gain`
Returns
-------
y_hat: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
filtered signal
"""<line_sep>nb_channels=W.shape[-1]<line_sep># apply the filter
y_hat=0+0j<for_stmt>i range(nb_channels)<block_start>y_hat<augadd>W[<ellipsis> i]<times>x[<ellipsis> i <none>]<block_end><return>y_hat<block_end><def_stmt>get_mix_model v R<block_start>"""
Compute the model covariance of a mixture based on local Gaussian models.
simply adds up all the v[..., j] * R[..., j]
Parameters
----------
v: np.ndarray [shape=(nb_frames, nb_bins, nb_sources)]
Power spectral densities for the sources
R: np.ndarray [shape=(nb_bins, nb_channels, nb_channels, nb_sources)]
Spatial covariance matrices of each sources
Returns
-------
Cxx: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
Covariance matrix for the mixture
"""<line_sep>nb_channels=R.shape[1]<line_sep>(nb_frames nb_bins nb_sources)=v.shape<line_sep>Cxx=np.zeros((nb_frames nb_bins nb_channels nb_channels) R.dtype)<for_stmt>j range(nb_sources)<block_start>Cxx<augadd>v[<ellipsis> j <none> <none>]<times>R[<none> <ellipsis> j]<block_end><return>Cxx<block_end><def_stmt>_covariance y_j<block_start>"""
Compute the empirical covariance for a source.
Parameters
----------
y_j: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)].
complex stft of the source.
Returns
-------
Cj: np.ndarray [shape=(nb_frames, nb_bins, nb_channels, nb_channels)]
just y_j * conj(y_j.T): empirical covariance for each TF bin.
"""<line_sep>(nb_frames nb_bins nb_channels)=y_j.shape<line_sep>Cj=np.zeros((nb_frames nb_bins nb_channels nb_channels) y_j.dtype)<for_stmt>(i1 i2) itertools.product(*(range(nb_channels) )<times>2)<block_start>Cj[<ellipsis> i1 i2]<augadd>y_j[<ellipsis> i1]<times>np.conj(y_j[<ellipsis> i2])<block_end><return>Cj<block_end><def_stmt>get_local_gaussian_model y_j eps=1.<block_start>r"""
Compute the local Gaussian model [1]_ for a source given the complex STFT.
First get the power spectral densities, and then the spatial covariance
matrix, as done in [1]_, [2]_
References
----------
.. [1] <NAME> and <NAME> and R.Gribonval. "Under-determined
reverberant audio source separation using a full-rank spatial
covariance model." IEEE Transactions on Audio, Speech, and Language
Processing 18.7 (2010): 1830-1840.
.. [2] <NAME> and <NAME> and <NAME>. "Low bitrate informed
source separation of realistic mixtures." 2013 IEEE International
Conference on Acoustics, Speech and Signal Processing. IEEE, 2013.
Parameters
----------
y_j: np.ndarray [shape=(nb_frames, nb_bins, nb_channels)]
complex stft of the source.
eps: float [scalar]
regularization term
Returns
-------
v_j: np.ndarray [shape=(nb_frames, nb_bins)]
power spectral density of the source
R_j: np.ndarray [shape=(nb_bins, nb_channels, nb_channels)]
Spatial covariance matrix of the source
"""<line_sep>v_j=np.mean(np.abs(y_j)<power>2 axis=2)<line_sep># updates the spatial covariance matrix
nb_frames=y_j.shape[0]<line_sep>R_j=0<line_sep>weight=eps<for_stmt>t range(nb_frames)<block_start>R_j<augadd>_covariance(y_j[<none> t <ellipsis>])<line_sep>weight<augadd>v_j[<none> t <ellipsis>]<block_end>R_j<augdiv>weight[<ellipsis> <none> <none>]<line_sep><return>v_j R_j<block_end> |
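# Added illustrative end-to-end sketch (assumed shapes and random data only; a real call
# would use the STFT of a mixture and spectrogram estimates from a separation model).
<if_stmt>__name__<eq>"__main__"<block_start>rng=np.random.RandomState(0)<line_sep>nb_frames,nb_bins,nb_channels,nb_sources=10 65 2 4<line_sep># complex mixture STFT and nonnegative source spectrogram estimates
x=(rng.randn(nb_frames nb_bins nb_channels)+1j<times>rng.randn(nb_frames nb_bins nb_channels)).astype(np.complex128)<line_sep>v=np.abs(rng.randn(nb_frames nb_bins nb_channels nb_sources))<line_sep>y=wiener(v x iterations=1 use_softmask=<true>)<line_sep>print(y.shape)# expected: (10, 65, 2, 4)
<block_end>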
<import_from_stmt>eblib libcollect<line_sep># Create a LibCollect object
lc=libcollect.LibCollect()<line_sep># Prepare arguments for do_collect
#
# Path to the script (can be absolute or relative)
scriptname='plotting_data_monitor.pyw'<line_sep># Ask the resulting distribution to be placed in
# directory distrib
targetdir='distrib'<line_sep># Specify which libraries to exclude from the
# distribution (because you know they're installed
# on the target machine)
excludes=["PyQt4" "numpy" "serial" "pywin" "win32api" "win32com"]<line_sep># This does the actual work
# See the documentation of LibCollect for more options
#
lc.do_collect(scriptname targetdir excludes verbose=<true>)<line_sep> |
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>datetime datetime<import_from_stmt>qf_lib.common.utils.dateutils.relative_delta RelativeDelta<class_stmt>RegularDateTimeRule(object)<block_start>"""
RegularDateTimeRule is a helper class for TimeEvents. It has a convenience method for calculating
the next trigger time for events which occur at a certain date/time on a regular basis (e.g. each day at 9:30,
each first day of a month, etc.).
"""<def_stmt>__init__ self year:int=<none> month:int=<none> day:int=<none> weekday:int=<none> hour:int=<none> minute:int=<none> second:int=<none> microsecond:int=<none><block_start>self.trigger_time=RelativeDelta(year=year month=month day=day weekday=weekday hour=hour minute=minute second=second microsecond=microsecond)<block_end><def_stmt>next_trigger_time self now:datetime<arrow>datetime<block_start>next_trigger_time=now+self.trigger_time<line_sep># check if next_trigger_time is in the past and if it is, it needs to be adjusted so that it's in the future
<if_stmt>next_trigger_time<le>now<block_start>next_trigger_time=self._get_next_trigger_time_after(next_trigger_time)<block_end><return>next_trigger_time<block_end><def_stmt>_get_next_trigger_time_after self start_time:datetime# calculate proper adjustment (time shift):
# if the month is important for the trigger time, then we should go to the next year
# for getting the next occurrence, if it is unimportant but day is important,
# then we should go to the next month etc.
<block_start>time_adjustment=<none><if_stmt>self.trigger_time.year<is><not><none># nothing can be done if the year is important. No way of getting next occurrence (there will never be
# the same year again)
<block_start><raise>ArithmeticError("Cannot get next occurrence of the event with `year` specified "<concat>"(there will never be the same year again).")<block_end><elif_stmt>self.trigger_time.month<is><not><none><block_start>time_adjustment=RelativeDelta(years=1)<block_end><elif_stmt>self.trigger_time.day<is><not><none><block_start>time_adjustment=RelativeDelta(months=1)<block_end><elif_stmt>self.trigger_time.weekday<is><not><none><block_start>time_adjustment=RelativeDelta(weeks=1)<block_end><elif_stmt>self.trigger_time.hour<is><not><none><block_start>time_adjustment=RelativeDelta(days=1)<block_end><elif_stmt>self.trigger_time.minute<is><not><none><block_start>time_adjustment=RelativeDelta(hours=1)<block_end><elif_stmt>self.trigger_time.second<is><not><none><block_start>time_adjustment=RelativeDelta(minutes=1)<block_end><elif_stmt>self.trigger_time.microsecond<is><not><none><block_start>time_adjustment=RelativeDelta(seconds=1)<block_end>next_trigger_time=start_time+time_adjustment<line_sep><return>next_trigger_time<block_end><block_end> |
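# Added illustrative usage (assumption: RelativeDelta behaves like dateutil's relativedelta,
# i.e. absolute hour/minute fields replace the corresponding components of the date).
<if_stmt>__name__<eq>"__main__"<block_start>daily_rule=RegularDateTimeRule(hour=9 minute=30 second=0 microsecond=0)<line_sep>now=datetime(2020 1 15 10 0)<line_sep># 10:00 is already past today's 9:30, so the rule rolls over by one day
print(daily_rule.next_trigger_time(now))# expected: 2020-01-16 09:30:00
<block_end>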
<import_stmt>utils<import_stmt>functions<as>func<import_from_stmt>commands.base Cmd<line_sep>help_text=[[("Usage:" "<PREFIX><COMMAND>\n"<concat>"<PREFIX><COMMAND> `N`") ("Description:" "Use when already in a channel - Limit the number of users allowed in your channel to either the current "<concat>"number of users, or the specified number.\n\n"<concat>"Use *<PREFIX>un<COMMAND>* to remove the limit.") ("Example:" "<PREFIX><COMMAND> 4") ]]<async_keyword><def_stmt>execute ctx params<block_start>params_str=' '.join(params)<line_sep>guild=ctx['guild']<line_sep>settings=ctx['settings']<line_sep>limit=utils.strip_quotes(params_str)<line_sep>author=ctx['message'].author<line_sep>vc=ctx['voice_channel']<if_stmt>limit<block_start><try_stmt><block_start>limit=abs(int(limit))<block_end><except_stmt>ValueError<block_start><return><false> "`{}` is not a number.".format(limit)<block_end><block_end><else_stmt><block_start>limit=len(vc.members)<block_end><if_stmt>limit<g>99<block_start><return><false> "The user limit cannot be higher than 99."<block_end><await>vc.edit(user_limit=limit)<if_stmt>limit<ne>0<block_start>log_msg="👪 {} (`{}`) set the user limit of \"**{}**\" (`{}`) to {}".format(func.user_hash(author) author.id func.esc_md(vc.name) vc.id limit)<block_end><else_stmt><block_start>log_msg="👨👩👧👦 {} (`{}`) removed the user limit of \"**{}**\" (`{}`)".format(func.user_hash(author) author.id func.esc_md(vc.name) vc.id)<block_end><await>func.server_log(guild log_msg 2 settings)<line_sep><return><true> <none><block_end>command=Cmd(execute=execute help_text=help_text params_required=0 admin_required=<false> voice_required=<true> creator_only=<true> )<line_sep> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> unicode_literals<import_from_stmt>django.db models migrations<import_stmt>django.db.models.deletion<import_from_stmt>django.conf settings<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[migrations.swappable_dependency(settings.AUTH_USER_MODEL) ('notes' '0004_auto_20151022_1517') ]<line_sep>operations=[migrations.CreateModel(name='Notebook' fields=[('id' models.AutoField(verbose_name='ID' serialize=<false> auto_created=<true> primary_key=<true>)) ('title' models.CharField(max_length=255)) ('user' models.ForeignKey(to=settings.AUTH_USER_MODEL)) ] options={'ordering':['title'] } bases=(models.Model ) ) migrations.AddField(model_name='note' name='notebook' field=models.ForeignKey(on_delete=django.db.models.deletion.SET_NULL blank=<true> to='notes.Notebook' null=<true>) preserve_default=<true> ) ]<block_end> |
<import_from_stmt>flask Blueprint Markup<import_from_stmt>flask render_template<class_stmt>Youku(object)<block_start>"""Flask-Youku extension."""<def_stmt>__init__ self app=<none> **kwargs<block_start>"""Init Flask-Youku's instance via app object"""<if_stmt>app<block_start>self.init_app(app)<block_end><block_end><def_stmt>init_app self app<block_start>"""Init Flask-Youku's instance via app object"""<line_sep>self.register_blueprint(app)<line_sep># Create the Jinja function `youku`
app.add_template_global(youku)<block_end><def_stmt>register_blueprint self app<block_start>"""Register the youku blueprint into app object."""<line_sep>module=Blueprint('youku' __name__ template_folder='templates')<line_sep>app.register_blueprint(module)<line_sep><return>module<block_end><block_end><class_stmt>Video(object)<block_start>"""Receive the youku_id to rendering the video.html"""<def_stmt>__init__ self video_id cls='youku'<block_start>self.video_id=video_id<line_sep>self.cls=cls<block_end><def_stmt>render self *args **kwargs<block_start><return>render_template(*args **kwargs)<block_end>@property<def_stmt>html self<block_start>"""Tag the HTML as security string."""<line_sep><return>Markup(self.render('youku/video.html' video=self))<block_end><block_end><def_stmt>youku *args **kwargs<block_start>"""Define the Jinja function."""<line_sep>video=Video(*args **kwargs)<line_sep><return>video.html<block_end> |
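# Added illustrative wiring (the app setup and video id below are placeholders):
# app = Flask(__name__)
# Youku(app)
# ...then inside a Jinja template: {{ youku('SOME_VIDEO_ID') }}
# which renders the 'youku/video.html' template with the corresponding Video instance.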
""" GLSL shader generation """<import_from_stmt>utils Stages getHeader getShader getMacro genFnCall fsl_assert get_whitespace<import_from_stmt>utils isArray getArrayLen getArrayBaseName getMacroName DescriptorSets is_groupshared_decl<import_stmt>os sys importlib re<import_from_stmt>shutil copyfile<def_stmt>pssl fsl dst rootSignature=<none><block_start><return>d3d(fsl dst pssl=<true> d3d12=<false> rootSignature=rootSignature)<block_end><def_stmt>prospero fsl dst<block_start><return>d3d(fsl dst pssl=<true> prospero=<true>)<block_end><def_stmt>xbox fsl dst rootSignature=<none><block_start><return>d3d(fsl dst xbox=<true> d3d12=<true> rootSignature=rootSignature)<block_end><def_stmt>d3d12 fsl dst<block_start><return>d3d(fsl dst d3d12=<true>)<block_end><def_stmt>scarlett fsl dst rootSignature=<none><block_start><return>xbox(fsl dst rootSignature)<block_end><def_stmt>d3d fsl dst pssl=<false> prospero=<false> xbox=<false> rootSignature=<none> d3d12=<false><block_start>shader=getShader(fsl dst)<line_sep>shader_src=getHeader(fsl)<if_stmt><not>(d3d12<or>pssl<or>xbox)<block_start>shader_src<augadd>['#define DIRECT3D11\n']<block_end><if_stmt>prospero<block_start><import_stmt>prospero<line_sep>pssl=prospero<line_sep>shader_src<augadd>['#define PROSPERO\n']<line_sep>shader_src<augadd>prospero.preamble()<block_end><elif_stmt>pssl<block_start><import_stmt>orbis<line_sep>pssl=orbis<line_sep>shader_src<augadd>['#define ORBIS\n']<line_sep>shader_src<augadd>orbis.preamble()<block_end><if_stmt>xbox<block_start><import_stmt>xbox<line_sep>shader_src<augadd>['#define XBOX\n']<line_sep>shader_src<augadd>xbox.preamble()<block_end><if_stmt>d3d12<block_start>shader_src<augadd>['#define DIRECT3D12\n']<block_end>shader_src<augadd>['#define STAGE_' shader.stage.name '\n']<if_stmt>shader.enable_waveops<block_start>shader_src<augadd>['#define ENABLE_WAVEOPS()\n']<block_end># directly embed d3d header in shader
header_path=os.path.join(os.path.dirname(os.path.dirname(__file__)) 'includes' 'd3d.h')<line_sep>header_lines=open(header_path).readlines()<line_sep>shader_src<augadd>header_lines+['\n']<line_sep>nonuniformresourceindex=<none><line_sep># tesselation
pcf_returnType=<none><line_sep># for SV_PrimitiveID usage in pixel shaders, generate a pass-through gs
passthrough_gs=<false><if_stmt>pssl<and>shader.stage<eq>Stages.FRAG<block_start><for_stmt>dtype,dvar shader.flat_args<block_start><if_stmt>getMacroName(dtype).upper()<eq>'SV_PRIMITIVEID'<block_start>passthrough_gs=<true><if_stmt>prospero<block_start>prospero.gen_passthrough_gs(shader dst.replace('frag' 'geom'))<block_end><else_stmt><block_start>orbis.gen_passthrough_gs(shader dst.replace('frag' 'geom'))<block_end><block_end><block_end><block_end>last_res_decl=0<line_sep>explicit_res_decl=<none><line_sep>srt_resources={descriptor_set.name:[]<for>descriptor_set DescriptorSets}<line_sep>srt_free_resources=[]<line_sep>srt_references=[]<line_sep>defineLoc=len(shader_src)<line_sep>parsing_struct=<none><line_sep>skip_semantics=<false><line_sep>struct_elements=[]<line_sep>srt_redirections=set()<for_stmt>line shader.lines<block_start><def_stmt>get_uid name<block_start><return>name+'_'+str(len(shader_src))<block_end># dont process commented lines
<if_stmt>line.strip().startswith('//')<block_start>shader_src<augadd>[line]<line_sep><continue><block_end><if_stmt>is_groupshared_decl(line)<block_start>dtype,dname=getMacro(line)<line_sep>basename=getArrayBaseName(dname)<line_sep>shader_src<augadd>['#define srt_'+basename+' '+basename+'\n']<if_stmt><not>pssl<block_start>line='groupshared '+dtype+' '+dname+';\n'<block_end><else_stmt><block_start>line='thread_group_memory '+dtype+' '+dname+';\n'<block_end><block_end><if_stmt>'DECLARE_RESOURCES'<in>line<block_start>explicit_res_decl=len(shader_src)+1<line_sep>line='//'+line<block_end><if_stmt>line.strip().startswith('STRUCT(')<or>line.strip().startswith('CBUFFER(')<or>line.strip().startswith('PUSH_CONSTANT(')<block_start>parsing_struct=getMacro(line)<line_sep>struct_name=parsing_struct[0]<line_sep>struct_elements=[]<if_stmt>pssl<and>'PUSH_CONSTANT'<in>line<block_start>skip_semantics=<true><line_sep>macro=get_uid(struct_name)<line_sep>shader_src<augadd>['#define ' macro '\n']<line_sep>srt_free_resources<augadd>[(macro pssl.declare_rootconstant(struct_name))]<block_end><if_stmt>pssl<and>'CBUFFER'<in>line<block_start>skip_semantics=<true><line_sep>res_freq=parsing_struct[1]<line_sep>macro=get_uid(struct_name)<line_sep>shader_src<augadd>['#define ' macro '\n']<if_stmt>'rootcbv'<in>struct_name<block_start>srt_free_resources<augadd>[(macro pssl.declare_cbuffer(struct_name))]<block_end><else_stmt><block_start>srt_resources[res_freq]<augadd>[(macro pssl.declare_cbuffer(struct_name))]<block_end><block_end><block_end><if_stmt>parsing_struct<and>line.strip().startswith('DATA(')<block_start>data_decl=getMacro(line)<if_stmt>skip_semantics<or>data_decl[-1]<eq>'None'<block_start>line=get_whitespace(line)+data_decl[0]+' '+data_decl[1]+';\n'<block_end><if_stmt>pssl<and>type(parsing_struct)<is><not>str<block_start>basename=getArrayBaseName(data_decl[1])<line_sep>macro='REF_'+get_uid(basename)<line_sep>shader_src<augadd>['#define ' macro '\n']<line_sep>init,ref=pssl.declare_element_reference(shader parsing_struct data_decl)<line_sep>shader_src<augadd>[*init '\n']<line_sep>srt_redirections.add(basename)<line_sep>struct_elements<augadd>[(macro ref)]<line_sep>srt_references<augadd>[(macro (init ref))]<line_sep>shader_src<augadd>[line]<line_sep><continue><block_end><block_end><if_stmt>parsing_struct<and>'};'<in>line# if this shader is the receiving end of a passthrough_gs, insert the necessary inputs
<block_start><if_stmt>passthrough_gs<and>shader.struct_args[0][0]<eq>parsing_struct<block_start>shader_src<augadd>['\tDATA(FLAT(uint), PrimitiveID, TEXCOORD8);\n']<block_end>shader_src<augadd>[line]<line_sep>skip_semantics=<false><if_stmt>type(parsing_struct)<is><not>str<block_start>last_res_decl=len(shader_src)+1<block_end>parsing_struct=<none><line_sep><continue><block_end>resource_decl=<none><if_stmt>line.strip().startswith('RES(')<block_start>resource_decl=getMacro(line)<line_sep>last_res_decl=len(shader_src)+1<block_end><if_stmt>pssl<and>resource_decl# shader_src += ['// ', line.strip(), '\n']
<block_start>_,res_name,res_freq,_,_=resource_decl<line_sep>basename=getArrayBaseName(res_name)<line_sep>macro=get_uid(basename)<line_sep># shader_src += ['#define ', macro, ' //', line.strip(), '\n']
shader_src<augadd>['#define ' macro '\n']<line_sep>srt_resources[res_freq]<augadd>[(macro pssl.declare_resource(resource_decl))]<line_sep># macro = 'REF_' + macro
# shader_src += ['#define ', macro, '\n']
init,ref=pssl.declare_reference(shader resource_decl)<line_sep>shader_src<augadd>[*init '\n']<line_sep>srt_references<augadd>[(macro (init ref))]<line_sep>srt_redirections.add(basename)<line_sep>last_res_decl=len(shader_src)+1<line_sep># continue
<block_end><if_stmt>'TESS_VS_SHADER('<in>line<and>prospero<block_start>vs_filename=getMacro(line).strip('"')<line_sep>vs_fsl_path=os.path.join(os.path.dirname(fsl) vs_filename)<line_sep>ls_vs_filename='ls_'+vs_filename.replace('.fsl' '')<line_sep>vs_pssl=os.path.join(os.path.dirname(dst) ls_vs_filename)<line_sep>d3d(vs_fsl_path vs_pssl pssl=<true> prospero=<true>)<line_sep>shader_src<augadd>['#undef VS_MAIN\n' '#define VS_MAIN vs_main\n' '#include "' ls_vs_filename '"\n']<line_sep><continue><block_end><if_stmt>'_MAIN('<in>line<and>shader.stage<eq>Stages.TESC<and>prospero<block_start>shader_src<augadd>pssl.insert_tesc('vs_main')<block_end><if_stmt>'_MAIN('<in>line<and>shader.returnType<block_start><if_stmt>shader.returnType<not><in>shader.structs<block_start><if_stmt>shader.stage<eq>Stages.FRAG<block_start><if_stmt><not>'SV_DEPTH'<in>shader.returnType.upper()<block_start>line=line[:-1]+': SV_TARGET\n'<block_end><else_stmt><block_start>line=line[:-1]+': SV_DEPTH\n'<block_end><block_end><if_stmt>shader.stage<eq>Stages.VERT<block_start>line=line[:-1]+': SV_POSITION\n'<block_end><block_end><block_end># manually transform Type(var) to Type var (necessary for DX11/fxc)
<if_stmt>'_MAIN('<in>line<block_start><for_stmt>dtype,var shader.struct_args<block_start>line=line.replace(dtype+'('+var+')' dtype+' '+var)<block_end><for_stmt>dtype,dvar shader.flat_args<block_start>sem=getMacroName(dtype).upper()<line_sep>innertype=getMacro(dtype)<line_sep>ldtype=line.find(dtype)<line_sep>line=line[:ldtype]+innertype+line[ldtype+len(dtype):]<line_sep>l0=line.find(' '+dvar ldtype)+len(dvar)+1<line_sep>line=line[:l0]+' : '+sem+line[l0:]<block_end># if this shader is the receiving end of a passthrough_gs, get rid of the PrimitiveID input
<if_stmt>passthrough_gs<block_start><for_stmt>dtype,dvar shader.flat_args<block_start><if_stmt>'SV_PRIMITIVEID'<in>dtype.upper()<block_start>upper_line=line.upper()<line_sep>l0=upper_line.find('SV_PRIMITIVEID')<line_sep>l1=upper_line.rfind(',' 0 l0)<line_sep>line=line.replace(line[l1:l0+len('SV_PRIMITIVEID')] '')<block_end><block_end><block_end><if_stmt>pssl<block_start><for_stmt>dtype,darg shader.flat_args<block_start><if_stmt>'SV_INSTANCEID'<in>dtype.upper()<block_start>shader_src<augadd>pssl.set_indirect_draw()<block_end><block_end><block_end><block_end><if_stmt>'_MAIN('<in>line<and>(pssl<or>xbox)<and>rootSignature<block_start>l0=rootSignature.find('SrtSignature')<line_sep>l1=rootSignature.find('{' l0)<line_sep>srt_name=rootSignature[l0:l1].split()[-1]<line_sep>res_sig='RootSignature'<if>xbox<else>'SrtSignature'<line_sep>shader_src<augadd>['[' res_sig '(' srt_name ')]\n' line]<line_sep><continue><block_end># if 'INIT_MAIN' in line:
# if pssl:
# shader_src += ['\tinit_global_references();\n']
<if_stmt>'INIT_MAIN'<in>line<and>shader.returnType# mName = getMacroName(shader.returnType)
# mArg = getMacro(shader.returnType)
# line = line.replace('INIT_MAIN', '{} {}'.format(mName, mArg))
<block_start>line=get_whitespace(line)+'//'+line.strip()+'\n'<line_sep># if this shader is the receiving end of a passthrough_gs, copy the PrimitiveID from GS output
<if_stmt>passthrough_gs<block_start><for_stmt>dtype,dvar shader.flat_args<block_start><if_stmt>'SV_PRIMITIVEID'<in>dtype.upper()<block_start>shader_src<augadd>['uint ' dvar ' = ' shader.struct_args[0][1] '.PrimitiveID;\n']<block_end><block_end><block_end><block_end><if_stmt>'BeginNonUniformResourceIndex('<in>line<block_start>index,max_index=getMacro(line) <none><assert_stmt>index<ne>[] 'No index provided for {}'.format(line)<if_stmt>type(index)<eq>list<block_start>max_index=index[1]<line_sep>index=index[0]<block_end>nonuniformresourceindex=index<if_stmt>pssl<block_start>shader_src<augadd>pssl.begin_nonuniformresourceindex(nonuniformresourceindex max_index)<line_sep><continue><block_end><else_stmt><block_start>line='#define {0} NonUniformResourceIndex({0})\n'.format(nonuniformresourceindex)<block_end><block_end><if_stmt>'EndNonUniformResourceIndex()'<in>line<block_start><assert_stmt>nonuniformresourceindex 'EndNonUniformResourceIndex: BeginNonUniformResourceIndex not called/found'<if_stmt>pssl<block_start>shader_src<augadd>pssl.end_nonuniformresourceindex(nonuniformresourceindex)<line_sep><continue><block_end><else_stmt><block_start>line='#undef {}\n'.format(nonuniformresourceindex)<block_end>nonuniformresourceindex=<none><block_end><elif_stmt>re.match('\s*RETURN' line)<block_start><if_stmt>shader.returnType<block_start>line=line.replace('RETURN' 'return ')<block_end><else_stmt><block_start>line=line.replace('RETURN()' 'return')<block_end><block_end># tesselation
<if_stmt>shader.pcf<and>shader.pcf<in>line<and><not>pcf_returnType<block_start>loc=line.find(shader.pcf)<line_sep>pcf_returnType=line[:loc].strip()<line_sep># line = getMacroName(pcf_returnType) + ' ' + line[loc:]
<for_stmt>dtype,dvar shader.pcf_arguments<block_start><if_stmt><not>'INPUT_PATCH'<in>dtype<and><not>'OUTPUT_PATCH'<in>dtype<block_start>line=line.replace(dtype getMacro(dtype))<line_sep>line=line.replace(dvar dvar+': '+getMacroName(dtype))<block_end><block_end><block_end><if_stmt>pcf_returnType<and>re.match('\s*PCF_INIT' line)# line = line.replace('PCF_INIT', getMacroName(pcf_returnType) + ' ' + getMacro(pcf_returnType))
<block_start>line=line.replace('PCF_INIT' '')<block_end><if_stmt>pcf_returnType<and>'PCF_RETURN'<in>line<block_start>line=line.replace('PCF_RETURN' 'return ')<line_sep># line = line.replace('PCF_RETURN', '{ return ' + getMacro(pcf_returnType) + ';}')
<block_end><if_stmt>'INDIRECT_DRAW('<in>line<block_start><if_stmt>pssl<block_start>shader_src<augadd>pssl.set_indirect_draw()<block_end>line='//'+line<block_end><if_stmt>'SET_OUTPUT_FORMAT('<in>line<block_start><if_stmt>pssl<block_start>shader_src<augadd>pssl.set_output_format(getMacro(line))<block_end>line='//'+line<block_end><if_stmt>'PS_ZORDER_EARLYZ('<in>line<block_start><if_stmt>xbox<block_start>shader_src<augadd>xbox.set_ps_zorder_earlyz()<block_end>line='//'+line<block_end>shader_src<augadd>[line]<block_end><if_stmt>pssl<block_start><if_stmt>explicit_res_decl<block_start>last_res_decl=explicit_res_decl<block_end><if_stmt>last_res_decl<g>0# skip srt altogether if no declared resourced or not requested
<block_start>srt=pssl.gen_srt(srt_resources srt_free_resources srt_references)<line_sep>open(dst+'.srt.h' 'w').write(srt)<line_sep>shader_src.insert(last_res_decl '\n#include \"'+os.path.basename(dst)+'.srt.h\"\n')<block_end><block_end># insert root signature at the end (not sure whether that will work for xbox)
<if_stmt>rootSignature<and>pssl<block_start>shader_src<augadd>[_line+'\n'<for>_line rootSignature.splitlines()]# + shader.lines
<block_end><if_stmt>rootSignature<and>xbox<block_start>shader_src<augadd>rootSignature+['\n']<block_end># + shader.lines
open(dst 'w').writelines(shader_src)<line_sep><return>0<block_end> |
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>pydriller.repository Repository<import_stmt>logging<line_sep>logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s' level=logging.INFO)<def_stmt>test_between_revisions <block_start>from_tag='tag1'<line_sep>to_tag='tag3'<line_sep>lc=list(Repository('test-repos/tags' from_tag=from_tag to_tag=to_tag).traverse_commits())<assert_stmt>len(lc)<eq>5<assert_stmt>'6bb9e2c6a8080e6b5b34e6e316c894b2ddbf7fcd'<eq>lc[0].hash<assert_stmt>'f1a90b8d7b151ceefd3e3dfc0dc1d0e12b5f48d0'<eq>lc[1].hash<assert_stmt>'4638730126d40716e230c2040751a13153fb1556'<eq>lc[2].hash<assert_stmt>'a26f1438bd85d6b22497c0e5dae003812becd0bc'<eq>lc[3].hash<assert_stmt>'627e1ad917a188a861c9fedf6e5858b79edbe439'<eq>lc[4].hash<block_end><def_stmt>test_multiple_repos_with_tags <block_start>from_tag='tag2'<line_sep>to_tag='tag3'<line_sep>repos=['test-repos/tags' 'test-repos/tags' 'test-repos/tags']<line_sep>lc=list(Repository(path_to_repo=repos from_tag=from_tag to_tag=to_tag).traverse_commits())<assert_stmt>len(lc)<eq>9<block_end> |
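# Usage sketch (added for illustration; the repository path and tag names are the
# same placeholders the tests above use, not a real project):
from pydriller.repository import Repository

def print_commits_between_tags(repo_path='test-repos/tags', from_tag='tag1', to_tag='tag3'):
    # Walk only the commits between the two tags, oldest first.
    for commit in Repository(repo_path, from_tag=from_tag, to_tag=to_tag).traverse_commits():
        print(commit.hash, commit.msg)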
<import_from_stmt>appJar gui<def_stmt>press btn<block_start><if_stmt>btn<eq>"info"<block_start>app.infoBox("Title Here" "Message here...")<block_end><if_stmt>btn<eq>"error"<block_start>app.errorBox("Title Here" "Message here...")<block_end><if_stmt>btn<eq>"warning"<block_start>app.warningBox("Title Here" "Message here...")<block_end><if_stmt>btn<eq>"yesno"<block_start>app.yesNoBox("Title Here" "Message here...")<block_end><if_stmt>btn<eq>"question"<block_start>app.questionBox("Title Here" "Message here...")<block_end><if_stmt>btn<eq>"ok"<block_start>app.okBox("Title Here" "Message here...")<block_end><if_stmt>btn<eq>"retry"<block_start>app.retryBox("Title Here" "Message here...")<block_end><if_stmt>btn<eq>"text"<block_start>app.textBox("Title Here" "Message here...")<block_end><if_stmt>btn<eq>"number"<block_start>app.numberBox("Title Here" "Message here...")<block_end><block_end>app=gui()<line_sep>app.addButtons(["info" "error" "warning" "yesno" "question"] press)<line_sep>app.addButtons(["ok" "retry" "text" "number"] press)<line_sep>app.go()<line_sep> |
<import_stmt>unittest<import_from_stmt>cozy.polynomials Polynomial<class_stmt>TestPolynomials(unittest.TestCase)<block_start><def_stmt>test_sorting self<block_start>self.assertLess(Polynomial([2019 944 95]) Polynomial([2012 945 95]))<line_sep>self.assertGreater(Polynomial([2012 945 95]) Polynomial([2019 944 95]))<block_end><block_end> |
###############################################################################
#
# The MIT License (MIT)
#
# Copyright (c) Crossbar.io Technologies GmbH
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
###############################################################################
<import_stmt>sys<import_stmt>json<import_stmt>urllib<import_stmt>Cookie<import_from_stmt>twisted.internet reactor<import_from_stmt>twisted.python log<import_from_stmt>twisted.web.server Site<import_from_stmt>twisted.web.static File<import_stmt>autobahn<import_from_stmt>autobahn.util newid utcnow<import_from_stmt>autobahn.websocket http<import_from_stmt>autobahn.twisted.websocket WebSocketServerFactory WebSocketServerProtocol<import_from_stmt>autobahn.twisted.resource WebSocketResource<class_stmt>PersonaServerProtocol(WebSocketServerProtocol)<block_start>"""
WebSocket server protocol that tracks WebSocket connections using HTTP cookies,
and authenticates WebSocket connections using Mozilla Persona.
"""<def_stmt>onConnect self request# This is called during the initial WebSocket opening handshake.
<block_start>protocol,headers=<none> {}<line_sep># our cookie tracking ID
self._cbtid=<none><line_sep># see if there already is a cookie set ..
<if_stmt>'cookie'<in>request.headers<block_start><try_stmt><block_start>cookie=Cookie.SimpleCookie()<line_sep>cookie.load(str(request.headers['cookie']))<block_end><except_stmt>Cookie.CookieError<block_start><pass><block_end><else_stmt><block_start><if_stmt>'cbtid'<in>cookie<block_start>cbtid=cookie['cbtid'].value<if_stmt>cbtid<in>self.factory._cookies<block_start>self._cbtid=cbtid<line_sep>log.msg("Cookie already set: %s"%self._cbtid)<block_end><block_end><block_end><block_end># if no cookie is set, create a new one ..
<if_stmt>self._cbtid<is><none><block_start>self._cbtid=newid()<line_sep>maxAge=86400<line_sep>cbtData={'created':utcnow() 'authenticated':<none> 'maxAge':maxAge 'connections':set()}<line_sep>self.factory._cookies[self._cbtid]=cbtData<line_sep># do NOT add the "secure" cookie attribute! "secure" refers to the
# scheme of the Web page that triggered the WS, not WS itself!!
##
headers['Set-Cookie']='cbtid=%s;max-age=%d'%(self._cbtid maxAge)<line_sep>log.msg("Setting new cookie: %s"%self._cbtid)<block_end># add this WebSocket connection to the set of connections
# associated with the same cookie
self.factory._cookies[self._cbtid]['connections'].add(self)<line_sep># accept the WebSocket connection, speaking subprotocol `protocol`
# and setting HTTP headers `headers`
<return>(protocol headers)<block_end><def_stmt>onOpen self# This is called when initial WebSocket opening handshake has
# been completed.
# see if we are authenticated ..
<block_start>authenticated=self.factory._cookies[self._cbtid]['authenticated']<if_stmt><not>authenticated# .. if not, send authentication request
<block_start>self.sendMessage(json.dumps({'cmd':'AUTHENTICATION_REQUIRED'}))<block_end><else_stmt># .. if yes, send info on authenticated user
<block_start>self.sendMessage(json.dumps({'cmd':'AUTHENTICATED' 'email':authenticated}))<block_end><block_end><def_stmt>onClose self wasClean code reason# This is called when WebSocket connection is gone
# remove this connection from list of connections associated with
# same cookie
<block_start>self.factory._cookies[self._cbtid]['connections'].remove(self)<line_sep># if list gets empty, possibly do something ..
<if_stmt><not>self.factory._cookies[self._cbtid]['connections']<block_start>log.msg("All connections for {} gone".format(self._cbtid))<block_end><block_end><def_stmt>onMessage self payload isBinary# This is called when we receive a WebSocket message
<block_start><if_stmt><not>isBinary<block_start>msg=json.loads(payload)<if_stmt>msg['cmd']<eq>'AUTHENTICATE'# The client did its Mozilla Persona authentication thing
# and now wants to verify the authentication and login.
<block_start>assertion=msg.get('assertion')<line_sep>audience=msg.get('audience')<line_sep># To verify the authentication, we need to send a HTTP/POST
# to Mozilla Persona. When successful, Persona will send us
# back something like:
# {
# "audience": "http://192.168.1.130:8080/",
# "expires": 1393681951257,
# "issuer": "gmail.login.persona.org",
# "email": "<EMAIL>",
# "status": "okay"
# }
headers={'Content-Type':'application/x-www-form-urlencoded'}<line_sep>body=urllib.urlencode({'audience':audience 'assertion':assertion})<import_from_stmt>twisted.web.client getPage<line_sep>d=getPage(url="https://verifier.login.persona.org/verify" method='POST' postdata=body headers=headers)<line_sep>log.msg("Authentication request sent.")<def_stmt>done res<block_start>res=json.loads(res)<if_stmt>res['status']<eq>'okay'# Mozilla Persona successfully authenticated the user
# remember the user's email address. this marks the cookie as
# authenticated
<block_start>self.factory._cookies[self._cbtid]['authenticated']=res['email']<line_sep># inform _all_ WebSocket connections of the successful auth.
msg=json.dumps({'cmd':'AUTHENTICATED' 'email':res['email']})<for_stmt>proto self.factory._cookies[self._cbtid]['connections']<block_start>proto.sendMessage(msg)<block_end>log.msg("Authenticated user {}".format(res['email']))<block_end><else_stmt><block_start>log.msg("Authentication failed: {}".format(res.get('reason')))<line_sep>self.sendMessage(json.dumps({'cmd':'AUTHENTICATION_FAILED' 'reason':res.get('reason')}))<line_sep>self.sendClose()<block_end><block_end><def_stmt>error err<block_start>log.msg("Authentication request failed: {}".format(err.value))<line_sep>self.sendMessage(json.dumps({'cmd':'AUTHENTICATION_FAILED' 'reason':str(err.value)}))<line_sep>self.sendClose()<block_end>d.addCallbacks(done error)<block_end><elif_stmt>msg['cmd']<eq>'LOGOUT'# user wants to logout ..
<block_start><if_stmt>self.factory._cookies[self._cbtid]['authenticated']<block_start>self.factory._cookies[self._cbtid]['authenticated']=<false><line_sep># inform _all_ WebSocket connections of the logout
msg=json.dumps({'cmd':'LOGGED_OUT'})<for_stmt>proto self.factory._cookies[self._cbtid]['connections']<block_start>proto.sendMessage(msg)<block_end><block_end><block_end><else_stmt><block_start>log.msg("unknown command {}".format(msg))<block_end><block_end><block_end><block_end><class_stmt>PersonaServerFactory(WebSocketServerFactory)<block_start>"""
WebSocket server factory with cookie/sessions map.
"""<line_sep>protocol=PersonaServerProtocol<def_stmt>__init__ self url<block_start>WebSocketServerFactory.__init__(self url)<line_sep># map of cookies
self._cookies={}<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>log.startLogging(sys.stdout)<line_sep>print("Running Autobahn|Python {}".format(autobahn.version))<line_sep># our WebSocket server factory
factory=PersonaServerFactory("ws://127.0.0.1:8080")<line_sep># we serve static files under "/" ..
root=File(".")<line_sep># .. and our WebSocket server under "/ws" (note that Twisted uses
# bytes for URIs)
resource=WebSocketResource(factory)<line_sep>root.putChild(b"ws" resource)<line_sep># run both under one Twisted Web Site
site=Site(root)<line_sep>site.log=<lambda>_:<none># disable any logging
reactor.listenTCP(8080 site)<line_sep>reactor.run()<block_end> |
<import_stmt>subprocess re sys<def_stmt>get_coref_score metric path_to_scorer gold=<none> preds=<none><block_start>output=subprocess.check_output(["perl" path_to_scorer metric preds gold]).decode("utf-8")<line_sep>output=output.split("\n")[-3]<line_sep>matcher=re.search("Coreference: Recall: \(.*?\) (.*?)% Precision: \(.*?\) (.*?)% F1: (.*?)%" output)<if_stmt>matcher<is><not><none><block_start>recall=float(matcher.group(1))<line_sep>precision=float(matcher.group(2))<line_sep>f1=float(matcher.group(3))<block_end><return>recall precision f1<block_end><def_stmt>get_conll path_to_scorer gold=<none> preds=<none><block_start>bcub_r,bcub_p,bcub_f=get_coref_score("bcub" path_to_scorer gold preds)<line_sep>muc_r,muc_p,muc_f=get_coref_score("muc" path_to_scorer gold preds)<line_sep>ceaf_r,ceaf_p,ceaf_f=get_coref_score("ceafe" path_to_scorer gold preds)<line_sep>print("bcub:\t%.1f"%bcub_f)<line_sep>print("muc:\t%.1f"%muc_f)<line_sep>print("ceaf:\t%.1f"%ceaf_f)<line_sep>avg=(bcub_f+muc_f+ceaf_f)/3.<line_sep>print("Average F1: %.1f"%(avg))<line_sep># Generate Latex table
# print("%.1f&%.1f&%.1f&%.1f" % (bcub_f, muc_f, ceaf_f, avg))
<return>bcub_f avg<block_end><if_stmt>__name__<eq>"__main__"<block_start>goldFile=sys.argv[1]<line_sep>predFile=sys.argv[2]<line_sep>scorer=sys.argv[3]<line_sep>bcub_f,avg=get_conll(scorer gold=goldFile preds=predFile)<block_end> |
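# Usage sketch (added for illustration; the paths are placeholders, and
# path_to_scorer must point at the official CoNLL reference scorer script,
# usually called scorer.pl):
def score_dev_set(scorer="reference-coreference-scorers/scorer.pl",
                  gold="dev.gold.conll", preds="dev.pred.conll"):
    bcub_f1, conll_avg = get_conll(scorer, gold=gold, preds=preds)
    print("CoNLL average F1: %.1f" % conll_avg)
    return conll_avg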
# Huffman Tree
<class_stmt>node<block_start><def_stmt>__init__ self freq symbol left=<none> right=<none># frequency of the symbol
<block_start>self.freq=freq<line_sep># symbol (character)
self.symbol=symbol<line_sep># node to the left of the current node
self.left=left<line_sep># node to the right of the current node
self.right=right<line_sep># tree direction (0/1)
self.huff=''<block_end><block_end># Utility function to print
# the huffman codes for all symbols
# in the new huffman tree that will be created
<def_stmt>printNodes node val=''# huffman code for the current node
<block_start>newVal=val+str(node.huff)<line_sep># if the node is not a leaf of the
# tree, then walk down into it
# until a leaf is reached
<if_stmt>(node.left)<block_start>printNodes(node.left newVal)<block_end><if_stmt>(node.right)<block_start>printNodes(node.right newVal)<block_end># if the node is a leaf of the tree
# then print the huffman code
<if_stmt>(<not>node.left<and><not>node.right)<block_start>print(f"{node.symbol} -> {newVal}")<block_end><block_end># characters for the huffman tree
chars=['a' 'b' 'c' 'd' 'e' 'f']<line_sep># frequency of the characters
freq=[5 9 12 13 16 45]<line_sep># list containing the unused nodes
nodes=[]<if_stmt>__name__<eq>'__main__'# converting characters and frequencies into
# huffman tree nodes
<block_start><for_stmt>x range(len(chars))<block_start>nodes.append(node(freq[x] chars[x]))<block_end><while_stmt>len(nodes)<g>1# sort all nodes in ascending order
# based on their frequency
<block_start>nodes=sorted(nodes key=<lambda>x:x.freq)<line_sep># select the two smallest nodes
left=nodes[0]<line_sep>right=nodes[1]<line_sep># assign a direction value to these nodes
# (right or left)
left.huff=0<line_sep>right.huff=1<line_sep># combine the 2 smallest nodes into a new parent node
# for them.
newNode=node(left.freq+right.freq left.symbol+right.symbol left right)<line_sep># remove the 2 nodes and add the parent node
# as a new node above the others
nodes.remove(left)<line_sep>nodes.remove(right)<line_sep>nodes.append(newNode)<block_end># Huffman tree ready!
printNodes(nodes[0])<block_end>
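# Added variation (illustration only, not part of the original script): collect the
# codes into a dict instead of printing them, so a message can actually be encoded.
# The helper name collect_codes is ours.
def collect_codes(n, prefix='', table=None):
    if table is None:
        table = {}
    prefix = prefix + str(n.huff)
    if n.left:
        collect_codes(n.left, prefix, table)
    if n.right:
        collect_codes(n.right, prefix, table)
    if not n.left and not n.right:
        # leaf: the accumulated bit string is this symbol's code
        table[n.symbol] = prefix
    return table

# After the tree above has been built (inside the __main__ block):
#     codes = collect_codes(nodes[0])
#     encoded = ''.join(codes[ch] for ch in 'abc')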
<import_from_stmt>string ascii_letters<import_stmt>textwrap<import_from_stmt>fontTools.misc.testTools getXML<import_from_stmt>fontTools subset<import_from_stmt>fontTools.fontBuilder FontBuilder<import_from_stmt>fontTools.pens.ttGlyphPen TTGlyphPen<import_from_stmt>fontTools.ttLib TTFont newTable<import_from_stmt>fontTools.subset.svg NAMESPACES ranges<import_stmt>pytest<line_sep>etree=pytest.importorskip("lxml.etree")<line_sep>@pytest.fixture<def_stmt>empty_svg_font <block_start>glyph_order=[".notdef"]+list(ascii_letters)<line_sep>pen=TTGlyphPen(glyphSet=<none>)<line_sep>pen.moveTo((0 0))<line_sep>pen.lineTo((0 500))<line_sep>pen.lineTo((500 500))<line_sep>pen.lineTo((500 0))<line_sep>pen.closePath()<line_sep>glyph=pen.glyph()<line_sep>glyphs={g:glyph<for>g glyph_order}<line_sep>fb=FontBuilder(unitsPerEm=1024 isTTF=<true>)<line_sep>fb.setupGlyphOrder(glyph_order)<line_sep>fb.setupCharacterMap({ord(c):c<for>c ascii_letters})<line_sep>fb.setupGlyf(glyphs)<line_sep>fb.setupHorizontalMetrics({g:(500 0)<for>g glyph_order})<line_sep>fb.setupHorizontalHeader()<line_sep>fb.setupOS2()<line_sep>fb.setupPost()<line_sep>fb.setupNameTable({"familyName":"TestSVG" "styleName":"Regular"})<line_sep>svg_table=newTable("SVG ")<line_sep>svg_table.docList=[]<line_sep>fb.font["SVG "]=svg_table<line_sep><return>fb.font<block_end><def_stmt>new_svg **attrs<block_start><return>etree.Element("svg" {"xmlns":NAMESPACES["svg"] **attrs})<block_end><def_stmt>_lines s<block_start><return>textwrap.dedent(s).splitlines()<block_end>@pytest.mark.parametrize("gids, retain_gids, expected_xml" [# keep four glyphs in total, don't retain gids, which thus get remapped
("2,4-6" <false> _lines("""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph1" d="M2,2"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="2" startGlyphID="2">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph2" d="M4,4"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="3" startGlyphID="3">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph3" d="M5,5"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="4" startGlyphID="4">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph4" d="M6,6"/></svg>]]>
</svgDoc>
""") ) # same four glyphs, but we now retain gids
("2,4-6" <true> _lines("""\
<svgDoc endGlyphID="2" startGlyphID="2">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph2" d="M2,2"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="4" startGlyphID="4">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph4" d="M4,4"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="5" startGlyphID="5">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph5" d="M5,5"/></svg>]]>
</svgDoc>
<svgDoc endGlyphID="6" startGlyphID="6">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><path id="glyph6" d="M6,6"/></svg>]]>
</svgDoc>
""") ) ] )<def_stmt>test_subset_single_glyph_per_svg empty_svg_font tmp_path gids retain_gids expected_xml<block_start>font=empty_svg_font<line_sep>svg_docs=font["SVG "].docList<for_stmt>i range(1 11)<block_start>svg=new_svg()<line_sep>etree.SubElement(svg "path" {"id":f"glyph{i}" "d":f"M{i},{i}"})<line_sep>svg_docs.append((etree.tostring(svg).decode() i i))<block_end>svg_font_path=tmp_path/"TestSVG.ttf"<line_sep>font.save(svg_font_path)<line_sep>subset_path=svg_font_path.with_suffix(".subset.ttf")<line_sep>subset.main([str(svg_font_path) f"--output-file={subset_path}" f"--gids={gids}" "--retain_gids"<if>retain_gids<else>"--no-retain_gids" ])<line_sep>subset_font=TTFont(subset_path)<assert_stmt>getXML(subset_font["SVG "].toXML subset_font)<eq>expected_xml<block_end># This contains a bunch of cross-references between glyphs, paths, gradients, etc.
# Note the path coordinates are completely made up and not meant to be rendered.
# We only care about the tree structure, not it's visual content.
COMPLEX_SVG="""\
<svg xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<radialGradient id="rg2" cx="50" cy="50" r="10" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</radialGradient>
<radialGradient id="rg3" xlink:href="#rg2" r="20"/>
<radialGradient id="rg4" xlink:href="#rg3" cy="100"/>
<path id="p1" d="M3,3"/>
<clipPath id="c1">
<circle cx="10" cy="10" r="1"/>
</clipPath>
</defs>
<g id="glyph1">
<g id="glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
<g id="glyph3">
<use xlink:href="#p1"/>
</g>
<use id="glyph4" xlink:href="#glyph1" x="10"/>
<use id="glyph5" xlink:href="#glyph2" y="-10"/>
<g id="glyph6">
<use xlink:href="#p1" transform="scale(2, 1)"/>
</g>
<g id="group1">
<g id="glyph7">
<path id="p2" d="M4,4"/>
</g>
<g id=".glyph7">
<path d="M4,4"/>
</g>
<g id="glyph8">
<g id=".glyph8">
<path id="p3" d="M5,5"/>
<path id="M6,6"/>
</g>
<path d="M7,7"/>
</g>
<g id="glyph9">
<use xlink:href="#p2"/>
</g>
<g id="glyph10">
<use xlink:href="#p3"/>
</g>
</g>
<g id="glyph11">
<path d="M7,7" fill="url(#rg4)"/>
</g>
<g id="glyph12">
<path d="M7,7" style="fill:url(#lg1);stroke:red;clip-path:url(#c1)"/>
</g>
</svg>
"""<line_sep>@pytest.mark.parametrize("subset_gids, expected_xml" [# we only keep gid=2, with 'glyph2' defined inside 'glyph1': 'glyph2'
# is renamed 'glyph1' to match the new subset indices, and the old 'glyph1'
# is kept (as it contains 'glyph2') but renamed '.glyph1' to avoid clash
("2" _lines("""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id=".glyph1">
<g id="glyph1">
<path d="M0,0"/>
</g>
</g>
</svg>
]]>
</svgDoc>
""") ) # we keep both gid 1 and 2: the glyph elements' ids stay as they are (only the
# range endGlyphID change); a gradient is kept since it's referenced by glyph1
("1,2" _lines("""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
</defs>
<g id="glyph1">
<g id="glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
</svg>
]]>
</svgDoc>
""") ) (# both gid 3 and 6 refer (via <use xlink:href="#...") to path 'p1', which
# is thus kept in <defs>; the glyph ids and range start/end are renumbered.
"3,6" _lines("""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<path id="p1" d="M3,3"/>
</defs>
<g id="glyph1">
<use xlink:href="#p1"/>
</g>
<g id="glyph2">
<use xlink:href="#p1" transform="scale(2, 1)"/>
</g>
</svg>
]]>
</svgDoc>
""") ) (# 'glyph4' uses the whole 'glyph1' element (translated); we keep the latter
# renamed to avoid clashes with new gids
"3-4" _lines("""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<path id="p1" d="M3,3"/>
</defs>
<g id=".glyph1">
<g id=".glyph2">
<path d="M0,0"/>
</g>
<g>
<path d="M1,1" fill="url(#lg1)"/>
<path d="M2,2"/>
</g>
</g>
<g id="glyph1">
<use xlink:href="#p1"/>
</g>
<use id="glyph2" xlink:href="#.glyph1" x="10"/>
</svg>
]]>
</svgDoc>
""") ) (# 'glyph9' uses a path 'p2' defined inside 'glyph7', the latter is excluded
# from our subset, thus gets renamed '.glyph7'; an unrelated element with
# same id=".glyph7" doesn't clash because it was dropped.
# Similarly 'glyph10' uses path 'p3' defined inside 'glyph8', also excluded
# from subset and prefixed with '.'. But since an id=".glyph8" is already
# used in the doc, we append a .{digit} suffix to disambiguate.
"9,10" _lines("""\
<svgDoc endGlyphID="2" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<g id="group1">
<g id=".glyph7">
<path id="p2" d="M4,4"/>
</g>
<g id=".glyph8.1">
<g id=".glyph8">
<path id="p3" d="M5,5"/>
</g>
</g>
<g id="glyph1">
<use xlink:href="#p2"/>
</g>
<g id="glyph2">
<use xlink:href="#p3"/>
</g>
</g>
</svg>
]]>
</svgDoc>
""") ) (# 'glyph11' uses gradient 'rg4' which inherits from 'rg3', which inherits
# from 'rg2', etc.
"11" _lines("""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<radialGradient id="rg2" cx="50" cy="50" r="10" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</radialGradient>
<radialGradient id="rg3" xlink:href="#rg2" r="20"/>
<radialGradient id="rg4" xlink:href="#rg3" cy="100"/>
</defs>
<g id="glyph1">
<path d="M7,7" fill="url(#rg4)"/>
</g>
</svg>
]]>
</svgDoc>
""") ) (# 'glyph12' contains a style attribute with inline CSS declarations that
# contains references to a gradient fill and a clipPath: we keep those
"12" _lines("""\
<svgDoc endGlyphID="1" startGlyphID="1">
<![CDATA[<svg xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
<defs>
<linearGradient id="lg1" x1="50" x2="50" y1="80" y2="80" gradientUnits="userSpaceOnUse">
<stop stop-color="#A47B62" offset="0"/>
<stop stop-color="#AD8264" offset="1.0"/>
</linearGradient>
<clipPath id="c1">
<circle cx="10" cy="10" r="1"/>
</clipPath>
</defs>
<g id="glyph1">
<path d="M7,7" style="fill:url(#lg1);stroke:red;clip-path:url(#c1)"/>
</g>
</svg>
]]>
</svgDoc>
""") ) ] )<def_stmt>test_subset_svg_with_references empty_svg_font tmp_path subset_gids expected_xml<block_start>font=empty_svg_font<line_sep>font["SVG "].docList.append((COMPLEX_SVG 1 12))<line_sep>svg_font_path=tmp_path/"TestSVG.ttf"<line_sep>font.save(svg_font_path)<line_sep>subset_path=svg_font_path.with_suffix(".subset.ttf")<line_sep>subset.main([str(svg_font_path) f"--output-file={subset_path}" f"--gids={subset_gids}" "--pretty-svg" ])<line_sep>subset_font=TTFont(subset_path)<if_stmt>expected_xml<is><not><none><block_start><assert_stmt>getXML(subset_font["SVG "].toXML subset_font)<eq>expected_xml<block_end><else_stmt><block_start><assert_stmt>"SVG "<not><in>subset_font<block_end><block_end><def_stmt>test_subset_svg_empty_table empty_svg_font tmp_path<block_start>font=empty_svg_font<line_sep>svg=new_svg()<line_sep>etree.SubElement(svg "rect" {"id":"glyph1" "x":"1" "y":"2"})<line_sep>font["SVG "].docList.append((etree.tostring(svg).decode() 1 1))<line_sep>svg_font_path=tmp_path/"TestSVG.ttf"<line_sep>font.save(svg_font_path)<line_sep>subset_path=svg_font_path.with_suffix(".subset.ttf")<line_sep># there's no gid=2 in SVG table, drop the empty table
subset.main([str(svg_font_path) f"--output-file={subset_path}" f"--gids=2"])<assert_stmt>"SVG "<not><in>TTFont(subset_path)<block_end><def_stmt>test_subset_svg_missing_glyph empty_svg_font tmp_path<block_start>font=empty_svg_font<line_sep>svg=new_svg()<line_sep>etree.SubElement(svg "rect" {"id":"glyph1" "x":"1" "y":"2"})<line_sep>font["SVG "].docList.append((etree.tostring(svg).decode() 1 # the range endGlyphID=2 declares two glyphs however our svg contains
# only one glyph element with id="glyph1", the "glyph2" one is absent.
# Technically this would be invalid according to the OT-SVG spec.
2 ))<line_sep>svg_font_path=tmp_path/"TestSVG.ttf"<line_sep>font.save(svg_font_path)<line_sep>subset_path=svg_font_path.with_suffix(".subset.ttf")<line_sep># make sure we don't crash when we don't find the expected "glyph2" element
subset.main([str(svg_font_path) f"--output-file={subset_path}" f"--gids=1"])<line_sep>subset_font=TTFont(subset_path)<assert_stmt>getXML(subset_font["SVG "].toXML subset_font)<eq>['<svgDoc endGlyphID="1" startGlyphID="1">' ' <![CDATA[<svg xmlns="http://www.w3.org/2000/svg"><rect id="glyph1" x="1" y="2"/></svg>]]>' "</svgDoc>" ]<line_sep># ignore the missing gid even if included in the subset; in this test case we
# end up with an empty svg document--which is dropped, along with the empty table
subset.main([str(svg_font_path) f"--output-file={subset_path}" f"--gids=2"])<assert_stmt>"SVG "<not><in>TTFont(subset_path)<block_end>@pytest.mark.parametrize("ints, expected_ranges" [(() []) ((0 ) [(0 0)]) ((0 1) [(0 1)]) ((1 1 1 1) [(1 1)]) ((1 3) [(1 1) (3 3)]) ((4 2 1 3) [(1 4)]) ((1 2 4 5 6 9 13 14 15) [(1 2) (4 6) (9 9) (13 15)]) ] )<def_stmt>test_ranges ints expected_ranges<block_start><assert_stmt>list(ranges(ints))<eq>expected_ranges<block_end> |
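# Usage sketch (added for illustration; the font file name is a placeholder).
# It mirrors the subset.main() invocations the tests above use for OT-SVG fonts.
def subset_svg_font(font_path="MyColorFont.ttf", gids="1,2"):
    out_path = font_path.replace(".ttf", ".subset.ttf")
    subset.main([font_path, f"--output-file={out_path}", f"--gids={gids}", "--no-retain_gids"])
    return out_path
# The equivalent command line should be:
# pyftsubset MyColorFont.ttf --output-file=MyColorFont.subset.ttf --gids=1,2 --no-retain_gids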
# -*- coding: utf-8 -*-
"""
Tests for vector_tile/polygon.py
"""<import_stmt>unittest<import_from_stmt>mapbox_vector_tile.polygon make_it_valid<import_from_stmt>shapely wkt<import_stmt>os<class_stmt>TestPolygonMakeValid(unittest.TestCase)<block_start><def_stmt>test_dev_errors self<block_start>test_dir=os.path.dirname(os.path.realpath(__file__))<with_stmt>open(os.path.join(test_dir 'errors.wkt'))<as>fh<block_start><for_stmt>line fh<block_start>geom=wkt.loads(line)<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep>self.assertTrue(fixed.area<g>0.9<times>abs(geom.area))<block_end><block_end><block_end><def_stmt>test_multipolygon_with_flipped_ring self<block_start>geom=wkt.loads("""MULTIPOLYGON(
((0 0, 0 4, 4 4, 4 0, 0 0), (1 1, 1 3, 3 3, 3 1, 1 1)),
((5 0, 9 0, 9 4, 5 4, 5 0), (6 1, 6 3, 8 3, 8 1, 6 1))
)""")<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep>self.assertEquals(24 fixed.area)<block_end><def_stmt>test_polygon_self_touching self<block_start>geom=wkt.loads("""POLYGON(
(1 0, 5 0, 5 5, 0 5, 0 2, 2 2, 2 4, 3 4, 1 0)
)""")<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep>self.assertEquals(21 fixed.area)<block_end><def_stmt>test_polygon_self_touching_inner self<block_start>geom=wkt.loads("""POLYGON(
(-1 -1, -1 6, 6 6, 6 -1, -1 -1),
(1 0, 5 0, 5 5, 0 5, 0 2, 2 2, 2 4, 3 4, 1 0)
)""")<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep>self.assertEquals(28 fixed.area)<block_end><def_stmt>test_polygon_inners_touching self<block_start>geom=wkt.loads("""POLYGON(
(0 0, 6 0, 6 6, 0 6, 0 0),
(1 1, 1 3, 3 3, 3 1, 1 1),
(3 3, 3 5, 5 5, 5 3, 3 3)
)""")<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep>self.assertEquals(28 fixed.area)<block_end><def_stmt>test_polygon_inner_touching_outer self<block_start>geom=wkt.loads("""POLYGON(
(0 0, 3 0, 3 3, 0 3, 0 0),
(1 1, 2 3, 2 1, 1 1)
)""")<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep>self.assertEquals(8 fixed.area)<block_end><def_stmt>test_polygon_two_inners_touching_outer self<block_start>geom=wkt.loads("""POLYGON(
(0 0, 6 0, 6 3, 0 3, 0 0),
(1 1, 2 3, 2 1, 1 1),
(4 1, 5 3, 5 1, 4 1)
)""")<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep>self.assertEquals(16 fixed.area)<block_end><def_stmt>test_polygon_inners_touching_colinear self<block_start>geom=wkt.loads("""POLYGON(
(0 0, 6 0, 6 6, 0 6, 0 0),
(1 1, 1 3, 3 4, 3 1, 1 1),
(3 2, 3 5, 5 5, 5 3, 3 2)
)""")<line_sep>self.assertFalse(geom.is_valid)<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep>self.assertEquals(26 fixed.area)<block_end><def_stmt>test_polygon_inner_colinear_outer self<block_start>geom=wkt.loads("""POLYGON(
(0 0, 3 0, 3 3, 0 3, 0 0),
(1 1, 1 3, 2 3, 2 1, 1 1)
)""")<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep>self.assertEquals(7 fixed.area)<block_end><def_stmt>test_polygon_many_inners_touching self<block_start>geom=wkt.loads("""POLYGON(
(0 0, 5 0, 5 5, 0 5, 0 0),
(1 1, 1 2, 3 2, 1 1),
(3 1, 3 3, 4 1, 3 1),
(2 2, 1 4, 2 4, 2 2),
(2 3, 4 4, 4 3, 2 3)
)""")<line_sep>self.assertFalse(geom.is_valid)<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep>self.assertEquals(21 fixed.area)<block_end><def_stmt>test_polygon_inner_spike self<block_start>geom=wkt.loads("""POLYGON(
(0 0, 3 0, 3 4, 0 4, 0 0),
(1 1, 1 3, 2 3, 2 2, 1 2, 2 2, 2 1, 1 1)
)""")<line_sep>self.assertFalse(geom.is_valid)<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep>self.assertEquals(10 fixed.area)<block_end><def_stmt>test_polygon_disconnected_inner self<block_start>geom=wkt.loads("""POLYGON(
(0 0, 5 0, 5 5, 0 5, 0 0),
(1 1, 1 2, 2 2, 1 1),
(2 1, 2 2, 3 2, 2 1),
(3 1, 3 2, 4 2, 3 1),
(1 2, 1 3, 2 3, 1 2),
(2 2, 2 3, 3 3, 2 2),
(3 2, 3 3, 4 3, 3 2),
(1 3, 1 4, 2 4, 1 3),
(2 3, 2 4, 3 4, 2 3),
(3 3, 3 4, 4 4, 3 3)
)""")<line_sep>self.assertFalse(geom.is_valid)<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep>self.assertEquals(20.5 fixed.area)<block_end><def_stmt>test_polygon_disconnected_outer self<block_start>geom=wkt.loads("""POLYGON(
(0 0, 4 0, 4 3, 3 3, 3 2, 2 3, 1 2, 1 3, 0 3, 0 0),
(1 1, 1 2, 3 2, 3 1, 1 1)
)""")<line_sep>self.assertFalse(geom.is_valid)<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep>self.assertEquals(9 fixed.area)<block_end><def_stmt>test_polygon_ring_of_inners self<block_start>geom=wkt.loads("""POLYGON(
(0 0, 4 0, 4 4, 0 4, 0 0),
(1 1, 1 2, 2 1, 1 1),
(1 2, 1 3, 2 3, 1 2),
(2 3, 3 3, 3 2, 2 3),
(2 1, 3 2, 3 1, 2 1)
)""")<line_sep>self.assertFalse(geom.is_valid)<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep>self.assertEquals(14 fixed.area)<block_end><def_stmt>test_polygon_ring_of_inners_2 self<block_start>geom=wkt.loads("""POLYGON(
(0 0, 5 0, 5 5, 0 5, 0 0),
(1 3, 1 4, 2 4, 1 3),
(3 3, 4 3, 4 2, 3 3),
(1 1, 1 2, 2 1, 1 1),
(1 2, 1 3, 2 3, 1 2),
(2 3, 3 3, 3 2, 2 3),
(2 1, 3 2, 3 1, 2 1)
)""")<line_sep>self.assertFalse(geom.is_valid)<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep>self.assertEquals(22 fixed.area)<block_end><def_stmt>test_polygon_inners_crossing_outer self<block_start>geom=wkt.loads("""POLYGON (
(2325 1015, 2329 1021, 2419 1057, 2461 944, 2369 907, 2325 1015),
(2329 1012, 2370 909, 2457 944, 2417 1050, 2329 1012),
(2410 1053, 2410 1052, 2412 1053, 2411 1054, 2410 1053),
(2378 1040, 2378 1039, 2379 1040, 2379 1041, 2378 1040),
(2369 1037, 2370 1036, 2371 1036, 2371 1038, 2369 1037),
(2361 1034, 2362 1033, 2363 1033, 2363 1034, 2361 1034),
(2353 1031, 2354 1029, 2355 1030, 2354 1031, 2353 1031),
(2337 1024, 2338 1023, 2339 1023, 2338 1025, 2337 1024)
)""")<line_sep>self.assertFalse(geom.is_valid)<line_sep>fixed=make_it_valid(geom)<line_sep>self.assertTrue(fixed.is_valid)<line_sep># different versions of GEOS hit this bug in slightly different ways,
# meaning that some inners get included and some don't, depending on
# the version. therefore, we need quite a wide range of acceptable
# answers.
#
# the main part of this polygon (outer - largest inner) has area 1551,
# and the smaller inners sum up to area 11, so we'll take +/-6 from
# 1545.
self.assertAlmostEqual(1545 fixed.area delta=6)<block_end><block_end> |
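# Usage sketch (added for illustration), mirroring test_polygon_self_touching above:
# make_it_valid() repairs the self-touching ring into a valid polygon.
def demo_make_it_valid():
    poly = wkt.loads("POLYGON((1 0, 5 0, 5 5, 0 5, 0 2, 2 2, 2 4, 3 4, 1 0))")
    fixed = make_it_valid(poly)
    print(fixed.is_valid, fixed.area)  # per the test above: True, 21.0
    return fixed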
#! /usr/bin/env python
# Copyright 2017-2020 Fitbit, Inc
# SPDX-License-Identifier: Apache-2.0
#####################################################################
# This script post-processes the XCode project generated
# by CMake, so that it no longer contains absolute paths.
# It also remaps UUIDs so that they are stable across invocations
# of this script, which allows the generated project to be put under
# source code control.
#####################################################################
#####################################################################
# Imports
#####################################################################
<import_stmt>sys<import_stmt>re<import_stmt>os<import_stmt>shutil<line_sep>#####################################################################
# Constants
#####################################################################
XCODE_PROJECT_FILE_NAME="project.pbxproj"<line_sep>#####################################################################
<def_stmt>print_usage_and_exit <block_start>sys.stderr.write("""\
Usage: gg_post_process_xcode_project.py <project_file_in> <project_file_out> <gg_root> <gg_variant>
Where <project_file_in> is the XCode project generated by CMake,
<project_file_out> is the post-processed XCode project generated by
this script, <gg_root> is the directory where the GG repo is checked
out, and <gg_variant> is 'iOS' or 'macOS'
""")<line_sep>sys.exit(1)<block_end>#####################################################################
<def_stmt>print_error error<block_start>sys.stderr.write("ERROR: %s\n"%(error))<block_end>#####################################################################
<def_stmt>replace_substrings original replacements<block_start>cursor=0<line_sep>segments=[]<for_stmt>replacement replacements<block_start>start,end,string=replacement<line_sep>segments.append(original[cursor:start])<line_sep>segments.append(string)<line_sep>cursor=end<block_end>segments.append(original[cursor:])<line_sep><return>"".join(segments)<block_end>#####################################################################
# Even after making paths relative, we still have some include paths
# that point to CMake-generated directories.
# They have the form: xp/build/cmake/<platform>
# We replace them by an equivalent, pointing to the `generated` subdir
# of xp/build
#####################################################################
<def_stmt>fix_header_search_paths match<block_start><return>match.group(1)+match.group(2).replace('xp/build/cmake' 'xp/build/generated')<block_end>#####################################################################
<def_stmt>process_project_file input_file output_file gg_root uuid_prefix# Read the entire project file
<block_start>project=open(os.path.join(input_file XCODE_PROJECT_FILE_NAME) "r").read()<line_sep># Remove SYMROOT entries, so that we use the default location for XCode
project=re.sub(r'(SYMROOT = )' r'// Removed by GG script \1' project)<line_sep># Remove CONFIGURATION_BUILD_DIR entries
project=re.sub(r'(CONFIGURATION_BUILD_DIR = )' r'// Removed by GG script \1' project)<line_sep># Replace defaultConfigurationName to Release
project=re.sub(r'(defaultConfigurationName = Debug)' r'defaultConfigurationName = Release' project)<line_sep># Compute the relative path from the output project to the GG root
abs_output_dir_path=os.path.abspath(os.path.dirname(output_file))<line_sep>abs_gg_root_path=os.path.abspath(gg_root)<line_sep>abs_gg_xp_root_path=os.path.join(abs_gg_root_path "xp")<line_sep>gg_xp_root_relpath=os.path.relpath(abs_gg_xp_root_path abs_output_dir_path)<line_sep># Rewrite the `projectDirPath` definition in the project
project_dir_path="projectDirPath = "+gg_xp_root_relpath+";"<line_sep>project=re.sub(r'projectDirPath = \S+;' project_dir_path project 1)<line_sep># Replace absolute paths with paths relative to `projectDirPath`
project=re.sub(abs_gg_root_path '..' project)<line_sep># Replace references to object files and libraries.
# They have the form: ../xp/<some-path>/<prefix>$(EFFECTIVE_PLATFORM_NAME)/<build-variant>/<object-name>
# We replace them with just the object name, relative to the built products directory.
# NOTE: those entries can end with a quote, or a whitespace
project=re.sub(r'(\.\./xp/\S+\$\(EFFECTIVE_PLATFORM_NAME\)/[^/ ]+/)([^/" ]+[" ])' r'$(BUILT_PRODUCTS_DIR)/\2' project)<line_sep># Scan for all entity IDs and store them in a map, associating them with
# a number equal to their order or appearance in the file
# Entity IDs generated by CMake: we're looking for a block of 24 uppercase hex chars
# preceded by whitespace and followed by whitespace or a separator
entity_id_pattern=re.compile(re.compile(r'(\s)([0-9A-F]{24})(\s|[,;])'))<line_sep>entity_id_map={}<line_sep>entity_ids=entity_id_pattern.findall(project)<for_stmt>(_ entity_id _) entity_ids<block_start><if_stmt>entity_id<not><in>entity_id_map<block_start>entity_id_map[entity_id]="%s%022X"%(uuid_prefix len(entity_id_map))<block_end><block_end># Replace IDs with their mapped value
project=entity_id_pattern.sub(<lambda>match:match.group(1)+entity_id_map[match.group(2)]+match.group(3) project)<line_sep># Fix HEADER_SEARCH_PATHS elements
# Look for: HEADER_SEARCH_PATHS = (...)
project=re.sub(r'(HEADER_SEARCH_PATHS\s*=\s*\()([^\(\)]+)' fix_header_search_paths project)<line_sep># Fix Info.plist references
project=re.sub(r'(INFOPLIST_FILE\s*=\s*)"(.*GoldenGateXP\.dir/Info.plist)"' r'\1"bundle/Info.plist"' project)<line_sep># Replace the shell script generated by CMake for the gg-common target
# For simplicity, we just look for a `shellScript` entry with the term `gg-common` in it
gg_common_shell_script='shellScript = "$PROJECT_DIR/build/scripts/gg_process_version_info_header.py \\\"$PROJECT_FILE_PATH/..\\\"";'<line_sep>gg_common_input_paths='inputPaths = ( "$(BUILT_PRODUCTS_DIR)" );'<line_sep>gg_common_output_paths='outputPaths = ();'<line_sep>project=re.sub(r'shellScript\s*=\s*".*gg-common_preBuildCommands.*";' gg_common_shell_script+"\n"+gg_common_input_paths+"\n"+gg_common_output_paths project)<line_sep># Replace the ALL_BUILD shell script so that it doesn't depend on a CMake-generated script
# We use a script file that's just a comment, because we don't need to actually do anything
all_build_shell_script='shellScript = "# replaced by gg_post_process_xcode_project.py";'<line_sep>project=re.sub(r'shellScript\s*=\s*".*ALL_BUILD_cmakeRulesBuildPhase.*";' all_build_shell_script project)<line_sep>open(os.path.join(output_file XCODE_PROJECT_FILE_NAME) "w+").write(project)<block_end>#####################################################################
<def_stmt>copy_generated_files gg_root gg_variant_dir<block_start><for_stmt>filename ["config/lwipopts.h"]<block_start>src=os.path.join(gg_root "xp/build/cmake" gg_variant_dir filename)<line_sep>dst=os.path.join(gg_root "xp/build/generated" gg_variant_dir filename)<if_stmt><not>os.path.exists(os.path.dirname(dst))<block_start>os.makedirs(os.path.dirname(dst))<block_end>shutil.copyfile(src dst)<block_end><block_end>#####################################################################
# main
#####################################################################
<def_stmt>main <block_start><if_stmt>len(sys.argv)<ne>5<block_start>print_error("ERROR: invalid/missing arguments")<line_sep>print_usage_and_exit()<block_end># Assign the parameters
input_file=sys.argv[1]<line_sep>output_file=sys.argv[2]<line_sep>gg_root=sys.argv[3]<line_sep>gg_variant=sys.argv[4]<line_sep># Check that the input and output project files are XCode projects (XCode Project files are directories that
# contain a project.pbxproj file, and other files). For the output, it is Ok that the project.pbxproj file
# doesn't yet exist, since we will be writing it
<if_stmt><not>os.path.isfile(os.path.join(input_file XCODE_PROJECT_FILE_NAME))<block_start>print_error("ERROR: input file is not a valid XCode project")<line_sep><return>1<block_end><if_stmt><not>os.path.isdir(output_file)<block_start>print_error("ERROR: output file is not a valid XCode project")<line_sep><return>1<block_end><if_stmt><not>os.path.isdir(gg_root)<block_start>print_error("ERROR: Golden Gate root isn't a directory")<line_sep><return>1<block_end># Pick a UUID prefix based on the variant, to try and avoid having the same UUID in two
# different project files.
uuid_prefix_map={'iOS':'01' 'macOS':'02'}<line_sep>uuid_prefix=uuid_prefix_map.get(gg_variant '00')<line_sep>process_project_file(input_file output_file gg_root uuid_prefix)<line_sep>gg_variant_dir='xcode-'+gg_variant<line_sep>copy_generated_files(gg_root gg_variant_dir)<line_sep><return>0<block_end><if_stmt>__name__<eq>'__main__'<block_start>sys.exit(main())<block_end> |
# coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python2, python3
"""End-to-end test for ImageNet.
Tests for imagenet.resnet50_train, run_predict, run_temp_scaling, and
run_metrics. Real data doesn't work under blaze, so execute the test binary
directly.
"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>os<import_stmt>tempfile<import_from_stmt>absl flags<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing parameterized<import_stmt>tensorflow.compat.v2<as>tf<import_from_stmt>uq_benchmark_2019.imagenet resnet50_train# pylint: disable=line-too-long
<import_from_stmt>uq_benchmark_2019.imagenet run_metrics<import_from_stmt>uq_benchmark_2019.imagenet run_predict<import_from_stmt>uq_benchmark_2019.imagenet run_temp_scaling<line_sep>gfile=tf.io.gfile<line_sep>flags.DEFINE_bool('fake_data' <true> 'Use dummy random data.')<line_sep>flags.DEFINE_bool('fake_training' <true> 'Train with trivial number of steps.')<line_sep>DATA_NAMES=['train' 'test' 'corrupt-static-gaussian_noise-2' 'celeb_a']<line_sep>METHODS=['vanilla' 'll_dropout' 'll_svi' 'dropout']<class_stmt>EndToEndTest(parameterized.TestCase)<block_start>@parameterized.parameters(*[(d m)<for>d DATA_NAMES<for>m METHODS])# pylint: disable=g-complex-comprehension
<def_stmt>test_end_to_end_train self data_name method<block_start><with_stmt>tempfile.TemporaryDirectory()<as>model_dir<block_start>metrics=['sparse_categorical_crossentropy']<if_stmt>flags.FLAGS.fake_data<and>(data_name<ne>'test')<block_start><pass><block_end><else_stmt><block_start>temp_model_dir=os.path.join(model_dir data_name method)<line_sep>resnet50_train.run(method temp_model_dir task_number=0 use_tpu=<false> tpu=<none> metrics=metrics fake_data=flags.FLAGS.fake_data fake_training=flags.FLAGS.fake_training)<line_sep>run_predict.run(data_name temp_model_dir batch_size=8 predictions_per_example=4 max_examples=44 output_dir=temp_model_dir fake_data=flags.FLAGS.fake_data)<line_sep>tmpl=os.path.join(temp_model_dir '*_small_*')<line_sep>glob_results=gfile.glob(tmpl)<line_sep>path=glob_results[0]<if_stmt>data_name<eq>'valid'<block_start>run_temp_scaling(path)<block_end>run_metrics.run(path path model_dir_ensemble=<none> use_temp_scaling=<false>)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>absltest.main()<block_end> |
# Copyright 2008-2015 Nokia Networks
# Copyright 2016- Robot Framework Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>..lexer Token get_tokens get_resource_tokens get_init_tokens<import_from_stmt>..model Statement<import_from_stmt>.fileparser FileParser<def_stmt>get_model source data_only=<false> curdir=<none><block_start>"""Parses the given source to a model represented as an AST.
How to use the model is explained more thoroughly in the general
documentation of the :mod:`robot.parsing` module.
:param source: The source from which to read the data. Can be a path to
a source file as a string or as a ``pathlib.Path`` object, an already
opened file object, or Unicode text containing the data directly.
Source files must be UTF-8 encoded.
:param data_only: When ``False`` (default), returns all tokens. When set
to ``True``, omits separators, comments, continuation markers, and
other non-data tokens. A model like this cannot be saved back to the
file system.
:param curdir: Directory where the source file exists. This path is used
to set the value of the built-in ``${CURDIR}`` variable during parsing.
When not given, the variable is left as-is. Should only be given
if the model will be executed afterwards. If the model is saved
back to disk, resolving ``${CURDIR}`` is typically not a good idea.
Use :func:`get_resource_model` or :func:`get_init_model` when parsing
resource or suite initialization files, respectively.
"""<line_sep><return>_get_model(get_tokens source data_only curdir)<block_end><def_stmt>get_resource_model source data_only=<false> curdir=<none><block_start>"""Parses the given source to a resource file model.
Otherwise same as :func:`get_model` but the source is considered to be
a resource file. This affects, for example, what settings are valid.
"""<line_sep><return>_get_model(get_resource_tokens source data_only curdir)<block_end><def_stmt>get_init_model source data_only=<false> curdir=<none><block_start>"""Parses the given source to a init file model.
Otherwise same as :func:`get_model` but the source is considered to be
a suite initialization file. This affects, for example, what settings are
valid.
"""<line_sep><return>_get_model(get_init_tokens source data_only curdir)<block_end><def_stmt>_get_model token_getter source data_only=<false> curdir=<none><block_start>tokens=token_getter(source data_only)<line_sep>statements=_tokens_to_statements(tokens curdir)<line_sep>model=_statements_to_model(statements source)<line_sep>model.validate_model()<line_sep><return>model<block_end><def_stmt>_tokens_to_statements tokens curdir=<none><block_start>statement=[]<line_sep>EOS=Token.EOS<for_stmt>t tokens<block_start><if_stmt>curdir<and>'${CURDIR}'<in>t.value<block_start>t.value=t.value.replace('${CURDIR}' curdir)<block_end><if_stmt>t.type<ne>EOS<block_start>statement.append(t)<block_end><else_stmt><block_start><yield>Statement.from_tokens(statement)<line_sep>statement=[]<block_end><block_end><block_end><def_stmt>_statements_to_model statements source=<none><block_start>parser=FileParser(source=source)<line_sep>model=parser.model<line_sep>stack=[parser]<for_stmt>statement statements<block_start><while_stmt><not>stack[-1].handles(statement)<block_start>stack.pop()<block_end>parser=stack[-1].parse(statement)<if_stmt>parser<block_start>stack.append(parser)<block_end><block_end><return>model<block_end> |
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>math<import_from_stmt>typing Mapping Dict List<import_from_stmt>qf_lib.backtesting.broker.broker Broker<import_from_stmt>qf_lib.backtesting.contract.contract Contract<import_from_stmt>qf_lib.backtesting.contract.contract_to_ticker_conversion.base ContractTickerMapper<import_from_stmt>qf_lib.backtesting.order.execution_style ExecutionStyle<import_from_stmt>qf_lib.backtesting.order.order Order<import_from_stmt>qf_lib.backtesting.order.time_in_force TimeInForce<import_from_stmt>qf_lib.common.enums.frequency Frequency<import_from_stmt>qf_lib.common.utils.logging.qf_parent_logger qf_logger<import_from_stmt>qf_lib.common.utils.miscellaneous.function_name get_function_name<import_from_stmt>qf_lib.data_providers.data_provider DataProvider<class_stmt>OrderFactory<block_start>""" Creates Orders.
Parameters
----------
broker: Broker
broker used to access the portfolio
data_provider: DataProvider
data provider used to download prices. In case of backtesting, the DataHandler wrapper should be used.
contract_to_ticker_mapper: ContractTickerMapper
object mapping contracts to tickers
"""<def_stmt>__init__ self broker:Broker data_provider:DataProvider contract_to_ticker_mapper:ContractTickerMapper<block_start>self.broker=broker<line_sep>self.data_provider=data_provider<line_sep>self.contract_to_ticker_mapper=contract_to_ticker_mapper<line_sep>self.logger=qf_logger.getChild(self.__class__.__name__)<block_end><def_stmt>orders self quantities:Mapping[Contract int] execution_style:ExecutionStyle time_in_force:TimeInForce<arrow>List[Order]<block_start>"""
Creates a list of Orders for given numbers of shares for each given asset.
Orders requiring 0 shares will be removed from the resulting order list.
Parameters
----------
quantities: Mapping[Contract, int]
mapping of a Contract to the number of shares which should be bought/sold.
If the number is positive the asset will be bought; otherwise it will be sold.
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
Returns
--------
List[Order]
list of generated orders
"""<line_sep>self._log_function_call(vars())<line_sep>order_list=[]<for_stmt>contract,quantity quantities.items()<block_start><if_stmt>quantity<ne>0<block_start>order_list.append(Order(contract quantity execution_style time_in_force))<block_end><block_end><return>order_list<block_end><def_stmt>target_orders self target_quantities:Mapping[Contract float] execution_style:ExecutionStyle time_in_force:TimeInForce tolerance_quantities:Mapping[Contract float]=<none><arrow>List[Order]<block_start>"""
Creates a list of Orders from a dictionary of desired target number of shares (number of shares which should be
present in the portfolio after executing the Order).
If the position doesn't already exist, the new Order is placed for the :target_quantity of shares.
If the position does exist the Order for the difference between the target number of shares
and the current number of shares is placed.
Parameters
----------
target_quantities: Mapping[Contract, float]
mapping of a Contract to a target number of shares which should be present in the portfolio after the Order
is executed. After comparing with tolerance the math.floor of the quantity will be taken.
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
tolerance_quantities: None, Mapping[Contract, int]
specifies the tolerance for the target_quantities (in both directions) for each Contract.
The tolerance is expressed in shares.
For example: assume that currently the portfolio contains 100 shares of asset A.
Then calling target_orders({A: 101}, ..., tolerance_quantities={A: 2}) will not generate any trades as
the tolerance of 2 allows the allocation to be 100 while the target value is 101.
Another example:
assume that currently the portfolio contains 100 shares of asset A.
Then calling target_orders({A: 103}, ..., tolerance_quantities={A: 2}) will generate a BUY order
for 3 shares
if abs(target - actual) > tolerance buy or sell assets to match the target
If tolerance for a specific contract is not provided it is assumed to be 0
Returns
--------
List[Order]
list of generated orders
"""<line_sep>self._log_function_call(vars())<line_sep># Dict of Contract -> Quantities of shares to buy/sell
quantities=dict()<if_stmt>tolerance_quantities<is><none><block_start>tolerance_quantities={}<block_end>contract_to_positions={position.contract():position<for>position self.broker.get_positions()}<for_stmt>contract,target_quantity target_quantities.items()<block_start>position=contract_to_positions.get(contract <none>)<line_sep>tolerance_quantity=tolerance_quantities.get(contract 0)<if_stmt>position<is><not><none><block_start>current_quantity=position.quantity()<block_end><else_stmt><block_start>current_quantity=0<block_end>quantity=target_quantity-current_quantity<if_stmt>abs(quantity)<g>tolerance_quantity<and>quantity<ne>0# tolerance_quantity can be 0
<block_start>quantities[contract]=math.floor(quantity)<block_end><block_end># type: int
<return>self.orders(quantities execution_style time_in_force)<block_end><def_stmt>value_orders self values:Mapping[Contract float] execution_style:ExecutionStyle time_in_force:TimeInForce frequency:Frequency=<none><arrow>List[Order]<block_start>"""
Creates a list of Orders by specifying the amount of money which should be spent on each asset rather
than the number of shares to buy/sell.
Parameters
----------
values: Mapping[Contract, float]
mapping of a Contract to the amount of money which should be spent on the asset (expressed in the currency
in which the asset is traded)
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
frequency: Frequency
frequency for the last available price sampling
Returns
--------
List[Order]
list of generated orders
"""<line_sep>self._log_function_call(vars())<line_sep>quantities,_=self._calculate_target_shares_and_tolerances(values frequency=frequency)<line_sep>int_quantities={contract:math.floor(quantity)<for>contract,quantity quantities.items()}<line_sep><return>self.orders(int_quantities execution_style time_in_force)<block_end><def_stmt>percent_orders self percentages:Mapping[Contract float] execution_style:ExecutionStyle time_in_force:TimeInForce frequency:Frequency=<none><arrow>List[Order]<block_start>"""
Creates a list of Orders by specifying the percentage of the current portfolio value which should be spent
on each asset.
Parameters
----------
percentages: Mapping[Contract, float]
mapping of a Contract to a percentage value of the current portfolio which should be allocated in the asset.
This is specified as a decimal value (e.g. 0.5 means 50%)
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
frequency: Frequency
frequency for the last available price sampling (daily or minutely)
Returns
--------
List[Order]
list of generated orders
"""<line_sep>self._log_function_call(vars())<line_sep>portfolio_value=self.broker.get_portfolio_value()<line_sep>values={contract:portfolio_value<times>fraction<for>contract,fraction percentages.items()}<line_sep><return>self.value_orders(values execution_style time_in_force frequency)<block_end><def_stmt>target_value_orders self target_values:Mapping[Contract float] execution_style:ExecutionStyle time_in_force:TimeInForce tolerance_percentage:float=0.0 frequency:Frequency=<none><arrow>List[Order]<block_start>"""
Creates a list of Orders by specifying how much should be allocated in each asset after the Orders
have been executed.
For example, if we already have 10M invested in 'SPY US Equity' and this method is called with a target value of 11M,
then only 1M will be spent on this asset.
Parameters
----------
target_values: Mapping[Contract, float]
mapping of a Contract to a value which should be allocated in the asset after the Order has been executed
(expressed in the currency in which the asset is traded)
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
tolerance_percentage: float
specifies the tolerance for the target_values (in both directions).
The tolerance is expressed as a percentage of target_values.
For example: assume that currently the portfolio contains asset A with allocation 10 000$.
Then calling target_value_orders({A: 10 500}, ..., tolerance_percentage=0.05) will not generate any trades as
the tolerance of 0.05 allows the allocation to be 10 000$, while target value is 10 500$ (tolerance value
would be equal to 0.05 * 10 500 = 525 and the difference between current and target value would be < 525$).
Another example:
For example: assume that currently the portfolio contains asset A with allocation 10 000$.
Then calling target_value_orders({A: 13 000}, ..., tolerance_percentage=0.1) will generate a BUY order
corresponding to 3000$ of shares. The tolerance of 0.1 does not allow a difference of 3000$
if abs(target - actual) > tolerance_percentage * target value
frequency: Frequency
frequency for the last available price sampling (daily or minutely)
Returns
--------
List[Order]
list of generated orders
"""<line_sep>self._log_function_call(vars())<assert_stmt>0.0<le>tolerance_percentage<l>1.0 "The tolerance_percentage should belong to [0, 1) interval"<line_sep>target_quantities,tolerance_quantities=self._calculate_target_shares_and_tolerances(target_values tolerance_percentage frequency)<line_sep><return>self.target_orders(target_quantities execution_style time_in_force tolerance_quantities)<block_end><def_stmt>target_percent_orders self target_percentages:Mapping[Contract float] execution_style:ExecutionStyle time_in_force:TimeInForce tolerance_percentage:float=0.0 frequency:Frequency=<none><arrow>List[Order]<block_start>"""
Creates an Order adjusting a position to a value equal to the given percentage of the portfolio.
Parameters
----------
target_percentages: Mapping[Contract, float]
mapping of a Contract to a percentage of a current portfolio value which should be allocated in each asset
after the Order has been carried out
execution_style: ExecutionStyle
execution style of an order (e.g. MarketOrder, StopOrder, etc.)
time_in_force: TimeInForce
e.g. 'DAY' (Order valid for one trading session), 'GTC' (good till cancelled)
tolerance_percentage: float
specifies the tolerance for the target_percentages (in both directions). The tolerance is expressed
in percentage points (0.02 corresponds to 2pp of the target_value). For more details look at the description
of target_value_orders.
frequency: Frequency
frequency for the last available price sampling (daily or minutely)
Returns
--------
List[Order]
list of generated orders
"""<line_sep>self._log_function_call(vars())<assert_stmt>0.0<le>tolerance_percentage<l>1.0 "The tolerance_percentage should belong to [0, 1) interval"<line_sep>portfolio_value=self.broker.get_portfolio_value()<line_sep>target_values={contract:portfolio_value<times>target_percent<for>contract,target_percent target_percentages.items()}<line_sep><return>self.target_value_orders(target_values execution_style time_in_force tolerance_percentage frequency)<block_end><def_stmt>_calculate_target_shares_and_tolerances self contract_to_amount_of_money:Mapping[Contract float] tolerance_percentage:float=0.0 frequency:Frequency=<none><arrow>(Mapping[Contract float] Mapping[Contract float])<block_start>"""
Returns
----------
Tuple(Mapping[Contract, float], Mapping[Contract, float])
Tells how many shares of each asset we should have in order to match the target and what is the tolerance
(in number of shares) for each asset
"""<line_sep>tickers_to_contract_and_amount_of_money=self._make_tickers_to_contract_and_amount_of_money(contract_to_amount_of_money)<line_sep>tickers=list(tickers_to_contract_and_amount_of_money.keys())<line_sep># In case of live trading the get_last_available_price will use datetime.now() as the current time to obtain
# the last price, and in case of a backtest it will use the data handler's timer to compute the date
current_prices=self.data_provider.get_last_available_price(tickers frequency)<line_sep># Contract -> target number of shares
target_quantities=dict()# type: Dict[Contract, float]
# Contract -> tolerance expressed as number of shares
tolerance_quantities=dict()# type: Dict[Contract, float]
<for_stmt>ticker,(contract amount_of_money) tickers_to_contract_and_amount_of_money.items()<block_start>current_price=current_prices.loc[ticker]<line_sep>divisor=(current_price<times>contract.contract_size)<line_sep>target_quantity=amount_of_money/divisor# type: float
target_quantities[contract]=target_quantity<line_sep>tolerance_quantity=target_quantity<times>tolerance_percentage<line_sep>tolerance_quantities[contract]=tolerance_quantity<block_end><return>target_quantities tolerance_quantities<block_end><def_stmt>_make_tickers_to_contract_and_amount_of_money self contract_to_amount_of_money<block_start>tickers_to_contract_and_amount_of_money=dict()<for_stmt>contract,amount_of_money contract_to_amount_of_money.items()<block_start>ticker=self.contract_to_ticker_mapper.contract_to_ticker(contract)<line_sep>tickers_to_contract_and_amount_of_money[ticker]=contract amount_of_money<block_end><return>tickers_to_contract_and_amount_of_money<block_end><def_stmt>_log_function_call self params_dict<block_start><if_stmt>'self'<in>params_dict<block_start><del_stmt>params_dict['self']<block_end>fn_name_level_above=get_function_name(1)<line_sep>log_message="Function call: '{}' with parameters:".format(fn_name_level_above)<for_stmt>key,value params_dict.items()<block_start><if_stmt>isinstance(value dict)<and>value<block_start>value_str=""<for_stmt>inner_k,inner_v value.items()<block_start>value_str<augadd>"\n\t\t{}: {}".format(inner_k inner_v)<block_end><block_end><else_stmt><block_start>value_str=str(value)<block_end>log_message<augadd>"\n\t{}: {}".format(key value_str)<block_end>self.logger.debug(log_message)<block_end><block_end> |
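# Illustrative sketch, not part of the original module: a typical call pattern
# for OrderFactory. The broker, data_provider, contract_mapper and
# execution_style arguments are assumed to be created elsewhere (e.g. a market
# order style); TimeInForce.DAY is assumed to be a valid member of the imported
# TimeInForce enum, as suggested by the docstrings above.
<def_stmt>_example_rebalance broker data_provider contract_mapper contract execution_style<block_start>factory=OrderFactory(broker data_provider contract_mapper)<line_sep># Target 50% of the portfolio in the given contract, tolerating a 2pp drift.
orders=factory.target_percent_orders({contract:0.5} execution_style TimeInForce.DAY tolerance_percentage=0.02)<line_sep><return>orders<block_end>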
<import_stmt>unittest<import_stmt>os.path<import_stmt>requests_mock<import_stmt>tableauserverclient<as>TSC<line_sep>TEST_ASSET_DIR=os.path.join(os.path.dirname(__file__) 'assets')<line_sep>SIGN_IN_XML=os.path.join(TEST_ASSET_DIR 'auth_sign_in.xml')<line_sep>SIGN_IN_IMPERSONATE_XML=os.path.join(TEST_ASSET_DIR 'auth_sign_in_impersonate.xml')<line_sep>SIGN_IN_ERROR_XML=os.path.join(TEST_ASSET_DIR 'auth_sign_in_error.xml')<class_stmt>AuthTests(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.server=TSC.Server('http://test')<line_sep>self.baseurl=self.server.auth.baseurl<block_end><def_stmt>test_sign_in self<block_start><with_stmt>open(SIGN_IN_XML 'rb')<as>f<block_start>response_xml=f.read().decode('utf-8')<block_end><with_stmt>requests_mock.mock()<as>m<block_start>m.post(self.baseurl+'/signin' text=response_xml)<line_sep>tableau_auth=TSC.TableauAuth('testuser' 'password' site_id='Samples')<line_sep>self.server.auth.sign_in(tableau_auth)<block_end>self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l' self.server.auth_token)<line_sep>self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6' self.server.site_id)<line_sep>self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01' self.server.user_id)<block_end><def_stmt>test_sign_in_with_personal_access_tokens self<block_start><with_stmt>open(SIGN_IN_XML 'rb')<as>f<block_start>response_xml=f.read().decode('utf-8')<block_end><with_stmt>requests_mock.mock()<as>m<block_start>m.post(self.baseurl+'/signin' text=response_xml)<line_sep>tableau_auth=TSC.PersonalAccessTokenAuth(token_name='mytoken' personal_access_token='<PASSWORD>' site_id='Samples')<line_sep>self.server.auth.sign_in(tableau_auth)<block_end>self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l' self.server.auth_token)<line_sep>self.assertEqual('6b7179ba-b82b-4f0f-91ed-812074ac5da6' self.server.site_id)<line_sep>self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01' self.server.user_id)<block_end><def_stmt>test_sign_in_impersonate self<block_start><with_stmt>open(SIGN_IN_IMPERSONATE_XML 'rb')<as>f<block_start>response_xml=f.read().decode('utf-8')<block_end><with_stmt>requests_mock.mock()<as>m<block_start>m.post(self.baseurl+'/signin' text=response_xml)<line_sep>tableau_auth=TSC.TableauAuth('testuser' 'password' user_id_to_impersonate='dd2239f6-ddf1-4107-981a-4cf94e415794')<line_sep>self.server.auth.sign_in(tableau_auth)<block_end>self.assertEqual('MJonFA6HDyy2C3oqR13fRGqE6cmgz<PASSWORD>' self.server.auth_token)<line_sep>self.assertEqual('dad65087-b08b-4603-af4e-2887b8aafc67' self.server.site_id)<line_sep>self.assertEqual('dd2239f6-ddf1-4107-981a-4cf94e415794' self.server.user_id)<block_end><def_stmt>test_sign_in_error self<block_start><with_stmt>open(SIGN_IN_ERROR_XML 'rb')<as>f<block_start>response_xml=f.read().decode('utf-8')<block_end><with_stmt>requests_mock.mock()<as>m<block_start>m.post(self.baseurl+'/signin' text=response_xml status_code=401)<line_sep>tableau_auth=TSC.TableauAuth('testuser' '<PASSWORD>')<line_sep>self.assertRaises(TSC.ServerResponseError self.server.auth.sign_in tableau_auth)<block_end><block_end><def_stmt>test_sign_in_invalid_token self<block_start><with_stmt>open(SIGN_IN_ERROR_XML 'rb')<as>f<block_start>response_xml=f.read().decode('utf-8')<block_end><with_stmt>requests_mock.mock()<as>m<block_start>m.post(self.baseurl+'/signin' text=response_xml status_code=401)<line_sep>tableau_auth=TSC.PersonalAccessTokenAuth(token_name='mytoken' personal_access_token='invalid')<line_sep>self.assertRaises(TSC.ServerResponseError self.server.auth.sign_in 
tableau_auth)<block_end><block_end><def_stmt>test_sign_in_without_auth self<block_start><with_stmt>open(SIGN_IN_ERROR_XML 'rb')<as>f<block_start>response_xml=f.read().decode('utf-8')<block_end><with_stmt>requests_mock.mock()<as>m<block_start>m.post(self.baseurl+'/signin' text=response_xml status_code=401)<line_sep>tableau_auth=TSC.TableauAuth('' '')<line_sep>self.assertRaises(TSC.ServerResponseError self.server.auth.sign_in tableau_auth)<block_end><block_end><def_stmt>test_sign_out self<block_start><with_stmt>open(SIGN_IN_XML 'rb')<as>f<block_start>response_xml=f.read().decode('utf-8')<block_end><with_stmt>requests_mock.mock()<as>m<block_start>m.post(self.baseurl+'/signin' text=response_xml)<line_sep>m.post(self.baseurl+'/signout' text='')<line_sep>tableau_auth=TSC.TableauAuth('testuser' 'password')<line_sep>self.server.auth.sign_in(tableau_auth)<line_sep>self.server.auth.sign_out()<block_end>self.assertIsNone(self.server._auth_token)<line_sep>self.assertIsNone(self.server._site_id)<line_sep>self.assertIsNone(self.server._user_id)<block_end><def_stmt>test_switch_site self<block_start>self.server.version='2.6'<line_sep>baseurl=self.server.auth.baseurl<line_sep>site_id,user_id,auth_token=list('<PASSWORD>')<line_sep>self.server._set_auth(site_id user_id auth_token)<with_stmt>open(SIGN_IN_XML 'rb')<as>f<block_start>response_xml=f.read().decode('utf-8')<block_end><with_stmt>requests_mock.mock()<as>m<block_start>m.post(baseurl+'/switchSite' text=response_xml)<line_sep>site=TSC.SiteItem('Samples' 'Samples')<line_sep>self.server.auth.switch_site(site)<block_end>self.assertEqual('eIX6mvFsq<PASSWORD>4KqEI1UwOpS8ggRs2l' self.server.auth_token)<line_sep>self.assertEqual('<PASSWORD>-8120<PASSWORD>' self.server.site_id)<line_sep>self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01' self.server.user_id)<block_end><def_stmt>test_revoke_all_server_admin_tokens self<block_start>self.server.version="3.10"<line_sep>baseurl=self.server.auth.baseurl<with_stmt>open(SIGN_IN_XML 'rb')<as>f<block_start>response_xml=f.read().decode('utf-8')<block_end><with_stmt>requests_mock.mock()<as>m<block_start>m.post(baseurl+'/signin' text=response_xml)<line_sep>m.post(baseurl+'/revokeAllServerAdminTokens' text='')<line_sep>tableau_auth=TSC.TableauAuth('testuser' 'password')<line_sep>self.server.auth.sign_in(tableau_auth)<line_sep>self.server.auth.revoke_all_server_admin_tokens()<block_end>self.assertEqual('eIX6mvFsqyansa4KqEI1UwOpS8ggRs2l' self.server.auth_token)<line_sep>self.assertEqual('<PASSWORD>ba-b82b-4f0f-91ed-812074ac5da6' self.server.site_id)<line_sep>self.assertEqual('1a96d216-e9b8-497b-a82a-0b899a965e01' self.server.user_id)<block_end><block_end> |
# -*- coding: utf-8 -*-
<import_from_future_stmt> unicode_literals<import_from_stmt>django.db migrations<def_stmt>add_webhook_notification_template_fields apps schema_editor# loop over all existing webhook notification templates and make
# sure they have the new "http_method" field filled in with "POST"
<block_start>NotificationTemplate=apps.get_model('main' 'notificationtemplate')<line_sep>webhooks=NotificationTemplate.objects.filter(notification_type='webhook')<for_stmt>w webhooks<block_start>w.notification_configuration['http_method']='POST'<line_sep>w.save()<block_end><block_end><class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('main' '0081_v360_notify_on_start') ]<line_sep>operations=[migrations.RunPython(add_webhook_notification_template_fields migrations.RunPython.noop) ]<block_end> |
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
<import_from_stmt>oci.util formatted_flat_dict NONE_SENTINEL value_allowed_none_or_none_sentinel# noqa: F401
<import_from_stmt>oci.decorators init_model_state_from_kwargs<line_sep>@init_model_state_from_kwargs<class_stmt>QueryDetails(object)<block_start>"""
Input arguments for running a log analytics query. If the request is set to run in asynchronous mode
then shouldIncludeColumns and shouldIncludeFields can be overwritten when retrieving the results.
"""<line_sep>#: A constant which can be used with the sub_system property of a QueryDetails.
#: This constant has a value of "LOG"
SUB_SYSTEM_LOG="LOG"<line_sep>#: A constant which can be used with the async_mode property of a QueryDetails.
#: This constant has a value of "FOREGROUND"
ASYNC_MODE_FOREGROUND="FOREGROUND"<line_sep>#: A constant which can be used with the async_mode property of a QueryDetails.
#: This constant has a value of "BACKGROUND"
ASYNC_MODE_BACKGROUND="BACKGROUND"<def_stmt>__init__ self **kwargs<block_start>"""
Initializes a new QueryDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param compartment_id:
The value to assign to the compartment_id property of this QueryDetails.
:type compartment_id: str
:param compartment_id_in_subtree:
The value to assign to the compartment_id_in_subtree property of this QueryDetails.
:type compartment_id_in_subtree: bool
:param saved_search_id:
The value to assign to the saved_search_id property of this QueryDetails.
:type saved_search_id: str
:param query_string:
The value to assign to the query_string property of this QueryDetails.
:type query_string: str
:param sub_system:
The value to assign to the sub_system property of this QueryDetails.
Allowed values for this property are: "LOG"
:type sub_system: str
:param max_total_count:
The value to assign to the max_total_count property of this QueryDetails.
:type max_total_count: int
:param time_filter:
The value to assign to the time_filter property of this QueryDetails.
:type time_filter: oci.log_analytics.models.TimeRange
:param scope_filters:
The value to assign to the scope_filters property of this QueryDetails.
:type scope_filters: list[oci.log_analytics.models.ScopeFilter]
:param query_timeout_in_seconds:
The value to assign to the query_timeout_in_seconds property of this QueryDetails.
:type query_timeout_in_seconds: int
:param should_run_async:
The value to assign to the should_run_async property of this QueryDetails.
:type should_run_async: bool
:param async_mode:
The value to assign to the async_mode property of this QueryDetails.
Allowed values for this property are: "FOREGROUND", "BACKGROUND"
:type async_mode: str
:param should_include_total_count:
The value to assign to the should_include_total_count property of this QueryDetails.
:type should_include_total_count: bool
:param should_include_columns:
The value to assign to the should_include_columns property of this QueryDetails.
:type should_include_columns: bool
:param should_include_fields:
The value to assign to the should_include_fields property of this QueryDetails.
:type should_include_fields: bool
:param should_use_acceleration:
The value to assign to the should_use_acceleration property of this QueryDetails.
:type should_use_acceleration: bool
"""<line_sep>self.swagger_types={'compartment_id':'str' 'compartment_id_in_subtree':'bool' 'saved_search_id':'str' 'query_string':'str' 'sub_system':'str' 'max_total_count':'int' 'time_filter':'TimeRange' 'scope_filters':'list[ScopeFilter]' 'query_timeout_in_seconds':'int' 'should_run_async':'bool' 'async_mode':'str' 'should_include_total_count':'bool' 'should_include_columns':'bool' 'should_include_fields':'bool' 'should_use_acceleration':'bool'}<line_sep>self.attribute_map={'compartment_id':'compartmentId' 'compartment_id_in_subtree':'compartmentIdInSubtree' 'saved_search_id':'savedSearchId' 'query_string':'queryString' 'sub_system':'subSystem' 'max_total_count':'maxTotalCount' 'time_filter':'timeFilter' 'scope_filters':'scopeFilters' 'query_timeout_in_seconds':'queryTimeoutInSeconds' 'should_run_async':'shouldRunAsync' 'async_mode':'asyncMode' 'should_include_total_count':'shouldIncludeTotalCount' 'should_include_columns':'shouldIncludeColumns' 'should_include_fields':'shouldIncludeFields' 'should_use_acceleration':'shouldUseAcceleration'}<line_sep>self._compartment_id=<none><line_sep>self._compartment_id_in_subtree=<none><line_sep>self._saved_search_id=<none><line_sep>self._query_string=<none><line_sep>self._sub_system=<none><line_sep>self._max_total_count=<none><line_sep>self._time_filter=<none><line_sep>self._scope_filters=<none><line_sep>self._query_timeout_in_seconds=<none><line_sep>self._should_run_async=<none><line_sep>self._async_mode=<none><line_sep>self._should_include_total_count=<none><line_sep>self._should_include_columns=<none><line_sep>self._should_include_fields=<none><line_sep>self._should_use_acceleration=<none><block_end>@property<def_stmt>compartment_id self<block_start>"""
**[Required]** Gets the compartment_id of this QueryDetails.
Compartment Identifier `OCID`__.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this QueryDetails.
:rtype: str
"""<line_sep><return>self._compartment_id<block_end>@compartment_id.setter<def_stmt>compartment_id self compartment_id<block_start>"""
Sets the compartment_id of this QueryDetails.
Compartment Identifier `OCID`__.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this QueryDetails.
:type: str
"""<line_sep>self._compartment_id=compartment_id<block_end>@property<def_stmt>compartment_id_in_subtree self<block_start>"""
Gets the compartment_id_in_subtree of this QueryDetails.
Flag to search all child compartments of the compartment Id specified in the compartmentId query parameter.
:return: The compartment_id_in_subtree of this QueryDetails.
:rtype: bool
"""<line_sep><return>self._compartment_id_in_subtree<block_end>@compartment_id_in_subtree.setter<def_stmt>compartment_id_in_subtree self compartment_id_in_subtree<block_start>"""
Sets the compartment_id_in_subtree of this QueryDetails.
Flag to search all child compartments of the compartment Id specified in the compartmentId query parameter.
:param compartment_id_in_subtree: The compartment_id_in_subtree of this QueryDetails.
:type: bool
"""<line_sep>self._compartment_id_in_subtree=compartment_id_in_subtree<block_end>@property<def_stmt>saved_search_id self<block_start>"""
Gets the saved_search_id of this QueryDetails.
Saved search OCID for this query if known.
:return: The saved_search_id of this QueryDetails.
:rtype: str
"""<line_sep><return>self._saved_search_id<block_end>@saved_search_id.setter<def_stmt>saved_search_id self saved_search_id<block_start>"""
Sets the saved_search_id of this QueryDetails.
Saved search OCID for this query if known.
:param saved_search_id: The saved_search_id of this QueryDetails.
:type: str
"""<line_sep>self._saved_search_id=saved_search_id<block_end>@property<def_stmt>query_string self<block_start>"""
**[Required]** Gets the query_string of this QueryDetails.
Query to perform. Must conform to the logging analytics query language syntax. Syntax errors will be returned if present.
:return: The query_string of this QueryDetails.
:rtype: str
"""<line_sep><return>self._query_string<block_end>@query_string.setter<def_stmt>query_string self query_string<block_start>"""
Sets the query_string of this QueryDetails.
Query to perform. Must conform to the logging analytics query language syntax. Syntax errors will be returned if present.
:param query_string: The query_string of this QueryDetails.
:type: str
"""<line_sep>self._query_string=query_string<block_end>@property<def_stmt>sub_system self<block_start>"""
**[Required]** Gets the sub_system of this QueryDetails.
Default subsystem to qualify fields with in the queryString if not specified.
Allowed values for this property are: "LOG"
:return: The sub_system of this QueryDetails.
:rtype: str
"""<line_sep><return>self._sub_system<block_end>@sub_system.setter<def_stmt>sub_system self sub_system<block_start>"""
Sets the sub_system of this QueryDetails.
Default subsystem to qualify fields with in the queryString if not specified.
:param sub_system: The sub_system of this QueryDetails.
:type: str
"""<line_sep>allowed_values=["LOG"]<if_stmt><not>value_allowed_none_or_none_sentinel(sub_system allowed_values)<block_start><raise>ValueError("Invalid value for `sub_system`, must be None or one of {0}".format(allowed_values))<block_end>self._sub_system=sub_system<block_end>@property<def_stmt>max_total_count self<block_start>"""
Gets the max_total_count of this QueryDetails.
Maximum number of results to count. Note a maximum of 2001 will be enforced; that is, actualMaxTotalCountUsed = Math.min(maxTotalCount, 2001).
:return: The max_total_count of this QueryDetails.
:rtype: int
"""<line_sep><return>self._max_total_count<block_end>@max_total_count.setter<def_stmt>max_total_count self max_total_count<block_start>"""
Sets the max_total_count of this QueryDetails.
Maximum number of results to count. Note a maximum of 2001 will be enforced; that is, actualMaxTotalCountUsed = Math.min(maxTotalCount, 2001).
:param max_total_count: The max_total_count of this QueryDetails.
:type: int
"""<line_sep>self._max_total_count=max_total_count<block_end>@property<def_stmt>time_filter self<block_start>"""
Gets the time_filter of this QueryDetails.
:return: The time_filter of this QueryDetails.
:rtype: oci.log_analytics.models.TimeRange
"""<line_sep><return>self._time_filter<block_end>@time_filter.setter<def_stmt>time_filter self time_filter<block_start>"""
Sets the time_filter of this QueryDetails.
:param time_filter: The time_filter of this QueryDetails.
:type: oci.log_analytics.models.TimeRange
"""<line_sep>self._time_filter=time_filter<block_end>@property<def_stmt>scope_filters self<block_start>"""
Gets the scope_filters of this QueryDetails.
List of filters to be applied when the query executes. More than one filter per field is not permitted.
:return: The scope_filters of this QueryDetails.
:rtype: list[oci.log_analytics.models.ScopeFilter]
"""<line_sep><return>self._scope_filters<block_end>@scope_filters.setter<def_stmt>scope_filters self scope_filters<block_start>"""
Sets the scope_filters of this QueryDetails.
List of filters to be applied when the query executes. More than one filter per field is not permitted.
:param scope_filters: The scope_filters of this QueryDetails.
:type: list[oci.log_analytics.models.ScopeFilter]
"""<line_sep>self._scope_filters=scope_filters<block_end>@property<def_stmt>query_timeout_in_seconds self<block_start>"""
Gets the query_timeout_in_seconds of this QueryDetails.
Amount of time, in seconds, allowed for a query to execute. If this time expires before the query is complete, any partial results will be returned.
:return: The query_timeout_in_seconds of this QueryDetails.
:rtype: int
"""<line_sep><return>self._query_timeout_in_seconds<block_end>@query_timeout_in_seconds.setter<def_stmt>query_timeout_in_seconds self query_timeout_in_seconds<block_start>"""
Sets the query_timeout_in_seconds of this QueryDetails.
Amount of time, in seconds, allowed for a query to execute. If this time expires before the query is complete, any partial results will be returned.
:param query_timeout_in_seconds: The query_timeout_in_seconds of this QueryDetails.
:type: int
"""<line_sep>self._query_timeout_in_seconds=query_timeout_in_seconds<block_end>@property<def_stmt>should_run_async self<block_start>"""
Gets the should_run_async of this QueryDetails.
Option to run the query asynchronously. This will lead to a LogAnalyticsQueryJobWorkRequest being submitted and the {workRequestId} will be returned to use for fetching the results.
:return: The should_run_async of this QueryDetails.
:rtype: bool
"""<line_sep><return>self._should_run_async<block_end>@should_run_async.setter<def_stmt>should_run_async self should_run_async<block_start>"""
Sets the should_run_async of this QueryDetails.
Option to run the query asynchronously. This will lead to a LogAnalyticsQueryJobWorkRequest being submitted and the {workRequestId} will be returned to use for fetching the results.
:param should_run_async: The should_run_async of this QueryDetails.
:type: bool
"""<line_sep>self._should_run_async=should_run_async<block_end>@property<def_stmt>async_mode self<block_start>"""
Gets the async_mode of this QueryDetails.
Execution mode for the query if running asynchronously, i.e. (shouldRunAsync is set to true).
Allowed values for this property are: "FOREGROUND", "BACKGROUND"
:return: The async_mode of this QueryDetails.
:rtype: str
"""<line_sep><return>self._async_mode<block_end>@async_mode.setter<def_stmt>async_mode self async_mode<block_start>"""
Sets the async_mode of this QueryDetails.
Execution mode for the query if running asynchronously, i.e. (shouldRunAsync is set to true).
:param async_mode: The async_mode of this QueryDetails.
:type: str
"""<line_sep>allowed_values=["FOREGROUND" "BACKGROUND"]<if_stmt><not>value_allowed_none_or_none_sentinel(async_mode allowed_values)<block_start><raise>ValueError("Invalid value for `async_mode`, must be None or one of {0}".format(allowed_values))<block_end>self._async_mode=async_mode<block_end>@property<def_stmt>should_include_total_count self<block_start>"""
Gets the should_include_total_count of this QueryDetails.
Include the total number of results from the query. Note, this value will always be equal to or less than maxTotalCount.
:return: The should_include_total_count of this QueryDetails.
:rtype: bool
"""<line_sep><return>self._should_include_total_count<block_end>@should_include_total_count.setter<def_stmt>should_include_total_count self should_include_total_count<block_start>"""
Sets the should_include_total_count of this QueryDetails.
Include the total number of results from the query. Note, this value will always be equal to or less than maxTotalCount.
:param should_include_total_count: The should_include_total_count of this QueryDetails.
:type: bool
"""<line_sep>self._should_include_total_count=should_include_total_count<block_end>@property<def_stmt>should_include_columns self<block_start>"""
Gets the should_include_columns of this QueryDetails.
Include columns in response
:return: The should_include_columns of this QueryDetails.
:rtype: bool
"""<line_sep><return>self._should_include_columns<block_end>@should_include_columns.setter<def_stmt>should_include_columns self should_include_columns<block_start>"""
Sets the should_include_columns of this QueryDetails.
Include columns in response
:param should_include_columns: The should_include_columns of this QueryDetails.
:type: bool
"""<line_sep>self._should_include_columns=should_include_columns<block_end>@property<def_stmt>should_include_fields self<block_start>"""
Gets the should_include_fields of this QueryDetails.
Include fields in response
:return: The should_include_fields of this QueryDetails.
:rtype: bool
"""<line_sep><return>self._should_include_fields<block_end>@should_include_fields.setter<def_stmt>should_include_fields self should_include_fields<block_start>"""
Sets the should_include_fields of this QueryDetails.
Include fields in response
:param should_include_fields: The should_include_fields of this QueryDetails.
:type: bool
"""<line_sep>self._should_include_fields=should_include_fields<block_end>@property<def_stmt>should_use_acceleration self<block_start>"""
Gets the should_use_acceleration of this QueryDetails.
Controls if the query should ignore pre-calculated results if available and only use raw data. If set and no acceleration data is found it will fall back to raw data.
:return: The should_use_acceleration of this QueryDetails.
:rtype: bool
"""<line_sep><return>self._should_use_acceleration<block_end>@should_use_acceleration.setter<def_stmt>should_use_acceleration self should_use_acceleration<block_start>"""
Sets the should_use_acceleration of this QueryDetails.
Controls if the query should ignore pre-calculated results if available and only use raw data. If set and no acceleration data is found it will fall back to raw data.
:param should_use_acceleration: The should_use_acceleration of this QueryDetails.
:type: bool
"""<line_sep>self._should_use_acceleration=should_use_acceleration<block_end><def_stmt>__repr__ self<block_start><return>formatted_flat_dict(self)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>other<is><none><block_start><return><false><block_end><return>self.__dict__<eq>other.__dict__<block_end><def_stmt>__ne__ self other<block_start><return><not>self<eq>other<block_end><block_end> |
<import_from_stmt>collections namedtuple<line_sep>RGB=namedtuple("RGB" "red, green, blue")<line_sep>COLORS={"red":RGB(255 0 0) "orange-deep":RGB(255 40 0) "orange":RGB(255 120 0) "yellow":RGB(255 200 0) "yellow-acid":RGB(160 255 0) "green":RGB(0 255 0) "green-forest":RGB(34 139 34) "green-spring":RGB(0 255 127) "green-teal":RGB(0 128 128) "green-turquoise":RGB(0 199 140) "green-coral":RGB(0 255 50) "cyan":RGB(0 255 255) "blue":RGB(0 0 255) "blue-light":RGB(65 105 225) "blue-navy":RGB(0 0 128) "blue-aqua":RGB(0 255 255) "purple":RGB(128 0 128) "pink":RGB(255 0 178) "magenta":RGB(255 0 255) "black":RGB(0 0 0) "white":RGB(255 255 255) "brown":RGB(139 69 19) "gold":RGB(255 215 0) "hotpink":RGB(255 105 180) "lightblue":RGB(173 216 230) "lightgreen":RGB(152 251 152) "lightpink":RGB(255 182 193) "lightyellow":RGB(255 255 224) "maroon":RGB(128 0 0) "mint":RGB(189 252 201) "olive":RGB(85 107 47) "peach":RGB(255 100 100) "plum":RGB(221 160 221) "sepia":RGB(94 38 18) "skyblue":RGB(135 206 235) "steelblue":RGB(70 130 180) "tan":RGB(210 180 140) "violetred":RGB(208 32 144) }<line_sep>GRADIENTS={"Rainbow":{"colors":["red" "orange" "yellow" "green" "green-turquoise" "blue" "purple" "pink" ]} "Dancefloor":{"colors":["red" "pink" "blue"]} "Plasma":{"colors":["blue" "purple" "red" "orange-deep" "yellow"]} "Ocean":{"colors":["blue-aqua" "blue"]} "Viridis":{"colors":["purple" "blue" "green-teal" "green" "yellow"]} "Jungle":{"colors":["green" "green-forest" "orange"]} "Spring":{"colors":["pink" "orange-deep" "yellow"]} "Winter":{"colors":["green-turquoise" "green-coral"]} "Frost":{"colors":["blue" "blue-aqua" "purple" "pink"]} "Sunset":{"colors":["blue-navy" "orange" "red"]} "Borealis":{"colors":["orange-deep" "purple" "green-turquoise" "green" ]} "Rust":{"colors":["orange-deep" "red"]} "Christmas":{"colors":["red" "red" "red" "red" "red" "green" "green" "green" "green" "green" ] "method":"repeat" } "Winamp":{"colors":["green" "yellow" "orange" "orange-deep" "red" ]} }<line_sep> |
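# Illustrative sketch, not part of the original data module: resolving a named
# gradient from GRADIENTS into its list of RGB stops via the COLORS table.
<def_stmt>gradient_rgb_stops name<block_start><return>[COLORS[color_name]<for>color_name GRADIENTS[name]["colors"]]<block_end>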
<import_from_stmt>menpofit.result ParametricIterativeResult MultiScaleParametricIterativeResult <class_stmt>LucasKanadeAlgorithmResult(ParametricIterativeResult)<block_start>r"""
Class for storing the iterative result of a Lucas-Kanade Image Alignment
optimization algorithm.
Parameters
----------
shapes : `list` of `menpo.shape.PointCloud`
The `list` of shapes per iteration. The first and last members
correspond to the initial and final shapes, respectively.
homogeneous_parameters : `list` of ``(n_parameters,)`` `ndarray`
The `list` of parameters of the homogeneous transform per iteration.
The first and last members correspond to the initial and final
shapes, respectively.
initial_shape : `menpo.shape.PointCloud` or ``None``, optional
The initial shape from which the fitting process started. If
``None``, then no initial shape is assigned.
image : `menpo.image.Image` or `subclass` or ``None``, optional
The image on which the fitting process was applied. Note that a copy
of the image will be assigned as an attribute. If ``None``, then no
image is assigned.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape associated with the image. If ``None``, then no
ground truth shape is assigned.
costs : `list` of `float` or ``None``, optional
The `list` of cost per iteration. If ``None``, then it is assumed that
the cost function cannot be computed for the specific algorithm.
"""<def_stmt>__init__ self shapes homogeneous_parameters initial_shape=<none> image=<none> gt_shape=<none> costs=<none><block_start>super(LucasKanadeAlgorithmResult self).__init__(shapes=shapes shape_parameters=homogeneous_parameters initial_shape=initial_shape image=image gt_shape=gt_shape costs=costs)<line_sep>self._homogeneous_parameters=homogeneous_parameters<block_end>@property<def_stmt>homogeneous_parameters self<block_start>r"""
Returns the `list` of parameters of the homogeneous transform
obtained at each iteration of the fitting process. The `list`
includes the parameters of the `initial_shape` (if it exists) and
`final_shape`.
:type: `list` of ``(n_params,)`` `ndarray`
"""<line_sep><return>self._shape_parameters<block_end><block_end><class_stmt>LucasKanadeResult(MultiScaleParametricIterativeResult)<block_start>r"""
Class for storing the multi-scale iterative fitting result of a Lucas-Kanade image alignment fitter. It
holds the shapes, shape parameters and costs per iteration.
Parameters
----------
results : `list` of :map:`LucasKanadeAlgorithmResult`
The `list` of optimization results per scale.
scales : `list` or `tuple`
The `list` of scale values per scale (low to high).
affine_transforms : `list` of `menpo.transform.Affine`
The list of affine transforms per scale that transform the shapes into
the original image space.
scale_transforms : `list` of `menpo.shape.Scale`
The list of scaling transforms per scale.
image : `menpo.image.Image` or `subclass` or ``None``, optional
The image on which the fitting process was applied. Note that a copy
of the image will be assigned as an attribute. If ``None``, then no
image is assigned.
gt_shape : `menpo.shape.PointCloud` or ``None``, optional
The ground truth shape associated with the image. If ``None``, then no
ground truth shape is assigned.
"""<def_stmt>__init__ self results scales affine_transforms scale_transforms image=<none> gt_shape=<none><block_start>super(LucasKanadeResult self).__init__(results=results scales=scales affine_transforms=affine_transforms scale_transforms=scale_transforms image=image gt_shape=gt_shape)<line_sep># Create parameters list
self._homogeneous_parameters=[]<for_stmt>r results<block_start>self._homogeneous_parameters<augadd>r.homogeneous_parameters<block_end># Correct n_iters
self._n_iters<augsub>len(scales)<block_end>@property<def_stmt>homogeneous_parameters self<block_start>r"""
Returns the `list` of parameters of the homogeneous transform
obtained at each iteration of the fitting process. The `list`
includes the parameters of the `initial_shape` (if it exists) and
`final_shape`.
:type: `list` of ``(n_params,)`` `ndarray`
"""<line_sep><return>self._homogeneous_parameters<block_end>@property<def_stmt>shape_parameters self# Use homogeneous_parameters instead.
<block_start><raise>AttributeError<block_end><block_end> |
<import_from_stmt>setuptools setup<line_sep>setup(name="example-advanced-package" version="0.0.0" packages=[] )<line_sep> |
<def_stmt>_recipes_pil_prescript plugins<block_start><try_stmt><block_start><import_stmt>Image<line_sep>have_PIL=<false><block_end><except_stmt>ImportError<block_start><import_from_stmt>PIL Image<line_sep>have_PIL=<true><block_end><import_stmt>sys<def_stmt>init <block_start><if_stmt>Image._initialized<ge>2<block_start><return><block_end><if_stmt>have_PIL<block_start><try_stmt><block_start><import_stmt>PIL.JpegPresets<line_sep>sys.modules["JpegPresets"]=PIL.JpegPresets<block_end><except_stmt>ImportError<block_start><pass><block_end><block_end><for_stmt>plugin plugins<block_start><try_stmt><block_start><if_stmt>have_PIL<block_start><try_stmt># First try absolute import through PIL (for
# Pillow support) only then try relative imports
<block_start>m=__import__("PIL."+plugin globals() locals() [])<line_sep>m=getattr(m plugin)<line_sep>sys.modules[plugin]=m<line_sep><continue><block_end><except_stmt>ImportError<block_start><pass><block_end><block_end>__import__(plugin globals() locals() [])<block_end><except_stmt>ImportError<block_start>print("Image: failed to import")<block_end><block_end><if_stmt>Image.OPEN<or>Image.SAVE<block_start>Image._initialized=2<line_sep><return>1<block_end><block_end>Image.init=init<block_end> |
# coding=utf-8
# Copyright 2020 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for turkish_morphology.validate."""<import_stmt>os<import_from_stmt>turkish_morphology analysis_pb2<import_from_stmt>turkish_morphology validate<import_from_stmt>absl.testing absltest<import_from_stmt>absl.testing parameterized<import_from_stmt>google.protobuf text_format<line_sep>_TESTDATA_DIR="turkish_morphology/testdata"<def_stmt>_read_file path<block_start><with_stmt>open(path "r")<as>f<block_start>read=f.read()<block_end><return>read<block_end><def_stmt>_read_analysis basename<block_start>path=os.path.join(_TESTDATA_DIR f"{basename}.pbtxt")<line_sep><return>text_format.Parse(_read_file(path) analysis_pb2.Analysis())<block_end><class_stmt>AnalysisTest(parameterized.TestCase)<block_start>@parameterized.named_parameters([{"testcase_name":"SingleInflectionalGroupsWithProperFeature" "basename":"araba_with_proper" } {"testcase_name":"SingleInflectionalGroupsWithoutProperFeature" "basename":"araba_without_proper" } {"testcase_name":"MultipleInflectionalGroupsWithProperFeature" "basename":"yasa_with_proper" } {"testcase_name":"MultipleInflectionalGroupsWithoutProperFeature" "basename":"yasa_without_proper" } ])<def_stmt>test_success self basename<block_start>analysis=_read_analysis(basename)<line_sep>actual=validate.analysis(analysis)<line_sep>self.assertIsNone(actual)<block_end>@parameterized.named_parameters([{"testcase_name":"AnalysisMissingInflectionalGroups" "basename":"invalid_empty_analysis" "message":"Analysis is missing inflectional groups" } {"testcase_name":"InflectionalGroupMissingPartOfSpeechTag" "basename":"invalid_ig_missing_pos" "message":"Inflectional group 2 is missing part-of-speech tag" } {"testcase_name":"InflectionalGroupEmptyPartOfSpeechTag" "basename":"invalid_ig_empty_pos" "message":"Inflectional group 2 part-of-speech tag is empty" } {"testcase_name":"FirstInflectionalGroupMissingRoot" "basename":"invalid_first_ig_missing_root" "message":"Inflectional group 1 is missing root" } {"testcase_name":"DerivedInflectionalGroupMissingDerivation" "basename":"invalid_derived_ig_missing_derivation" "message":"Inflectional group 2 is missing derivational affix" } {"testcase_name":"AffixMissingFeature" "basename":"invalid_affix_missing_feature" "message":"Affix is missing feature" } {"testcase_name":"DerivationalAffixMissingMetaMorpheme" "basename":"invalid_derivational_affix_missing_meta_morpheme" "message":"Derivational affix is missing meta-morpheme" } {"testcase_name":"DerivationalAffixEmptyMetaMorpheme" "basename":"invalid_derivational_affix_empty_meta_morpheme" "message":"Derivational affix meta-morpheme is empty" } {"testcase_name":"FeatureMissingCategory" "basename":"invalid_feature_missing_category" "message":"Feature is missing category" } {"testcase_name":"FeatureEmptyCategory" "basename":"invalid_feature_empty_category" "message":"Feature category is empty" } {"testcase_name":"FeatureMissingValue" "basename":"invalid_feature_missing_value" "message":"Feature is missing value" } {"testcase_name":"FeatureEmptyValue" "basename":"invalid_feature_empty_value" "message":"Feature value is empty" } {"testcase_name":"RootMissingMorpheme" "basename":"invalid_root_missing_morpheme" "message":"Root is missing morpheme" } {"testcase_name":"RootEmptyMorpheme" "basename":"invalid_root_empty_morpheme" "message":"Root morpheme is empty" } ])<def_stmt>test_raises_exception self basename message<block_start>analysis=_read_analysis(basename)<with_stmt>self.assertRaisesRegexp(validate.IllformedAnalysisError 
message)<block_start>validate.analysis(analysis)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>absltest.main()<block_end> |
<import_stmt>argparse<import_stmt>pickle<import_from_stmt>tokenization Vocab Tokenizer<line_sep>TOKENIZER=('treebank' 'mecab')<def_stmt>argparser <block_start>p=argparse.ArgumentParser()<line_sep># Required parameters
p.add_argument('--corpus' default=<none> type=str required=<true>)<line_sep>p.add_argument('--vocab' default=<none> type=str required=<true>)<line_sep># Other parameters
p.add_argument('--pretrained_vectors' default=<none> type=str)<line_sep>p.add_argument('--is_sentence' action='store_true' help='Whether the corpus is already split into sentences')<line_sep>p.add_argument('--tokenizer' default='treebank' type=str help='Tokenizer used for input corpus tokenization: '+', '.join(TOKENIZER))<line_sep>p.add_argument('--max_seq_length' default=1024 type=int help='The maximum total input sequence length after tokenization')<line_sep>p.add_argument('--unk_token' default='<unk>' type=str help='The representation for any unknown token')<line_sep>p.add_argument('--pad_token' default='<pad>' type=str help='The representation for the special token of padding token')<line_sep>p.add_argument('--bos_token' default='<bos>' type=str help='The representation for the special token of beginning-of-sequence token')<line_sep>p.add_argument('--eos_token' default='<eos>' type=str help='The representation for the special token of end-of-sequence token')<line_sep>p.add_argument('--min_freq' default=3 type=int help='The minimum frequency required for a token')<line_sep>p.add_argument('--lower' action='store_true' help='Whether to convert the texts to lowercase')<line_sep>config=p.parse_args()<line_sep><return>config<block_end><def_stmt>load_pretrained fname<block_start>"""
Load pre-trained FastText word vectors
:param fname: text file containing the word vectors, one per line.
"""<line_sep>fin=open(fname 'r' encoding='utf-8' newline='\n' errors='ignore')<line_sep>n,d=map(int fin.readline().split())<line_sep>print('Loading {} word vectors(dim={})...'.format(n d))<line_sep>word2vec_dict={}<for_stmt>line fin<block_start>tokens=line.rstrip().split(' ')<line_sep>word2vec_dict[tokens[0]]=list(map(float tokens[1:]))<block_end>print('#pretrained_word_vectors:' len(word2vec_dict))<line_sep><return>word2vec_dict<block_end><if_stmt>__name__<eq>'__main__'<block_start>config=argparser()<line_sep>print(config)<line_sep># Select tokenizer
config.tokenizer=config.tokenizer.lower()<if_stmt>config.tokenizer<eq>TOKENIZER[0]<block_start><import_from_stmt>nltk.tokenize word_tokenize<line_sep>tokenization_fn=word_tokenize<block_end><elif_stmt>config.tokenizer<eq>TOKENIZER[1]<block_start><import_from_stmt>konlpy.tag Mecab<line_sep>tokenization_fn=Mecab().morphs<block_end>tokenizer=Tokenizer(tokenization_fn=tokenization_fn is_sentence=config.is_sentence max_seq_length=config.max_seq_length)<line_sep># Tokenization & read tokens
list_of_tokens=[]<with_stmt>open(config.corpus 'r' encoding='utf-8' errors='ignore')<as>reader<block_start><for_stmt>li,line enumerate(reader)<block_start>text=' '.join(line.split('\t')[1:]).strip()<line_sep>list_of_tokens<augadd>tokenizer.tokenize(text)<block_end><block_end># Build vocabulary
vocab=Vocab(list_of_tokens=list_of_tokens unk_token=config.unk_token pad_token=config.pad_token bos_token=config.bos_token eos_token=config.eos_token min_freq=config.min_freq lower=config.lower)<line_sep>vocab.build()<if_stmt>config.pretrained_vectors<block_start>pretrained_vectors=load_pretrained(fname=config.pretrained_vectors)<line_sep>vocab.from_pretrained(pretrained_vectors=pretrained_vectors)<block_end>print('Vocabulary size: ' len(vocab))<line_sep># Save vocabulary
<with_stmt>open(config.vocab 'wb')<as>writer<block_start>pickle.dump(vocab writer)<block_end>print('Vocabulary saved to' config.vocab)<block_end> |
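# A hedged command-line sketch (the script and file names below are assumptions,
# not taken from the code above): the corpus is expected to be tab-separated with
# an id in the first column and text in the remaining columns, since the reader
# joins line.split('\t')[1:] before tokenization.
#
#   python build_vocab.py \
#       --corpus data/corpus.tsv \
#       --vocab data/vocab.pkl \
#       --tokenizer treebank \
#       --min_freq 3 --lower \
#       --pretrained_vectors wiki.en.vec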
################################################################################
#
# cmd_copyToExternal.py
#
# Author: <NAME> | <NAME>
#
# Description: Copies Geo/Weights/Morphs/UV's to External File
#
# Last Update:
#
################################################################################
<import_stmt>lx<import_stmt>lxifc<import_stmt>lxu.command<import_from_stmt>od_copy_paste_external copy_to_external<class_stmt>ODCopyToExternal(lxu.command.BasicCommand)<block_start><def_stmt>__init__ self<block_start>lxu.command.BasicCommand.__init__(self)<block_end><def_stmt>cmd_Flags self<block_start><return>lx.symbol.fCMD_MODEL|lx.symbol.fCMD_UNDO<block_end><def_stmt>basic_Enable self msg<block_start><return><true><block_end><def_stmt>cmd_Interact self<block_start><pass><block_end><def_stmt>basic_Execute self msg flags# TODO: Disable reload for release
<block_start>reload(copy_to_external)<line_sep>copy_to_external.execute()<block_end><def_stmt>cmd_Query self index vaQuery<block_start>lx.notimpl()<block_end><block_end>lx.bless(ODCopyToExternal "OD_CopyToExternal")<line_sep> |
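# Usage note (an assumption about the host application, not stated in the file
# above): once lx.bless() has registered the class as a command server, the
# command can be fired from a modo script or the command entry field, e.g.:
#
#   import lx
#   lx.eval('OD_CopyToExternal')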
<import_from_future_stmt> unicode_literals<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>common MayanAppConfig<import_from_stmt>.licenses *# NOQA
<class_stmt>MIMETypesApp(MayanAppConfig)<block_start>name='mimetype'<line_sep>verbose_name=_('MIME types')<def_stmt>ready self *args **kwargs<block_start>super(MIMETypesApp self).ready(*args **kwargs)<block_end><block_end> |
<import_from_stmt>base BaseDataSet BaseDataLoader<import_from_stmt>utils pallete<import_stmt>numpy<as>np<import_stmt>os<import_stmt>scipy<import_stmt>torch<import_from_stmt>PIL Image<import_stmt>cv2<import_from_stmt>torch.utils.data Dataset<import_from_stmt>torchvision transforms<import_stmt>json<class_stmt>VOCDataset(BaseDataSet)<block_start><def_stmt>__init__ self **kwargs<block_start>self.num_classes=21<line_sep>self.palette=pallete.get_voc_pallete(self.num_classes)<line_sep>super(VOCDataset self).__init__(**kwargs)<block_end><def_stmt>_set_files self<block_start>self.root=os.path.join(self.root 'VOCdevkit/VOC2012')<if_stmt>self.split<eq>"val"<block_start>file_list=os.path.join("dataloaders/voc_splits" f"{self.split}"+".txt")<block_end><elif_stmt>self.split<in>["train_supervised" "train_unsupervised"]<block_start>file_list=os.path.join("dataloaders/voc_splits" f"{self.n_labeled_examples}_{self.split}"+".txt")<block_end><else_stmt><block_start><raise>ValueError(f"Invalid split name {self.split}")<block_end>file_list=[line.rstrip().split(' ')<for>line tuple(open(file_list "r"))]<line_sep>self.files,self.labels=list(zip(*file_list))<block_end><def_stmt>_load_data self index<block_start>image_path=os.path.join(self.root self.files[index][1:])<line_sep>image=np.asarray(Image.open(image_path) dtype=np.float32)<line_sep>image_id=self.files[index].split("/")[-1].split(".")[0]<if_stmt>self.use_weak_lables<block_start>label_path=os.path.join(self.weak_labels_output image_id+".png")<block_end><else_stmt><block_start>label_path=os.path.join(self.root self.labels[index][1:])<block_end>label=np.asarray(Image.open(label_path) dtype=np.int32)<line_sep><return>image label image_id<block_end><block_end><class_stmt>VOC(BaseDataLoader)<block_start><def_stmt>__init__ self kwargs<block_start>self.MEAN=[0.485 0.456 0.406]<line_sep>self.STD=[0.229 0.224 0.225]<line_sep>self.batch_size=kwargs.pop('batch_size')<line_sep>kwargs['mean']=self.MEAN<line_sep>kwargs['std']=self.STD<line_sep>kwargs['ignore_index']=255<try_stmt><block_start>shuffle=kwargs.pop('shuffle')<block_end><except_stmt><block_start>shuffle=<false><block_end>num_workers=kwargs.pop('num_workers')<line_sep>self.dataset=VOCDataset(**kwargs)<line_sep>super(VOC self).__init__(self.dataset self.batch_size shuffle num_workers val_split=<none>)<block_end><block_end> |
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<def_stmt>soft_dice_score output:torch.Tensor target:torch.Tensor smooth:float=0.0 eps:float=1e-7 dims=<none><arrow>torch.Tensor<block_start><assert_stmt>output.size()<eq>target.size()<if_stmt>dims<is><not><none><block_start>intersection=torch.sum(output<times>target dim=dims)<line_sep>cardinality=torch.sum(output+target dim=dims)<line_sep># print('cardinality', cardinality, 'intersection', intersection)
<block_end><else_stmt><block_start>intersection=torch.sum(output<times>target)<line_sep>cardinality=torch.sum(output+target)<block_end>dice_score=(2.0<times>intersection+smooth)/(cardinality+smooth).clamp_min(eps)<line_sep># print('dice_score', dice_score)
<return>dice_score<block_end><class_stmt>DiceLoss(nn.Module)<block_start><def_stmt>__init__ self smooth=1.0 eps=1e-7 ignore_index=<none> weight=<none> mode='MULTICLASS_MODE'<block_start>"""Implementation of Dice loss for image segmentation task.
https://github.com/qubvel/segmentation_models.pytorch
"""<line_sep>super().__init__()<line_sep>self.smooth=smooth<line_sep>self.eps=eps<line_sep>self.ignore_index=ignore_index<line_sep>self.weight=weight<line_sep>self.mode=mode<block_end><def_stmt>forward self output target<block_start>bs=target.size(0)<line_sep>num_classes=output.size(1)<line_sep>dims=(0 2)<line_sep># print(self.mode, self.ignore_index)
<if_stmt>self.mode<eq>'MULTICLASS_MODE'<block_start>output=output.log_softmax(dim=1).exp()<block_end><else_stmt><block_start>output=F.logsigmoid(output).exp()<block_end># output = output.log_softmax(dim=1).exp()
<if_stmt>self.mode<eq>'BINARY_MODE'<block_start>target=target.view(bs 1 -1)<line_sep>output=output.view(bs 1 -1)<if_stmt>self.ignore_index<is><not><none><block_start>mask=target<ne>self.ignore_index<line_sep>output=output<times>mask<line_sep>target=target<times>mask<block_end><block_end><else_stmt><block_start>target=target.view(bs -1)<line_sep>output=output.view(bs num_classes -1)<if_stmt>self.ignore_index<is><not><none><block_start>mask=target<ne>self.ignore_index<line_sep>output=output<times>mask.unsqueeze(1)<line_sep>target=F.one_hot((target<times>mask).to(torch.long) num_classes)# N,H*W -> N,H*W, C
target=target.permute(0 2 1)<times>mask.unsqueeze(1)<block_end><else_stmt><block_start>target=F.one_hot(target num_classes)# N,H*W -> N,H*W, C
target=target.permute(0 2 1)<block_end><block_end># H, C, H*W
scores=soft_dice_score(output target.type_as(output) smooth=self.smooth eps=self.eps dims=dims)<line_sep>loss=1.0-scores<line_sep>mask=target.sum(dims)<g>0<line_sep>loss<augmul>mask.to(loss.dtype)<line_sep><return>loss.mean()<block_end><block_end> |
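# A minimal usage sketch for the loss above, assuming multiclass segmentation
# with N x C x H x W logits and N x H x W integer labels; the shapes and seed
# below are illustrative only.
if __name__ == "__main__":
    torch.manual_seed(0)
    logits = torch.randn(2, 3, 4, 4, requires_grad=True)   # batch of 2, 3 classes, 4x4 maps
    labels = torch.randint(0, 3, (2, 4, 4))                 # integer class index per pixel
    criterion = DiceLoss(mode='MULTICLASS_MODE')
    loss = criterion(logits, labels)                        # scalar loss
    loss.backward()                                         # gradients flow back to the logits
    print(loss.item())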
<import_from_stmt>ryu.base.app_manager RyuApp<import_from_stmt>ryu.controller.ofp_event EventOFPSwitchFeatures<import_from_stmt>ryu.controller.ofp_event EventOFPPortDescStatsReply<import_from_stmt>ryu.controller.handler set_ev_cls<import_from_stmt>ryu.controller.handler CONFIG_DISPATCHER<import_from_stmt>ryu.controller.handler MAIN_DISPATCHER<import_from_stmt>ryu.ofproto.ofproto_v1_2 OFPG_ANY<import_from_stmt>ryu.ofproto.ofproto_v1_3 OFP_VERSION<import_from_stmt>ryu.lib.mac haddr_to_bin<class_stmt>App(RyuApp)<block_start>OFP_VERSIONS=[OFP_VERSION]<def_stmt>__init__ self *args **kwargs<block_start>super(App self).__init__(*args **kwargs)<block_end>@set_ev_cls(EventOFPSwitchFeatures CONFIG_DISPATCHER)<def_stmt>switch_features_handler self ev<block_start>datapath=ev.msg.datapath<line_sep>[self.install_sample(datapath n)<for>n [0]]<block_end><def_stmt>create_meter_mod self datapath command flags_ meter_id bands<block_start>ofproto=datapath.ofproto<line_sep>ofp_parser=datapath.ofproto_parser<line_sep>meter_mod=ofp_parser.OFPMeterMod(datapath command flags_ meter_id bands)<line_sep><return>meter_mod<block_end><def_stmt>install_sample self datapath table_id<block_start>parser=datapath.ofproto_parser<line_sep>ofproto=datapath.ofproto<line_sep>req=parser.OFPPortDescStatsRequest(datapath 0)<line_sep>datapath.send_msg(req)<block_end>@set_ev_cls(EventOFPPortDescStatsReply MAIN_DISPATCHER)<def_stmt>port_desc_stats_reply_handler self ev<block_start>ports=[]<for_stmt>p ev.msg.body<block_start>ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '<concat>'state=0x%08x curr=0x%08x advertised=0x%08x '<concat>'supported=0x%08x peer=0x%08x curr_speed=%d '<concat>'max_speed=%d'%(p.port_no p.hw_addr p.name p.config p.state p.curr p.advertised p.supported p.peer p.curr_speed p.max_speed))<block_end>self.logger.info('OFPPortDescStatsReply received: %s' ports)<block_end><block_end> |
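# Deployment note (the file name is assumed, not given above): a Ryu application
# like this one is normally launched with the ryu-manager runner, e.g.
# `ryu-manager port_desc_stats_app.py`. When an OpenFlow 1.3 switch connects,
# the features handler sends an OFPPortDescStatsRequest and the reply handler
# logs one line per port description.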
<import_stmt>cv2<import_stmt>ProcessWithCV2<line_sep>img1=cv2.imread("D:/py/chinese/7.png")<line_sep>img2=cv2.imread("D:/py/chinese/8.png")<line_sep>a=ProcessWithCV2.dHash(img1 img2 1)<line_sep>print(a)<line_sep> |
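# ProcessWithCV2 is a local module that is not shown here, so the following is
# only a sketch of what a difference-hash ("dHash") comparison typically looks
# like: shrink each image to a (hash_size + 1) x hash_size grayscale grid,
# compare horizontally adjacent pixels to get hash_size * hash_size bits, then
# count differing bits between the two images. The function names are assumptions.
def _dhash_bits(img, hash_size=8):
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)            # imread gives BGR, so convert to gray
    small = cv2.resize(gray, (hash_size + 1, hash_size))    # 9x8 grid for an 8x8 = 64-bit hash
    return (small[:, 1:] > small[:, :-1]).flatten()         # brighter-than-left-neighbour bits

def hamming_distance(img_a, img_b):
    bits_a, bits_b = _dhash_bits(img_a), _dhash_bits(img_b)
    return int((bits_a != bits_b).sum())                     # 0 means the hashes are identical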
# VALID IP ADDRESSES
# O(1) time | O(1) space - the three nested loops each try at most 3 split positions, so only a constant number of candidate addresses is ever checked.
<def_stmt>validIPAddresses string
<block_start>validIPAddresses=[]<if_stmt>len(string)<l>4<block_start><return>[]<block_end><for_stmt>i range(3)<block_start><if_stmt><not>isValidPart(string[:i+1])<block_start><continue><block_end><for_stmt>j range(i+1 i+4)<block_start><if_stmt><not>isValidPart(string[i+1:j+1])<block_start><continue><block_end><for_stmt>k range(j+1 j+4)<block_start><if_stmt><not>isValidPart(string[j+1:k+1])<or><not>isValidPart(string[k+1:])<block_start><continue><block_end>validIP=string[:i+1]+"."+string[i+1:j+1]+"."+string[j+1:k+1]+"."+string[k+1:]<line_sep>validIPAddresses.append(validIP)<block_end><block_end><block_end><return>validIPAddresses<block_end><def_stmt>isValidPart string<block_start><if_stmt>len(string)<eq>1<block_start><return><true><block_end><if_stmt><not>0<l>len(string)<l>4<or>string[0]<eq>"0"<block_start><return><false><block_end><return>0<le>int(string)<le>255<block_end> |
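# Quick sanity check (illustrative calls, not part of the solution above): a
# four-digit string admits exactly one placement of the three dots, and strings
# shorter than four characters cannot form an IP address at all.
print(validIPAddresses("1234"))   # ['1.2.3.4']
print(validIPAddresses("000"))    # []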
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
<import_stmt>attr<import_stmt>xmltodict<import_from_stmt>packagedcode models<import_from_stmt>commoncode filetype<line_sep># Tracing flags
TRACE=<false><def_stmt>logger_debug *args<block_start><pass><block_end><if_stmt>TRACE<block_start><import_stmt>logging<import_stmt>sys<line_sep>logger=logging.getLogger(__name__)<line_sep># logging.basicConfig(level=logging.DEBUG, stream=sys.stdout)
logging.basicConfig(stream=sys.stdout)<line_sep>logger.setLevel(logging.DEBUG)<def_stmt>logger_debug *args<block_start><return>logger.debug(' '.join(isinstance(a str)<and>a<or>repr(a)<for>a args))<block_end><block_end>@attr.s()<class_stmt>MicrosoftUpdatePackage(models.Package models.PackageManifest)<block_start>extensions=('.mum' )<line_sep>filetypes=('xml 1.0 document' )<line_sep>mimetypes=('text/xml' )<line_sep>default_type='windows-update'<block_end>@attr.s()<class_stmt>MicrosoftUpdateManifest(MicrosoftUpdatePackage models.PackageManifest)<block_start>@classmethod<def_stmt>is_manifest cls location<block_start>"""
Return True if the file at ``location`` is likely a manifest of this type.
"""<line_sep><return>filetype.is_file(location)<and>location.endswith('.mum')<block_end>@classmethod<def_stmt>recognize cls location<block_start>"""
Yield one or more Package manifest objects given a file ``location`` pointing to a
package archive, manifest or similar.
"""<with_stmt>open(location 'rb')<as>loc<block_start>parsed=xmltodict.parse(loc)<block_end><if_stmt>TRACE<block_start>logger_debug('parsed:' parsed)<block_end><if_stmt><not>parsed<block_start><return><block_end>assembly=parsed.get('assembly' {})<line_sep>description=assembly.get('@description' '')<line_sep>company=assembly.get('@company' '')<line_sep>copyright=assembly.get('@copyright' '')<line_sep>support_url=assembly.get('@supportInformation' '')<line_sep>assembly_identity=assembly.get('assemblyIdentity' {})<line_sep>name=assembly_identity.get('@name' '')<line_sep>version=assembly_identity.get('@version' '')<line_sep>parties=[]<if_stmt>company<block_start>parties.append(models.Party(name=company type=models.party_org role='owner' ))<block_end><yield>cls(name=name version=version description=description homepage_url=support_url parties=parties copyright=copyright )<block_end><block_end> |