Dataset schema (one row per file; ⌀ marks nullable columns):
hexsha: string (len 40) | size: int64 (2 .. 1.05M) | ext: string (9 classes) | lang: string (1 class)
max_stars_repo_path: string (len 4 .. 193) | max_stars_repo_name: string (len 6 .. 109) | max_stars_repo_head_hexsha: string (len 40 .. 78) | max_stars_repo_licenses: sequence | max_stars_count: int64 (1 .. 36.6k) ⌀ | max_stars_repo_stars_event_min_datetime: string (len 24) ⌀ | max_stars_repo_stars_event_max_datetime: string (len 24) ⌀
max_issues_repo_path: string (len 4 .. 193) | max_issues_repo_name: string (len 6 .. 109) | max_issues_repo_head_hexsha: string (len 40 .. 78) | max_issues_repo_licenses: sequence | max_issues_count: int64 (1 .. 29.8k) ⌀ | max_issues_repo_issues_event_min_datetime: string (len 24) ⌀ | max_issues_repo_issues_event_max_datetime: string (len 24) ⌀
max_forks_repo_path: string (len 4 .. 193) | max_forks_repo_name: string (len 6 .. 109) | max_forks_repo_head_hexsha: string (len 40 .. 78) | max_forks_repo_licenses: sequence | max_forks_count: int64 (1 .. 11.2k) ⌀ | max_forks_repo_forks_event_min_datetime: string (len 24) ⌀ | max_forks_repo_forks_event_max_datetime: string (len 24) ⌀
content: string (len 2 .. 1.05M) | avg_line_length: float64 (1 .. 404k) | max_line_length: int64 (1 .. 1.03M) | alphanum_fraction: float64 (0 .. 1)
f709f44fde235e8c6f38256a50cbfb476ceee8e9 | 373 | py | Python | JDjangoDemo/docs/migrations/0003_auto_20201028_1758.py | JIYANG-PLUS/JDjango | 57cbb13b2b4c07f34d546c0c637c22f60c1e692a | [
"MIT"
] | 3 | 2020-12-28T05:09:02.000Z | 2021-06-23T10:02:03.000Z | JDjangoDemo/docs/migrations/0003_auto_20201028_1758.py | JIYANG-PLUS/JDjango | 57cbb13b2b4c07f34d546c0c637c22f60c1e692a | [
"MIT"
] | null | null | null | JDjangoDemo/docs/migrations/0003_auto_20201028_1758.py | JIYANG-PLUS/JDjango | 57cbb13b2b4c07f34d546c0c637c22f60c1e692a | [
"MIT"
] | null | null | null | # Generated by Django 3.0.8 on 2020-10-28 17:58
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('docs', '0002_article_version'),
]
operations = [
migrations.AlterModelOptions(
name='article',
options={'verbose_name': '插件内容', 'verbose_name_plural': '插件内容'},
),
]
| 20.722222 | 76 | 0.603217 |
f709f87c4beb8fa85bcf5d596823ca75307a6393 | 29,244 | py | Python | test/test_fnetout.py | vanderhe/fortnet-python | 118237f0ce750852d973b213161fc04623fd7f82 | [
"BSD-2-Clause"
] | null | null | null | test/test_fnetout.py | vanderhe/fortnet-python | 118237f0ce750852d973b213161fc04623fd7f82 | [
"BSD-2-Clause"
] | 1 | 2022-03-11T15:21:56.000Z | 2022-03-11T15:33:46.000Z | test/test_fnetout.py | vanderhe/fortnet-python | 118237f0ce750852d973b213161fc04623fd7f82 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python3
#------------------------------------------------------------------------------#
# fortnet-python: Python Tools for the Fortnet Software Package #
# Copyright (C) 2021 - 2022 T. W. van der Heide #
# #
# See the LICENSE file for terms of usage and distribution. #
#------------------------------------------------------------------------------#
'''
Regression tests covering the Fnetout class of Fortformat.
'''
import os
import pytest
import numpy as np
from common import compare_fnetout_references
REFPATH = os.path.join(os.getcwd(), 'test', 'references', 'Fnetout')
def test_predict_atomic():
'''Test extraction capabilities for a prediction run
with a network that was trained on atomic targets.
'''
fname = 'predict_atomic.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 0
ref['natomictargets'] = 2
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = None
ref['globaltargets'] = None
ref['globalpredictions'] = None
ref['globalpredictions_atomic'] = None
ref['atomicpredictions'] = [
np.array([[1.961575401201565427e-01, 9.168128808877051839e-01],
[1.325239781646761206e-01, 7.994346410064820940e-01],
[1.826092611054506987e-01, 8.918864627286081648e-01],
[1.951603716977679814e-01, 9.149779051068115399e-01],
[1.963975544054146483e-01, 9.172546297234291934e-01],
[1.365085697599923986e-01, 8.068187835637852245e-01],
[1.937271428648690563e-01, 9.123404738385268997e-01],
[1.963833753374974733e-01, 9.172283491672438283e-01],
[-2.963259061179163711e-01, 6.622931487753776381e+00],
[-3.116645694102148090e-01, 6.341542248977436458e+00],
[-2.954852994924470622e-01, 6.639489278084699464e+00],
[-3.046303752343871851e-01, 6.455384967114186523e+00]],
dtype=float),
np.array([[1.811418904020697107e-01, 8.890399580545689240e-01],
[1.286134726005213336e-01, 7.921870956352004001e-01],
[1.287072680065694807e-01, 7.923610013248644224e-01],
[1.285878019428332852e-01, 7.921394561667119971e-01],
[-3.205833278148639831e-01, 6.199868006587744951e+00],
[-3.205832449473826062e-01, 6.199870243635043465e+00]],
dtype=float),
np.array([[1.508316035937055932e-01, 8.333084902706219266e-01],
[1.963987299989748136e-01, 9.172568038424152581e-01],
[1.963985352644728455e-01, 9.172564425915140651e-01],
[1.314458979434688091e-01, 7.974318952109518133e-01],
[1.959840207934034628e-01, 9.164924149116437935e-01],
[1.962475111339566924e-01, 9.169785285430018806e-01],
[1.963735428400687211e-01, 9.172103673056410944e-01],
[1.692361060177546561e-01, 8.672524620359242098e-01],
[-2.953595347026437556e-01, 6.642087650077651340e+00],
[-3.151594350113108844e-01, 6.282255421963240494e+00],
[-2.991868120084945071e-01, 6.559077847747195378e+00],
[-3.170787084631181418e-01, 6.252835565560094011e+00]],
dtype=float),
np.array([[1.304479687184249281e-01, 7.955871276861898878e-01],
[1.297462265528342706e-01, 7.942881684589961910e-01],
[1.298443617239196379e-01, 7.944708584405727470e-01],
[1.961872820312715870e-01, 9.168651269507970270e-01],
[-3.205789586106497779e-01, 6.199943703977714549e+00],
[-3.205781729831197469e-01, 6.199947713843369179e+00]],
dtype=float),
np.array([[1.288099388080513885e-01, 7.925517780736619500e-01],
[1.286199169387698682e-01, 7.921996037242402533e-01],
[1.286878255987483899e-01, 7.923246429757131448e-01],
[1.312376406171068266e-01, 7.970445915261700209e-01],
[-3.205835576648750629e-01, 6.199865084107108792e+00],
[-3.205822580166140523e-01, 6.199887555086769808e+00]],
dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_predict_global():
'''Test extraction capabilities for a prediction run
with a network that was trained on global targets.
'''
fname = 'predict_global.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 1
ref['natomictargets'] = 0
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = None
ref['globaltargets'] = None
ref['globalpredictions_atomic'] = [
np.array([-1.526436789762218496e+02], dtype=float),
np.array([[-4.585193773117663341e+02],
[-4.585193773117663341e+02]], dtype=float) / 2.0,
np.array([[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02]], dtype=float) / 3.0,
np.array([[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02]], dtype=float) / 4.0,
np.array([[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02]], dtype=float) / 5.0]
ref['globalpredictions'] = [
np.array([-1.526436789762218496e+02], dtype=float),
np.array([-4.585193773117663341e+02], dtype=float),
np.array([-2.290754290677185736e+02], dtype=float),
np.array([-6.877477714671086915e+02], dtype=float),
np.array([-5.349057545062817098e+02], dtype=float)]
ref['atomicpredictions'] = None
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_predict_global_singleforces():
'''Test extraction capabilities for a prediction run with a network
that was trained on global targets and calculates atomic forces.
'''
fname = 'predict_global_singleforces.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 2
ref['nglobaltargets'] = 1
ref['natomictargets'] = 0
ref['atomictargets'] = None
ref['globaltargets'] = None
ref['atomicpredictions'] = None
ref['tforces'] = True
ref['forces'] = []
ref['forces'].append([])
ref['forces'].append([])
ref['forces'][0].append(np.array([
[-1.129280561189105470e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.129280561189105470e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.464270111301352983e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.464270111301352983e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['globalpredictions_atomic'] = [
np.array([[-4.301790810131604914e-01],
[-4.301790810131604914e-01]], dtype=float) / 2.0,
np.array([[-5.025593389423121948e-01],
[-5.025593389423121948e-01]], dtype=float) / 2.0]
ref['globalpredictions'] = [
np.array([-4.301790810131604914e-01], dtype=float),
np.array([-5.025593389423121948e-01], dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_predict_global_multiforces():
'''Test extraction capabilities for a prediction run with a network
that was trained on global targets and calculates atomic forces.
'''
fname = 'predict_global_multiforces.hdf5'
ref = {}
ref['mode'] = 'predict'
ref['ndatapoints'] = 2
ref['nglobaltargets'] = 3
ref['natomictargets'] = 0
ref['atomictargets'] = None
ref['globaltargets'] = None
ref['atomicpredictions'] = None
ref['tforces'] = True
ref['forces'] = []
ref['forces'].append([])
ref['forces'].append([])
ref['forces'][0].append(np.array([
[-1.113504383113195217e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.113504383113195217e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][0].append(np.array([
[-1.117387033151562292e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.117387033151562292e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][0].append(np.array([
[-1.110108965167277972e+00, 0.000000000000000000e+00,
0.000000000000000000e+00],
[1.110108965167277972e+00, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.450938994823964379e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.450938994823964379e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.465140042623886529e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.465140042623886529e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['forces'][1].append(np.array([
[-8.438788427604926312e-01, 0.000000000000000000e+00,
0.000000000000000000e+00],
[8.438788427604926312e-01, 0.000000000000000000e+00,
0.000000000000000000e+00]], dtype=float))
ref['globalpredictions_atomic'] = [
np.array([[-4.304246998683396441e-01, -4.302864774322330277e-01,
-4.305433861504512905e-01],
[-4.304246998683396441e-01, -4.302864774322330277e-01,
-4.305433861504512905e-01]], dtype=float) / 2.0,
np.array([[-5.022394949529731534e-01, -5.022869347972704901e-01,
-5.021969559503443037e-01],
[-5.022394949529731534e-01, -5.022869347972704901e-01,
-5.021969559503443037e-01]], dtype=float) / 2.0]
ref['globalpredictions'] = [
np.array([-4.304246998683396441e-01, -4.302864774322330277e-01,
-4.305433861504512905e-01], dtype=float),
np.array([-5.022394949529731534e-01, -5.022869347972704901e-01,
-5.021969559503443037e-01], dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_validate_atomic():
'''Test extraction capabilities for a validation run
with a network that was trained on atomic targets.
'''
fname = 'validate_atomic.hdf5'
ref = {}
ref['mode'] = 'validate'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 0
ref['natomictargets'] = 2
ref['globaltargets'] = None
ref['globalpredictions'] = None
ref['globalpredictions_atomic'] = None
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = [
np.array([
[1.540549993515014648e-01, 8.459450006484985352e-01],
[1.883080005645751953e-01, 8.116919994354248047e-01],
[1.595949977636337280e-01, 8.404050022363662720e-01],
[1.432220041751861572e-01, 8.567779958248138428e-01],
[1.232710033655166626e-01, 8.767289966344833374e-01],
[1.735100001096725464e-01, 8.264899998903274536e-01],
[1.588409990072250366e-01, 8.411590009927749634e-01],
[1.403059959411621094e-01, 8.596940040588378906e-01],
[-2.634609937667846680e-01, 6.263460993766784668e+00],
[-3.214380145072937012e-01, 6.321438014507293701e+00],
[-3.043099939823150635e-01, 6.304309993982315063e+00],
[-3.519429862499237061e-01, 6.351942986249923706e+00]],
dtype=float),
np.array([
[1.272429972887039185e-01, 8.727570027112960815e-01],
[1.549790054559707642e-01, 8.450209945440292358e-01],
[1.774729937314987183e-01, 8.225270062685012817e-01],
[1.796700060367584229e-01, 8.203299939632415771e-01],
[-3.525030016899108887e-01, 6.352503001689910889e+00],
[-2.868520021438598633e-01, 6.286852002143859863e+00]],
dtype=float),
np.array([
[1.852180063724517822e-01, 8.147819936275482178e-01],
[1.311800032854080200e-01, 8.688199967145919800e-01],
[1.232030019164085388e-01, 8.767969980835914612e-01],
[1.774370074272155762e-01, 8.225629925727844238e-01],
[1.587480008602142334e-01, 8.412519991397857666e-01],
[1.444180011749267578e-01, 8.555819988250732422e-01],
[1.365029960870742798e-01, 8.634970039129257202e-01],
[1.802569925785064697e-01, 8.197430074214935303e-01],
[-2.689329981803894043e-01, 6.268932998180389404e+00],
[-3.368290066719055176e-01, 6.336829006671905518e+00],
[-3.142969906330108643e-01, 6.314296990633010864e+00],
[-3.169249892234802246e-01, 6.316924989223480225e+00]],
dtype=float),
np.array([
[1.770180016756057739e-01, 8.229819983243942261e-01],
[1.812230050563812256e-01, 8.187769949436187744e-01],
[1.482979953289031982e-01, 8.517020046710968018e-01],
[9.460300207138061523e-02, 9.053969979286193848e-01],
[-2.429430037736892700e-01, 6.242943003773689270e+00],
[-3.581880033016204834e-01, 6.358188003301620483e+00]],
dtype=float),
np.array([
[1.596090048551559448e-01, 8.403909951448440552e-01],
[1.659840047359466553e-01, 8.340159952640533447e-01],
[1.713179945945739746e-01, 8.286820054054260254e-01],
[1.658540070056915283e-01, 8.341459929943084717e-01],
[-3.264440000057220459e-01, 6.326444000005722046e+00],
[-3.363139927387237549e-01, 6.336313992738723755e+00]],
dtype=float)]
ref['atomicpredictions'] = [
np.array([
[1.961575401201565427e-01, 9.168128808877051839e-01],
[1.325239781646761206e-01, 7.994346410064820940e-01],
[1.826092611054506987e-01, 8.918864627286081648e-01],
[1.951603716977679814e-01, 9.149779051068115399e-01],
[1.963975544054146483e-01, 9.172546297234291934e-01],
[1.365085697599923986e-01, 8.068187835637852245e-01],
[1.937271428648690563e-01, 9.123404738385268997e-01],
[1.963833753374974733e-01, 9.172283491672438283e-01],
[-2.963259061179163711e-01, 6.622931487753776381e+00],
[-3.116645694102148090e-01, 6.341542248977436458e+00],
[-2.954852994924470622e-01, 6.639489278084699464e+00],
[-3.046303752343871851e-01, 6.455384967114186523e+00]],
dtype=float),
np.array([
[1.811418904020697107e-01, 8.890399580545689240e-01],
[1.286134726005213336e-01, 7.921870956352004001e-01],
[1.287072680065694807e-01, 7.923610013248644224e-01],
[1.285878019428332852e-01, 7.921394561667119971e-01],
[-3.205833278148639831e-01, 6.199868006587744951e+00],
[-3.205832449473826062e-01, 6.199870243635043465e+00]],
dtype=float),
np.array([
[1.508316035937055932e-01, 8.333084902706219266e-01],
[1.963987299989748136e-01, 9.172568038424152581e-01],
[1.963985352644728455e-01, 9.172564425915140651e-01],
[1.314458979434688091e-01, 7.974318952109518133e-01],
[1.959840207934034628e-01, 9.164924149116437935e-01],
[1.962475111339566924e-01, 9.169785285430018806e-01],
[1.963735428400687211e-01, 9.172103673056410944e-01],
[1.692361060177546561e-01, 8.672524620359242098e-01],
[-2.953595347026437556e-01, 6.642087650077651340e+00],
[-3.151594350113108844e-01, 6.282255421963240494e+00],
[-2.991868120084945071e-01, 6.559077847747195378e+00],
[-3.170787084631181418e-01, 6.252835565560094011e+00]],
dtype=float),
np.array([
[1.304479687184249281e-01, 7.955871276861898878e-01],
[1.297462265528342706e-01, 7.942881684589961910e-01],
[1.298443617239196379e-01, 7.944708584405727470e-01],
[1.961872820312715870e-01, 9.168651269507970270e-01],
[-3.205789586106497779e-01, 6.199943703977714549e+00],
[-3.205781729831197469e-01, 6.199947713843369179e+00]],
dtype=float),
np.array([
[1.288099388080513885e-01, 7.925517780736619500e-01],
[1.286199169387698682e-01, 7.921996037242402533e-01],
[1.286878255987483899e-01, 7.923246429757131448e-01],
[1.312376406171068266e-01, 7.970445915261700209e-01],
[-3.205835576648750629e-01, 6.199865084107108792e+00],
[-3.205822580166140523e-01, 6.199887555086769808e+00]],
dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_validate_global():
'''Test extraction capabilities for a validation run
with a network that was trained on global targets.
'''
fname = 'validate_global.hdf5'
ref = {}
ref['mode'] = 'validate'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 1
ref['natomictargets'] = 0
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = None
ref['atomicpredictions'] = None
ref['globaltargets'] = [
np.array([-1.527736989418316114e+02], dtype=float),
np.array([-4.584216715420000128e+02], dtype=float),
np.array([-2.291870019319999869e+02], dtype=float),
np.array([-6.876760346160000381e+02], dtype=float),
np.array([-5.348338707069999600e+02], dtype=float)]
ref['globalpredictions'] = [
np.array([-1.526436789762218496e+02], dtype=float),
np.array([-4.585193773117663341e+02], dtype=float),
np.array([-2.290754290677185736e+02], dtype=float),
np.array([-6.877477714671086915e+02], dtype=float),
np.array([-5.349057545062817098e+02], dtype=float)]
ref['globalpredictions_atomic'] = [
np.array([[-1.526436789762218496e+02]], dtype=float),
np.array([[-4.585193773117663341e+02],
[-4.585193773117663341e+02]], dtype=float) / 2.0,
np.array([[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02]], dtype=float) / 3.0,
np.array([[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02]], dtype=float) / 4.0,
np.array([[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02]], dtype=float) / 5.0]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
def test_validate_atomic_global():
'''Test extraction capabilities for a validation run with a
network that was trained on both, atomic and global targets.
'''
fname = 'validate_atomic_global.hdf5'
ref = {}
ref['mode'] = 'validate'
ref['ndatapoints'] = 5
ref['nglobaltargets'] = 1
ref['natomictargets'] = 2
ref['targets'] = True
ref['tforces'] = False
ref['forces'] = None
ref['atomictargets'] = [
np.array([
[1.540549993515014648e-01, 8.459450006484985352e-01],
[1.883080005645751953e-01, 8.116919994354248047e-01],
[1.595949977636337280e-01, 8.404050022363662720e-01],
[1.432220041751861572e-01, 8.567779958248138428e-01],
[1.232710033655166626e-01, 8.767289966344833374e-01],
[1.735100001096725464e-01, 8.264899998903274536e-01],
[1.588409990072250366e-01, 8.411590009927749634e-01],
[1.403059959411621094e-01, 8.596940040588378906e-01],
[-2.634609937667846680e-01, 6.263460993766784668e+00],
[-3.214380145072937012e-01, 6.321438014507293701e+00],
[-3.043099939823150635e-01, 6.304309993982315063e+00],
[-3.519429862499237061e-01, 6.351942986249923706e+00]],
dtype=float),
np.array([
[1.272429972887039185e-01, 8.727570027112960815e-01],
[1.549790054559707642e-01, 8.450209945440292358e-01],
[1.774729937314987183e-01, 8.225270062685012817e-01],
[1.796700060367584229e-01, 8.203299939632415771e-01],
[-3.525030016899108887e-01, 6.352503001689910889e+00],
[-2.868520021438598633e-01, 6.286852002143859863e+00]],
dtype=float),
np.array([
[1.852180063724517822e-01, 8.147819936275482178e-01],
[1.311800032854080200e-01, 8.688199967145919800e-01],
[1.232030019164085388e-01, 8.767969980835914612e-01],
[1.774370074272155762e-01, 8.225629925727844238e-01],
[1.587480008602142334e-01, 8.412519991397857666e-01],
[1.444180011749267578e-01, 8.555819988250732422e-01],
[1.365029960870742798e-01, 8.634970039129257202e-01],
[1.802569925785064697e-01, 8.197430074214935303e-01],
[-2.689329981803894043e-01, 6.268932998180389404e+00],
[-3.368290066719055176e-01, 6.336829006671905518e+00],
[-3.142969906330108643e-01, 6.314296990633010864e+00],
[-3.169249892234802246e-01, 6.316924989223480225e+00]],
dtype=float),
np.array([
[1.770180016756057739e-01, 8.229819983243942261e-01],
[1.812230050563812256e-01, 8.187769949436187744e-01],
[1.482979953289031982e-01, 8.517020046710968018e-01],
[9.460300207138061523e-02, 9.053969979286193848e-01],
[-2.429430037736892700e-01, 6.242943003773689270e+00],
[-3.581880033016204834e-01, 6.358188003301620483e+00]],
dtype=float),
np.array([
[1.596090048551559448e-01, 8.403909951448440552e-01],
[1.659840047359466553e-01, 8.340159952640533447e-01],
[1.713179945945739746e-01, 8.286820054054260254e-01],
[1.658540070056915283e-01, 8.341459929943084717e-01],
[-3.264440000057220459e-01, 6.326444000005722046e+00],
[-3.363139927387237549e-01, 6.336313992738723755e+00]],
dtype=float)]
ref['atomicpredictions'] = [
np.array([
[1.961575401201565427e-01, 9.168128808877051839e-01],
[1.325239781646761206e-01, 7.994346410064820940e-01],
[1.826092611054506987e-01, 8.918864627286081648e-01],
[1.951603716977679814e-01, 9.149779051068115399e-01],
[1.963975544054146483e-01, 9.172546297234291934e-01],
[1.365085697599923986e-01, 8.068187835637852245e-01],
[1.937271428648690563e-01, 9.123404738385268997e-01],
[1.963833753374974733e-01, 9.172283491672438283e-01],
[-2.963259061179163711e-01, 6.622931487753776381e+00],
[-3.116645694102148090e-01, 6.341542248977436458e+00],
[-2.954852994924470622e-01, 6.639489278084699464e+00],
[-3.046303752343871851e-01, 6.455384967114186523e+00]],
dtype=float),
np.array([
[1.811418904020697107e-01, 8.890399580545689240e-01],
[1.286134726005213336e-01, 7.921870956352004001e-01],
[1.287072680065694807e-01, 7.923610013248644224e-01],
[1.285878019428332852e-01, 7.921394561667119971e-01],
[-3.205833278148639831e-01, 6.199868006587744951e+00],
[-3.205832449473826062e-01, 6.199870243635043465e+00]],
dtype=float),
np.array([
[1.508316035937055932e-01, 8.333084902706219266e-01],
[1.963987299989748136e-01, 9.172568038424152581e-01],
[1.963985352644728455e-01, 9.172564425915140651e-01],
[1.314458979434688091e-01, 7.974318952109518133e-01],
[1.959840207934034628e-01, 9.164924149116437935e-01],
[1.962475111339566924e-01, 9.169785285430018806e-01],
[1.963735428400687211e-01, 9.172103673056410944e-01],
[1.692361060177546561e-01, 8.672524620359242098e-01],
[-2.953595347026437556e-01, 6.642087650077651340e+00],
[-3.151594350113108844e-01, 6.282255421963240494e+00],
[-2.991868120084945071e-01, 6.559077847747195378e+00],
[-3.170787084631181418e-01, 6.252835565560094011e+00]],
dtype=float),
np.array([
[1.304479687184249281e-01, 7.955871276861898878e-01],
[1.297462265528342706e-01, 7.942881684589961910e-01],
[1.298443617239196379e-01, 7.944708584405727470e-01],
[1.961872820312715870e-01, 9.168651269507970270e-01],
[-3.205789586106497779e-01, 6.199943703977714549e+00],
[-3.205781729831197469e-01, 6.199947713843369179e+00]],
dtype=float),
np.array([
[1.288099388080513885e-01, 7.925517780736619500e-01],
[1.286199169387698682e-01, 7.921996037242402533e-01],
[1.286878255987483899e-01, 7.923246429757131448e-01],
[1.312376406171068266e-01, 7.970445915261700209e-01],
[-3.205835576648750629e-01, 6.199865084107108792e+00],
[-3.205822580166140523e-01, 6.199887555086769808e+00]],
dtype=float)]
ref['globaltargets'] = [
np.array([-1.527736989418316114e+02], dtype=float),
np.array([-4.584216715420000128e+02], dtype=float),
np.array([-2.291870019319999869e+02], dtype=float),
np.array([-6.876760346160000381e+02], dtype=float),
np.array([-5.348338707069999600e+02], dtype=float)]
ref['globalpredictions'] = [
np.array([-1.526436789762218496e+02], dtype=float) * 12.0,
np.array([-4.585193773117663341e+02], dtype=float) * 6.0,
np.array([-2.290754290677185736e+02], dtype=float) * 12.0,
np.array([-6.877477714671086915e+02], dtype=float) * 6.0,
np.array([-5.349057545062817098e+02], dtype=float) * 6.0]
ref['globalpredictions_atomic'] = [
np.array([[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02],
[-1.526436789762218496e+02]], dtype=float),
np.array([[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02],
[-4.585193773117663341e+02]], dtype=float),
np.array([[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02],
[-2.290754290677185736e+02]], dtype=float),
np.array([[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02],
[-6.877477714671086915e+02]], dtype=float),
np.array([[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02],
[-5.349057545062817098e+02]], dtype=float)]
equal = compare_fnetout_references(ref, os.path.join(REFPATH, '_' + fname))
assert equal
if __name__ == '__main__':
pytest.main()
| 45.622465 | 80 | 0.610792 |
f709ff30745f048a9069257f0e215166f25c956b | 1,509 | py | Python | msdsl/assignment.py | sgherbst/msdsl | e38d5ecdb88b3574bda62f22a4f91ce3e4173d12 | [
"MIT"
] | 15 | 2019-05-14T10:12:23.000Z | 2022-03-29T15:29:52.000Z | msdsl/assignment.py | sgherbst/msdsl | e38d5ecdb88b3574bda62f22a4f91ce3e4173d12 | [
"MIT"
] | 19 | 2020-01-22T21:44:33.000Z | 2021-06-05T02:10:41.000Z | msdsl/assignment.py | sgherbst/msdsl | e38d5ecdb88b3574bda62f22a4f91ce3e4173d12 | [
"MIT"
] | 5 | 2019-10-21T09:53:17.000Z | 2021-08-10T17:32:20.000Z | from msdsl.expr.expr import ModelExpr
from msdsl.expr.signals import Signal, DigitalSignal, AnalogSignal
from msdsl.expr.format import RealFormat
from msdsl.expr.table import Table
class Assignment:
def __init__(self, signal: Signal, expr: ModelExpr, check_format=True):
self.signal = signal
self.expr = expr
self.check_format = check_format
class BindingAssignment(Assignment):
pass
class ThisCycleAssignment(Assignment):
pass
class NextCycleAssignment(Assignment):
def __init__(self, *args, clk=None, rst=None, ce=None, **kwargs):
self.clk = clk
self.rst = rst
self.ce = ce
super().__init__(*args, **kwargs)
class SyncRomAssignment(Assignment):
def __init__(self, signal: Signal, table: Table, addr: ModelExpr,
clk=None, ce=None, should_bind=False):
self.table = table
self.clk = clk
self.ce = ce
self.should_bind = should_bind
super().__init__(signal=signal, expr=addr)
class SyncRamAssignment(Assignment):
def __init__(self, signal: AnalogSignal, format_: RealFormat, addr: ModelExpr,
clk: Signal=None, ce: Signal=None, we: Signal=None,
din: Signal=None, should_bind=False):
self.format_ = format_
self.clk = clk
self.ce = ce
self.we = we
self.din = din
self.should_bind = should_bind
super().__init__(signal=signal, expr=addr)
| 33.533333 | 83 | 0.636183 |
f70a125670f78eacba803b405bf1888af7478244 | 853 | py | Python | aitlas/transforms/classification.py | tiendzung-le/aitlas | 4725693a5c073cc80a617fb9bab5a1557c3c3270 | [
"MIT"
] | 32 | 2020-12-04T19:48:19.000Z | 2022-03-16T18:18:05.000Z | aitlas/transforms/classification.py | likyoo/aitlas | 1c365e055c18e349e41670a4137c4d2b88671af9 | [
"MIT"
] | 2 | 2021-04-11T17:09:14.000Z | 2021-05-14T13:22:41.000Z | aitlas/transforms/classification.py | likyoo/aitlas | 1c365e055c18e349e41670a4137c4d2b88671af9 | [
"MIT"
] | 8 | 2021-04-06T22:06:27.000Z | 2022-01-30T06:01:39.000Z | from torchvision import transforms
from ..base import BaseTransforms
class ResizeCenterCropFlipHVToTensor(BaseTransforms):
def __call__(self, sample):
data_transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.RandomHorizontalFlip(),
transforms.RandomVerticalFlip(),
transforms.ToTensor(),
])
return data_transforms(sample)
class ResizeCenterCropToTensor(BaseTransforms):
def __call__(self, sample):
data_transforms = transforms.Compose([
transforms.ToPILImage(),
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
])
return data_transforms(sample)
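# Minimal usage sketch (not part of the aitlas API): both transform classes
# above accept an HWC uint8 array, as returned by most image readers, and
# produce a float tensor of shape (3, 224, 224) scaled to [0, 1]. The random
# image below is a hypothetical stand-in for real data.
if __name__ == "__main__":
    import numpy as np

    image = np.random.randint(0, 256, size=(300, 400, 3), dtype=np.uint8)
    tensor = ResizeCenterCropToTensor()(image)
    print(tensor.shape)  # torch.Size([3, 224, 224])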
| 28.433333 | 54 | 0.620164 |
f70a13ae6569d81bdfa4c8e185bdd5411b6c5afb | 273 | py | Python | sciwing/modules/__init__.py | sean-dingxu/sciwing | 75eca1ea43be165eab20cf8bd81bbc19cecda74c | [
"MIT"
] | 50 | 2019-09-13T10:32:29.000Z | 2022-02-14T16:52:53.000Z | sciwing/modules/__init__.py | sean-dingxu/sciwing | 75eca1ea43be165eab20cf8bd81bbc19cecda74c | [
"MIT"
] | 31 | 2019-09-03T11:06:03.000Z | 2021-08-20T14:57:09.000Z | sciwing/modules/__init__.py | sean-dingxu/sciwing | 75eca1ea43be165eab20cf8bd81bbc19cecda74c | [
"MIT"
] | 9 | 2019-09-16T03:25:15.000Z | 2021-05-11T10:28:25.000Z | from sciwing.modules.embedders import *
from sciwing.modules.bow_encoder import BOW_Encoder
from sciwing.modules.lstm2vecencoder import LSTM2VecEncoder
from sciwing.modules.lstm2seqencoder import Lstm2SeqEncoder
from sciwing.modules.charlstm_encoder import CharLSTMEncoder
| 45.5 | 60 | 0.886447 |
f70a4d8c027af9c7d598f7510fa3b661626b62ff | 4,113 | py | Python | inkscape_control.py | pkumath/datastructure | 0b440b59af73ed73c575df5cd1c67946aa510dba | [
"MIT"
] | 4 | 2020-05-19T05:38:37.000Z | 2020-05-27T04:14:17.000Z | inkscape_control.py | pkumath/datastructure | 0b440b59af73ed73c575df5cd1c67946aa510dba | [
"MIT"
] | null | null | null | inkscape_control.py | pkumath/datastructure | 0b440b59af73ed73c575df5cd1c67946aa510dba | [
"MIT"
] | 1 | 2020-05-19T05:41:53.000Z | 2020-05-19T05:41:53.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import paste
import subprocess
from multiprocessing import Process
from pathlib import Path
import tkinter.messagebox as messagebox
from shutil import copy
from appdirs import user_config_dir
import logging as log
from globe import Globe as globe
from util import StrUtil as strutil
import workspace
SYSTEM = globe.SYSTEM
if SYSTEM == "Darwin":
from pynput import keyboard
elif SYSTEM == "Windows":
import keyboard
import mouse as w_mouse
user_dir = Path(user_config_dir("project", "ww"))
if not user_dir.is_dir():
user_dir.mkdir(parents=True)
roots_file = user_dir / 'roots'
template = user_dir / 'template.svg'
config = user_dir / 'config.py'
if not template.is_file():
source = str(Path(__file__).parent / 'template.svg')
destination = str(template)
copy(source, destination)
def inkscape(path):
log.info("Inkscape function started")
#
# def for_canonical(f):
# log.info("for_canonical")
# return lambda k: f(l.canonical(k))
# hotkey = keyboard.HotKey(
# keyboard.HotKey.parse('<cmd>+u'),
# on_activate)
if SYSTEM == "Darwin":
processOpen = subprocess.Popen(['/Applications/Inkscape.app/Contents/MacOS/inkscape', str(path)])
log.info("Opening file")
elif SYSTEM == "Windows":
processOpen = subprocess.Popen(['inkscape', str(path)])
log.info("Opening file")
# with keyboard.GlobalHotKeys({'<cmd>+i': paste.open_vim}) as hotkey:
# hotkey.join()
# l = keyboard.Listener(
# on_press=for_canonical(hotkey.press),
# on_release=for_canonical(hotkey.release),
# # suppress=True
# )
# l.start()
processOpen.wait()
log.info("Inkscape terminated")
if SYSTEM == "Darwin":
version = os.popen('/Applications/Inkscape.app/Contents/MacOS/inkscape --version').readlines()
if '4035a4f' not in str(version):
            messagebox.showinfo('Warning!', 'This Inkscape version may be incompatible, so no LaTeX-readable file may have been generated. Please check that it is 1.0 (4035a4f, 2020-05-01).')
inkscape_name = '/Applications/Inkscape.app/Contents/MacOS/inkscape'
subprocess.Popen([inkscape_name, str(path), '-o', str(path.with_suffix(".pdf")), '--export-latex'])
#else:
#os.system('/Applications/Inkscape.app/Contents/MacOS/inkscape '+ str(path)+ ' --export-file='+str(path.with_suffix(".pdf"))+' --export-latex')
elif SYSTEM == "Windows":
subprocess.Popen(['inkscape', str(path), '-o', str(path.with_suffix(".pdf")), '--export-latex'])
log.info("Export to pdf_tex process and InkscapeProcess terminated")
def create(factor):
# """
# Creates a figure.
# First argument is the title of the figure
# Second argument is the figure directory.
# """
# title = title.strip()
# file_name = title.replace(' ', '-').lower() + '.svg'
# figures = root + os.path.sep + 'figures'+os.path.sep
# figure_path = figures + file_name
# # If a file with this name already exists, append a '2'.
# if Path(figure_path).exists():
# title = title + '-2'
# create(title,root)
# else:
# figure_path = Path(figure_path).absolute()
# inkscape(figure_path)
"""
Creates a figure.
First argument is the title of the figure
Second argument is the figure directory.
"""
workspace.sub('figures')
log.debug("File name without extension " + factor['fileName'])
file_fullname = factor['fileName'] + '.svg'
log.debug("File name " + file_fullname)
figures_dir = Path(globe.workspace['sub']['figures'])
figure_path = figures_dir / file_fullname
# If a file with this name already exists, quit
    # TODO: duplicate checking should be done in paste; consider encapsulating
    # this functionality and moving it into util
if figure_path.exists():
log.warning("{} already exists. Edit but not create.".format(str(figure_path)))
else:
copy(str(template), str(figure_path))
log.info("Template copied")
log.info("Starting Inkscape")
process_inkscape = Process(target=inkscape, args=(figure_path,))
process_inkscape.start()
return
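# Minimal usage sketch: only the 'fileName' key of `factor` is read above;
# workspace.sub('figures') is assumed to have registered the figures directory
# in globe.workspace before the figure path is built.
if __name__ == '__main__':
    create({'fileName': 'unit-circle'})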
| 32.132813 | 155 | 0.653538 |
f70a649a6c145f8eae91526b87cd9cfca92cdb65 | 679 | py | Python | pyglet/window/cocoa/systemcursor.py | seeminglee/pyglet64 | 3dd167b5b0d3ad132a157e404586e53c2bb21736 | [
"BSD-3-Clause"
] | 1 | 2016-01-09T03:47:39.000Z | 2016-01-09T03:47:39.000Z | pyglet/window/cocoa/systemcursor.py | seeminglee/pyglet64 | 3dd167b5b0d3ad132a157e404586e53c2bb21736 | [
"BSD-3-Clause"
] | null | null | null | pyglet/window/cocoa/systemcursor.py | seeminglee/pyglet64 | 3dd167b5b0d3ad132a157e404586e53c2bb21736 | [
"BSD-3-Clause"
] | null | null | null | from pyglet.libs.darwin.objc_runtime import *
# This class is a wrapper around NSCursor which prevents us from
# sending too many hide or unhide messages in a row. Apparently
# NSCursor treats them like retain/release messages, which can be
# problematic when we are e.g. switching between window & fullscreen.
class SystemCursor:
cursor_is_hidden = False
@classmethod
def hide(cls):
if not cls.cursor_is_hidden:
send_message('NSCursor', 'hide')
cls.cursor_is_hidden = True
@classmethod
def unhide(cls):
if cls.cursor_is_hidden:
send_message('NSCursor', 'unhide')
cls.cursor_is_hidden = False
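# Usage sketch: because the hidden state is tracked here, repeated calls
# collapse into a single hide/unhide message pair, so NSCursor's internal
# retain/release-style counting can never become unbalanced:
#
#     SystemCursor.hide()    # sends NSCursor hide
#     SystemCursor.hide()    # no-op: already hidden
#     SystemCursor.unhide()  # sends NSCursor unhide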
| 35.736842 | 69 | 0.693667 |
f70a73e7ec769ceacef155b98ab3be8009c63172 | 5,229 | py | Python | models/project.py | jlgoh/labeldat | 057248a22c7f022110d712dbcb61befd40e62760 | [
"MIT"
] | 1 | 2021-09-07T06:34:54.000Z | 2021-09-07T06:34:54.000Z | models/project.py | wilsonteng97/labeldat | bdca5df0af55bdd460807808861de25d762b28da | [
"MIT"
] | 5 | 2021-09-08T02:44:59.000Z | 2022-02-27T10:55:29.000Z | models/project.py | wilsonteng97/labeldat | bdca5df0af55bdd460807808861de25d762b28da | [
"MIT"
] | 1 | 2020-12-31T11:03:39.000Z | 2020-12-31T11:03:39.000Z | from extensions import db
from models.item_data_type import ItemDataType
from models.label import Label
from models.task import Task
class Project(db.Model):
id = db.Column(db.String(80), primary_key=True, nullable=False)
# 1(Project)-to-1(organisation)
org_id = db.Column(db.String(80), db.ForeignKey('organisation.id'), nullable=False)
project_name = db.Column(db.String(80), nullable=False)
item_data_type = db.Column(db.Enum(ItemDataType), nullable=False)
layout = db.Column(db.JSON, nullable=False)
outsource_labelling = db.Column(db.Boolean, nullable=False)
created_at = db.Column(db.DateTime(), nullable=False)
# parent 1-to-many w Task
tasks = db.relationship('Task', backref='task', lazy=True)
# parent 1-to-many w ProjectManager
project_managers = db.relationship('ProjectManager', backref='project', lazy=True)
def __repr__(self):
return f"<Project {self.id} | {self.project_name} | Organisation : {self.org_id}>"
def to_response(self):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasks": [t.to_response_without_item_data() for t in self.tasks],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"created_at": self.created_at
}
def to_project_for_user_response(self, user_id):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasksLabelled": [t.to_response_with_labels_from_user(user_id)
for t in self.tasks_and_labels_from_user(user_id)],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"created_at": self.created_at
}
def to_created_project_response(self):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasks": [t.to_response_without_item_data() for t in self.tasks],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"tasksCount": self.calculate_number_of_tasks(),
"overallPercentage": self.calculate_tasks_labelled_percentage(),
"created_at": self.created_at
}
def to_contributed_project_response(self, user_id):
return {
"id": self.id,
"orgId": self.org_id,
"projectName": self.project_name,
"itemDataType": self.item_data_type.name,
"layout": self.layout,
"outsourceLabelling": self.outsource_labelling,
"tasks": [t.to_response_without_item_data() for t in self.tasks],
"projectManagers": [pm.to_response() for pm in self.project_managers],
"tasksCount": self.calculate_number_of_tasks(),
"overallPercentage": self.calculate_tasks_labelled_percentage(),
"contributionCount": self.calculate_tasks_labelled_by_user(user_id),
"contributionPercentage": self.calculate_tasks_labelled_percentage_by_user(user_id),
"created_at": self.created_at
}
def tasks_and_labels_from_user(self, user_id):
resulting_tasks = []
for task in self.tasks:
for label in task.labels:
if label.user_id == user_id:
resulting_tasks.append(task)
break
return resulting_tasks
def calculate_number_of_tasks(self):
return len(self.tasks)
def calculate_tasks_labelled_percentage(self):
"""
Count % of tasks that have >= 1 label
"""
number_of_tasks = self.calculate_number_of_tasks()
if not number_of_tasks: # When there are no tasks
return 0
num_labelled = len([task for task in self.tasks if len(task.labels) > 0])
return round(float((num_labelled / number_of_tasks * 100)), 1)
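    # Worked example (hypothetical numbers): with 12 tasks of which 3 carry at
    # least one label, this returns round(3 / 12 * 100, 1) == 25.0.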
def calculate_tasks_labelled_percentage_by_user(self, user_id):
"""
Count % of tasks that a user has labelled
"""
number_of_tasks = self.calculate_number_of_tasks()
if not number_of_tasks: # When there are no tasks
return 0
num_labelled_by_user = self.calculate_tasks_labelled_by_user(user_id)
return round(float((num_labelled_by_user / number_of_tasks) * 100), 1)
def calculate_tasks_labelled_by_user(self, user_id):
"""
Count number of tasks that a user has labelled
"""
tasks_by_user = db.session.query(Task).filter_by(project_id=self.id).join(Label).filter_by(
user_id=user_id).all()
num_labelled = len(tasks_by_user)
return num_labelled
| 42.169355 | 99 | 0.634921 |
f70a87f89311f5320018937ff535733ef8e8f539 | 10,355 | py | Python | curtsies/formatstringarray.py | toolforger/curtsies | 7f86c07d95aa22b004db9acf8f787e1abf49b581 | [
"MIT"
] | 3 | 2015-07-13T12:53:40.000Z | 2018-01-21T20:38:46.000Z | curtsies/formatstringarray.py | toolforger/curtsies | 7f86c07d95aa22b004db9acf8f787e1abf49b581 | [
"MIT"
] | null | null | null | curtsies/formatstringarray.py | toolforger/curtsies | 7f86c07d95aa22b004db9acf8f787e1abf49b581 | [
"MIT"
] | 1 | 2018-01-21T20:38:03.000Z | 2018-01-21T20:38:03.000Z | """
Format String 2D array
2d array for compositing term-formated strings
-autoexpanding vertically
-interesting get_item behavior (renders fmtstrs)
-caching behavior eventually
>>> a = FSArray(10, 14)
>>> a.shape
(10, 14)
>>> a[1] = 'i'
>>> a[3:4, :] = ['i' * 14]
>>> a[16:17, :] = ['j' * 14]
>>> a.shape, a[16, 0]
((17, 14), ['j'])
>>> a[200, 1] = ['i']
>>> a[200, 1]
['i']
"""
import sys
import logging
from .formatstring import fmtstr
from .formatstring import normalize_slice
from .formatstring import FmtStr
from typing import (
Any,
Union,
Text,
List,
Sequence,
overload,
Tuple,
cast,
no_type_check,
)
actualize = str
logger = logging.getLogger(__name__)
# TODO check that strings used in arrays don't have tabs or spaces in them!
def slicesize(s):
# type: (slice) -> int
return int((s.stop - s.start) / (s.step if s.step else 1))
def fsarray(strings, *args, **kwargs):
# type: (List[Union[FmtStr, Text]], *Any, **Any) -> FSArray
"""fsarray(list_of_FmtStrs_or_strings, width=None) -> FSArray
Returns a new FSArray of width of the maximum size of the provided
strings, or width provided, and height of the number of strings provided.
If a width is provided, raises a ValueError if any of the strings
are of length greater than this width"""
strings = list(strings)
if "width" in kwargs:
width = kwargs["width"]
del kwargs["width"]
if strings and any(len(s) > width for s in strings):
raise ValueError(f"Those strings won't fit for width {width}")
else:
width = max(len(s) for s in strings) if strings else 0
fstrings = [
s if isinstance(s, FmtStr) else fmtstr(s, *args, **kwargs) for s in strings
]
arr = FSArray(len(fstrings), width, *args, **kwargs)
rows = [
fs.setslice_with_length(0, len(s), s, width)
for fs, s in zip(arr.rows, fstrings)
]
arr.rows = rows
return arr
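# Minimal usage sketch of the factory above: rows shorter than the array width
# are padded on the right, and formatting keywords apply to every row.
#
# >>> arr = fsarray(['hey', 'there'], bg='cyan')
# >>> arr.shape
# (2, 5)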
class FSArray(Sequence):
"""A 2D array of colored text.
Internally represented by a list of FmtStrs of identical size."""
# TODO add constructor that takes fmtstrs instead of dims
def __init__(self, num_rows, num_columns, *args, **kwargs):
# type: (int, int, *Any, **Any) -> None
self.saved_args, self.saved_kwargs = args, kwargs
self.rows = [
fmtstr("", *args, **kwargs) for _ in range(num_rows)
] # type: List[FmtStr]
self.num_columns = num_columns
@overload
def __getitem__(self, slicetuple):
# type: (int) -> FmtStr
pass
@overload
def __getitem__(self, slicetuple):
# type: (slice) -> List[FmtStr]
pass
@overload
def __getitem__(self, slicetuple):
# type: (Tuple[Union[slice, int], Union[slice, int]]) -> List[FmtStr]
pass
def __getitem__(self, slicetuple):
# type: (Union[int, slice, Tuple[Union[int, slice], Union[int, slice]]]) -> Union[FmtStr, List[FmtStr]]
if isinstance(slicetuple, int):
if slicetuple < 0:
slicetuple = len(self.rows) - slicetuple
if slicetuple < 0 or slicetuple >= len(self.rows):
raise IndexError("out of bounds")
return self.rows[slicetuple]
if isinstance(slicetuple, slice):
rowslice = normalize_slice(len(self.rows), slicetuple)
return self.rows[rowslice]
(
row_slice_or_int,
col_slice_or_int,
) = slicetuple # type: Tuple[Union[int, slice], Union[int, slice]]
rowslice = normalize_slice(len(self.rows), row_slice_or_int)
colslice = normalize_slice(self.num_columns, col_slice_or_int)
# TODO clean up slices
return [fs[colslice] for fs in self.rows[rowslice]]
def __len__(self):
# type: () -> int
return len(self.rows)
@property
def shape(self):
# type: () -> Tuple[int, int]
"""Tuple of (len(rows, len(num_columns)) numpy-style shape"""
return len(self.rows), self.num_columns
@property
def height(self):
# type: () -> int
"""The number of rows"""
return len(self.rows)
@property
def width(self):
# type: () -> int
"""The number of columns"""
return self.num_columns
# TODO rework this next major version bump
@no_type_check
def __setitem__(self, slicetuple, value):
"""Place a FSArray in a FSArray"""
logger.debug("slice: %r", slicetuple)
if isinstance(slicetuple, slice):
rowslice, colslice = slicetuple, slice(None)
if isinstance(value, (bytes, str)):
raise ValueError(
"if slice is 2D, value must be 2D as in of list type []"
)
elif isinstance(slicetuple, int):
normalize_slice(self.height, slicetuple)
self.rows[slicetuple] = value
return
else:
rowslice, colslice = slicetuple
# temp shim to allow numpy arrays as values
if value.__class__.__name__ == "ndarray":
value = [fmtstr("".join(line)) for line in value]
rowslice = normalize_slice(sys.maxsize, rowslice)
additional_rows = max(0, rowslice.stop - len(self.rows))
self.rows.extend(
[
fmtstr("", *self.saved_args, **self.saved_kwargs)
for _ in range(additional_rows)
]
)
logger.debug("num columns: %r", self.num_columns)
logger.debug("colslice: %r", colslice)
colslice = normalize_slice(self.num_columns, colslice)
if slicesize(colslice) == 0 or slicesize(rowslice) == 0:
return
if slicesize(colslice) > 1 and isinstance(value, str):
raise ValueError(
"""You cannot replace a multi column slice with a
string please use a list [] with strings for the
contents of each row"""
)
if slicesize(rowslice) != len(value):
area = slicesize(rowslice) * slicesize(colslice)
val_len = sum(len(i) for i in value)
grid_value = [fmtstr(" ", bg="cyan") * slicesize(colslice)] * slicesize(
rowslice
)
grid_fsarray = (
self.rows[: rowslice.start]
+ [
fs.setslice_with_length(
colslice.start, colslice.stop, v, self.num_columns
)
for fs, v in zip(self.rows[rowslice], grid_value)
]
+ self.rows[rowslice.stop :]
)
msg = "You are trying to fit this value {} into the region {}: {}".format(
fmtstr("".join(value), bg="cyan"),
fmtstr("").join(grid_value),
"\n ".join(grid_fsarray[x] for x in range(len(self.rows))),
)
raise ValueError(
"""Error you are trying to replace a region of {} rows by {}
columns for and area of {} with a value of len {}. The value
used to replace the region must equal the area of the region
replace.
{}""".format(
rowslice.stop - rowslice.start,
colslice.stop - colslice.start,
area,
val_len,
msg,
)
)
self.rows = (
self.rows[: rowslice.start]
+ [
fs.setslice_with_length(
colslice.start, colslice.stop, v, self.num_columns
)
for fs, v in zip(self.rows[rowslice], value)
]
+ self.rows[rowslice.stop :]
)
def dumb_display(self):
# type: () -> None
"""Prints each row followed by a newline without regard for the terminal window size"""
for line in self.rows:
print(line)
@classmethod
def diff(cls, a, b, ignore_formatting=False):
# type: (FSArray, FSArray, bool) -> Text
"""Returns two FSArrays with differences underlined"""
def underline(x):
# type: (Text) -> Text
return f"\x1b[4m{x}\x1b[0m"
def blink(x):
# type: (Text) -> Text
return f"\x1b[5m{x}\x1b[0m"
a_rows = []
b_rows = []
max_width = max([len(row) for row in a] + [len(row) for row in b])
a_lengths = []
b_lengths = []
for a_row, b_row in zip(a, b):
a_lengths.append(len(a_row))
b_lengths.append(len(b_row))
extra_a = "`" * (max_width - len(a_row))
extra_b = "`" * (max_width - len(b_row))
a_line = ""
b_line = ""
for a_char, b_char in zip(a_row + extra_a, b_row + extra_b):
if ignore_formatting:
a_char_for_eval = a_char.s if isinstance(a_char, FmtStr) else a_char
b_char_for_eval = b_char.s if isinstance(b_char, FmtStr) else b_char
else:
a_char_for_eval = a_char
b_char_for_eval = b_char
if a_char_for_eval == b_char_for_eval:
a_line += actualize(a_char)
b_line += actualize(b_char)
else:
a_line += underline(blink(actualize(a_char)))
b_line += underline(blink(actualize(b_char)))
a_rows.append(a_line)
b_rows.append(b_line)
hdiff = "\n".join(
a_line + " %3d | %3d " % (a_len, b_len) + b_line
for a_line, b_line, a_len, b_len in zip(
a_rows, b_rows, a_lengths, b_lengths
)
)
return hdiff
def simple_format(x):
# type: (Union[FSArray, List[FmtStr]]) -> Text
return "\n".join(actualize(l) for l in x)
if __name__ == "__main__":
a = FSArray(3, 14, bg="blue")
a[0:2, 5:11] = cast(
Tuple[FmtStr, ...],
(fmtstr("hey", "on_blue") + " " + fmtstr("yo", "on_red"), fmtstr("qwe qw")),
)
a.dumb_display()
a = fsarray(["hey", "there"], bg="cyan")
a.dumb_display()
print(FSArray.diff(a, fsarray(["hey", "there "]), ignore_formatting=True))
| 33.29582 | 111 | 0.547562 |
f70acf373ecdb330c0ce11c3f2115bd7f4f066b1 | 6,494 | py | Python | toolbox/sampling/__init__.py | keunhong/toolbox | e8d1dadab4d9ccf8d78fe86ea933819ac6a07fca | [
"MIT"
] | null | null | null | toolbox/sampling/__init__.py | keunhong/toolbox | e8d1dadab4d9ccf8d78fe86ea933819ac6a07fca | [
"MIT"
] | null | null | null | toolbox/sampling/__init__.py | keunhong/toolbox | e8d1dadab4d9ccf8d78fe86ea933819ac6a07fca | [
"MIT"
] | null | null | null | import logging
import random
from typing import List, Tuple
import numpy as np
from skimage.transform import resize
from scipy.ndimage import zoom
from toolbox import images
from toolbox.images import crop, mask_bbox
from .poisson_disk import sample_poisson_uniform
logger = logging.getLogger(__name__)
class PatchType:
S2F_MASKED_BLACK = 'cropped_scaled_to_fit'
S2F_MASKED_WHITE = 'cropped_scaled_to_fit_white'
S2F = 'scaled_to_fit'
RANDOM = 'random2'
def sample_poisson_mask(mask, r, k):
ymin, ymax, xmin, xmax = mask_bbox(mask)
height = ymax - ymin
width = xmax - xmin
points = np.array(sample_poisson_uniform(height, width, r, k,
mask[ymin:ymax, xmin:xmax]))
points[:, 0] += ymin
points[:, 1] += xmin
points = np.floor(points).astype(int)
return points
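# Minimal usage sketch (the mask is a hypothetical stand-in): points come back
# as integer (y, x) pairs inside the mask's bounding box, spaced according to
# the Poisson-disk radius r, with k candidate draws per point.
#
# mask = np.zeros((100, 100), dtype=bool)
# mask[20:80, 30:90] = True
# points = sample_poisson_mask(mask, r=10, k=30)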
def generate_dense_bboxes(
mask: np.ndarray,
scale=0.23,
min_dist=0.091):
mask_height, mask_width = mask.shape
min_length = min(mask_height, mask_width)
patch_sample_size = scale * min_length
centers = sample_poisson_mask(mask, min_length * min_dist, 1000)
half = int(patch_sample_size / 2)
bboxes = []
for center in centers:
ycent, xcent = center
bbox = (ycent - half,
ycent + half + 1,
xcent - half,
xcent + half + 1)
if (bbox[0] >= 0 and bbox[1] < mask_height
and bbox[2] >= 0 and bbox[3] < mask_width):
bboxes.append(bbox)
print('bboxes={} centers={}, mask_size={}, min_dist={}'.format(
len(bboxes), len(centers), mask.shape, min_length * min_dist))
return bboxes
def random_crops(image, patch_size, num_crops):
border_mask = np.ones(image.shape[:2], dtype=bool)
    # Keep candidate centers far enough from the border for a full patch;
    # integer division keeps the slice indices below valid.
    left = patch_size // 2
    right = image.shape[1] - patch_size // 2
    top = patch_size // 2
    bottom = image.shape[0] - patch_size // 2
border_mask[:, :left] = False
border_mask[:, right:] = False
border_mask[:top, :] = False
border_mask[bottom:, :] = False
yinds, xinds = np.where(border_mask)
bboxes = []
for i in range(num_crops):
point_idx = np.random.randint(0, len(yinds))
ycent, xcent = yinds[point_idx], xinds[point_idx]
half = int(patch_size / 2)
# Just squash the patch if it's out of bounds.
bbox = (ycent - half,
ycent + half + 1,
xcent - half,
xcent + half + 1)
bboxes.append(bbox)
return bboxes_to_patches(image, bboxes, patch_size)
def generate_random_bboxes(mask: np.ndarray, scale_range=(1.0, 1.0),
num_patches=5, fixed_size=None):
"""
    Generates random bounding boxes at random scales with centroids within the
    mask.
    :param mask: The constrained area for the centroid of the patch.
    :param scale_range: (min, max) scale of the sampling, as a multiple of the
    minimum side length of the input mask (or of fixed_size when given).
    :param num_patches: Number of patches to generate.
    :param fixed_size: If given, patch sizes are computed as scale * fixed_size
    instead of being relative to the mask size.
    :return: Bounding boxes and the scale used for each one.
"""
mask_height, mask_width = mask.shape[:2]
min_length = min(mask_height, mask_width)
yinds, xinds = np.where(mask)
patch_bboxes = []
patch_scales = []
tries = 0
while len(patch_bboxes) < num_patches:
scale = random.uniform(*scale_range)
patch_scales.append(scale)
patch_size = scale * fixed_size if fixed_size else int(scale * min_length)
point_idx = np.random.randint(0, len(yinds))
ycent, xcent = yinds[point_idx], xinds[point_idx]
half = int(patch_size / 2)
# Just squash the patch if it's out of bounds.
if (ycent - half < 0 or ycent + half > mask.shape[0] or
xcent - half < 0 or xcent + half > mask.shape[1]):
if tries < 100:
tries += 1
continue
bbox = (max(ycent - half, 0),
min(ycent + half + 1, mask.shape[0]),
max(xcent - half, 0),
min(xcent + half + 1, mask.shape[1]))
patch_bboxes.append(bbox)
return patch_bboxes, patch_scales
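# Minimal usage sketch (hypothetical mask): each bounding box is returned as
# (ymin, ymax, xmin, xmax), clamped to the mask shape, with its centroid on an
# active mask pixel.
#
# mask = np.ones((200, 200), dtype=bool)
# bboxes, scales = generate_random_bboxes(mask, scale_range=(0.1, 0.3),
#                                         num_patches=8)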
def bboxes_to_patches(im: np.ndarray,
bboxes: List[Tuple[int, int, int, int]],
patch_size: int, use_pil=False):
"""
Converts bounding boxes to actual patches. Patches are all resized to the
patch size regardless of the original bounding box size.
:param im: To crop patch from.
:param bboxes: Boxes defining the patch.
:param patch_size: Patch size to return.
:return: Image patches.
"""
patches = []
for bbox in bboxes:
cropped = crop(im, bbox)
if cropped.shape[0] != patch_size or cropped.shape[1] != patch_size:
scale = [patch_size/cropped.shape[0], patch_size/cropped.shape[1]]
if len(im.shape) == 3:
scale.append(1.0)
if use_pil:
cropped = resize(cropped, (patch_size, patch_size)) \
.astype(dtype=np.float32)
else:
cropped = zoom(cropped, scale, im.dtype, order=1)
patches.append(cropped)
return patches
def compute_mask_tight_patch(im: np.ndarray,
mask: np.ndarray,
patch_size: int):
"""
Computes a patch which contains all the pixels active in the mask scaled to
the patch size.
:param im:
:param mask:
:param patch_size:
:return:
"""
bbox = images.compute_mask_bbox(mask)
cropped = images.crop(im, bbox)
    # Resize to the target patch size (trailing channel dims are preserved).
    resized = resize(cropped, (patch_size, patch_size))
return resized
def compute_minmax_thickness(mask):
max_width = 0
max_height = 0
for row_id in range(mask.shape[0]):
row = mask[row_id, :]
split_locs = np.where(np.diff(row) != 0)[0] + 1
for segment in (np.split(row, split_locs)):
if segment[0] != 0:
max_width = max(max_width, len(segment))
for col_id in range(mask.shape[1]):
col = mask[:, col_id]
split_locs = np.where(np.diff(col) != 0)[0] + 1
for segment in (np.split(col, split_locs)):
if segment[0] != 0:
max_height = max(max_height, len(segment))
return min(max_width, max_height), max(max_width, max_height)
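# Worked example: for a 2x3 block of ones inside an otherwise zero mask, the
# longest horizontal run is 3 and the longest vertical run is 2, so the
# function above returns (2, 3).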
| 33.474227 | 82 | 0.59963 |
f70b03c8718a2d81744520d6a0d9e0abea8b40a2 | 124 | py | Python | Florence/FiniteElements/Assembly/__init__.py | jdlaubrie/florence | 830dca4a34be00d6e53cbec3007c10d438b27f57 | [
"MIT"
] | 65 | 2017-08-04T10:21:13.000Z | 2022-02-21T21:45:09.000Z | Florence/FiniteElements/Assembly/__init__.py | jdlaubrie/florence | 830dca4a34be00d6e53cbec3007c10d438b27f57 | [
"MIT"
] | 6 | 2018-06-03T02:29:20.000Z | 2022-01-18T02:30:22.000Z | Florence/FiniteElements/Assembly/__init__.py | jdlaubrie/florence | 830dca4a34be00d6e53cbec3007c10d438b27f57 | [
"MIT"
] | 10 | 2018-05-30T09:44:10.000Z | 2021-05-18T08:06:51.000Z | from .Assembly import Assemble, AssembleForces, AssembleInternalTractionForces, AssembleExplicit, AssembleMass, AssembleForm | 124 | 124 | 0.887097 |
f70b18b4b2bf16ceeb39c12757922047f07bde3e | 241 | py | Python | Chapter_04/actions/admin.py | codingEzio/code_py_book_django2_by_example | d215d0c87a557685824286822186966b06fa8d59 | [
"Unlicense"
] | 1 | 2021-04-23T16:35:45.000Z | 2021-04-23T16:35:45.000Z | Chapter_04/actions/admin.py | codingEzio/code_py_book_django2_by_example | d215d0c87a557685824286822186966b06fa8d59 | [
"Unlicense"
] | null | null | null | Chapter_04/actions/admin.py | codingEzio/code_py_book_django2_by_example | d215d0c87a557685824286822186966b06fa8d59 | [
"Unlicense"
] | null | null | null | from django.contrib import admin
from .models import Action
@admin.register(Action)
class ActionAdmin(admin.ModelAdmin):
list_display = ('user', 'verb', 'target', 'created')
list_filter = ('created',)
search_fields = ('verb',) | 24.1 | 56 | 0.697095 |
f70b36ff11e294f9ba8cdf3e7c715b9161f3372a | 9,632 | py | Python | model_tools/activations/hooks.py | BonnerLab/model-tools | ac90617cd79bb70a308e34a1e834971498329fb0 | [
"MIT"
] | null | null | null | model_tools/activations/hooks.py | BonnerLab/model-tools | ac90617cd79bb70a308e34a1e834971498329fb0 | [
"MIT"
] | null | null | null | model_tools/activations/hooks.py | BonnerLab/model-tools | ac90617cd79bb70a308e34a1e834971498329fb0 | [
"MIT"
] | null | null | null | from abc import ABC, abstractmethod
import logging
import os
from typing import Optional, Union, Iterable, Dict
import h5py
import numpy as np
import torch
from PIL import Image
from tqdm import tqdm
from brainio.stimuli import StimulusSet
from model_tools.activations import ActivationsModel
from model_tools.activations.core import flatten, change_dict
from model_tools.utils import fullname, s3
from model_tools.utils.pca import IncrementalPCAPytorch, PCAPytorch
from result_caching import store_dict
Stimuli = Union[Iterable[str], StimulusSet, Iterable[os.PathLike]]
BasePCA = Union[IncrementalPCAPytorch, PCAPytorch]
class LayerHookBase(ABC):
def __init__(self, activations_extractor: ActivationsModel, identifier: Optional[str] = None):
self._extractor = activations_extractor
self.identifier = identifier
self.handle = None
def __call__(self, batch_activations: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
self.setup(batch_activations)
return change_dict(batch_activations, self.layer_apply, keep_name=True,
multithread=os.getenv('MT_MULTITHREAD', '1') == '1')
@classmethod
def hook(cls, activations_extractor: ActivationsModel, identifier: Optional[str] = None, **kwargs):
hook = cls(activations_extractor=activations_extractor, identifier=identifier, **kwargs)
assert not cls.is_hooked(activations_extractor), f"{cls.__name__} is already hooked"
handle = activations_extractor.register_batch_activations_hook(hook)
hook.handle = handle
return handle
@classmethod
def is_hooked(cls, activations_extractor: ActivationsModel) -> bool:
return any(isinstance(hook, cls) for hook in
activations_extractor._extractor._batch_activations_hooks.values())
def setup(self, batch_activations: Dict[str, np.ndarray]) -> None:
pass
@abstractmethod
def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
pass
class LayerGlobalMaxPool2d(LayerHookBase):
def __init__(self, *args, identifier: Optional[str] = None, **kwargs):
if identifier is None:
identifier = 'maxpool'
super(LayerGlobalMaxPool2d, self).__init__(*args, **kwargs, identifier=identifier)
def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
if activations.ndim != 4:
return activations
return np.max(activations, axis=(2, 3))
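# Shape note: for convolutional activations of shape (batch, channels, height,
# width) the pooling above yields (batch, channels); non-4D activations (e.g.
# fully-connected layers) pass through unchanged.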
class LayerRandomProjection(LayerHookBase):
def __init__(self, *args,
n_components: int = 1000,
force: bool = False,
identifier: Optional[str] = None,
**kwargs):
if identifier is None:
identifier = f'randproj_ncomponents={n_components}_force={force}'
super(LayerRandomProjection, self).__init__(*args, **kwargs, identifier=identifier)
self._n_components = n_components
self._force = force
self._layer_ws = {}
def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
activations = flatten(activations)
if activations.shape[1] <= self._n_components and not self._force:
return activations
if layer not in self._layer_ws:
w = np.random.normal(size=(activations.shape[-1], self._n_components)) / np.sqrt(self._n_components)
self._layer_ws[layer] = w
else:
w = self._layer_ws[layer]
activations = activations @ w
return activations
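# Note: the fixed Gaussian matrix above, scaled by 1/sqrt(n_components), is a
# Johnson-Lindenstrauss style random projection; it is drawn once per layer and
# cached so that successive batches are projected consistently.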
class LayerPCA(LayerHookBase):
def __init__(self, *args,
n_components: int = 1000,
force: bool = False,
stimuli: Optional[Stimuli] = None,
stimuli_identifier: Optional[str] = None,
identifier: Optional[str] = None,
batch_size: Optional[int] = None,
device: Optional[Union[str, torch.device]] = None,
**kwargs):
if stimuli is None:
# Default to ImageNet validation with 1 image per class
stimuli = _get_imagenet_val(n_components)
stimuli_identifier = 'brainscore-imagenetval'
if isinstance(stimuli, StimulusSet) and stimuli_identifier is None and hasattr(stimuli, 'identifier'):
stimuli_identifier = stimuli.identifier
if stimuli_identifier is None:
raise ValueError('If passing a list of paths for stimuli '
'or a StimulusSet without an identifier attribute, '
'you must provide a stimuli_identifier')
if identifier is None:
identifier = f'pca_ncomponents={n_components}_force={force}_stimuli_identifier={stimuli_identifier}'
super(LayerPCA, self).__init__(*args, **kwargs, identifier=identifier)
self._n_components = n_components
self._force = force
self._stimuli_identifier = stimuli_identifier
self._stimuli = stimuli
self._batch_size = batch_size
self._device = device
self._logger = logging.getLogger(fullname(self))
self._layer_pcas = {}
def setup(self, batch_activations) -> None:
layers = batch_activations.keys()
missing_layers = [layer for layer in layers if layer not in self._layer_pcas]
if len(missing_layers) == 0:
return
layer_pcas = self._pcas(identifier=self._extractor.identifier,
layers=missing_layers,
n_components=self._n_components,
force=self._force,
stimuli_identifier=self._stimuli_identifier)
self._layer_pcas = {**self._layer_pcas, **layer_pcas}
def layer_apply(self, layer: str, activations: np.ndarray) -> np.ndarray:
pca = self._layer_pcas[layer]
activations = flatten(activations)
if pca is None:
return activations
return pca.transform(torch.from_numpy(activations).to(self._device))
@store_dict(dict_key='layers', identifier_ignore=['layers'])
def _pcas(self, identifier, layers, n_components, force, stimuli_identifier) -> Dict[str, BasePCA]:
self._logger.debug(f'Retrieving {stimuli_identifier} activations')
self.handle.disable()
activations = self._extractor(self._stimuli, layers=layers, stimuli_identifier=False)
activations = {layer: activations.sel(layer=layer).values
for layer in np.unique(activations['layer'])}
assert len(set(layer_activations.shape[0] for layer_activations in activations.values())) == 1, "stimuli differ"
self.handle.enable()
self._logger.debug(f'Computing {stimuli_identifier} principal components')
progress = tqdm(total=len(activations), desc="layer principal components", leave=False)
def init_and_progress(layer, activations):
activations = flatten(activations)
if activations.shape[1] <= n_components and not force:
self._logger.debug(f"Not computing principal components for {layer} "
f"activations {activations.shape} as shape is small enough already")
progress.update(1)
return None
n_components_ = n_components if activations.shape[1] > n_components else activations.shape[1]
if self._batch_size is None:
pca = PCAPytorch(n_components_, device=self._device)
pca.fit(torch.from_numpy(activations).to(self._device))
else:
pca = IncrementalPCAPytorch(n_components_, device=self._device)
for i in range(0, activations.shape[0], self._batch_size):
activations_batch = torch.from_numpy(activations[i:i + self._batch_size]).to(self._device)
pca.fit_partial(activations_batch)
            progress.update(1)
            return pca
layer_pcas = change_dict(activations, init_and_progress, keep_name=True,
multithread=os.getenv('MT_MULTITHREAD', '1') == '1')
progress.close()
return layer_pcas
def _get_imagenet_val(num_images):
_logger = logging.getLogger(fullname(_get_imagenet_val))
num_classes = 1000
num_images_per_class = (num_images - 1) // num_classes
base_indices = np.arange(num_images_per_class).astype(int)
indices = []
for i in range(num_classes):
indices.extend(50 * i + base_indices)
for i in range((num_images - 1) % num_classes + 1):
indices.extend(50 * i + np.array([num_images_per_class]).astype(int))
framework_home = os.path.expanduser(os.getenv('MT_HOME', '~/.model-tools'))
imagenet_filepath = os.getenv('MT_IMAGENET_PATH', os.path.join(framework_home, 'imagenet2012.hdf5'))
imagenet_dir = f"{imagenet_filepath}-files"
os.makedirs(imagenet_dir, exist_ok=True)
if not os.path.isfile(imagenet_filepath):
os.makedirs(os.path.dirname(imagenet_filepath), exist_ok=True)
_logger.debug(f"Downloading ImageNet validation to {imagenet_filepath}")
s3.download_file("imagenet2012-val.hdf5", imagenet_filepath)
filepaths = []
with h5py.File(imagenet_filepath, 'r') as f:
for index in indices:
imagepath = os.path.join(imagenet_dir, f"{index}.png")
if not os.path.isfile(imagepath):
image = np.array(f['val/images'][index])
Image.fromarray(image).save(imagepath)
filepaths.append(imagepath)
return filepaths
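# Usage sketch (illustrative; assumes an ActivationsModel `extractor` built
# elsewhere with the model-tools wrappers, and that the returned HookHandle
# exposes remove()):
#
#   handle = LayerGlobalMaxPool2d.hook(extractor)    # pool 4D activations
#   # ... extractor(stimuli, layers=[...]) now returns pooled features ...
#   handle.remove()                                  # detach the hook when done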
| 43.781818 | 120 | 0.653758 |
f70b4d49fd7c2428414fde8a0fcb3a392c5d7289 | 7,023 | py | Python | deepsim/deepsim/core/link_state.py | aws-deepracer/deepsim | cad2639f525c2f94ec5c03d8b855cc65b0b8ee55 | [
"Apache-2.0"
] | 1 | 2022-03-25T07:20:49.000Z | 2022-03-25T07:20:49.000Z | deepsim/deepsim/core/link_state.py | aws-deepracer/deepsim | cad2639f525c2f94ec5c03d8b855cc65b0b8ee55 | [
"Apache-2.0"
] | null | null | null | deepsim/deepsim/core/link_state.py | aws-deepracer/deepsim | cad2639f525c2f94ec5c03d8b855cc65b0b8ee55 | [
"Apache-2.0"
] | null | null | null | #################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). #
# You may not use this file except in compliance with the License. #
# You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, #
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #
# See the License for the specific language governing permissions and #
# limitations under the License. #
#################################################################################
"""A class for link state."""
from typing import Optional
from deepsim.core.pose import Pose
from deepsim.core.twist import Twist
from gazebo_msgs.msg import LinkState as ROSLinkState
class LinkState:
"""
LinkState class
"""
def __init__(self,
link_name: Optional[str] = None,
pose: Optional[Pose] = None,
twist: Optional[Twist] = None,
reference_frame: Optional[str] = None):
"""
Initialize LinkState class
Args:
link_name (Optional[str]): link name
pose (Optional[Pose]): desired pose in reference frame
twist (Optional[Twist]): desired twist in reference frame
reference_frame (Optional[str]): set pose/twist relative to the frame of this entity (Body/Model)
leave empty or "world" or "map" defaults to world-frame
"""
self._link_name = link_name
self._pose = pose.copy() if pose else Pose()
self._twist = twist.copy() if twist else Twist()
self._reference_frame = reference_frame or ''
@property
def link_name(self) -> str:
"""
Returns the link name
Returns:
str: link name
"""
return self._link_name
@link_name.setter
def link_name(self, value: str) -> None:
"""
Set link name
Args:
value (str): link name
"""
self._link_name = value
@property
def pose(self) -> Pose:
"""
Returns the copy of pose.
Returns:
Pose: the copy of pose of the link
"""
return self._pose.copy()
@pose.setter
def pose(self, value: Pose) -> None:
"""
Set the pose.
Args:
value (Pose): the pose
"""
self._pose = value.copy()
@property
def twist(self) -> Twist:
"""
Return the copy of twist.
Returns:
Twist: the copy of twist
"""
return self._twist.copy()
@twist.setter
def twist(self, value: Twist) -> None:
"""
Set the twist.
Args:
value (Twist): the twist
"""
self._twist = value.copy()
@property
def reference_frame(self) -> str:
"""
Returns the reference frame
Returns:
str: the reference frame
"""
return self._reference_frame
@reference_frame.setter
def reference_frame(self, value: str) -> None:
"""
Set the reference frame
Args:
value (str): the reference frame
"""
self._reference_frame = value
def to_ros(self) -> ROSLinkState:
"""
Return the ROS LinkState object created from this link state.
Returns:
gazebo_msgs.msg.LinkState: ROS LinkState
"""
ros_link_state = ROSLinkState()
if self.link_name:
ros_link_state.link_name = self.link_name
if self._pose:
ros_link_state.pose = self._pose.to_ros()
if self._twist:
ros_link_state.twist = self._twist.to_ros()
if self.reference_frame:
ros_link_state.reference_frame = self.reference_frame
return ros_link_state
@staticmethod
def from_ros(value: ROSLinkState) -> 'LinkState':
"""
Returns new LinkState object created from ROS LinkState
Args:
value (ROSLinkState): ROS LinkState
Returns:
LinkState: new LinkState object created from ROS LinkState
"""
return LinkState(link_name=value.link_name,
pose=Pose.from_ros(value.pose),
twist=Twist.from_ros(value.twist),
reference_frame=value.reference_frame)
def copy(self) -> 'LinkState':
"""
Returns a copy.
Returns:
LinkState: the copied link state
"""
return LinkState(link_name=self.link_name,
pose=self._pose,
twist=self._twist,
reference_frame=self.reference_frame)
def __eq__(self, other: 'LinkState') -> bool:
"""
Equality of LinkState.
Args:
other (LinkState): other to compare
Returns:
            bool: True if the link names and reference frames match and the pose and twist differences are within epsilon, otherwise False.
"""
return (self.link_name == other.link_name and self.reference_frame == other.reference_frame
and self._pose == other._pose and self._twist == other._twist)
def __ne__(self, other: 'LinkState') -> bool:
"""
Inequality of points is inequality of any coordinates
Args:
other (LinkState): other to compare
Returns:
bool: False if the differences of all components are within epsilon, Otherwise True.
"""
return not self.__eq__(other)
def __str__(self) -> str:
"""
String representation of a link state
Returns:
str: String representation of a link state
"""
return "(link_name=%s, pose=%s, twist=%s, reference_frame=%s)" % (self.link_name,
repr(self._pose),
repr(self._twist),
self.reference_frame)
def __repr__(self) -> str:
"""
String representation including class
Returns:
str: String representation including class
"""
return "LinkState" + str(self)
| 31.922727 | 109 | 0.508472 |
f70b569498b82d470320a1d9546d114427f688b4 | 695 | py | Python | cinderclient/v3/qos_specs.py | deepanshhu/python-cinderclient | 2c0f74c708fd09c5ae813255aaa671073f2fe250 | [
"Apache-1.1"
] | null | null | null | cinderclient/v3/qos_specs.py | deepanshhu/python-cinderclient | 2c0f74c708fd09c5ae813255aaa671073f2fe250 | [
"Apache-1.1"
] | null | null | null | cinderclient/v3/qos_specs.py | deepanshhu/python-cinderclient | 2c0f74c708fd09c5ae813255aaa671073f2fe250 | [
"Apache-1.1"
] | null | null | null | # Copyright (c) 2013 eBay Inc.
# Copyright (c) OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
QoS Specs interface.
"""
from cinderclient.v2.qos_specs import * # noqa
| 30.217391 | 69 | 0.748201 |
f70bd2ee450be0f82157aa65881304ad6a24cb47 | 1,884 | py | Python | dask_kubernetes/conftest.py | ddelange/dask-kubernetes | 42bcf9817ea963bf048f9dd06caec1622656302a | [
"BSD-3-Clause"
] | 1 | 2022-01-20T12:38:27.000Z | 2022-01-20T12:38:27.000Z | dask_kubernetes/conftest.py | ddelange/dask-kubernetes | 42bcf9817ea963bf048f9dd06caec1622656302a | [
"BSD-3-Clause"
] | null | null | null | dask_kubernetes/conftest.py | ddelange/dask-kubernetes | 42bcf9817ea963bf048f9dd06caec1622656302a | [
"BSD-3-Clause"
] | null | null | null | import pytest
import pathlib
import os
import subprocess
import tempfile
from kopf.testing import KopfRunner
from dask_kubernetes.common.utils import check_dependency
DIR = pathlib.Path(__file__).parent.absolute()
check_dependency("helm")
check_dependency("kubectl")
check_dependency("docker")
@pytest.fixture()
async def kopf_runner(k8s_cluster):
yield KopfRunner(["run", "-m", "dask_kubernetes.operator", "--verbose"])
@pytest.fixture(scope="session")
def docker_image():
image_name = "dask-kubernetes:dev"
subprocess.check_output(["docker", "build", "-t", image_name, "./ci/"])
return image_name
@pytest.fixture(scope="session")
def k8s_cluster(kind_cluster, docker_image):
os.environ["KUBECONFIG"] = str(kind_cluster.kubeconfig_path)
kind_cluster.load_docker_image(docker_image)
yield kind_cluster
del os.environ["KUBECONFIG"]
@pytest.fixture(scope="session")
def ns(k8s_cluster):
return "default"
def run_generate(crd_path, patch_path, temp_path):
subprocess.run(
["k8s-crd-resolver", "-r", "-j", patch_path, crd_path, temp_path],
check=True,
env={**os.environ},
)
@pytest.fixture(scope="session", autouse=True)
def customresources(k8s_cluster):
temp_dir = tempfile.TemporaryDirectory()
crd_path = os.path.join(DIR, "operator", "customresources")
run_generate(
os.path.join(crd_path, "daskcluster.yaml"),
os.path.join(crd_path, "daskcluster.patch.yaml"),
os.path.join(temp_dir.name, "daskcluster.yaml"),
)
run_generate(
os.path.join(crd_path, "daskworkergroup.yaml"),
os.path.join(crd_path, "daskworkergroup.patch.yaml"),
os.path.join(temp_dir.name, "daskworkergroup.yaml"),
)
k8s_cluster.kubectl("apply", "-f", temp_dir.name)
yield
k8s_cluster.kubectl("delete", "-f", temp_dir.name)
temp_dir.cleanup()
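# Example of a test that would consume these fixtures (illustrative only; the
# autouse `customresources` fixture installs the CRDs before any test runs):
#
#   def test_nodes_ready(k8s_cluster):
#       assert "Ready" in k8s_cluster.kubectl("get", "nodes")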
| 25.808219 | 76 | 0.701168 |
f70bf6ee38f2719e916cda8cb70d9a8dda8c9666 | 8,303 | py | Python | whoville/cloudbreak/models/reinstall_request_v2.py | mikchaos/whoville | 6eabaea4b74ac0b632c03db8252590131c6ce63b | [
"Apache-2.0"
] | null | null | null | whoville/cloudbreak/models/reinstall_request_v2.py | mikchaos/whoville | 6eabaea4b74ac0b632c03db8252590131c6ce63b | [
"Apache-2.0"
] | null | null | null | whoville/cloudbreak/models/reinstall_request_v2.py | mikchaos/whoville | 6eabaea4b74ac0b632c03db8252590131c6ce63b | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Cloudbreak API
    Cloudbreak is a powerful left surf that breaks over a coral reef, a mile southwest of the island of Tavarua, Fiji. Cloudbreak is a cloud agnostic Hadoop as a Service API. It abstracts the provisioning and eases the management and monitoring of on-demand clusters. SequenceIQ's Cloudbreak is a RESTful application development platform with the goal of helping developers to build solutions for deploying Hadoop YARN clusters in different environments. Once it is deployed in your favourite servlet container it exposes a REST API allowing one to spin up Hadoop clusters of arbitrary sizes and cloud providers. Provisioning Hadoop has never been easier. Cloudbreak is built on the foundation of cloud providers API (Amazon AWS, Microsoft Azure, Google Cloud Platform, Openstack), Apache Ambari, Docker lightweight containers, Swarm and Consul. For further product documentation follow the link: <a href=\"http://hortonworks.com/apache/cloudbreak/\">http://hortonworks.com/apache/cloudbreak/</a>    # noqa: E501
OpenAPI spec version: 2.7.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class ReinstallRequestV2(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'instance_groups': 'list[InstanceGroupsV2]',
'ambari_stack_details': 'AmbariStackDetails',
'blueprint_name': 'str',
'kerberos_password': 'str',
'kerberos_principal': 'str'
}
attribute_map = {
'instance_groups': 'instanceGroups',
'ambari_stack_details': 'ambariStackDetails',
'blueprint_name': 'blueprintName',
'kerberos_password': 'kerberosPassword',
'kerberos_principal': 'kerberosPrincipal'
}
def __init__(self, instance_groups=None, ambari_stack_details=None, blueprint_name=None, kerberos_password=None, kerberos_principal=None):
"""
ReinstallRequestV2 - a model defined in Swagger
"""
self._instance_groups = None
self._ambari_stack_details = None
self._blueprint_name = None
self._kerberos_password = None
self._kerberos_principal = None
if instance_groups is not None:
self.instance_groups = instance_groups
if ambari_stack_details is not None:
self.ambari_stack_details = ambari_stack_details
self.blueprint_name = blueprint_name
if kerberos_password is not None:
self.kerberos_password = kerberos_password
if kerberos_principal is not None:
self.kerberos_principal = kerberos_principal
@property
def instance_groups(self):
"""
Gets the instance_groups of this ReinstallRequestV2.
collection of instance groupst
:return: The instance_groups of this ReinstallRequestV2.
:rtype: list[InstanceGroupsV2]
"""
return self._instance_groups
@instance_groups.setter
def instance_groups(self, instance_groups):
"""
Sets the instance_groups of this ReinstallRequestV2.
collection of instance groupst
:param instance_groups: The instance_groups of this ReinstallRequestV2.
:type: list[InstanceGroupsV2]
"""
self._instance_groups = instance_groups
@property
def ambari_stack_details(self):
"""
Gets the ambari_stack_details of this ReinstallRequestV2.
details of the Ambari stack
:return: The ambari_stack_details of this ReinstallRequestV2.
:rtype: AmbariStackDetails
"""
return self._ambari_stack_details
@ambari_stack_details.setter
def ambari_stack_details(self, ambari_stack_details):
"""
Sets the ambari_stack_details of this ReinstallRequestV2.
details of the Ambari stack
:param ambari_stack_details: The ambari_stack_details of this ReinstallRequestV2.
:type: AmbariStackDetails
"""
self._ambari_stack_details = ambari_stack_details
@property
def blueprint_name(self):
"""
Gets the blueprint_name of this ReinstallRequestV2.
blueprint name for the cluster
:return: The blueprint_name of this ReinstallRequestV2.
:rtype: str
"""
return self._blueprint_name
@blueprint_name.setter
def blueprint_name(self, blueprint_name):
"""
Sets the blueprint_name of this ReinstallRequestV2.
blueprint name for the cluster
:param blueprint_name: The blueprint_name of this ReinstallRequestV2.
:type: str
"""
if blueprint_name is None:
raise ValueError("Invalid value for `blueprint_name`, must not be `None`")
self._blueprint_name = blueprint_name
@property
def kerberos_password(self):
"""
Gets the kerberos_password of this ReinstallRequestV2.
kerberos admin password
:return: The kerberos_password of this ReinstallRequestV2.
:rtype: str
"""
return self._kerberos_password
@kerberos_password.setter
def kerberos_password(self, kerberos_password):
"""
Sets the kerberos_password of this ReinstallRequestV2.
kerberos admin password
:param kerberos_password: The kerberos_password of this ReinstallRequestV2.
:type: str
"""
if kerberos_password is not None and len(kerberos_password) > 50:
raise ValueError("Invalid value for `kerberos_password`, length must be less than or equal to `50`")
if kerberos_password is not None and len(kerberos_password) < 5:
raise ValueError("Invalid value for `kerberos_password`, length must be greater than or equal to `5`")
self._kerberos_password = kerberos_password
@property
def kerberos_principal(self):
"""
Gets the kerberos_principal of this ReinstallRequestV2.
kerberos principal
:return: The kerberos_principal of this ReinstallRequestV2.
:rtype: str
"""
return self._kerberos_principal
@kerberos_principal.setter
def kerberos_principal(self, kerberos_principal):
"""
Sets the kerberos_principal of this ReinstallRequestV2.
kerberos principal
:param kerberos_principal: The kerberos_principal of this ReinstallRequestV2.
:type: str
"""
self._kerberos_principal = kerberos_principal
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, ReinstallRequestV2):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
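# Minimal usage sketch (illustrative):
if __name__ == '__main__':
    request = ReinstallRequestV2(blueprint_name='my-blueprint')
    print(request.to_dict())  # serializes the model fields to a plain dict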
| 34.168724 | 984 | 0.648802 |
f70c054e7f9c75daf40ce7a574ccf0b3546d13eb | 3,655 | py | Python | iotronic_lightningrod/modules/utils.py | Zakaria-Ben/iotronic-lightning-rod | 4a3eff68bd1db2d57beee0e8c51fbb14fcc0877a | [
"Apache-2.0"
] | null | null | null | iotronic_lightningrod/modules/utils.py | Zakaria-Ben/iotronic-lightning-rod | 4a3eff68bd1db2d57beee0e8c51fbb14fcc0877a | [
"Apache-2.0"
] | null | null | null | iotronic_lightningrod/modules/utils.py | Zakaria-Ben/iotronic-lightning-rod | 4a3eff68bd1db2d57beee0e8c51fbb14fcc0877a | [
"Apache-2.0"
] | 1 | 2018-05-18T13:01:03.000Z | 2018-05-18T13:01:03.000Z | # Copyright 2017 MDSLAB - University of Messina
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__author__ = "Nicola Peditto <npeditto@unime.it"
import asyncio
import inspect
import pkg_resources
from six import moves
from stevedore import extension
import sys
from iotronic_lightningrod.config import entry_points_name
from iotronic_lightningrod.lightningrod import SESSION
from iotronic_lightningrod.modules import Module
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
def getFuncName():
return inspect.stack()[1][3]
def refresh_stevedore(namespace=None):
"""Trigger reload of entry points.
Useful to have dynamic loading/unloading of stevedore modules.
"""
# NOTE(sheeprine): pkg_resources doesn't support reload on python3 due to
# defining basestring which is still there on reload hence executing
# python2 related code.
try:
del sys.modules['pkg_resources'].basestring
except AttributeError:
# python2, do nothing
pass
# Force working_set reload
moves.reload_module(sys.modules['pkg_resources'])
# Clear stevedore cache
cache = extension.ExtensionManager.ENTRY_POINT_CACHE
if namespace:
if namespace in cache:
del cache[namespace]
else:
cache.clear()
class Utility(Module.Module):
def __init__(self, board, session):
super(Utility, self).__init__("Utility", board)
def finalize(self):
pass
def restore(self):
pass
async def hello(self, client_name, message):
import random
s = random.uniform(0.5, 3.0)
await asyncio.sleep(s)
result = "Hello by board to Conductor " + client_name + \
" that said me " + message + " - Time: " + '%.2f' % s
LOG.info("DEVICE hello result: " + str(result))
return result
async def plug_and_play(self, new_module, new_class):
LOG.info("LR modules loaded:\n\t" + new_module)
# Updating entry_points
with open(entry_points_name, 'a') as entry_points:
entry_points.write(
new_module +
'= iotronic_lightningrod.modules.' + new_module + ':'
+ new_class
)
# Reload entry_points
refresh_stevedore('s4t.modules')
LOG.info("New entry_points loaded!")
# Reading updated entry_points
named_objects = {}
for ep in pkg_resources.iter_entry_points(group='s4t.modules'):
named_objects.update({ep.name: ep.load()})
SESSION.disconnect()
return str(named_objects)
async def changeConf(self, conf):
await self.board.getConf(conf)
self.board.setUpdateTime()
result = "Board configuration changed!"
LOG.info("PROVISIONING RESULT: " + str(result))
return result
async def destroyNode(self, conf):
await self.board.setConf(conf)
result = "Board configuration cleaned!"
LOG.info("DESTROY RESULT: " + str(result))
return result
| 28.554688 | 78 | 0.661012 |
f70c38682f59465a9fb9eb7311497596f5bc838a | 1,201 | py | Python | operators/clip.py | ngiambla/nnflex | 7c8bf46218ea70c6dad1efedf9e2069e41c4c3fa | [
"MIT"
] | null | null | null | operators/clip.py | ngiambla/nnflex | 7c8bf46218ea70c6dad1efedf9e2069e41c4c3fa | [
"MIT"
] | null | null | null | operators/clip.py | ngiambla/nnflex | 7c8bf46218ea70c6dad1efedf9e2069e41c4c3fa | [
"MIT"
] | null | null | null | ''' clip.py:
    Implements the clip ONNX node as a flexnode (for use with any accelerator)
'''
import uuid
import numpy as np
from operators.flexnode import FlexNode
from core.defines import Operator
from core.messaging import Message
class Clip(FlexNode):
def __init__(self, onnx_node, inputs, outputs):
FlexNode.__init__(self, onnx_node, inputs, outputs)
self._min = -3.402823466e+38
self._max = 3.402823466e+38
if len(inputs) != 1 and len(inputs) != 3:
raise ValueError("Clip can only have 1 or 3 inputs.")
self._input = inputs[0]
if len(inputs) == 3:
self._min = inputs[1]
self._max = inputs[2]
def map(self, memory_mapper):
pass
def unmap(self, memory_mapper):
pass
def _inputs2mem(self, memory_xfer_engine):
pass
def _mem2output(self, memory_xfer_engine):
pass
def compile(self, source, destinations):
tile_commands = list()
# Here, we are NOT generating tile_commands, (although, this is not difficult.)
np.copyto(self._outputs[0], np.clip(self._input, self._min, self._max))
return tile_commands
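# Note (illustrative): following the ONNX Clip operator, `inputs` is either
# [x] or [x, min, max]; compile() materializes the result directly via
# np.clip(x, min, max), and the +/-3.402823466e+38 defaults are the float32
# limits used when no explicit bounds are supplied.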
| 24.02 | 87 | 0.636969 |
f70c3dfba447061d7e08725b7b0184cabf89a717 | 162 | py | Python | python-port/speedclue/cards.py | sadakatsu/SpeedClueContest | f670e4e594b35e4a5111492dde31414429865ade | [
"MIT"
] | 1 | 2017-10-20T14:24:06.000Z | 2017-10-20T14:24:06.000Z | python-port/speedclue/cards.py | sadakatsu/SpeedClueContest | f670e4e594b35e4a5111492dde31414429865ade | [
"MIT"
] | null | null | null | python-port/speedclue/cards.py | sadakatsu/SpeedClueContest | f670e4e594b35e4a5111492dde31414429865ade | [
"MIT"
] | null | null | null | CARDS = (
    ('Gr', 'Mu', 'Pe', 'Pl', 'Sc', 'Wh'),
    ('Ca', 'Kn', 'Pi', 'Re', 'Ro', 'Wr'),
    ('Ba', 'Bi', 'Co', 'Di', 'Ha', 'Ki', 'Li', 'Lo', 'St'),
)
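# The three inner tuples are the standard Clue/Cluedo card groups (suspects,
# weapons, and rooms), abbreviated to the first two letters of each card name.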
| 27 | 61 | 0.290123 |
f70c3fb1ca6b1d6c60f6960e42fcf6161e4fb84e | 5,473 | py | Python | cnn_bin_to_csv_converter/cnn_bin_file_to_csv_converter.py | Riteshbansal/BigDataTextSummarization | 463ebc7d70d4829f4d92c33d2180eb3ae6031c71 | [
"BSD-3-Clause"
] | 1 | 2018-12-06T17:41:36.000Z | 2018-12-06T17:41:36.000Z | cnn_bin_to_csv_converter/cnn_bin_file_to_csv_converter.py | Riteshbansal/BigDataTextSummarization | 463ebc7d70d4829f4d92c33d2180eb3ae6031c71 | [
"BSD-3-Clause"
] | null | null | null | cnn_bin_to_csv_converter/cnn_bin_file_to_csv_converter.py | Riteshbansal/BigDataTextSummarization | 463ebc7d70d4829f4d92c33d2180eb3ae6031c71 | [
"BSD-3-Clause"
] | 2 | 2018-11-09T15:20:24.000Z | 2018-11-21T06:34:01.000Z | import glob, struct, random, csv
from tensorflow.core.example import example_pb2
# <s> and </s> are used in the data files to segment the abstracts into sentences. They don't receive vocab ids.
SENTENCE_START = '<s>'
SENTENCE_END = '</s>'
PAD_TOKEN = '[PAD]' # This has a vocab id, which is used to pad the encoder input, decoder input and target sequence
UNKNOWN_TOKEN = '[UNK]' # This has a vocab id, which is used to represent out-of-vocabulary words
START_DECODING = '[START]' # This has a vocab id, which is used at the start of every decoder input sequence
STOP_DECODING = '[STOP]' # This has a vocab id, which is used at the end of untruncated target sequences
# Note: none of <s>, </s>, [PAD], [UNK], [START], [STOP] should appear in the vocab file.
def example_generator(data_path, single_pass):
"""Generates tf.Examples from data files.
Binary data format: <length><blob>. <length> represents the byte size
of <blob>. <blob> is serialized tf.Example proto. The tf.Example contains
the tokenized article text and summary.
Args:
data_path:
Path to tf.Example data files. Can include wildcards, e.g. if you have several training data chunk files train_001.bin, train_002.bin, etc, then pass data_path=train_* to access them all.
single_pass:
Boolean. If True, go through the dataset exactly once, generating examples in the order they appear, then return. Otherwise, generate random examples indefinitely.
Yields:
Deserialized tf.Example.
"""
while True:
filelist = glob.glob(data_path) # get the list of datafiles
assert filelist, ('Error: Empty filelist at %s' % data_path) # check filelist isn't empty
if single_pass:
filelist = sorted(filelist)
else:
random.shuffle(filelist)
for f in filelist:
reader = open(f, 'rb')
while True:
len_bytes = reader.read(8)
if not len_bytes: break # finished reading this file
str_len = struct.unpack('q', len_bytes)[0]
example_str = struct.unpack('%ds' % str_len, reader.read(str_len))[0]
yield example_pb2.Example.FromString(example_str)
if single_pass:
print "example_generator completed reading all datafiles. No more data."
break
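# Illustrative counterpart of the reader above (an assumption about how the
# .bin chunks were produced, mirroring the usual pointer-generator tooling):
#
#   tf_example = example_pb2.Example()
#   tf_example.features.feature['article'].bytes_list.value.extend([article])
#   tf_example.features.feature['abstract'].bytes_list.value.extend([abstract])
#   example_str = tf_example.SerializeToString()
#   writer.write(struct.pack('q', len(example_str)))
#   writer.write(struct.pack('%ds' % len(example_str), example_str))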
def abstract2sents(abstract):
"""Splits abstract text from datafile into list of sentences.
Args:
abstract: string containing <s> and </s> tags for starts and ends of sentences
Returns:
sents: List of sentence strings (no tags)"""
cur = 0
sents = []
while True:
try:
start_p = abstract.index(SENTENCE_START, cur)
end_p = abstract.index(SENTENCE_END, start_p + 1)
cur = end_p + len(SENTENCE_END)
sents.append(abstract[start_p + len(SENTENCE_START):end_p])
except ValueError as e: # no more sentences
return sents
def text_generator(example_generator):
"""Generates article and abstract text from tf.Example.
Args:
example_generator: a generator of tf.Examples from file. See data.example_generator"""
while True:
e = example_generator.next() # e is a tf.Example
try:
article_text = e.features.feature['article'].bytes_list.value[
0] # the article text was saved under the key 'article' in the data files
abstract_text = e.features.feature['abstract'].bytes_list.value[
0] # the abstract text was saved under the key 'abstract' in the data files
except ValueError:
# tf.logging.error('Failed to get article or abstract from example')
continue
else:
yield (article_text, abstract_text)
def read_bin_files(input_bin_path, output_csv_path, single_pass):
"""Reads data from file and processes into Examples which are then placed into the example queue."""
input_gen = text_generator(example_generator(input_bin_path, single_pass))
with open(output_csv_path, mode='w') as output_file:
output_writer = csv.writer(output_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
while True:
try:
(article,
abstract) = input_gen.next() # read the next example from file. article and abstract are both strings.
except StopIteration: # if there are no more examples:
# tf.logging.info("The example generator for this example queue filling thread has exhausted data.")
if single_pass:
# tf.logging.info("single_pass mode is on, so we've finished reading dataset. This thread is stopping.")
# self._finished_reading = True
break
else:
raise Exception("single_pass mode is off but the example generator is out of data; error.")
# Use the <s> and </s> tags in abstract to get a list of sentences.
abstract_sentences = [sent.strip() for sent in abstract2sents(abstract)]
output_writer.writerow(['. '.join(abstract_sentences), article])
if __name__ == "__main__":
input_bin_path = '/home/sampanna/Study/BDTS/modified-keras-text-summarization/files/cnn/finished_files/chunked/train_*.bin'
output_csv_path = 'cnn_summary_dataset.csv'
    read_bin_files(input_bin_path, output_csv_path, True)
| 45.231405 | 195 | 0.655582 |
f70c3ff4abf853d2e0f73c630cdcf9d40b4f5ab7 | 983 | py | Python | Assets/GameSparks/Editor/post_process.py | dgeisert/MiniJam72AdventureDeath | 8cb7eea2111984f6f63486c54dadb7950adf9ff3 | [
"Unlicense"
] | null | null | null | Assets/GameSparks/Editor/post_process.py | dgeisert/MiniJam72AdventureDeath | 8cb7eea2111984f6f63486c54dadb7950adf9ff3 | [
"Unlicense"
] | null | null | null | Assets/GameSparks/Editor/post_process.py | dgeisert/MiniJam72AdventureDeath | 8cb7eea2111984f6f63486c54dadb7950adf9ff3 | [
"Unlicense"
] | null | null | null | import os
import re
from sys import argv
from mod_pbxproj import XcodeProject
path = argv[1]
print path
project = XcodeProject.Load(path +'/Unity-iPhone.xcodeproj/project.pbxproj')
project.add_file_if_doesnt_exist('System/Library/Frameworks/Security.framework', tree='SDKROOT')
project.add_file_if_doesnt_exist('usr/lib/libicucore.dylib', tree='SDKROOT')
# regex for adjust sdk files
re_adjust_files = re.compile(r"SRWebSocket\.m")
# iterate all objects in the unity Xcode iOS project file
for key in project.get_ids():
obj = project.get_obj(key)
name = obj.get('name')
adjust_file_match = re_adjust_files.match(name if name else "")
    if adjust_file_match:
build_files = project.get_build_files(key)
for build_file in build_files:
# add the ARC compiler flag to the adjust file if doesn't exist
build_file.add_compiler_flag('-fobjc-arc')
if project.modified:
project.backup()
project.save()
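# Typical invocation (illustrative): Unity runs this as an iOS post-build step
# with the Xcode build path as the only argument, e.g.
#   python post_process.py /path/to/ios/build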
| 27.305556 | 96 | 0.720244 |
f70c4348b188e43d79cf8b756f4fb1b4466cb021 | 2,025 | py | Python | indy-tests/utils/utils.py | NgoAnhKhoi/indy-testcase | 1f85d2b7e77a5bb9637379286d7f7f142c2c626e | [
"MIT"
] | null | null | null | indy-tests/utils/utils.py | NgoAnhKhoi/indy-testcase | 1f85d2b7e77a5bb9637379286d7f7f142c2c626e | [
"MIT"
] | null | null | null | indy-tests/utils/utils.py | NgoAnhKhoi/indy-testcase | 1f85d2b7e77a5bb9637379286d7f7f142c2c626e | [
"MIT"
] | null | null | null | '''
Created on Nov 9, 2017
@author: khoi.ngo
'''
def generate_random_string(prefix="", suffix="", size=20):
"""
    Generate a random string.
    :param prefix: (optional) Prefix of the string.
    :param suffix: (optional) Suffix of the string.
    :param size: (optional) Max length of the string (including prefix and suffix)
:return: The random string.
"""
import random
import string
left_size = size - len(prefix) - len(suffix)
random_str = ""
if left_size > 0:
random_str = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(left_size))
else:
print("Warning: Length of prefix and suffix more than %s chars" % str(size))
result = str(prefix) + random_str + str(suffix)
return result
def create_step(size):
from utils.step import Step
lst_step = []
for i in range(0, size):
step = Step(i, "")
lst_step.append(step)
return lst_step
def handle_exception(code):
    if isinstance(code, Exception):
raise code
else:
return code
async def perform(step, func, *args):
    from indy.error import IndyError
    from utils.report import Status
    result = None
    try:
        result = await func(*args)
step.set_status(Status.PASSED)
except IndyError as E:
print("Indy error" + str(E))
step.set_message(str(E))
return E
except Exception as Ex:
print("Exception" + str(Ex))
        step.set_message(str(Ex))
return Ex
return result
async def perform_with_expected_code(step, func, *args, expected_code=0):
    from indy.error import IndyError
    from utils.report import Status
    try:
        await func(*args)
    except IndyError as E:
        if E.error_code == expected_code:
step.set_status(Status.PASSED)
else:
print("Indy error" + str(E))
step.set_message(str(E))
return E
except Exception as Ex:
print("Exception" + str(Ex))
return Ex
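# Minimal usage sketch for the self-contained helper above (illustrative):
if __name__ == "__main__":
    print(generate_random_string(prefix="did:", size=22))  # e.g. did:4F7G2K9Q1MZB8TXCW0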
| 25.961538 | 109 | 0.620247 |
f70c4510e1c769c5ddba6263ecda35d921c80b8e | 4,335 | py | Python | lightly/openapi_generated/swagger_client/models/job_status_data_result.py | Tekrific/lightly | 75a1d56b4cee77f68e0f3166e3a412711d0dbb2d | [
"MIT"
] | 1,515 | 2020-10-05T13:04:17.000Z | 2022-03-31T16:14:55.000Z | lightly/openapi_generated/swagger_client/models/job_status_data_result.py | Tekrific/lightly | 75a1d56b4cee77f68e0f3166e3a412711d0dbb2d | [
"MIT"
] | 628 | 2020-10-14T11:38:51.000Z | 2022-03-31T14:40:54.000Z | lightly/openapi_generated/swagger_client/models/job_status_data_result.py | Tekrific/lightly | 75a1d56b4cee77f68e0f3166e3a412711d0dbb2d | [
"MIT"
] | 108 | 2020-10-17T08:31:06.000Z | 2022-03-20T16:44:22.000Z | # coding: utf-8
"""
Lightly API
Lightly.ai enables you to do self-supervised learning in an easy and intuitive way. The lightly.ai OpenAPI spec defines how one can interact with our REST API to unleash the full potential of lightly.ai # noqa: E501
OpenAPI spec version: 1.0.0
Contact: support@lightly.ai
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from lightly.openapi_generated.swagger_client.configuration import Configuration
class JobStatusDataResult(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'type': 'JobResultType',
'data': 'GeneralJobResult'
}
attribute_map = {
'type': 'type',
'data': 'data'
}
def __init__(self, type=None, data=None, _configuration=None): # noqa: E501
"""JobStatusDataResult - a model defined in Swagger""" # noqa: E501
if _configuration is None:
_configuration = Configuration()
self._configuration = _configuration
self._type = None
self._data = None
self.discriminator = None
self.type = type
if data is not None:
self.data = data
@property
def type(self):
"""Gets the type of this JobStatusDataResult. # noqa: E501
:return: The type of this JobStatusDataResult. # noqa: E501
:rtype: JobResultType
"""
return self._type
@type.setter
def type(self, type):
"""Sets the type of this JobStatusDataResult.
:param type: The type of this JobStatusDataResult. # noqa: E501
:type: JobResultType
"""
if self._configuration.client_side_validation and type is None:
raise ValueError("Invalid value for `type`, must not be `None`") # noqa: E501
self._type = type
@property
def data(self):
"""Gets the data of this JobStatusDataResult. # noqa: E501
:return: The data of this JobStatusDataResult. # noqa: E501
:rtype: GeneralJobResult
"""
return self._data
@data.setter
def data(self, data):
"""Sets the data of this JobStatusDataResult.
:param data: The data of this JobStatusDataResult. # noqa: E501
:type: GeneralJobResult
"""
self._data = data
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(JobStatusDataResult, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, JobStatusDataResult):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, JobStatusDataResult):
return True
return self.to_dict() != other.to_dict()
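# Usage sketch (illustrative; JobResultType is another generated model in this
# swagger client package):
#
#   result = JobStatusDataResult(type=some_job_result_type)
#   print(result.to_dict())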
| 28.708609 | 220 | 0.582699 |
f70c7c8d3e72df63f8d1547bb698e8f8588b13ff | 3,574 | py | Python | rindr/settings.py | claird160/rindr | 0ab9d77edf6258ab8f304fd4f1c5f92d96ff7a60 | [
"MIT"
] | null | null | null | rindr/settings.py | claird160/rindr | 0ab9d77edf6258ab8f304fd4f1c5f92d96ff7a60 | [
"MIT"
] | null | null | null | rindr/settings.py | claird160/rindr | 0ab9d77edf6258ab8f304fd4f1c5f92d96ff7a60 | [
"MIT"
] | null | null | null | """
Django settings for rindr project.
Generated by 'django-admin startproject' using Django 3.2.8.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.2/ref/settings/
"""
from pathlib import Path
from decouple import config
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = config("SECRET_KEY")
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'type',
'ticket',
'django_bootstrap5',
'jquery',
'dashboard',
'mathfilters',
'BI'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'rindr.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'rindr.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.2/ref/settings/#databases
#DATABASES = {
# 'default': {
# 'ENGINE': 'django.db.backends.sqlite3',
# 'NAME': BASE_DIR / 'db.sqlite3',
# }
#}
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'rindr',
'USER': 'rindr',
'PASSWORD': 'freya',
'HOST': '10.100.102.161',
'PORT': ''
}
}
# Password validation
# https://docs.djangoproject.com/en/3.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.2/howto/static-files/
STATIC_URL = '/static/'
# Default primary key field type
# https://docs.djangoproject.com/en/3.2/ref/settings/#default-auto-field
DEFAULT_AUTO_FIELD = 'django.db.models.BigAutoField'
LOGIN_URL="/login"
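# Note: config() above comes from python-decouple; SECRET_KEY is read from a
# .env file or the process environment (e.g. a line "SECRET_KEY=<random value>").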
| 24.648276 | 91 | 0.678232 |
f70c7cead4990040f286a584fa949ea7edd561a9 | 1,347 | py | Python | apysc/_display/flip_interface_helper.py | ynsnf/apysc | b10ffaf76ec6beb187477d0a744fca00e3efc3fb | [
"MIT"
] | 16 | 2021-04-16T02:01:29.000Z | 2022-01-01T08:53:49.000Z | apysc/_display/flip_interface_helper.py | ynsnf/apysc | b10ffaf76ec6beb187477d0a744fca00e3efc3fb | [
"MIT"
] | 613 | 2021-03-24T03:37:38.000Z | 2022-03-26T10:58:37.000Z | apysc/_display/flip_interface_helper.py | simon-ritchie/apyscript | c319f8ab2f1f5f7fad8d2a8b4fc06e7195476279 | [
"MIT"
] | 2 | 2021-06-20T07:32:58.000Z | 2021-12-26T08:22:11.000Z | """The helper module for the flip interfaces.
"""
from enum import Enum
from apysc._type.boolean import Boolean
class Axis(Enum):
X = 'x'
Y = 'y'
def make_flip_update_expression(
*, before_value: Boolean, after_value: Boolean,
axis: Axis, interface_variable_name: str) -> str:
"""
Make a flipping value updating expression.
Parameters
----------
    before_value : Boolean
        Flip value before the update.
    after_value : Boolean
        Flip value after the update.
    axis : Axis
        X or Y axis value.
interface_variable_name : str
Interface instance variable name.
Returns
-------
expression : str
Made expression string.
"""
from apysc._type import value_util
before_value_str: str = value_util.get_value_str_for_expression(
value=before_value)
after_value_str: str = value_util.get_value_str_for_expression(
value=after_value)
expression: str = (
f'if ({before_value_str}) {{'
f'\n {interface_variable_name}.flip("{axis.value}");'
'\n}'
f'\nif ({after_value_str}) {{'
f'\n {interface_variable_name}.flip("{axis.value}");'
'\n}'
f'\n{before_value_str} = {after_value_str};'
)
return expression
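# For example (illustrative), with interface_variable_name='sp_1' and
# axis=Axis.X the returned JavaScript expression has the shape:
#
#   if (<before>) {
#     sp_1.flip("x");
#   }
#   if (<after>) {
#     sp_1.flip("x");
#   }
#   <before> = <after>;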
| 26.411765 | 69 | 0.604306 |
f70cb404ff70014a8e6b2a47aa2bfba1304d3c59 | 31,360 | py | Python | hd_recognition/GUI.py | Seledriac/A-small-python-library-for-deep-learning | c041287b04ba217910f621d34c7739365c36ad48 | [
"MIT"
] | 1 | 2020-06-11T05:04:08.000Z | 2020-06-11T05:04:08.000Z | hd_recognition/GUI.py | Seledriac/A-small-python-library-for-deep-learning | c041287b04ba217910f621d34c7739365c36ad48 | [
"MIT"
] | 3 | 2020-06-14T10:26:57.000Z | 2020-06-14T10:37:58.000Z | hd_recognition/GUI.py | Seledriac/A-small-python-library-for-deep-learning | c041287b04ba217910f621d34c7739365c36ad48 | [
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
"""Handwritten digits recognition Graphic interface module : training done with the mnist dataset"""
# Third-party gui/system/plotting Libraries
import numpy as np
import tkinter as tk
import tkinter.font as tkFont
from tkinter import messagebox
from tkinter import filedialog
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from PIL import ImageTk, Image
from PyQt5.QtWidgets import QApplication, QMainWindow, QLabel
from PyQt5.QtGui import QPainter, QPixmap, QPen, QScreen
import pickle
import webbrowser
import os
import sys
sys.path.insert(1, str(os.getcwd()))
# Neural network module
import network
# ------------------------------------------------------------------------------tkinter GUI---------------------------------------------------------------------------------------------
class Interface(tk.Frame):
"""graphic interface class"""
# ------------------------------------------------------------------------------__init__------------------------------------------------------------------------------------------------
def __init__(self, window, **kwargs):
"""Displays the main menu"""
# Fonts
self.big_font_button = tkFont.Font(family='Calibri', size=20, weight='bold')
self.medium_large_font_button = tkFont.Font(family='Calibri', size=16, weight='bold')
self.medium_font_button = tkFont.Font(family='Calibri', size=14, weight='bold')
self.font_title = tkFont.Font(family='Calibri', size=36, weight='bold')
self.number_button_font = tkFont.Font(family='Calibri', size=25, weight='bold')
# Display main menu
self.main_menu(window, **kwargs)
# ------------------------------------------------------------------------------Main Menu Interface--------------------------------------------------------------------------------------
def main_menu(self, window, **kwargs):
"""Main menu Frame"""
# Frame creation
if hasattr(self, 'children'):
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
self.pack()
# Github Button
img_github = ImageTk.PhotoImage(Image.open("hd_recognition/assets/github.jpg").resize((50,50)))
btn_github = tk.Button(self, image=img_github, command=lambda: webbrowser.open("https://github.com/Seledriac/A-small-pedagogic-python-library-for-supervised-neural-networks/"))
btn_github.img = img_github
btn_github.grid(column=0, row=0, padx=50, pady=(0,50))
# Title
title = tk.Label(self, text="Supervised neural networks\n applied to handwritten digits recognition", bg="#fff2f2", font=self.font_title)
title.grid(column=1, row=0, pady=25)
# Readme Button
img_readme = ImageTk.PhotoImage(Image.open("hd_recognition/assets/readme.png").resize((50,50)))
btn_readme = tk.Button(self, image=img_readme, command=lambda: os.startfile("README.md"))
btn_readme.img = img_readme
btn_readme.grid(column=2, row=0, padx=60, pady=(0,50))
# Button selection frame
btns_frames = tk.LabelFrame(self, padx=50, pady=50, borderwidth=5)
btns_frames.grid(row=1, column=1, columnspan=3, pady=(65,80), padx=(0,180))
# Menu Buttons
create_model_button = tk.Button(btns_frames, text="Create a model", font=self.big_font_button, command=lambda: self.create_model(window, **kwargs))
create_model_button.grid(column=0, row=0, padx=10, pady=10)
train_model_button = tk.Button(btns_frames, text="Train a model", font=self.big_font_button, command=lambda: self.train_model(window, **kwargs))
        train_model_button.grid(column=1, row=0, padx=10, pady=10)
evaluate_button = tk.Button(btns_frames, text="Accuracy Ladder", font=self.big_font_button, command=lambda: self.models_ladder(window, **kwargs))
        evaluate_button.grid(column=0, row=1, padx=10, pady=10)
predict_button = tk.Button(btns_frames, text="Predict", font=self.big_font_button, command=lambda: self.choose_prediction(window, **kwargs))
        predict_button.grid(column=1, row=1, padx=10, pady=10)
# ------------------------------------------------------------------------------Model Creation Interface------------------------------------------------------------------------------------
def create_model(self, window, **kwargs):
"""Model creation Frame"""
# Frame creation
self.destroy()
if hasattr(self, 'hidden_layers_label'):
delattr(self, 'hidden_layers_label')
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
self.pack()
# Main menu Button
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0)
# Title
title = tk.Label(self, text="Model Creation", bg="#fff2f2", font=self.font_title)
title.grid(column=1, row=0)
# Model Validation frame
model_creation_validation_frame = tk.LabelFrame(self, borderwidth=3)
model_creation_validation_frame.grid(row=0, column=2, pady=(20,0))
model_creation_validation_label = tk.Label(model_creation_validation_frame, text="Model name", font=self.medium_font_button)
model_creation_validation_label.pack()
self.model_creation_validation_entry = tk.Entry(model_creation_validation_frame)
self.model_creation_validation_entry.pack()
model_creation_validation_button = tk.Button(model_creation_validation_frame, text="Create Model", font=self.medium_font_button, command=self.model_creation_validation)
model_creation_validation_button.pack()
# Model customization frame
creation_custom_frame = tk.LabelFrame(self, padx=50, pady=50, borderwidth=5)
creation_custom_frame.grid(row=1, column=0, columnspan=3, pady=(30,0))
# Input layer Frame
input_layer_frame = tk.LabelFrame(creation_custom_frame)
input_layer_frame.grid(row=0, column=0)
input_layer_label = tk.Label(input_layer_frame, text="Input Layer", font=self.medium_font_button)
input_layer_label.pack()
self.input_layer_number = tk.Entry(input_layer_frame)
self.input_layer_number.insert(0,784)
self.input_layer_number.pack()
# Hidden layers Frame
self.hidden_layers = []
self.hidden_layers_frame = tk.LabelFrame(creation_custom_frame)
self.hidden_layers_frame.grid(row=0, column=1)
self.add_hidden_layer()
self.add_hidden_layer()
# Output layer Frame
output_layer_frame = tk.LabelFrame(creation_custom_frame)
output_layer_frame.grid(row=0, column=2, padx=70)
output_layer_label = tk.Label(output_layer_frame, text="Output Layer", font=self.medium_font_button)
output_layer_label.pack()
self.output_layer_number = tk.Entry(output_layer_frame)
self.output_layer_number.insert(0,10)
self.output_layer_number.pack()
# Hidden layer adding/deleting buttons
add_hidden_layer_button = tk.Button(creation_custom_frame, text="Add a hidden layer", font=self.medium_font_button, command=self.add_hidden_layer)
add_hidden_layer_button.grid(column = 0, row = 1, padx=50, pady=40)
del_hidden_layer_button = tk.Button(creation_custom_frame, text="Delete the last hidden layer", font=self.medium_font_button, command=self.del_hidden_layer)
del_hidden_layer_button.grid(column = 1, row = 1, padx=50, pady=40, columnspan=2)
def add_hidden_layer(self):
"""Add a hidden layer in the model creation Frame"""
if not hasattr(self, 'hidden_layers_label'):
self.hidden_layers_label = tk.Label(self.hidden_layers_frame, text="Hidden Layer(s)", font=self.medium_font_button)
self.hidden_layers_label.grid(row=0, column=0, columnspan=10)
if len(self.hidden_layers) < 5:
new_hidden_layer = tk.Scale(self.hidden_layers_frame, from_=1, to=128, length=150)
new_hidden_layer.grid(row=1,column=len(self.hidden_layers), padx=(0,20))
self.hidden_layers.append(new_hidden_layer)
def del_hidden_layer(self):
"""Delete a hidden layer in the model creation Frame"""
if len(self.hidden_layers) > 1:
self.hidden_layers[-1].destroy()
del self.hidden_layers[-1]
elif hasattr(self, 'hidden_layers_label'):
self.hidden_layers[-1].destroy()
del self.hidden_layers[-1]
self.hidden_layers_label.destroy()
delattr(self, 'hidden_layers_label')
def model_creation_validation(self):
"""This method is executed when the model creation validation button is clicked. It creates the model, serlializes it, and shows a recap od the model in a message box to the user"""
model_name = self.model_creation_validation_entry.get()
try:
input_number = int(self.input_layer_number.get())
output_number = int(self.output_layer_number.get())
        except ValueError:
            messagebox.showerror("Error", "Error : enter a number of neurons for all the layers")
            return
if model_name and input_number and output_number:
sizes = [input_number]
msg = "Model \"{}\" successfully created.\n\nInput layer : {} neurons\n".format(str(self.model_creation_validation_entry.get()), str(input_number))
for i,layer in enumerate(self.hidden_layers):
nb_neurons = int(layer.get())
sizes.append(nb_neurons)
msg = msg + "Hidden layer {} : {} neurons\n".format(str(i + 1), str(nb_neurons))
sizes.append(output_number)
msg = msg + "Output layer : {} neurons\n\nActivation function : sigmoid (by default)".format(str(output_number))
net = network.Network(model_name, sizes)
with open("models/hd_recognition/{}.pickle".format(model_name), "wb") as fic:
pickler = pickle.Pickler(fic)
pickler.dump(net)
messagebox.showinfo("Model Info", msg)
else:
messagebox.showerror("Error", "Error : missing required fields")
# ------------------------------------------------------------------------------Model Training Interface------------------------------------------------------------------------------------
def train_model(self, window, **kwargs):
"""Model training specs Frame"""
# Frame creation
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
self.pack()
# Chosing the model which we will train
self.open_model_file()
# Main menu Button
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0, padx=(25,0))
# Title
title = tk.Label(self, text="Model Training\n(mnist dataset)", bg="#fff2f2", font=self.font_title)
title.grid(column=1, row=0, pady=80, padx=(200,0))
# Model training validation frame
model_training_validation_frame = tk.LabelFrame(self, borderwidth=3)
model_training_validation_frame.grid(row=0, column=2, padx=(200,0), pady=(10,0))
model_training_validation_button = tk.Button(model_training_validation_frame, text="Train", font=self.medium_large_font_button, command=lambda: self.model_training(window, **kwargs))
model_training_validation_button.pack()
# Model training customization frame
training_custom_frame = tk.LabelFrame(self, padx=50, pady=50, borderwidth=5)
training_custom_frame.grid(row=1, column=0, columnspan=100, padx=(0,15))
# Epochs Frame
epochs_frame = tk.LabelFrame(training_custom_frame)
epochs_frame.grid(row=0, column=0)
epochs_label = tk.Label(epochs_frame, text="Epochs", font=self.medium_font_button)
epochs_label.pack()
self.epochs_number = tk.Entry(epochs_frame)
self.epochs_number.insert(0,3)
self.epochs_number.pack()
# Batch size Frame
batch_size_frame = tk.LabelFrame(training_custom_frame)
batch_size_frame.grid(row=0, column=2, padx=70)
batch_size_label = tk.Label(batch_size_frame, text="batch size", font=self.medium_font_button)
batch_size_label.pack()
self.batch_size_number = tk.Entry(batch_size_frame)
self.batch_size_number.insert(0,10)
self.batch_size_number.pack()
# Display weights checkbox
display_weights_frame = tk.LabelFrame(training_custom_frame)
display_weights_frame.grid(row=0, column=3)
self.display_weights_value = tk.IntVar()
display_weights_cb = tk.Checkbutton(display_weights_frame, text="Dynamically display the weights of the first layer", font=self.medium_font_button, variable=self.display_weights_value)
display_weights_cb.pack()
def model_training(self, window, **kwargs):
"""Model training Frame"""
# Training values retrieving
disp_weights = bool(self.display_weights_value.get())
try:
epochs = int(self.epochs_number.get())
batch_size = int(self.batch_size_number.get())
        except ValueError:
            messagebox.showerror("Error", "Error : please enter a numeric value for each field")
            return
if epochs and batch_size:
# Frame creation
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
# Main menu Button
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0)
# Training trigger button
doIt = tk.Button(self, text="Start the Training", command=lambda: self.start_training(epochs, batch_size, disp_weights), font=self.big_font_button)
doIt.grid(row=0, column=1, pady=20)
# Training logs textbox
textbox_frame = tk.LabelFrame(self)
textbox_frame.grid(row=1, column=0, columnspan=2)
self.output = tk.Text(textbox_frame, width=110, height=30, bg='black', fg='white')
self.output.pack(side=tk.LEFT)
# Scrollbar
scrollbar = tk.Scrollbar(textbox_frame, orient="vertical", command = self.output.yview)
scrollbar.pack(side=tk.RIGHT, fill="y")
self.output['yscrollcommand'] = scrollbar.set
self.pack()
else:
messagebox.showerror("Error", "Error : missing required fields")
def start_training(self, epochs, batch_size, disp_weights):
"""This method executes the SGD training method on a given model"""
# Importing the mnist dataset
import mnist_loader
training_data, validation_data, test_data = mnist_loader.load_data_wrapper()
training_data = list(training_data)
validation_data = list(validation_data)
test_data = list(test_data)
# Model training via SGD
net = self.model_file
self.output.insert(tk.END, "\n" + str(net) + "\n")
self.update_idletasks()
net.SGD(training_data, epochs, batch_size, test_data=test_data, display_weights=disp_weights, gui=self)
# Model saving
with open("models/hd_recognition/{}.pickle".format(net.id), "wb") as saving:
saver = pickle.Pickler(saving)
saver.dump(net)
# Performance test of the network on the validation data
        accuracy = str(100 * net.evaluate(validation_data) / len(validation_data))
self.output.insert(tk.END, "\nTest on the validation data -> Accuracy : {0}%\n".format(accuracy))
self.update_idletasks()
self.output.see("end")
# Ladder update
with open("models/hd_recognition/accuracy_ladder.md", "a") as ladder:
adding = str(net) + " --> accuracy = " + accuracy + "\n"
ladder.write(adding)
with open("models/hd_recognition/accuracy_ladder.md", "r") as ladder:
shove_percent = ladder.read().replace("%", "")
content = [net.split("= ") for net in shove_percent.split('\n')]
content.pop()
content_updated = sorted([(acc,net) for net,acc in content], reverse = True)
tostring = "%\n".join(["= ".join((net,acc)) for acc,net in content_updated]) + "%\n"
with open("models/hd_recognition/accuracy_ladder.md", "w") as ladder:
ladder.write(tostring)
# ------------------------------------------------------------------------------Models Ladder Interface------------------------------------------------------------------------------------
def models_ladder(self, window, **kwargs):
"""Models ladder frame"""
# Frame creation
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
# Main menu Button
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0)
# Ladder label
ladder_label = tk.Label(self, text="Models Accuracy Ladder", font=self.font_title, bg="#fff2f2")
ladder_label.grid(row=0, column=1, padx=(0,150), pady=20)
# Ladder textbox
textbox_frame = tk.LabelFrame(self)
textbox_frame.grid(row=1, column=0, columnspan=2)
output = tk.Text(textbox_frame, width=100, height=20, font=self.medium_font_button)
output.pack(side=tk.LEFT)
with open("models/hd_recognition/accuracy_ladder.md", "r") as ladder:
content = ladder.read()
output.insert(tk.END, content)
self.update_idletasks()
output.see("end")
# Scrollbar
scrollbar = tk.Scrollbar(textbox_frame, orient="vertical", command = output.yview)
scrollbar.pack(side=tk.RIGHT, fill="y")
output['yscrollcommand'] = scrollbar.set
self.pack()
# ------------------------------------------------------------------------------Prediction Interface---------------------------------------------------------------------------------------
def choose_prediction(self, window, **kwargs):
"""Prediction style choice frame"""
# Frame creation
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
self.pack()
# Opening the model which will predict
self.open_model_file()
# Main menu Button
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0, padx=(0,125), pady=(15,100))
# Ladder label
choice_label = tk.Label(self, text="Choose the prediction style", font=self.font_title, bg="#fff2f2")
choice_label.grid(row=0, column=1, columnspan=10, padx=(50,250), pady=50)
# Choice buttons
choice_custom = tk.Button(self, text="Predict with custom test images", font=self.big_font_button, command=lambda: self.custom_prediction_frame(window, **kwargs))
choice_custom.grid(row=1, column=1, padx=(0,0), pady=(100))
choice_live = tk.Button(self, text="Live prediction", font=self.big_font_button, command=lambda: self.live_prediction_frame(window, **kwargs))
choice_live.grid(row=1, column=2, padx=(50,200), pady=(100))
def custom_prediction_frame(self, window, **kwargs):
"""Custom images prediction frame"""
# Frame creation
self.destroy()
tk.Frame.__init__(self, window, width=1180, height=620, bg="#fff2f2", **kwargs)
self.pack()
# Main menu Button
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0, pady=(10,30))
# Title label
title_label = tk.Label(self, text="Custom images prediction\nChoose the number to predict", font=self.number_button_font, bg="#fff2f2")
title_label.grid(row=0, column=1, columnspan=2, padx=(0,150), pady=10)
# Number buttons Frame
number_buttons_frame = tk.LabelFrame(self, borderwidth=3, bg='white', pady=10)
number_buttons_frame.grid(row=1,column=1, columnspan=2, padx=(0,150))
# Number buttons
        # One button per digit; the lambda's default argument freezes each digit value
        for digit in range(10):
            number_button = tk.Button(number_buttons_frame, font=self.number_button_font, text=str(digit), command=lambda d=digit: self.number_button_click(d))
            number_button.grid(column=digit, row=1, padx=15)
def number_button_click(self, number):
"""This method is executed when a number button is clicked. It displays the model's prediction on a matplotlib figure"""
# Opening the corresponding custom image
img_filename_bmp = "hd_recognition/custom_test_images/test_image_"+str(number)+".bmp"
test_image = Image.open(img_filename_bmp)
# Predicting based on the custom image
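        # Invert and scale pixel values to [0, 1] so ink is ~1 and background ~0, as in MNIST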
image_array = 1 - (np.array(test_image).reshape(784,1) / 255)
model_activations = self.model_file.feedforward(image_array)
# Custom image display
img_filename_png = "hd_recognition/custom_test_images/test_image_"+str(number)+".png"
custom_image = ImageTk.PhotoImage(Image.open(img_filename_png))
custom_image_label = tk.Label(self, image=custom_image, relief='ridge')
custom_image_label.image=custom_image
custom_image_label.grid(row=2, column=1, padx=10, pady=(5,5))
# Prediction plot frame
prediction_frame = tk.LabelFrame(self)
prediction_frame.grid(row=2,column=2, padx=(10,150), pady=(5,5))
# Plotting the model activations
self.plot_model_activation(model_activations, prediction_frame)
def live_prediction_frame(self, window, **kwargs):
"""Live prediction of the numbers drew by the user"""
# Frame creation
self.destroy()
window.geometry("1500x800")
tk.Frame.__init__(self, window, width=1500, height=800, bg="#fff2f2", **kwargs)
self.pack()
# Main menu Button
img_home = ImageTk.PhotoImage(Image.open("hd_recognition/assets/home.png").resize((95,50)))
btn_home = tk.Button(self, image=img_home, command=lambda: self.main_menu(window, **kwargs))
btn_home.img = img_home
btn_home.grid(column=0, row=0, padx=100)
# Title
title = tk.Label(self, text="Live prediction\nDraw the number to predict", bg="#fff2f2", font=self.font_title)
title.grid(column=1, row=0, pady=80)
# Start button frame
live_prediction_starting_frame = tk.LabelFrame(self, borderwidth=3)
live_prediction_starting_frame.grid(row=0, column=2, padx=100)
live_prediction_starting_button = tk.Button(live_prediction_starting_frame, text="Start", font=self.medium_large_font_button, command=lambda: self.start_live_prediction(window))
live_prediction_starting_button.pack()
def start_live_prediction(self, window):
"""Live prediction Qt drawing window display"""
# DrawingWindow creation
App = QApplication(sys.argv)
QtWindow = DrawingWindow(App, self)
QtWindow.setWindowTitle("Digit drawing window")
QtWindow.show()
sys.exit(App.exec())
# ------------------------------------------------------------------------------Miscellaneous Methods--------------------------------------------------------------------------------------
def open_model_file(self):
"""Prompts the user to choose a model file"""
re = True
while re:
try:
# Model file opening prompt
self.model_filename = filedialog.askopenfilename(initialdir="models/hd_recognition", title="Choose the model", filetypes=(("pickle files","*.pickle"), ("model files","*.model"), ("all files", "*.*")))
assert self.model_filename
re = False
except:
messagebox.showerror("Error", "Error : please select a model file")
with open(self.model_filename, "rb") as fic:
unpickler = pickle.Unpickler(fic)
self.model_file = unpickler.load()
def plot_model_activation(self, model_activations, frame):
"""Plots the current model activations in a given frame (in a prediction context)"""
fig = Figure(figsize = (4, 4))
fig.clf()
fig.add_subplot(111).plot(range(10), model_activations)
fig.suptitle("corresponding model activations")
axes = fig.gca()
axes.set_xlabel("digit")
axes.set_ylabel("activation")
axes.set_ylim([0, 1])
axes.set_xticks(range(10))
axes.set_yticks(np.array(range(11))/10)
canvas = FigureCanvasTkAgg(fig, master=frame)
canvas.draw()
canvas.flush_events()
canvas.get_tk_widget().grid(row=0, column=1)
self.annot_max(range(10), model_activations, axes)
    @staticmethod
    def annot_max(x, y, ax):
        """Max network activation annotation for a number image"""
xmax = x[np.argmax(y)]
ymax = y.max()
text = "digit = {}, activation = {:.3f}".format(xmax,ymax)
if xmax <= 4:
orientation = str((1 / abs(5 - (xmax + 1))) / 10)
else:
orientation = str(-(1 / abs(5 - (xmax + 1))) / 10)
bbox_props = dict(boxstyle="square,pad=0.3", fc="w", ec="k", lw=1)
arrowprops=dict(arrowstyle="-|>",connectionstyle="arc3,rad="+orientation)
kw = dict(xycoords='data',textcoords="axes fraction",
arrowprops=arrowprops, bbox=bbox_props, ha="right", va="top")
# ax.annotate(text, xy=(xmax, ymax), xytext=(xmax/10 - 0.1, ymax - 0.1), **kw)
ax.annotate(text, xy=(xmax, ymax), xytext=(0.8, 0.5), **kw)
# ------------------------------------------------------------------------------PyQt drawing window----------------------------------------------------------------------------------------
class DrawingWindow(QMainWindow):
"""Drawing window for live model prediction"""
def __init__(self, App, tkinter_root):
"""Initialization of the Drawing Window : we create a label centered in the window, in which we put a blank pixmap"""
super().__init__()
self.label = QLabel()
self.blank()
self.setCentralWidget(self.label)
self.App = App
self.tkinter_root = tkinter_root
self.last_x, self.last_y = None, None
def blank(self):
"""This method clears the QtWindow, setting the content of the centered label to a white pixmap"""
self.label.setPixmap(QPixmap("hd_recognition/assets/white.png"))
def mouseMoveEvent(self, e):
"""This method is executed while the click button is held"""
if self.last_x is None:
self.last_x = e.x()
self.last_y = e.y()
return
painter = QPainter(self.label.pixmap())
painter.drawLine(self.last_x, self.last_y, e.x(), e.y())
painter.end()
self.update()
# Updating the origin for next time
self.last_x = e.x()
self.last_y = e.y()
# Saving the screenshot and compressing it to a 28x28 image
QScreen.grabWindow(self.App.primaryScreen(), self.winId()).save("hd_recognition/tmp/screenshot.png", 'png')
resize_img = Image.open("hd_recognition/tmp/screenshot.png")
resize_img = resize_img.resize((28,28))
resize_img.save("hd_recognition/tmp/screenshot.png", 'png')
# Converting from standard png to greyscale
img_array = np.array(Image.open("hd_recognition/tmp/screenshot.png"))
img_array = np.array([[pixel[0] for pixel in line] for line in img_array])
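        # Scale to [0, 1] and invert so the drawn strokes read as high activations (MNIST convention)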
image_array = 1 - (img_array.reshape(784,1) / 255)
# Predicting the number
model_activations = self.tkinter_root.model_file.feedforward(image_array)
# Prediction plot frame
prediction_frame = tk.LabelFrame(self.tkinter_root)
prediction_frame.grid(row=2,column=2)
# Plotting the model activations
self.tkinter_root.plot_model_activation(model_activations, prediction_frame)
def mouseReleaseEvent(self, e):
self.last_x = None
self.last_y = None
# -----------------------------------------------------------------------------Tkinter Window creation-------------------------------------------------------------------------------------
window = tk.Tk()
window.geometry("1180x620")
window.title("Neural Networks")
window.configure(bg="#fff2f2")
interface = Interface(window)
interface.mainloop()
f70cbee84274bde26aeddaa7ebed722c81efeeab | 22,746 | py | Python | sdk/storage/azure-storage-blob/azure/storage/blob/_shared/uploads.py | vincenttran-msft/azure-sdk-for-python | ["MIT"]
# -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=no-self-use
from concurrent import futures
from io import (BytesIO, IOBase, SEEK_CUR, SEEK_END, SEEK_SET, UnsupportedOperation)
from threading import Lock
from itertools import islice
from math import ceil
import six
from azure.core.tracing.common import with_current_context
from . import encode_base64, url_quote
from .request_handlers import get_length
from .response_handlers import return_response_headers
from .encryption import get_blob_encryptor_and_padder
_LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE = 4 * 1024 * 1024
_ERROR_VALUE_SHOULD_BE_SEEKABLE_STREAM = "{0} should be a seekable file-like/io.IOBase type stream object."
def _parallel_uploads(executor, uploader, pending, running):
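    # Sliding-window scheduler: for every upload that completes, submit the next
    # pending chunk so at most the initial number of futures is in flight at once.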
range_ids = []
while True:
# Wait for some download to finish before adding a new one
done, running = futures.wait(running, return_when=futures.FIRST_COMPLETED)
range_ids.extend([chunk.result() for chunk in done])
try:
for _ in range(0, len(done)):
next_chunk = next(pending)
running.add(executor.submit(with_current_context(uploader), next_chunk))
except StopIteration:
break
# Wait for the remaining uploads to finish
done, _running = futures.wait(running)
range_ids.extend([chunk.result() for chunk in done])
return range_ids
def upload_data_chunks(
service=None,
uploader_class=None,
total_size=None,
chunk_size=None,
max_concurrency=None,
stream=None,
validate_content=None,
encryption_options=None,
progress_hook=None,
**kwargs):
if encryption_options:
encryptor, padder = get_blob_encryptor_and_padder(
encryption_options.get('cek'),
encryption_options.get('vector'),
uploader_class is not PageBlobChunkUploader)
kwargs['encryptor'] = encryptor
kwargs['padder'] = padder
parallel = max_concurrency > 1
if parallel and 'modified_access_conditions' in kwargs:
# Access conditions do not work with parallelism
kwargs['modified_access_conditions'] = None
uploader = uploader_class(
service=service,
total_size=total_size,
chunk_size=chunk_size,
stream=stream,
parallel=parallel,
validate_content=validate_content,
progress_hook=progress_hook,
**kwargs)
if parallel:
with futures.ThreadPoolExecutor(max_concurrency) as executor:
upload_tasks = uploader.get_chunk_streams()
running_futures = [
executor.submit(with_current_context(uploader.process_chunk), u)
for u in islice(upload_tasks, 0, max_concurrency)
]
range_ids = _parallel_uploads(executor, uploader.process_chunk, upload_tasks, running_futures)
else:
range_ids = [uploader.process_chunk(result) for result in uploader.get_chunk_streams()]
if any(range_ids):
return [r[1] for r in sorted(range_ids, key=lambda r: r[0])]
return uploader.response_headers
def upload_substream_blocks(
service=None,
uploader_class=None,
total_size=None,
chunk_size=None,
max_concurrency=None,
stream=None,
progress_hook=None,
**kwargs):
parallel = max_concurrency > 1
if parallel and 'modified_access_conditions' in kwargs:
# Access conditions do not work with parallelism
kwargs['modified_access_conditions'] = None
uploader = uploader_class(
service=service,
total_size=total_size,
chunk_size=chunk_size,
stream=stream,
parallel=parallel,
progress_hook=progress_hook,
**kwargs)
if parallel:
with futures.ThreadPoolExecutor(max_concurrency) as executor:
upload_tasks = uploader.get_substream_blocks()
running_futures = [
executor.submit(with_current_context(uploader.process_substream_block), u)
for u in islice(upload_tasks, 0, max_concurrency)
]
range_ids = _parallel_uploads(executor, uploader.process_substream_block, upload_tasks, running_futures)
else:
range_ids = [uploader.process_substream_block(b) for b in uploader.get_substream_blocks()]
if any(range_ids):
return sorted(range_ids)
return []
class _ChunkUploader(object): # pylint: disable=too-many-instance-attributes
def __init__(
self, service,
total_size,
chunk_size,
stream,
parallel,
encryptor=None,
padder=None,
progress_hook=None,
**kwargs):
self.service = service
self.total_size = total_size
self.chunk_size = chunk_size
self.stream = stream
self.parallel = parallel
# Stream management
self.stream_start = stream.tell() if parallel else None
self.stream_lock = Lock() if parallel else None
# Progress feedback
self.progress_total = 0
self.progress_lock = Lock() if parallel else None
self.progress_hook = progress_hook
# Encryption
self.encryptor = encryptor
self.padder = padder
self.response_headers = None
self.etag = None
self.last_modified = None
self.request_options = kwargs
def get_chunk_streams(self):
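        # Generator yielding (offset, data) chunks read sequentially from the stream,
        # applying padding/encryption to each chunk when encryption is configured.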
index = 0
while True:
data = b""
read_size = self.chunk_size
# Buffer until we either reach the end of the stream or get a whole chunk.
while True:
if self.total_size:
read_size = min(self.chunk_size - len(data), self.total_size - (index + len(data)))
temp = self.stream.read(read_size)
if not isinstance(temp, six.binary_type):
raise TypeError("Blob data should be of type bytes.")
data += temp or b""
# We have read an empty string and so are at the end
# of the buffer or we have read a full chunk.
if temp == b"" or len(data) == self.chunk_size:
break
if len(data) == self.chunk_size:
if self.padder:
data = self.padder.update(data)
if self.encryptor:
data = self.encryptor.update(data)
yield index, data
else:
if self.padder:
data = self.padder.update(data) + self.padder.finalize()
if self.encryptor:
data = self.encryptor.update(data) + self.encryptor.finalize()
if data:
yield index, data
break
index += len(data)
def process_chunk(self, chunk_data):
chunk_bytes = chunk_data[1]
chunk_offset = chunk_data[0]
return self._upload_chunk_with_progress(chunk_offset, chunk_bytes)
def _update_progress(self, length):
if self.progress_lock is not None:
with self.progress_lock:
self.progress_total += length
else:
self.progress_total += length
if self.progress_hook:
self.progress_hook(self.progress_total, self.total_size)
def _upload_chunk(self, chunk_offset, chunk_data):
raise NotImplementedError("Must be implemented by child class.")
def _upload_chunk_with_progress(self, chunk_offset, chunk_data):
range_id = self._upload_chunk(chunk_offset, chunk_data)
self._update_progress(len(chunk_data))
return range_id
def get_substream_blocks(self):
assert self.chunk_size is not None
lock = self.stream_lock
blob_length = self.total_size
if blob_length is None:
blob_length = get_length(self.stream)
if blob_length is None:
raise ValueError("Unable to determine content length of upload data.")
blocks = int(ceil(blob_length / (self.chunk_size * 1.0)))
last_block_size = self.chunk_size if blob_length % self.chunk_size == 0 else blob_length % self.chunk_size
for i in range(blocks):
index = i * self.chunk_size
length = last_block_size if i == blocks - 1 else self.chunk_size
yield index, SubStream(self.stream, index, length, lock)
def process_substream_block(self, block_data):
return self._upload_substream_block_with_progress(block_data[0], block_data[1])
def _upload_substream_block(self, index, block_stream):
raise NotImplementedError("Must be implemented by child class.")
def _upload_substream_block_with_progress(self, index, block_stream):
range_id = self._upload_substream_block(index, block_stream)
self._update_progress(len(block_stream))
return range_id
def set_response_properties(self, resp):
self.etag = resp.etag
self.last_modified = resp.last_modified
class BlockBlobChunkUploader(_ChunkUploader):
def __init__(self, *args, **kwargs):
kwargs.pop("modified_access_conditions", None)
super(BlockBlobChunkUploader, self).__init__(*args, **kwargs)
self.current_length = None
def _upload_chunk(self, chunk_offset, chunk_data):
# TODO: This is incorrect, but works with recording.
index = '{0:032d}'.format(chunk_offset)
block_id = encode_base64(url_quote(encode_base64(index)))
self.service.stage_block(
block_id,
len(chunk_data),
chunk_data,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
return index, block_id
def _upload_substream_block(self, index, block_stream):
try:
block_id = 'BlockId{}'.format("%05d" % (index/self.chunk_size))
self.service.stage_block(
block_id,
len(block_stream),
block_stream,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
finally:
block_stream.close()
return block_id
class PageBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
def _is_chunk_empty(self, chunk_data):
# read until non-zero byte is encountered
# if reached the end without returning, then chunk_data is all 0's
return not any(bytearray(chunk_data))
def _upload_chunk(self, chunk_offset, chunk_data):
# avoid uploading the empty pages
if not self._is_chunk_empty(chunk_data):
chunk_end = chunk_offset + len(chunk_data) - 1
content_range = "bytes={0}-{1}".format(chunk_offset, chunk_end)
computed_md5 = None
self.response_headers = self.service.upload_pages(
body=chunk_data,
content_length=len(chunk_data),
transactional_content_md5=computed_md5,
range=content_range,
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
if not self.parallel and self.request_options.get('modified_access_conditions'):
self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
def _upload_substream_block(self, index, block_stream):
pass
class AppendBlobChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
def __init__(self, *args, **kwargs):
super(AppendBlobChunkUploader, self).__init__(*args, **kwargs)
self.current_length = None
def _upload_chunk(self, chunk_offset, chunk_data):
if self.current_length is None:
self.response_headers = self.service.append_block(
body=chunk_data,
content_length=len(chunk_data),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
self.current_length = int(self.response_headers["blob_append_offset"])
else:
self.request_options['append_position_access_conditions'].append_position = \
self.current_length + chunk_offset
self.response_headers = self.service.append_block(
body=chunk_data,
content_length=len(chunk_data),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
def _upload_substream_block(self, index, block_stream):
pass
class DataLakeFileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
def _upload_chunk(self, chunk_offset, chunk_data):
# avoid uploading the empty pages
self.response_headers = self.service.append_data(
body=chunk_data,
position=chunk_offset,
content_length=len(chunk_data),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
if not self.parallel and self.request_options.get('modified_access_conditions'):
self.request_options['modified_access_conditions'].if_match = self.response_headers['etag']
def _upload_substream_block(self, index, block_stream):
try:
self.service.append_data(
body=block_stream,
position=index,
content_length=len(block_stream),
cls=return_response_headers,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
finally:
block_stream.close()
class FileChunkUploader(_ChunkUploader): # pylint: disable=abstract-method
def _upload_chunk(self, chunk_offset, chunk_data):
length = len(chunk_data)
chunk_end = chunk_offset + length - 1
response = self.service.upload_range(
chunk_data,
chunk_offset,
length,
data_stream_total=self.total_size,
upload_stream_current=self.progress_total,
**self.request_options
)
return 'bytes={0}-{1}'.format(chunk_offset, chunk_end), response
# TODO: Implement this method.
def _upload_substream_block(self, index, block_stream):
pass
class SubStream(IOBase):
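    # Read-only view over a [begin, begin + length) slice of a shared seekable stream,
    # with its own local buffer; the lock serializes seek/read when uploads run in parallel.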
def __init__(self, wrapped_stream, stream_begin_index, length, lockObj):
# Python 2.7: file-like objects created with open() typically support seek(), but are not
# derivations of io.IOBase and thus do not implement seekable().
# Python > 3.0: file-like objects created with open() are derived from io.IOBase.
try:
            # only the main thread runs this, so there's no need to grab the lock
wrapped_stream.seek(0, SEEK_CUR)
except:
raise ValueError("Wrapped stream must support seek().")
self._lock = lockObj
self._wrapped_stream = wrapped_stream
self._position = 0
self._stream_begin_index = stream_begin_index
self._length = length
self._buffer = BytesIO()
# we must avoid buffering more than necessary, and also not use up too much memory
# so the max buffer size is capped at 4MB
self._max_buffer_size = (
length if length < _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE else _LARGE_BLOB_UPLOAD_MAX_READ_BUFFER_SIZE
)
self._current_buffer_start = 0
self._current_buffer_size = 0
super(SubStream, self).__init__()
def __len__(self):
return self._length
def close(self):
if self._buffer:
self._buffer.close()
self._wrapped_stream = None
IOBase.close(self)
def fileno(self):
return self._wrapped_stream.fileno()
def flush(self):
pass
def read(self, size=None):
if self.closed: # pylint: disable=using-constant-test
raise ValueError("Stream is closed.")
if size is None:
size = self._length - self._position
# adjust if out of bounds
if size + self._position >= self._length:
size = self._length - self._position
# return fast
if size == 0 or self._buffer.closed:
return b""
# attempt first read from the read buffer and update position
read_buffer = self._buffer.read(size)
bytes_read = len(read_buffer)
bytes_remaining = size - bytes_read
self._position += bytes_read
# repopulate the read buffer from the underlying stream to fulfill the request
# ensure the seek and read operations are done atomically (only if a lock is provided)
if bytes_remaining > 0:
with self._buffer:
# either read in the max buffer size specified on the class
# or read in just enough data for the current block/sub stream
current_max_buffer_size = min(self._max_buffer_size, self._length - self._position)
# lock is only defined if max_concurrency > 1 (parallel uploads)
if self._lock:
with self._lock:
# reposition the underlying stream to match the start of the data to read
absolute_position = self._stream_begin_index + self._position
self._wrapped_stream.seek(absolute_position, SEEK_SET)
# If we can't seek to the right location, our read will be corrupted so fail fast.
if self._wrapped_stream.tell() != absolute_position:
raise IOError("Stream failed to seek to the desired location.")
buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
else:
absolute_position = self._stream_begin_index + self._position
# It's possible that there's connection problem during data transfer,
# so when we retry we don't want to read from current position of wrapped stream,
# instead we should seek to where we want to read from.
if self._wrapped_stream.tell() != absolute_position:
self._wrapped_stream.seek(absolute_position, SEEK_SET)
buffer_from_stream = self._wrapped_stream.read(current_max_buffer_size)
if buffer_from_stream:
# update the buffer with new data from the wrapped stream
# we need to note down the start position and size of the buffer, in case seek is performed later
self._buffer = BytesIO(buffer_from_stream)
self._current_buffer_start = self._position
self._current_buffer_size = len(buffer_from_stream)
# read the remaining bytes from the new buffer and update position
second_read_buffer = self._buffer.read(bytes_remaining)
read_buffer += second_read_buffer
self._position += len(second_read_buffer)
return read_buffer
def readable(self):
return True
def readinto(self, b):
raise UnsupportedOperation
def seek(self, offset, whence=0):
if whence is SEEK_SET:
start_index = 0
elif whence is SEEK_CUR:
start_index = self._position
elif whence is SEEK_END:
start_index = self._length
offset = -offset
else:
raise ValueError("Invalid argument for the 'whence' parameter.")
pos = start_index + offset
if pos > self._length:
pos = self._length
elif pos < 0:
pos = 0
# check if buffer is still valid
# if not, drop buffer
if pos < self._current_buffer_start or pos >= self._current_buffer_start + self._current_buffer_size:
self._buffer.close()
self._buffer = BytesIO()
else: # if yes seek to correct position
delta = pos - self._current_buffer_start
self._buffer.seek(delta, SEEK_SET)
self._position = pos
return pos
def seekable(self):
return True
def tell(self):
return self._position
def write(self):
raise UnsupportedOperation
def writelines(self):
raise UnsupportedOperation
def writeable(self):
return False
class IterStreamer(object):
"""
File-like streaming iterator.
"""
def __init__(self, generator, encoding="UTF-8"):
self.generator = generator
self.iterator = iter(generator)
self.leftover = b""
self.encoding = encoding
def __len__(self):
return self.generator.__len__()
def __iter__(self):
return self.iterator
def seekable(self):
return False
def __next__(self):
return next(self.iterator)
next = __next__ # Python 2 compatibility.
def tell(self, *args, **kwargs):
raise UnsupportedOperation("Data generator does not support tell.")
def seek(self, *args, **kwargs):
raise UnsupportedOperation("Data generator is unseekable.")
def read(self, size):
data = self.leftover
count = len(self.leftover)
try:
while count < size:
chunk = self.__next__()
if isinstance(chunk, six.text_type):
chunk = chunk.encode(self.encoding)
data += chunk
count += len(chunk)
# This means count < size and what's leftover will be returned in this call.
except StopIteration:
self.leftover = b""
if count >= size:
self.leftover = data[size:]
return data[:size]
f70d082602ad18b88dc46b1a347ca8e1149f97e1 | 1,754 | py | Python | src/main/py/com/example/utils/commons.py | brijeshdhaker/spark-python-examples | ["ECL-2.0", "Apache-2.0"]
#
#
#
import re
import random
import time
COMMA_DELIMITER_1 = ',(?=([^"]*"[^"]*")*[^"]*$)'
COMMA_DELIMITER_2 = ',(?=([^"\\]*"\\[^"\\]*"\\)*[^"\\]*$)'
#
#
def print_separator():
print(" " * 30)
print(" #" * 30)
print(" " * 30)
#
# line2 = '1;"Goroka";"Goroka";"Papua New Guinea";"GKA";"AYGA";-6.081689;145.391881;5282;10;"U";"Pacific/Port_Moresby"'
# records = commons.split_csv(";", line2)
# print(float(records[6]) > 40)
#
#
def split_csv(d, x):
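    # Note: if the delimiter pattern contains capture groups (as the COMMA_DELIMITER
    # patterns above do), re.split also returns the captured text between the fields.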
splits = re.split(r"{}".format(d), x)
return splits
#
# line = '1,"Goroka","Goroka","Papua New Guinea","GKA","AYGA",-6.081689,145.391881,5282,10,"U","Pacific/Port_Moresby"'
# cols = commons.split_csv_line(line)
#
def split_csv_line(line):
cols = re.split(r",(?![^(]*?\))\s*", line)
return cols
def str_time_prop(start, end, time_format, prop):
"""Get a time at a proportion of a range of two formatted times.
start and end should be strings specifying times formatted in the
given format (strftime-style), giving an interval [start, end].
    prop specifies the proportion of the interval to be taken after
    start. The returned time will be in the specified format.
"""
stime = time.mktime(time.strptime(start, time_format))
etime = time.mktime(time.strptime(end, time_format))
ptime = stime + prop * (etime - stime)
return time.strftime(time_format, time.localtime(ptime))
def random_date(start, end, prop):
return str_time_prop(start, end, '%m/%d/%Y %I:%M %p', prop)
# We can test function by calling it.
if __name__ == "__main__":
line = '1,"Goroka","Goroka","Papua New Guinea","GKA","AYGA",-6.081689,145.391881,5282,10,"U","Pacific/Port_Moresby"'
    cols = split_csv_line(line)
    print(cols)
    records = split_csv(",", line)
    print(records)
f70d1d8f1773a6a94c07b1b5735441fec456b1d2 | 1,840 | py | Python | tests/endtoend/dependency_functions/report_dependencies/__init__.py | anandagopal6/azure-functions-python-worker | ["MIT"]
import sys
import os
import json
import azure.functions as func
import google.protobuf as proto
import grpc
# Load dependency manager from customer's context
from azure_functions_worker.utils.dependency import DependencyManager as dm
def main(req: func.HttpRequest) -> func.HttpResponse:
"""This function is an HttpTrigger to check if the modules are loaded from
    customer's dependencies. We have mocked a .python_packages/ folder in
    this e2e test function app which contains the following stub packages:
azure.functions==1.2.1
protobuf==3.9.0
grpc==1.35.0
If the version we check is the same as the one in local .python_packages/,
that means the isolate worker dependencies are working as expected.
"""
result = {
"sys.path": list(sys.path),
"dependency_manager": {
"cx_deps_path": dm._get_cx_deps_path(),
"cx_working_dir": dm._get_cx_working_dir(),
"worker_deps_path": dm._get_worker_deps_path(),
},
"libraries": {
"func.expected.version": "1.2.1",
"func.version": func.__version__,
"func.file": func.__file__,
"proto.expected.version": "3.9.0",
"proto.version": proto.__version__,
"proto.file": proto.__file__,
"grpc.expected.version": "1.35.0",
"grpc.version": grpc.__version__,
"grpc.file": grpc.__file__,
},
"environments": {
"PYTHON_ISOLATE_WORKER_DEPENDENCIES": (
os.getenv('PYTHON_ISOLATE_WORKER_DEPENDENCIES')
),
"AzureWebJobsScriptRoot": os.getenv('AzureWebJobsScriptRoot'),
"PYTHONPATH": os.getenv('PYTHONPATH'),
"HOST_VERSION": os.getenv('HOST_VERSION')
}
}
return func.HttpResponse(json.dumps(result))
f70d2f11b4758a9155d2ff127345b99d02d4e582 | 4,519 | py | Python | tensormonk/layers/routingcapsule.py | Tensor46/TensorMONK | ["MIT"]
""" TensorMONK :: layers :: RoutingCapsule """
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
from ..activations import Activations
class RoutingCapsule(nn.Module):
r""" Routing capsule from Dynamic Routing Between Capsules.
Implemented -- https://arxiv.org/pdf/1710.09829.pdf
Args:
tensor_size: 5D shape of tensor from PrimaryCapsule
(None/any integer >0, capsule_length, height, width, n_capsules)
n_capsules (int, required): number of capsules, usually, number of
labels per paper
capsule_length (int, required): length of capsules
iterations (int, required): routing iterations, default = 3
Return:
3D torch.Tensor of shape
(None/any integer >0, n_capsules, capsule_length)
"""
def __init__(self,
tensor_size,
n_capsules: int = 10,
capsule_length: int = 32,
iterations: int = 3,
*args, **kwargs):
super(RoutingCapsule, self).__init__()
self.iterations = iterations
# Ex from paper
# For tensor_size=(1,32,6,6,8), n_capsules=10 and capsule_length=16
# weight_size = (tensor_size[1]*tensor_size[2]*tensor_size[3], \
# tensor_size[4], n_capsules*capsule_length)
# = (32*6*6, 8 , 10*16)
weight_size = (int(np.prod(tensor_size[1:-1])), tensor_size[-1],
n_capsules*capsule_length)
self.weight = nn.Parameter(torch.randn(*weight_size).normal_(0., 0.1))
self.activation = Activations((None, int(np.prod(tensor_size[1:-1])),
tensor_size[-1]), "squash")
self.tensor_size = (6, n_capsules, capsule_length)
def forward(self, tensor):
batch_size, primary_capsule_length, h, w, n_primary_capsules = \
tensor.size()
# Initial squash
tensor = tensor.view(batch_size, -1, n_primary_capsules)
tensor = self.activation(tensor)
# from the given example:
# tensor is of size _ x 32 x 6 x 6 x 8
# after matrix mulitplication the size of u is _x32x6x6x10x16
# essentially, each of the pixel from 8 primary capsules is project
# to a dimension of n_capsules x capsule_length
u = tensor.view(batch_size, -1, 1,
n_primary_capsules).matmul(self.weight)
u = u.view(*((batch_size, primary_capsule_length, h, w) +
self.tensor_size[1:]))
bias = torch.zeros(batch_size, primary_capsule_length, h, w,
self.tensor_size[1])
if tensor.is_cuda:
bias = bias.to(tensor.device)
# routing
for i in range(self.iterations):
# softmax
# initial softmax gives equal probabilities (since bias is
# initialized with zeros), eventually, bias updates will change
# the probabilities
c = F.softmax(bias, 4) # size = _ x 32 x 6 x 6 x 10
# could be done with a single sum after reorganizing the tensor's,
# however, retaining dimensions can explain better
# s size without sum's = _ x 32 x 6 x 6 x 10 x 16
# s size = _ x 10 x 16
s = (c.unsqueeze(5)*u).sum(3).sum(2).sum(1)
# squash -- v size = _ x 10 x 16
v = self.activation(s)
# bias update -- size = _ x 32 x 6 x 6 x 10
if i < self.iterations-1:
bias = bias + (u * v.view(batch_size, 1, 1, 1,
self.tensor_size[1],
self.tensor_size[2])).sum(5)
return v
def flops(self):
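        # Rough FLOP count for one forward pass, covering the squash activations,
        # the projection matmul and all routing iterations.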
# activations
flops = self.activation.flops() * (1 + self.iterations)
# matmul
flops += np.prod(self.weight.shape) * self.weight.shape[1]
# softmax
flops += (self.weight.shape[0] * self.tensor_size[1] * 3) * \
self.iterations
# s computation
flops += (self.weight.shape[0] * (self.weight.shape[2] + 1)) * \
self.iterations
# bias update _x32x6x6x10x16
flops += self.weight.shape[0] * (self.weight.shape[2] + 2)
return flops
# from tensormonk.activations import Activations
# x = torch.rand(3, 32, 10, 10, 8)
# test = RoutingCapsule((3, 32, 10, 10, 8), 10, 16, 3,)
# test(x).size()
# test.flops()
f70d49016206483cc360ec860091db714fdee31c | 47,779 | py | Python | plotwrf.py | ksopan/Plotting_WRF_NetCDF | ["MIT"]
"""
Sopan Kurkute
University of Saskatchewan
plotwrf.py
Python 2.x
Python script to plot various WRF model output. Plots are saved as PNG.
example usage: plotwrf.py --infile filename.nc --sfc --tunit C --ppn -punit mm --td
Will plot surface chart and dewpoint in Celcius and precipitation in mm.
Use plotwrf.py --help to list all options
Last modified: 05/05/16
Skew-T plotting with the pyMeteo package available at: https://github.com/cwebster2/pyMeteo
Credit to Casey Webster
Skew-t plotting with SHARPpy package available at: https://github.com/sharppy/SHARPpy
Credit to: Patrick Marsh (SPC), Kelton Halbert (OU School of Meteorology), Greg Blumberg (OU/CIMMS), Tim Supinie (OU School of Meteorology)
"""
import matplotlib
#matplotlib.use('Agg') # UNCOMMENT THIS ONLY WHEN INVOKING FROM CRON SCRIPT
from scipy.io import netcdf # USE SCIPY MODULE
#from netCDF4 import Dataset # UNCOMMENT TO USE NETCDF 4 MODULE
from mpl_toolkits.basemap import Basemap
from matplotlib import cm
import matplotlib.pyplot as plt
from scipy.ndimage.filters import gaussian_filter
import numpy as np
import datetime
from optparse import OptionParser
import os.path
import sys
import conversions as conv
import calc_vars as calc
import plot_funcs as pltfuncs
import funcs
import colormaps as cmap
# option parser
usage="usage: %prog [options] \n example usage: plotwrf.py --infile filename.nc --sfc --tunit C --td --ppn --punit mm"
parser = OptionParser(usage=usage, version="%prog 6.0 by Sopan Kurkute")
parser.add_option("--sfc", dest="sfc",action="store_true",help="Plot surface chart with 2m temp, wind barbs and MSLP")
parser.add_option("--t2", dest="t2", action="store_true", help="Plot 2m temp and wind barbs only")
parser.add_option("--mslp", dest="mslp", action="store_true", help="Plot MSLP only")
parser.add_option("--ppnaccum", dest="ppnaccum", action="store_true", help="Plot total accumulated precipitation")
parser.add_option("--ppn", dest="ppn", action="store_true", help="Plot total precipitation")
parser.add_option("--convppn", dest="convppn", action="store_true", help="Plot convective precipitation")
parser.add_option("--td", dest="td", action="store_true", help="Plot 2m dew point temperature")
parser.add_option("--rh", dest="rh", action="store_true", help="Plot relative humidity")
parser.add_option("--snow", dest="snow", action="store_true", help="Plot snow accumulation")
parser.add_option("--hail", dest="hail", action="store_true", help="Plot hail accumulaton")
parser.add_option("--simdbz", dest="simdbz", action="store_true", help="Plot simulated reflectivity")
parser.add_option("--compdbz", dest="compdbz", action="store_true", help="Plot composite reflectivity")
parser.add_option("--lcl", dest="lcl", action="store_true", help="Plot LCL (lifted condensation level)")
parser.add_option("--thetae", dest="thetae", action="store_true", help="Plot Theta-e (equivalent potential temperature)")
parser.add_option("--ua", dest="ua", action="store_true", help="Plot geopotential height, temperature and wind barbs at given pressure levels (hPa), --lvl")
parser.add_option("--lvl", dest="lvl", help="Pressure levels to interpolate to for upper level charts option --ua, --vv. Comma seperated e.g 250,500", default="500")
parser.add_option("--run", dest="run", type="string", help="Model initialisation time", default="00")
parser.add_option("--indir", dest="indir", type="string", help="Directory of the NetCDF file", default="")
parser.add_option("--outdir", dest="outdir", type="string", help="Directory to save plots too", default="")
parser.add_option("--infile", dest="infile", type="string", help="NetCDF filename", default="")
parser.add_option("--thin", dest="thin", type="int", help="Thinning factor for wind barbs", default=5)
parser.add_option("--tunit", dest="tunit", type="string", help="Unit of temperature (C or F)", default="C")
parser.add_option("--punit", dest="punit", type="string", help="Unit of precipitation (mm or inches)", default="mm")
parser.add_option("--save", dest="save", action="store_true", help="Save plots as png files")
parser.add_option("--v", dest="verbose", action="store_true", help="Enable verbose")
parser.add_option("--auto", dest="auto", action="store_true", help="Enable auto file input for daily WRF runs")
parser.add_option("--barbsize", dest="barbsize", type="int", help="Set the length of the wind barbs", default=7)
parser.add_option("--75lr", dest="lr75", action="store_true", help="Plot the H7-H5 lapse rates")
parser.add_option("--vort500", dest="vort500", action="store_true", help="Plot the 500mb absolute vorticity")
parser.add_option("--shear06", dest="shear06", action="store_true", help="Plot the 0-6km shear")
parser.add_option("--vv", dest="vv", action="store_true", help="Plot vertical velocity at specified levels --lvl")
parser.add_option("--irtemp", dest="irtemp", action="store_true", help="Plot IR Brightness Temperature")
parser.add_option("--skewt", dest="skewt", action="store_true", help="Plot Skew-t for a location. Uses pyMeteo package.")
parser.add_option("--slat", dest="slat", type="int", help="Latitude for Skew-t")
parser.add_option("--slon", dest="slon", type="int", help="Longitude for Skew-t")
parser.add_option("--getij", dest="getij", action="store_true", help="Get i,j and nearest Lat/Lon for entered Lat/Lon")
parser.add_option("--skewt2", dest="skewt2", action="store_true", help="Plot Skew-t for a location using SHARPpy")
parser.add_option("--uh25", dest="uh25", action="store_true", help="Plot 2-5km Updraft Helicity")
(opt, arg) = parser.parse_args()
indir = opt.indir # dir of input file
filein = opt.infile
if opt.auto: # for auto file input for daily runs
run = opt.run # model init time
filein = 'wrfout_d01_'+datetime.datetime.utcnow().strftime('%Y-%m-%d')+'_'+run+':00:00' # auto filename for current days run
while os.path.isfile(indir+filein) is False and not opt.auto: #if file doesnt exist get filename
print "File", filein, "not found! in directory:", indir
indir = raw_input("Please enter a directory (blank for current dir): ")
filein = raw_input("Please enter a filename: ")
try: #check if file exists and read in
print "Reading in file: ", indir+filein
#nc = Dataset(indir+filein) # for netcdf 4
nc = netcdf.netcdf_file(indir+filein,'r') # for scipy
except: # quit if cant read file
print "Something went wrong reading in the file"
print "QUITTING"
sys.exit()
outdir = opt.outdir # output image dir
## BASEMAP STUFF
#thin factor for wind barbs
thin = opt.thin
#get lats and lons for map projection
cen_lat = float(nc.CEN_LAT)
cen_lon = float(nc.CEN_LON)
truelat1 = float(nc.TRUELAT1)
truelat2 = float(nc.TRUELAT2)
standlon = float(nc.STAND_LON)
xlat = nc.variables['XLAT']
xlong = nc.variables['XLONG']
map_proj = int(nc.MAP_PROJ)
# dimensions of domain
x_dim = len(xlat[0,0,:])
y_dim = len(xlong[0,:,0])
# Get dx and dy. Grid size
dx = float(nc.DX)
dy = float(nc.DY)
#calculate plot width and height from grid size * dimension. Domain size
width_meters = dx * (x_dim - 1)
height_meters = dy * (y_dim - 1)
# Define gridlines
parallels = np.arange(-90,90,10)
meridians = np.arange(0,360,10)
# find projection and create map. Only LCC tested.
if map_proj == 1: #lambert conformal.
proj = 'lcc'
projname = 'Lambert Conformal'
elif map_proj == 2: # polar stereographic
proj = 'npstere'
projname = 'Polar Stereographic'
elif map_proj == 3: # mercator
proj = 'merc'
projname = 'Mercator'
else: # not supported and quit
print "Projection ", map_proj, "unknown"
print "QUITTING"
sys.exit()
# make map
m = Basemap(resolution='i',projection=proj,width=width_meters,height=height_meters,lat_0=cen_lat,lon_0=cen_lon,lat_1=truelat1,lat_2=truelat2)
#m = Basemap(resolution='i',projection=proj,llcrnrlon=xlong[0,0,0],llcrnrlat=xlat[0,0,0],urcrnrlon=xlong[0,-1,-1],urcrnrlat=xlat[0,-1,-1],lat_0=cen_lat,lon_0=cen_lon)
#x, y = m(xlong[0,:,:],xlat[0,:,:])
# get lat/lons of ny by nx evenly space grid
# make lons, lats and x, y co ordinates
lons, lats = m.makegrid(x_dim, y_dim)
x, y = m(lons, lats) # compute map proj coordinates.
print "Using map projection: ", projname
## GET THIS DATA FOR NOW
times = nc.variables['Times'] #each time output in wrf nc file
t2 = nc.variables['T2'] #temp at 2m / Kelvin
u10 = nc.variables['U10'] #u10 wind / ms/s
v10 = nc.variables['V10'] #v10 wind / ms/s
psfc = nc.variables['PSFC'] #surface pressure / Pascals
rainc = nc.variables['RAINC'] # accumulated total cumulus precip
rainnc = nc.variables['RAINNC'] # accumulated total grid scale precip
thgt = nc.variables['HGT'] #terrain height
# general info
init = str(''.join(times[0])).replace('_',' ') # model init time
alltimes = [] #list to hold all times
### BEGIN PLOT FUNCTIONS ###
# savefile and makeplot and the functions for putting data on maps may stay here for now #
def savefile(filename): #save plot image as png
print "Saving file: ", filename
#print filename
plt.savefig(outdir+filename)
def makeplot(data,title,cblabel,clevs,cbticks,ftitle): # function to make plots
fig = plt.gcf() #get current fig
ax = plt.gca() #get current axis
#ax = fig.add_axes([0.1,0.1,0.8,0.8])
# draw parallels and meridians
m.drawparallels(parallels,labels=[1,0,0,0],fontsize=10)
m.drawmeridians(meridians,labels=[0,0,0,1],fontsize=10)
# draw coastlines, state and country boundaries
m.drawcoastlines()
m.drawstates()
m.drawcountries()
# set plot title
#ax.set_title(title+currtime)
ax.text(0,1.01*height_meters,title+'\nValid:'+currtime,fontsize=14)
ax.text(0.65*width_meters,1.01*height_meters,'Init: '+init, fontsize=12)
#fig.suptitle('Init: '+init+'', fontsize=12) #init title
if clevs is False:
# No color bar
pass
else: #create color bar
cbar = m.colorbar(data,location='bottom',pad="5%")
cbar.set_label(cblabel)
if cbticks:
cbar.set_ticks(clevs)
cbar.ax.tick_params(labelsize=8)
if opt.save:
#create filename for image and save file
filename = ftitle+filetime+'.png'
#filename = ftitle+str(time)+'.png' #save file with number instead of date and time
savefile(filename) #save image file
else:
plt.show()
def t2wind(): # plot t2 and wind barbs
# create figure
plt.figure(figsize=(8,8))
temps = t2[time] # temps in K
if opt.tunit == 'F':
t2f = conv.k_to_f(temps) # convert to F
clevs = np.arange(-30,115,5) # levels / degF
cs = m.contourf(x,y,t2f,clevs,cmap=cm.get_cmap('gist_ncar'))
elif opt.tunit == 'C':
t2c = conv.k_to_c(temps) # convert to C
clevs = np.arange(-40,55,5) # levels / degC
cs = m.contourf(x,y,t2c,clevs,cmap=cm.get_cmap('gist_ncar'))
#make x and y grid points for barbs
#yy = np.arange(0, len(y), 8)
#xx = np.arange(0, len(x), 8)
#gp = np.meshgrid(yy, xx)
#print x[::thin,::thin].shape #check x co-ord thinning
#print u[time,::thin,::thin].shape #check u10 thinning
#x_th,y_th = m(xlong[0,::thin,::thin],xlat[0,::thin,::thin]) #another method to thin barbs
#convert wind to kts
u10kts = conv.ms_to_kts(u10[time])
v10kts = conv.ms_to_kts(v10[time])
m.barbs(x[::thin,::thin], y[::thin,::thin], u10kts[::thin,::thin], v10kts[::thin,::thin],length=opt.barbsize) #plot barbs
title = "2m Temperature and Wind Barbs (kts)"
ftitle = "t2-wind-"
if opt.tunit == 'C':
cblabel = r'$\degree$C'
elif opt.tunit == 'F':
cblabel = r'$\degree$F'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def mslponly(): # plot MSLP only
#create figure
plt.figure(figsize=(8,8))
x, y = m(lons, lats)
psfchpa = conv.pa_to_hpa(psfc[time]) #convert Pa to hPa
mslp = calc.calc_mslp(psfchpa, thgt[0], t2[time]) # get mslp
mslp = gaussian_filter(mslp, sigma=3) #smooth wiggles
#find local min and local max
local_min, local_max = funcs.extrema(mslp, mode='wrap', window=50)
clevs = np.arange(900,1055,2.)
cs = m.contour(x,y,mslp,clevs,colors='k',linewidths=2.)
plt.clabel(cs, inline=True, fmt='%1.0f', fontsize=12, colors='k')
xlows = x[local_min]; xhighs = x[local_max]
ylows = y[local_min]; yhighs = y[local_max]
lowvals = mslp[local_min]; highvals = mslp[local_max]
# plot lows as blue L's, with min pressure value underneath.
xyplotted = []
# don't plot if there is already a L or H within dmin meters.
yoffset = 0.022*(m.ymax-m.ymin)
dmin = yoffset
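    # De-duplication sketch with a made-up domain: if m.ymax - m.ymin = 2.0e6 m, then
    # yoffset = dmin = 44 km, so any L/H centre within 44 km of one already drawn is
    # skipped by the min(dist) > dmin test below.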
for x,y,p in zip(xlows, ylows, lowvals):
if x < m.xmax and x > m.xmin and y < m.ymax and y > m.ymin:
dist = [np.sqrt((x-x0)**2+(y-y0)**2) for x0,y0 in xyplotted]
if not dist or min(dist) > dmin:
plt.text(x,y,'L',fontsize=14,fontweight='bold', ha='center',va='center',color='b')
plt.text(x,y-yoffset,repr(int(p)),fontsize=12, ha='center',va='top',color='b', bbox = dict(boxstyle="square",ec='None',fc=(1,1,1,0.5)))
xyplotted.append((x,y))
# plot highs as red H's, with max pressure value underneath.
xyplotted = []
for x,y,p in zip(xhighs, yhighs, highvals):
if x < m.xmax and x > m.xmin and y < m.ymax and y > m.ymin:
dist = [np.sqrt((x-x0)**2+(y-y0)**2) for x0,y0 in xyplotted]
if not dist or min(dist) > dmin:
plt.text(x,y,'H',fontsize=14,fontweight='bold', ha='center',va='center',color='r')
plt.text(x,y-yoffset,repr(int(p)),fontsize=12, ha='center',va='top',color='r', bbox = dict(boxstyle="square",ec='None',fc=(1,1,1,0.5)))
xyplotted.append((x,y))
title = "MSLP (hPa)"
ftitle = 'mslp-'
cblabel = ''
clevs = False # no color bar levels
cbticks = False
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def precipaccum(): # plot total precip accumulation
# create figure
plt.figure(figsize=(8,8))
ppn = rainc[time]+rainnc[time] #ppn / mm
if opt.punit == 'mm':
clevs = [0.1,0.5,1,2,5,10,15,20,30,40,50,80,100,200,300,500] #levels / mm
elif opt.punit == 'in':
clevs = [0.01, 0.1, 0.25, 0.50, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, \
6.0, 8.0, 10., 20.0] # levels / in
ppn = conv.mm_to_in(ppn) # convert ppn to inches
norm = matplotlib.colors.BoundaryNorm(clevs, 15) # set boundary of data by normalizing (0,1)
cs = m.contourf(x,y,ppn,clevs,norm=norm,cmap=cmap.precip_colormap) #plot total
title = "Precipitation Accumulation"
ftitle = 'ppnaccum-'
if opt.punit == 'mm':
cblabel = 'mm'
elif opt.punit == 'in':
cblabel = 'inches'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def precip(): # plot current precip at each time
# create figure
plt.figure(figsize=(8,8))
ppn = rainc[time]+rainnc[time] # total ppn / mm
currppn = np.array(ppn.shape)
if time == 0: # initial amount
currppn = ppn
else: # current amount
prev = rainc[time-1]+rainnc[time-1]
currppn = ppn-prev
if opt.punit == 'mm':
clevs = [0.1,0.5,1,2,5,10,15,20,30,40,50,80,100,200,300,500] #levels / mm
elif opt.punit == 'in':
clevs = [0.01, 0.1, 0.25, 0.50, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, \
6.0, 8.0, 10., 20.0] # levels / in
currppn = conv.mm_to_in(currppn) # convert ppn to inches
norm = matplotlib.colors.BoundaryNorm(clevs, 15) # set boundary of data by normalizing (0,1)
cs = m.contourf(x,y,currppn,clevs,norm=norm,cmap=cmap.precip_colormap) #plot total
title = "Precipitation"
ftitle = 'ppn-'
if opt.punit == 'mm':
cblabel = 'mm'
elif opt.punit == 'in':
cblabel = 'inches'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def convprecip(): # plot current convective precip at each time
# create figure
plt.figure(figsize=(8,8))
convppn = rainc[time] #ppn / mm
currppn = np.array(convppn.shape)
if time == 0:
currppn = convppn
else:
prev = rainc[time-1]
currppn = convppn-prev
if opt.punit == 'mm':
clevs = [0.1,0.5,1,2,5,10,15,20,30,40,50,80,100,200,300,500] #levels / mm
elif opt.punit == 'in':
clevs = [0.01, 0.1, 0.25, 0.50, 0.75, 1.0, 1.5, 2.0, 2.5, 3.0, 4.0, 5.0, \
6.0, 8.0, 10., 20.0] # levels / in
currppn = conv.mm_to_in(currppn) # convert ppn to inches
norm = matplotlib.colors.BoundaryNorm(clevs, 15) # set boundary of data by normalizing (0,1)
cs = m.contourf(x,y,currppn,clevs,norm=norm,cmap=cmap.precip_colormap) #plot total
title = "Convective Precipitation"
ftitle = 'convppn-'
if opt.punit == 'mm':
cblabel = 'mm'
elif opt.punit == 'in':
cblabel = 'inches'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def tdrh(): # plot td and rh
# create figure
plt.figure(figsize=(8,8))
q2 = nc.variables['Q2'][time] # water vapour mixing ratio at 2m
    t2c = conv.k_to_c(t2[time]) #convert temp to celsius
    psfchpa = conv.pa_to_hpa(psfc[time]) # pres to hPa
    es = calc.calc_es(t2c) # calc es (t2c is already the 2D field at this time, cf. lclhgt())
ws = calc.calc_ws(es, psfchpa) # calc ws
u10kts = conv.ms_to_kts(u10[time])
v10kts = conv.ms_to_kts(v10[time])
if opt.rh:
rh = calc.calc_rh(q2, ws) #calc rh
clevs = np.arange(0,105,5)
cs = m.contourf(x,y,rh,clevs,cmap=cm.get_cmap('jet')) #plot RH
cblabel='RH \ %'
title = "Relative Humidity \n Valid: "
ftitle = 'rh-'
cbticks = True
elif opt.td:
rh = calc.calc_rh(q2, ws) # calc rh
td = calc.calc_dewpoint(es, rh) # calc td (deg C)
title = "2m Dew Point"
ftitle = 'td-'
if opt.tunit == 'C':
clevs = np.arange(-30,65,5) # levels / degC
cblabel = r'$\degree$C'
elif opt.tunit == 'F':
clevs = np.arange(-20,125,5) # levels / degF
            td = conv.c_to_f(td) #convert celsius to fahrenheit
cblabel = r'$\degree$F'
cs = m.contourf(x,y,td,clevs,cmap=cm.get_cmap('gist_ncar')) #plot Td
m.barbs(x[::thin,::thin], y[::thin,::thin], u10kts[::thin,::thin], v10kts[::thin,::thin],length=opt.barbsize) #plot barbs
cbticks=True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def upperair(): # plot upper air chart for given level. geopotential height, wind bards and temp
pb = nc.variables['PB'][time] #base state pressure, Pa
p = nc.variables['P'][time] # perturbation pressure, Pa
totalp = pb + p # total pressure in Pa
U = nc.variables['U'][time] # U wind component
V = nc.variables['V'][time] # V wind component
Unew = funcs.unstagger(U,'U') # unstagger u
Vnew = funcs.unstagger(V,'V') # unstagger v
ph = nc.variables['PH'][time] #perturbation geopotential
phb = nc.variables['PHB'][time] #base state geopotential
totalgp = phb + ph # total geopotential
totalgp = funcs.unstagger(totalgp,'Z') #total geopotential unstaggered
theta = nc.variables['T'][time] #perturbation potential temperature (theta-t0)
theta0 = nc.variables['T00'][0] #base state theta
totalTheta = theta + theta0 # total potential temp
totalT = conv.k_to_c(calc.theta_to_temp(totalTheta, totalp)) # calc temps in C
levels = opt.lvl.split(',') # get list of levels
for level in levels:
plt.figure(figsize=(8,8)) #create fig for each plot
level = int(level) # make it int
#interp data for level
gphgt = funcs.linear_interp(totalgp,totalp,level)
totalTfinal = funcs.linear_interp(totalT,totalp,level)
uinterp = funcs.linear_interp(Unew,totalp,level)
vinterp = funcs.linear_interp(Vnew,totalp,level)
Ufinal = conv.ms_to_kts(uinterp) #convert to kts
Vfinal = conv.ms_to_kts(vinterp)
#speed = calc.calc_wspeed(Ufinal, Vfinal)
gphgt = conv.gphgt_to_hgt(gphgt) # convert to height (m)
gphgt = gaussian_filter(gphgt, sigma=3) # smooth wiggles
totalTfinal = gaussian_filter(totalTfinal, sigma=2)
# set gpheight levels for common pressure levels
if level == 250:
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),60)
elif level == 500:
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),60)
elif level == 700:
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),30)
elif level == 850:
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),30)
elif level == 925:
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),30)
else: # use generic 30m spacing
gpclevs = np.arange(np.min(gphgt),np.max(gphgt),30)
#plot all this up
cs = m.contour(x,y,gphgt,gpclevs,colors='k',linewidths=2.)
plt.clabel(cs, inline=True, fmt='%1.0f', fontsize=12, colors='k')
tclevs = np.arange(np.min(totalTfinal),np.max(totalTfinal),4)
cs2 = m.contour(x,y,totalTfinal,tclevs,colors='r',linestyles='-',linewidths=2.)
plt.clabel(cs2,inline=True,fmt='%1.0f',fontsize=12,colors='r')
m.barbs(x[::thin,::thin], y[::thin,::thin], Ufinal[::thin,::thin], Vfinal[::thin,::thin],length=opt.barbsize) #plot barbs
level = str(level)
title = level+'mb Height (m), Temp (C), Wind Barbs (kts)'
ftitle = level+'mb-'
cblabel = 'kts'
clevs = False
cbticks = False
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def surface(): # plot surface chart. t2, wind barbs and mslp
# create figure
plt.figure(figsize=(8,8))
x, y = m(lons, lats)
    t2c = conv.k_to_c(t2[time]) #convert temp to celsius
    if opt.tunit == 'F':
        t2f = conv.c_to_f(t2c) #convert celsius to fahrenheit
clevs = np.arange(-30,115,5) # levels / degF
cs = m.contourf(x,y,t2f,clevs,cmap=cm.get_cmap('gist_ncar'))
cblabel = r'$\degree$F'
elif opt.tunit == 'C':
clevs = np.arange(-40,55,5) # levels / degC
cs = m.contourf(x,y,t2c,clevs,cmap=cm.get_cmap('gist_ncar'))
cblabel = r'$\degree$C'
cbticks = True
psfchpa = conv.pa_to_hpa(psfc[time]) #convert Pa to hPa
mslp = calc.calc_mslp(psfchpa, thgt[0], t2[time]) # get mslp
mslp = gaussian_filter(mslp, sigma=3) # smooth wiggles
local_min, local_max = funcs.extrema(mslp, mode='wrap', window=50)
#make x and y grid points for barbs
#yy = np.arange(0, len(y), 8)
#xx = np.arange(0, len(x), 8)
#gp = np.meshgrid(yy, xx)
#print x[::thin,::thin].shape #check x co-ord thinning
#print u[time,::thin,::thin].shape #check u10 thinning
#x_th,y_th = m(xlong[0,::thin,::thin],xlat[0,::thin,::thin]) #another method to thin barbs
#convert wind to kts
u10kts = conv.ms_to_kts(u10[time])
v10kts = conv.ms_to_kts(v10[time])
m.barbs(x[::thin,::thin], y[::thin,::thin], u10kts[::thin,::thin], v10kts[::thin,::thin],length=opt.barbsize) #plot barbs
title = "2m Temp, Wind Barbs (kts), MSLP (hPa)"
ftitle = 'sfc-'
pclevs = np.arange(900,1055,2.)
pcs = m.contour(x,y,mslp,pclevs,colors='k',linewidths=2.)
plt.clabel(pcs, inline=True, fmt='%1.0f', fontsize=12, colors='k')
xlows = x[local_min]; xhighs = x[local_max]
ylows = y[local_min]; yhighs = y[local_max]
lowvals = mslp[local_min]; highvals = mslp[local_max]
# plot lows as blue L's, with min pressure value underneath.
xyplotted = []
# don't plot if there is already a L or H within dmin meters.
yoffset = 0.022*(m.ymax-m.ymin)
dmin = yoffset
for x,y,p in zip(xlows, ylows, lowvals):
if x < m.xmax and x > m.xmin and y < m.ymax and y > m.ymin:
dist = [np.sqrt((x-x0)**2+(y-y0)**2) for x0,y0 in xyplotted]
if not dist or min(dist) > dmin:
plt.text(x,y,'L',fontsize=14,fontweight='bold', ha='center',va='center',color='b')
plt.text(x,y-yoffset,repr(int(p)),fontsize=12, ha='center',va='top',color='b', bbox = dict(boxstyle="square",ec='None',fc=(1,1,1,0.5)))
xyplotted.append((x,y))
# plot highs as red H's, with max pressure value underneath.
xyplotted = []
for x,y,p in zip(xhighs, yhighs, highvals):
if x < m.xmax and x > m.xmin and y < m.ymax and y > m.ymin:
dist = [np.sqrt((x-x0)**2+(y-y0)**2) for x0,y0 in xyplotted]
if not dist or min(dist) > dmin:
plt.text(x,y,'H',fontsize=14,fontweight='bold',
ha='center',va='center',color='r')
plt.text(x,y-yoffset,repr(int(p)),fontsize=12, ha='center',va='top',color='r', bbox = dict(boxstyle="square",ec='None',fc=(1,1,1,0.5)))
xyplotted.append((x,y))
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def snowaccum(): # plot snow accumulation
# create figure
plt.figure(figsize=(8,8))
snow = nc.variables['SNOWNC'][time] # total accumulated grid scale snow and ice / mm at each time
if opt.punit == 'mm':
clevs = [0,0.5,1,2.5,3,4,5,8,10,15,20,30,40,50,80,100,150,200,250,500]
cblabel = 'mm'
elif opt.punit == 'in':
snow = conv.mm_to_in(snow) # convert to inches
clevs = [0.25,0.5,0.75,1,1.5,2,2.5,3,4,5,6,8,10,12,14,16,18,20,22,24]
cblabel = 'inches'
cbticks = True
norm = matplotlib.colors.BoundaryNorm(clevs, 19) # set boundary of data by normalizing (0,1)
cs = m.contourf(x,y,snow,clevs,norm=norm,cmap=cmap.snow_colormap)
title = "Snow Accumulation"
ftitle = 'snow-'
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def hailaccum(): # plot hail accumulation
# create figure
plt.figure(figsize=(8,8))
    hail = nc.variables['HAILNC'][time] # accumulated total grid scale hail / mm at each time
if opt.punit == 'mm':
clevs = [0.5,1.,1.5,2.,2.5,3.,4.,5.,6.,7.,8.,9.,10.,11.,12.]
cblabel = 'mm'
elif opt.punit == 'in':
hail = conv.mm_to_in(hail) # convert to inches
clevs = [0.01,0.02,0.04,0.06,0.08,0.1,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5,0.55]
cblabel = 'inches'
cbticks = True
norm = matplotlib.colors.BoundaryNorm(clevs, 14) # set boundary of data by normalizing (0,1)
cs = m.contourf(x,y,hail,clevs,norm=norm,cmap=cmap.hail_colormap)
title = "Hail Accumulation"
ftitle = 'hail-'
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def simudbz(): # plot simulated reflectivity, mp_physics dependent
# create figure
plt.figure(figsize=(8,8))
qrain = nc.variables['QRAIN'] # rain water mixing ratio
    t2c = conv.k_to_c(t2[time]) #convert temp to celsius
rhoa = calc.calc_rhoa(psfc[time], t2[time])
Qrain = qrain[time,1] # rain mixing ratio
    Qrain = np.nan_to_num(Qrain) # change NaN to zeros, change infs to nums
try: #depends on MP scheme
Qsn = nc.variables['QSNOW'] # try to get snow mixing ratio
except:
Qsn = np.zeros(np.shape(qrain)) # else create zeros array same shape as qrain
Qsnow = Qsn[time,1] # snow mixing ratio
Qsnow = np.nan_to_num(Qsnow) # change NaN to zeros
dBZ = calc.calc_dbz(t2c, rhoa, Qrain, Qsnow)
clevs = np.arange(0,85,5)
norm = matplotlib.colors.BoundaryNorm(clevs, 17) # normalize levels
cs = m.contourf(x,y,dBZ,clevs,norm=norm,cmap=cmap.dbz_colormap)
title = "Simulated Reflectivity"
ftitle = 'simdbz-'
cblabel = 'dBZ'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def compodbz(): # plot composite reflectivity, mp_physics dependent
# create figure
plt.figure(figsize=(8,8))
try: #get refl from do_radar_ref=1
refl = nc.variables['REFL_10CM'][time]
dBZ = np.zeros(refl[0,0].shape)
dBZ = np.max(refl, axis=0)
#for i in range(len(refl[1,:,1])):
# for j in range(len(refl[1,1,:])):
# dBZ[i,j]=np.max(refl[:,i,j])
except: # calculate reflectivity
Qrainall = nc.variables['QRAIN'][time] # rain water mixing ratio at all levels
        t2c = conv.k_to_c(t2[time]) #convert temp to celsius
rhoa = calc.calc_rhoa(psfc[time], t2[time])
try: # depends on MP scheme
Qsn = nc.variables['QSNOW'] # try to get snow mixing ratio
except:
Qsn = np.zeros(np.shape(Qrainall)) # else create zeros array same shape as qrain
Qsnowall = Qsn[time] # get all Qsnow values at all levels for each time
Qrainmax = np.max(Qrainall, axis=0) #max rain QV
Qsnowmax = np.max(Qsnowall, axis=0) #max snow QV
dBZ = calc.calc_dbz(t2c, rhoa, Qrainmax, Qsnowmax)
clevs = np.arange(0,85,5)
norm = matplotlib.colors.BoundaryNorm(clevs, 17) # normalize levels
cs = m.contourf(x,y,dBZ,clevs,norm=norm,cmap=cmap.dbz_colormap)
title = "Composite Reflectivity"
ftitle = 'compdbz-'
cblabel = 'dBZ'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def lclhgt(): # plot lcl height
# create figure
plt.figure(figsize=(8,8))
q2 = nc.variables['Q2'][time] # water vapour mixing ratio at 2m
    t2c = conv.k_to_c(t2[time]) #convert temp to celsius
psfchpa = conv.pa_to_hpa(psfc[time])
es = calc.calc_es(t2c)
ws = calc.calc_ws(es, psfchpa)
rh = calc.calc_rh(q2, ws)
td = calc.calc_dewpoint(es, rh)
lcl = calc.calc_lcl(t2c, td)
clevs = np.arange(0,6000,500)
cs = m.contourf(x,y,lcl,clevs,cmap=cmap.lcl_colormap)
title = "LCL Height"
ftitle = 'lcl-'
cblabel = 'm'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def thetaE(): # plot theta-e
# create figure
plt.figure(figsize=(8,8))
theta = nc.variables['T'][time] #perturbation potential temperature (theta-t0)
theta0 = nc.variables['T00'][0] #base state theta
theta = theta[0] + theta0 # total theta
psfchpa = conv.pa_to_hpa(psfc[time])
    t2c = conv.k_to_c(t2[time]) #convert temp to celsius
es = calc.calc_es(t2c)
ws = calc.calc_ws(es, psfchpa)
thetae = calc.calc_thetae(theta, t2[time], ws)
clevs = np.arange(260,372,4) # set by max and min of data
cs = m.contourf(x,y,thetae,clevs,cmap=cm.get_cmap('gist_ncar'))
title = "Theta-e"
ftitle = 'thetae-'
cblabel = 'K'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def h75lr(): # 700-500mb lapse rates
# create figure
plt.figure(figsize=(8,8))
pb = nc.variables['PB'][time] #base state pressure, Pa
p = nc.variables['P'][time] # perturbation pressure, Pa
totalp = pb + p # total pressure in Pa
theta = nc.variables['T'][time] #perturbation potential temperature (theta-t0)
theta0 = nc.variables['T00'][0] #base state theta
totalTheta = theta + theta0 # total potential temp
totalT= conv.k_to_c(calc.theta_to_temp(totalTheta, totalp)) # calc temp in deg C
# interp temps to levels
totalT700 = funcs.linear_interp(totalT,totalp,700)
totalT500 = funcs.linear_interp(totalT,totalp,500)
# calc h7-h5 lapse rates
lr = totalT700 - totalT500
clevs = np.arange(5,10.5,.5) # conditionally unstable levels
cs = m.contourf(x,y,lr,clevs,cmap=cm.get_cmap('gist_ncar'))
title = "H7-H5 Lapse Rates"
ftitle = 'h75lr-'
cblabel = r'$\degree$C'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def absvort500(): # plot 500mb absolute vorticity
# create figure
plt.figure(figsize=(8,8))
pb = nc.variables['PB'][time] #base state pressure, Pa
p = nc.variables['P'][time] # perturbation pressure, Pa
totalp = pb + p # total pressure in Pa
U = funcs.unstagger(nc.variables['U'][time],'U') # U wind component UNSTAGGERED
V = funcs.unstagger(nc.variables['V'][time],'V') # V wind component
fcoriolis = calc.calc_fcoriolis(xlat[0])
uinterp = funcs.linear_interp(U,totalp,500) #interp to 500mb
vinterp = funcs.linear_interp(V,totalp,500)
vertvort = calc.calc_vertvort(uinterp, vinterp, dx)
avort = vertvort + fcoriolis # absolute vorticity
avort = np.multiply(avort, 1e5) # scale up for levels
clevs = np.arange(-6, 52, 2)
cs = m.contourf(x,y,avort,clevs,cmap=cm.get_cmap('gist_ncar'))
title = '500mb Absolute Vorticity'
ftitle = '500absvort-'
cblabel = r'$10^{-5} s^{-1}$'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def shr06(): # plot the 0-6km shear vector
# create figure
plt.figure(figsize=(8,8))
ph = nc.variables['PH'][time] #perturbation geopotential
phb = nc.variables['PHB'][time] #base state geopotential
totalgp = phb + ph # total geopotential
totalgp = funcs.unstagger(totalgp,'Z') #total geopotential unstaggered
U = funcs.unstagger(nc.variables['U'][time],'U') # U wind component # UNSTAGGERED
V = funcs.unstagger(nc.variables['V'][time],'V') # V wind component
u10kts = conv.ms_to_kts(u10[time]) # sfc wind in kts
v10kts = conv.ms_to_kts(v10[time])
u6 = funcs.interp_generic(6000, (totalgp/9.81), U) # interp to 6km
v6 = funcs.interp_generic(6000, (totalgp/9.81), V)
u6kts = conv.ms_to_kts(u6) # convert 6km wind to kts
v6kts = conv.ms_to_kts(v6)
#using 10m wind as sfc wind
ushr = u6kts - u10kts # calc 0-6 shr in kts
vshr = v6kts - v10kts
speed = calc.calc_wspeed(ushr, vshr)
# plot data
clevs = np.arange(20,145,5)
cs = m.contourf(x, y, speed, clevs, cmap=cm.get_cmap('gist_ncar'))
m.barbs(x[::thin,::thin], y[::thin,::thin], ushr[::thin,::thin], vshr[::thin,::thin],length=opt.barbsize) #plot barbs
title = '0-6km Shear'
ftitle = 'shr06-'
cblabel = 'kts'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def vertvol(): # plot the vertical velocity at levels. NEEDS CORRECTING TO VERTICAL MOTION OMEGA EQUATION
W = funcs.unstagger(nc.variables['W'][time],'W') # unstaggered vertical velocity
pb = nc.variables['PB'][time] #base state pressure, Pa
p = nc.variables['P'][time] # perturbation pressure, Pa
totalp = pb + p # total pressure in Pa
levels = opt.lvl.split(',') # get list of levels
for level in levels:
plt.figure(figsize=(8,8)) #create fig for each plot
level = int(level) # make it int
Wfinal = funcs.linear_interp(W,totalp,level) # interpolate W to levels
clevs = np.arange(-2.0,2.2,0.2)
cs = m.contourf(x,y,Wfinal,clevs,cmap=cm.get_cmap('gist_ncar'))
level = str(level)
title = level+'mb Vertical Velocity'
ftitle = level+'mbvv-'
cblabel = r'$ms^{-1}$'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def olr_to_temp(): # convert OLR to IR temp
plt.figure(figsize=(8,8))
olr = nc.variables['OLR'][time]
    olrtemp = np.power(olr / 5.67e-8, 0.25) - 273.15 # calc temp using Stefan-Boltzmann law and convert to deg C
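    # Worked check (made-up input): OLR = 240 W/m^2 gives (240 / 5.67e-8)**0.25 ~= 255 K,
    # i.e. about -18 degC, the textbook effective emission temperature of Earth.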
clevs = np.arange(-80, 36 ,4)
cs = m.contourf(x,y,olrtemp,clevs,cmap=cmap.irsat_colormap)
title = 'IR Brightness Temp'
ftitle = 'irtemp-'
cblabel = r'$\degree$C'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
def pymeteo_skewt(): # uses pyMeteo package (https://github.com/cwebster2/pyMeteo) to plot skew-t for lat/lon. Credit Casey Webster
import pymeteo.skewt as skewt
try:
skewt.plot_wrf(filein,opt.slat,opt.slon,time,'skewt'+str(time)+'.png')
except:
print "LAT/LON NOT IN DOMAIN. QUITTING"
sys.exit()
def plot_skewt(): # plot skew-t by writing data to file and use SHARPpy available at: https://github.com/sharppy/SHARPpy
i, j = funcs.latlon_ij(opt.slat, opt.slon, xlat, xlong)
inlat = xlat[0,i,j]
inlon = xlong[0,i,j]
pb = nc.variables['PB'][time,:,i,j] #base state pressure, Pa
p = nc.variables['P'][time,:,i,j] # perturbation pressure, Pa
totalp = p + pb # total pressure
ph = nc.variables['PH'][time,:,i,j] #perturbation geopotential
phb = nc.variables['PHB'][time,:,i,j] #base state geopotential
totalgp = phb + ph # total geopotential
totalgp = funcs.unstagger(totalgp,'Z') #total geopotential unstaggered
U = nc.variables['U'][time,:,i,j] # U wind component
V = nc.variables['V'][time,:,i,j] # V wind component
theta = nc.variables['T'][time,:,i,j] #perturbation potential temperature (theta-t0)
theta0 = nc.variables['T00'][0] #base state theta
totaltheta = theta+theta0 # total potential temp
qvapor = nc.variables['QVAPOR'][time,:,i,j] #water vapor mixing ratio kg/kg
#need to calc these variables for skewt
level = conv.pa_to_hpa(totalp) # levels in hPa
height = conv.gphgt_to_hgt(totalgp) # heights in m
temps = calc.theta_to_temp(totaltheta, totalp) # temps in degK
tempc = conv.k_to_c(temps) # temps in degC
es = calc.calc_es(tempc) # calc es
ws = calc.calc_ws(es, level) # calc ws
rh = calc.calc_rh(qvapor, ws) # calc rh
dwpt = calc.calc_dewpoint(es, rh) # calc dewpoint in degC
winddir = calc.calc_wdir(U, V) # calc wind dir
wspd = conv.ms_to_kts(calc.calc_wspeed(U, V)) # calc wind spd
skewt_data = funcs.skewt_data(timestamp, level, height, tempc, dwpt, winddir, wspd, inlat, inlon) # write the data to SPC file format
pltfuncs.do_sharppy(skewt_data) # use SHARPpy to plot skew-t
def updraft_hel(): # plot the 2-5km updraft helicity
plt.figure(figsize=(8,8))
U = funcs.unstagger(nc.variables['U'][time],'U') # U wind component # UNSTAGGERED
V = funcs.unstagger(nc.variables['V'][time],'V') # V wind component
W = funcs.unstagger(nc.variables['W'][time],'W') # unstaggered vertical velocity
ph = nc.variables['PH'][time] #perturbation geopotential
phb = nc.variables['PHB'][time] #base state geopotential
totalgp = phb + ph # total geopotential
totalgp = funcs.unstagger(totalgp,'Z') #total geopotential unstaggered
heights = totalgp / 9.81
    levels = 6 # number of levels between the bottom and top of a layer (the extra one reaches the very top of the layer)
depth = 1000 # depth of layer
dz = depth / (levels-1) # increment / m
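    # With levels=6 and depth=1000 m this gives dz = 200 m, so each 1 km layer below is
    # sampled at its base, its top and four evenly spaced heights in between.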
#create arrays to hold all the values at each level
u2km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
v2km = np.zeros((levels, np.shape(V)[1], np.shape(V)[2]))
u3km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
v3km = np.zeros((levels, np.shape(V)[1], np.shape(V)[2]))
u4km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
v4km = np.zeros((levels, np.shape(V)[1], np.shape(V)[2]))
#u5km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
#v5km = np.zeros((levels, np.shape(V)[1], np.shape(V)[2]))
w2km = np.zeros((levels, np.shape(W)[1], np.shape(W)[2]))
w3km = np.zeros((levels, np.shape(W)[1], np.shape(W)[2]))
w4km = np.zeros((levels, np.shape(W)[1], np.shape(W)[2]))
#w5km = np.zeros((levels, np.shape(W)[1], np.shape(W)[2]))
zeta2km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
zeta3km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
zeta4km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
#zeta5km = np.zeros((levels, np.shape(U)[1], np.shape(U)[2]))
for i in range(0,levels): # loop through to interpolate to levels and store in array
print "Interpolating...doing loop ", i, "of ", (levels-1)
increment = i*dz
u2km[i] = funcs.interp_generic(2000+increment, heights, U)
v2km[i] = funcs.interp_generic(2000+increment, heights, V)
u3km[i] = funcs.interp_generic(3000+increment, heights, U)
v3km[i] = funcs.interp_generic(3000+increment, heights, V)
u4km[i] = funcs.interp_generic(4000+increment, heights, U)
v4km[i] = funcs.interp_generic(4000+increment, heights, V)
#u5km[i] = funcs.interp_generic(5000+increment, heights, U)
#v5km[i] = funcs.interp_generic(5000+increment, heights, V)
w2km[i] = funcs.interp_generic(2000+increment, heights, W)
w3km[i] = funcs.interp_generic(3000+increment, heights, W)
w4km[i] = funcs.interp_generic(4000+increment, heights, W)
#w5km[i] = funcs.interp_generic(2000+increment, heights, W)
zeta2km[i] = calc.calc_vertvort(u2km[i], v2km[i], dx)
zeta3km[i] = calc.calc_vertvort(u3km[i], v3km[i], dx)
zeta4km[i] = calc.calc_vertvort(u4km[i], v4km[i], dx)
#zeta5km[i] = calc.calc_vertvort(u5km[i], v5km[i], dx)
# calc the layer mean
w2to3 = np.mean(w2km, axis=0)
w3to4 = np.mean(w3km, axis=0)
w4to5 = np.mean(w4km, axis=0)
zeta2to3 = np.mean(zeta2km, axis=0)
zeta3to4 = np.mean(zeta3km, axis=0)
zeta4to5 = np.mean(zeta4km, axis=0)
# calc the 2-5km UH
UH = ( w2to3*zeta2to3 + w3to4*zeta3to4 + w4to5*zeta4to5 ) * 1000
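    # Math sketch: UH approximates the 2-5 km integral of w * zeta, summing three 1 km
    # layer means of (vertical velocity x vertical vorticity), each weighted by dz = 1000 m.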
#u2km = funcs.interp_generic(2000, heights, U)
#v2km = funcs.interp_generic(2000, heights, V)
#u3km = funcs.interp_generic(3000, heights, U)
#v3km = funcs.interp_generic(3000, heights, V)
#u4km = funcs.interp_generic(4000, heights, U)
#v4km = funcs.interp_generic(4000, heights, V)
#u5km = funcs.interp_generic(5000, heights, U)
#v5km = funcs.interp_generic(5000, heights, V)
#w2km = funcs.interp_generic(2000, heights, W)
#w3km = funcs.interp_generic(2000, heights, W)
#w4km = funcs.interp_generic(2000, heights, W)
#w5km = funcs.interp_generic(2000, heights, W)
#w2to3 = 0.5 * ( w2km + w3km )
#w3to4 = 0.5 * ( w3km + w4km )
#w4to5 = 0.5 * ( w4km + w5km )
#zeta2km = calc.calc_vertvort(u2km, v2km, dx)
#zeta3km = calc.calc_vertvort(u3km, v3km, dx)
#zeta4km = calc.calc_vertvort(u4km, v4km, dx)
#zeta5km = calc.calc_vertvort(u5km, v5km, dx)
#zeta2to3 = 0.5 * ( zeta2km + zeta3km )
#zeta3to4 = 0.5 * ( zeta3km + zeta4km )
#zeta4to5 = 0.5 * ( zeta4km + zeta5km )
#UH = ( w2to3*zeta2to3 + w3to4*zeta3to4 + w4to5*zeta4to5 ) * 1000
clevs = np.arange(0,210,10)
cs = m.contourf(x,y,UH,clevs,cmap=cmap.uh_colormap)
title = '2-5km Updraft Helicity'
ftitle = 'uh-'
cblabel = r'$m^{2}s^{-2}$'
cbticks = True
makeplot(cs,title,cblabel,clevs,cbticks,ftitle)
### END PLOT FUNCTIONS ###
flag = False # to check for plotting options
#### BEGIN TIME LOOP ####
for time in range(times.shape[0]):
currtime = str(''.join(times[time])).replace('_', ' ') #get current model time
filetime = currtime.translate(None, ':').replace(' ', '_') # time for filename
alltimes.append(currtime) # all times in output
timestamp = currtime[8:10]+currtime[5:7]+currtime[2:4]+'/'+currtime[11:13]+currtime[14:16]
if opt.t2: #plot 2m temp and wind barbs
print "Plotting Temperature and Wind Barbs for time: ", currtime
t2wind()
flag = True
if opt.mslp: #plot surface pressure only
print "Plotting MSLP for time: ", currtime
mslponly()
flag = True
if opt.ppnaccum: #plot total precipitation
print "Plotting Precipitation Accumulation for time: ", currtime
precipaccum()
flag = True
if opt.ppn: # plot current ppn
print "Plotting Precipitation for time: ", currtime
precip()
flag = True
if opt.convppn: # plot convective ppn
print "Plotting Convective Precipitation for time: ", currtime
convprecip()
flag = True
if opt.td or opt.rh: #plot dew point or RH
flag = True
if opt.td:
print "Plotting Dew Point for time: ", currtime
elif opt.rh:
print "Plotting RH for time: ", currtime
tdrh()
if opt.ua: #plot upper air charts
print "Plotting upper level chart for time: ", currtime
upperair()
flag = True
if opt.sfc: #plot surface chart. t2, wind and mslp
print "Plotting Surface Chart for time: ", currtime
surface()
flag = True
if opt.snow: #plot snow accumulation
print "Plotting Snow Accumulation for time: ", currtime
snowaccum()
flag = True
if opt.hail: #plot hail accumulation
print "Plotting Hail Accumulation for time: ", currtime
hailaccum()
flag = True
if opt.simdbz: #simulated reflectivity
print "Plotting Simulated Reflectivity for time: ", currtime
simudbz()
flag = True
if opt.compdbz: #composite reflectivity
print "Plotting Composite Reflectivity for time: ", currtime
compodbz()
flag = True
if opt.lcl: #plot LCL
print "Plotting LCL for time: ", currtime
lclhgt()
flag = True
if opt.thetae: #plot theta-e
print "Plotting Theta-e for time: ", currtime
thetaE()
flag= True
if opt.lr75: #plot h7-h5 lapse rates
print "Plotting H7-H5 lapse rates for time: ", currtime
h75lr()
flag = True
if opt.vort500: # plot 500mb absolute vorticity
print "Plotting 500mb absolute vorticity for time: ", currtime
absvort500()
flag = True
if opt.shear06:
print "Plotting 0-6km Shear for time: ", currtime
shr06()
flag = True
if opt.vv:
print "Plotting vertical velocity for time: ", currtime
vertvol()
flag = True
if opt.irtemp:
print "Plotting IR Brightness Temp for time: ", currtime
olr_to_temp()
flag = True
if opt.skewt:
print "Plotting Skew-t for time: ", currtime
pymeteo_skewt()
flag = True
if opt.getij:
print "Getting i, j for lat=",opt.slat, ', lon=',opt.slon
funcs.latlon_ij(opt.slat, opt.slon, xlat, xlong)
#print "A less accurate method:"
#funcs.latlon_ij2(opt.slat, opt.slon, xlat, xlong)
flag = True
sys.exit()
if opt.skewt2:
print "Plotting Skew-t for time: ", currtime
plot_skewt()
flag = True
if opt.uh25:
print "Plotting 2-5km Updraft Helicity for time: ", currtime
updraft_hel()
flag = True
if flag is False: # do this when no options given
print "Please provide options to plot. Use plotwrf.py --help"
print "QUITTING"
sys.exit()
#pass
#### END TIME LOOP ####
if opt.verbose: #verbose output
print "\n*VERBOSE OUTPUT*"
print "\nindir= ", indir
print "infile= ", filein
print "outdir=", outdir
print "Model initialisation time: ", init
print "Timestep: ", nc.variables['ITIMESTEP'][1]
print "Times in file: ", alltimes
print "west_east: ", x_dim
print "south_north: ", y_dim
print "Model dimentions (metres): ", width_meters, height_meters
print "dx, dy: ", dx, dy
print "Center lat: ", cen_lat
print "Center lon: ", cen_lon
print "Model top: ", nc.variables['P_TOP'][0]
print "Map projection: ", proj, '-' , projname
nc.close() # close netcdf file
| 45.590649 | 167 | 0.628037 |
f70d96bce35013ac957938e048f02f5425a2e2f2 | 569 | bzl | Python | example/third_party/org_eclipse_jgit.bzl | wix-playground/rules_maven_third_party | ff0b486df194779d7d8e6c9102cd12138e3305c3 | ["Apache-2.0"] | null | null | null | example/third_party/org_eclipse_jgit.bzl | wix-playground/rules_maven_third_party | ff0b486df194779d7d8e6c9102cd12138e3305c3 | ["Apache-2.0"] | null | null | null | example/third_party/org_eclipse_jgit.bzl | wix-playground/rules_maven_third_party | ff0b486df194779d7d8e6c9102cd12138e3305c3 | ["Apache-2.0"] | null | null | null |
load("@rules_maven_third_party//:import_external.bzl", import_external = "import_external")
def dependencies():
import_external(
name = "org_eclipse_jgit_org_eclipse_jgit",
artifact = "org.eclipse.jgit:org.eclipse.jgit:5.11.0.202103091610-r",
artifact_sha256 = "b0f012105d67729a67c7fde546b6e89580f7ddc5bd73c6c7bae7084c50e36a37",
srcjar_sha256 = "23b4f2debe38b2e18cb925ada6639eb78cc029243060f8f8c080ba3e0e70ab71",
deps = [
"@com_googlecode_javaewah_JavaEWAH",
"@org_slf4j_slf4j_api",
],
)
| 40.642857 | 93 | 0.713533 |
f70dacbfa4317925c8e12fd040862ec22b3790b3 | 12105 | py | Python | robinhoodbot/main.py | connorkerry/RobinhoodBot | 6a1e1733d900abfc00a8e6fff1cf48184af4edc3 | ["MIT"] | null | null | null | robinhoodbot/main.py | connorkerry/RobinhoodBot | 6a1e1733d900abfc00a8e6fff1cf48184af4edc3 | ["MIT"] | null | null | null | robinhoodbot/main.py | connorkerry/RobinhoodBot | 6a1e1733d900abfc00a8e6fff1cf48184af4edc3 | ["MIT"] | null | null | null |
import robin_stocks as r
import pandas as pd
import numpy as np
import ta as ta
from pandas.plotting import register_matplotlib_converters
from ta import *
from misc import *
from tradingstats import *
#Log in to Robinhood
login = r.login('YOUR_EMAIL','YOUR_PASSWORD')
#Safe division helper that guards against divide-by-zero
def safe_division(n, d):
return n / d if d else 0
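# Example: safe_division(10, 4) returns 2.5 and safe_division(3, 0) returns 0, so the
# position-sizing maths below never raises ZeroDivisionError on an empty portfolio.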
def get_watchlist_symbols():
"""
Returns: the symbol for each stock in your watchlist as a list of strings
"""
my_list_names = []
symbols = []
for name in r.get_all_watchlists(info='name'):
my_list_names.append(name)
for name in my_list_names:
list = r.get_watchlist_by_name(name)
for item in list:
instrument_data = r.get_instrument_by_url(item.get('instrument'))
symbol = instrument_data['symbol']
symbols.append(symbol)
return symbols
def get_portfolio_symbols():
"""
Returns: the symbol for each stock in your portfolio as a list of strings
"""
symbols = []
holdings_data = r.get_open_stock_positions()
for item in holdings_data:
if not item:
continue
instrument_data = r.get_instrument_by_url(item.get('instrument'))
symbol = instrument_data['symbol']
symbols.append(symbol)
return symbols
def get_position_creation_date(symbol, holdings_data):
"""Returns the time at which we bought a certain stock in our portfolio
Args:
symbol(str): Symbol of the stock that we are trying to figure out when it was bought
holdings_data(dict): dict returned by r.get_open_stock_positions()
Returns:
A string containing the date and time the stock was bought, or "Not found" otherwise
"""
instrument = r.get_instruments_by_symbols(symbol)
url = instrument[0].get('url')
for dict in holdings_data:
if(dict.get('instrument') == url):
return dict.get('created_at')
return "Not found"
def get_modified_holdings():
""" Retrieves the same dictionary as r.build_holdings, but includes data about
when the stock was purchased, which is useful for the read_trade_history() method
in tradingstats.py
Returns:
the same dict from r.build_holdings, but with an extra key-value pair for each
position you have, which is 'bought_at': (the time the stock was purchased)
"""
holdings = r.build_holdings()
holdings_data = r.get_open_stock_positions()
for symbol, dict in holdings.items():
bought_at = get_position_creation_date(symbol, holdings_data)
bought_at = str(pd.to_datetime(bought_at))
holdings[symbol].update({'bought_at': bought_at})
return holdings
def get_last_crossing(df, days, symbol="", direction=""):
"""Searches for a crossing between two indicators for a given stock
Args:
df(pandas.core.frame.DataFrame): Pandas dataframe with columns containing the stock's prices, both indicators, and the dates
        days(int): Maximum number of days ago that the cross may have occurred
        symbol(str): Symbol of the stock we're querying. Optional, used for printing purposes
        direction(str): "above" if we are searching for an upwards cross, "below" if we are searching for a downwards cross. Optional, used for printing purposes
Returns:
1 if the short-term indicator crosses above the long-term one
0 if there is no cross between the indicators
-1 if the short-term indicator crosses below the long-term one
"""
prices = df.loc[:,"Price"]
shortTerm = df.loc[:,"Indicator1"]
LongTerm = df.loc[:,"Indicator2"]
dates = df.loc[:,"Dates"]
lastIndex = prices.size - 1
index = lastIndex
found = index
recentDiff = (shortTerm.at[index] - LongTerm.at[index]) >= 0
if((direction == "above" and not recentDiff) or (direction == "below" and recentDiff)):
return 0
index -= 1
while(index >= 0 and found == lastIndex and not np.isnan(shortTerm.at[index]) and not np.isnan(LongTerm.at[index]) \
and ((pd.Timestamp("now", tz='UTC') - dates.at[index]) <= pd.Timedelta(str(days) + " days"))):
if(recentDiff):
if((shortTerm.at[index] - LongTerm.at[index]) < 0):
found = index
else:
if((shortTerm.at[index] - LongTerm.at[index]) > 0):
found = index
index -= 1
if(found != lastIndex):
if((direction == "above" and recentDiff) or (direction == "below" and not recentDiff)):
print(symbol + ": Short SMA crossed" + (" ABOVE " if recentDiff else " BELOW ") + "Long SMA at " + str(dates.at[found]) \
+", which was " + str(pd.Timestamp("now", tz='UTC') - dates.at[found]) + " ago", ", price at cross: " + str(prices.at[found]) \
+ ", current price: " + str(prices.at[lastIndex]))
return (1 if recentDiff else -1)
else:
return 0
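# Usage sketch ("SPY" is illustrative): golden_cross() below builds exactly this frame,
#   df = pd.concat([price.rename("Price"), sma1.rename("Indicator1"),
#                   sma2.rename("Indicator2"), dates.rename("Dates")], axis=1)
# and then calls get_last_crossing(df, days, symbol="SPY", direction="above").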
def five_year_check(stockTicker):
"""Figure out if a stock has risen or been created within the last five years.
Args:
stockTicker(str): Symbol of the stock we're querying
Returns:
True if the stock's current price is higher than it was five years ago, or the stock IPO'd within the last five years
False otherwise
"""
instrument = r.get_instruments_by_symbols(stockTicker)
list_date = instrument[0].get("list_date")
if ((pd.Timestamp("now") - pd.to_datetime(list_date)) < pd.Timedelta("5 Y")):
return True
fiveyear = r.get_historicals(stockTicker,span='5year',bounds='regular')
closingPrices = []
for item in fiveyear:
closingPrices.append(float(item['close_price']))
recent_price = closingPrices[len(closingPrices) - 1]
oldest_price = closingPrices[0]
return (recent_price > oldest_price)
def golden_cross(stockTicker, n1, n2, days, direction=""):
"""Determine if a golden/death cross has occured for a specified stock in the last X trading days
Args:
stockTicker(str): Symbol of the stock we're querying
n1(int): Specifies the short-term indicator as an X-day moving average.
n2(int): Specifies the long-term indicator as an X-day moving average.
            (n1 should be smaller than n2 to produce meaningful results, e.g. n1=50, n2=200)
        days(int): Maximum number of days ago that the cross may have occurred
        direction(str): "above" if we are searching for an upwards cross, "below" if we are searching for a downwards cross. Optional, used for printing purposes
Returns:
1 if the short-term indicator crosses above the long-term one
0 if there is no cross between the indicators
-1 if the short-term indicator crosses below the long-term one
False if direction == "above" and five_year_check(stockTicker) returns False, meaning that we're considering whether to
buy the stock but it hasn't risen overall in the last five years, suggesting it contains fundamental issues
"""
if(direction == "above" and not five_year_check(stockTicker)):
return False
history = r.get_historicals(stockTicker,span='year',bounds='regular')
closingPrices = []
dates = []
for item in history:
closingPrices.append(float(item['close_price']))
dates.append(item['begins_at'])
price = pd.Series(closingPrices)
dates = pd.Series(dates)
dates = pd.to_datetime(dates)
sma1 = ta.volatility.bollinger_mavg(price, n=int(n1), fillna=False)
sma2 = ta.volatility.bollinger_mavg(price, n=int(n2), fillna=False)
series = [price.rename("Price"), sma1.rename("Indicator1"), sma2.rename("Indicator2"), dates.rename("Dates")]
df = pd.concat(series, axis=1)
cross = get_last_crossing(df, days, symbol=stockTicker, direction=direction)
# if(cross):
# show_plot(price, sma1, sma2, dates, symbol=stockTicker, label1=str(n1)+" day SMA", label2=str(n2)+" day SMA")
return cross
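# Usage sketch ('AAPL' is illustrative): scan_stocks() below treats a return of 1 from
#   golden_cross('AAPL', n1=50, n2=200, days=10, direction="above")
# as a buy signal, and -1 from the days=30, direction="below" variant as a sell signal.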
def sell_holdings(symbol, holdings_data):
""" Place an order to sell all holdings of a stock.
Args:
symbol(str): Symbol of the stock we want to sell
holdings_data(dict): dict obtained from get_modified_holdings() method
"""
shares_owned = int(float(holdings_data[symbol].get("quantity")))
r.order_sell_market(symbol, shares_owned)
print("####### Selling " + str(shares_owned) + " shares of " + symbol + " #######")
def buy_holdings(potential_buys, profile_data, holdings_data):
""" Places orders to buy holdings of stocks. This method will try to order
an appropriate amount of shares such that your holdings of the stock will
    roughly match the average for the rest of your portfolio. If the share
price is too high considering the rest of your holdings and the amount of
buying power in your account, it will not order any shares.
Args:
potential_buys(list): List of strings, the strings are the symbols of stocks we want to buy
symbol(str): Symbol of the stock we want to sell
holdings_data(dict): dict obtained from r.build_holdings() or get_modified_holdings() method
"""
cash = float(profile_data.get('cash'))
portfolio_value = float(profile_data.get('equity')) - cash
ideal_position_size = (safe_division(portfolio_value, len(holdings_data))+cash/len(potential_buys))/(2 * len(potential_buys))
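    # Worked example with made-up numbers: equity $10,000 holding $1,000 cash across 3
    # positions with 2 candidates gives ((10000-1000)/3 + 1000/2) / (2*2) = $875, so a
    # $30 stock gets int(875/30) = 29 shares while a $1,500 stock trips the
    # not-enough-buying-power break below.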
prices = r.get_latest_price(potential_buys)
for i in range(0, len(potential_buys)):
stock_price = float(prices[i])
if(ideal_position_size < stock_price < ideal_position_size*1.5):
num_shares = int(ideal_position_size*1.5/stock_price)
elif (stock_price < ideal_position_size):
num_shares = int(ideal_position_size/stock_price)
else:
print("####### Tried buying shares of " + potential_buys[i] + ", but not enough buying power to do so#######")
break
print("####### Buying " + str(num_shares) + " shares of " + potential_buys[i] + " #######")
r.order_buy_market(potential_buys[i], num_shares)
def scan_stocks():
""" The main method. Sells stocks in your portfolio if their 50 day moving average crosses
below the 200 day, and buys stocks in your watchlist if the opposite happens.
###############################################################################################
WARNING: Comment out the sell_holdings and buy_holdings lines if you don't actually want to execute the trade.
###############################################################################################
If you sell a stock, this updates tradehistory.txt with information about the position,
how much you've earned/lost, etc.
"""
print("----- Starting scan... -----\n")
register_matplotlib_converters()
watchlist_symbols = get_watchlist_symbols()
portfolio_symbols = get_portfolio_symbols()
holdings_data = get_modified_holdings()
potential_buys = []
sells = []
print("Current Portfolio: " + str(portfolio_symbols) + "\n")
print("Current Watchlist: " + str(watchlist_symbols) + "\n")
print("----- Scanning portfolio for stocks to sell -----\n")
for symbol in portfolio_symbols:
cross = golden_cross(symbol, n1=50, n2=200, days=30, direction="below")
if(cross == -1):
sell_holdings(symbol, holdings_data)
sells.append(symbol)
profile_data = r.build_user_profile()
print("\n----- Scanning watchlist for stocks to buy -----\n")
for symbol in watchlist_symbols:
if(symbol not in portfolio_symbols):
cross = golden_cross(symbol, n1=50, n2=200, days=10, direction="above")
if(cross == 1):
potential_buys.append(symbol)
if(len(potential_buys) > 0):
buy_holdings(potential_buys, profile_data, holdings_data)
if(len(sells) > 0):
update_trade_history(sells, holdings_data, "tradehistory.txt")
print("----- Scan over -----\n")
#execute the scan
scan_stocks()
| 45.852273 | 162 | 0.656258 |
f70dca3c878f2c608ee4decf93cecab1952362b2 | 27393 | py | Python | tests/keras/layers/recurrent_test.py | mduranmustafa/keras | d4a14ee54728ac8ea6c5ffbf41f559662dcfba46 | ["MIT"] | 75 | 2018-08-03T01:10:36.000Z | 2022-02-25T05:08:39.000Z | tests/keras/layers/recurrent_test.py | coderclear/ConvGRU | c458024d5c379ef990f72b6f6b738301e1895cff | ["MIT"] | 9 | 2018-08-14T14:33:58.000Z | 2021-09-06T07:04:14.000Z | tests/keras/layers/recurrent_test.py | coderclear/ConvGRU | c458024d5c379ef990f72b6f6b738301e1895cff | ["MIT"] | 19 | 2018-08-11T20:44:42.000Z | 2021-12-01T00:41:52.000Z |
import pytest
import numpy as np
from numpy.testing import assert_allclose
import keras
from keras.utils.test_utils import layer_test
from keras.utils.test_utils import keras_test
from keras.layers import recurrent
from keras.layers import embeddings
from keras.models import Sequential
from keras.models import Model
from keras.engine.topology import Input
from keras.layers.core import Masking
from keras import regularizers
from keras import backend as K
num_samples, timesteps, embedding_dim, units = 2, 5, 4, 3
embedding_num = 12
@keras_test
def rnn_test(f):
"""
All the recurrent layers share the same interface,
so we can run through them with a single function.
"""
f = keras_test(f)
return pytest.mark.parametrize('layer_class', [
recurrent.SimpleRNN,
recurrent.GRU,
recurrent.LSTM
])(f)
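# Usage note: decorating a test with @rnn_test both wraps it in keras_test and
# parametrizes layer_class, so each decorated test body runs once per recurrent layer.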
@rnn_test
def test_return_sequences(layer_class):
layer_test(layer_class,
kwargs={'units': units,
'return_sequences': True},
input_shape=(num_samples, timesteps, embedding_dim))
@rnn_test
def test_dynamic_behavior(layer_class):
layer = layer_class(units, input_shape=(None, embedding_dim))
model = Sequential()
model.add(layer)
model.compile('sgd', 'mse')
x = np.random.random((num_samples, timesteps, embedding_dim))
y = np.random.random((num_samples, units))
model.train_on_batch(x, y)
@rnn_test
def test_stateful_invalid_use(layer_class):
layer = layer_class(units,
stateful=True,
batch_input_shape=(num_samples,
timesteps,
embedding_dim))
model = Sequential()
model.add(layer)
model.compile('sgd', 'mse')
x = np.random.random((num_samples * 2, timesteps, embedding_dim))
y = np.random.random((num_samples * 2, units))
with pytest.raises(ValueError):
model.fit(x, y)
with pytest.raises(ValueError):
model.predict(x, batch_size=num_samples + 1)
@rnn_test
@pytest.mark.skipif((K.backend() == 'cntk'),
reason='Not yet supported.')
def test_dropout(layer_class):
for unroll in [True, False]:
layer_test(layer_class,
kwargs={'units': units,
'dropout': 0.1,
'recurrent_dropout': 0.1,
'unroll': unroll},
input_shape=(num_samples, timesteps, embedding_dim))
# Test that dropout is applied during training
x = K.ones((num_samples, timesteps, embedding_dim))
layer = layer_class(units, dropout=0.5, recurrent_dropout=0.5,
input_shape=(timesteps, embedding_dim))
y = layer(x)
assert y._uses_learning_phase
y = layer(x, training=True)
assert not getattr(y, '_uses_learning_phase')
# Test that dropout is not applied during testing
x = np.random.random((num_samples, timesteps, embedding_dim))
layer = layer_class(units, dropout=0.5, recurrent_dropout=0.5,
unroll=unroll,
input_shape=(timesteps, embedding_dim))
model = Sequential([layer])
assert model.uses_learning_phase
y1 = model.predict(x)
y2 = model.predict(x)
assert_allclose(y1, y2)
@rnn_test
def test_statefulness(layer_class):
model = Sequential()
model.add(embeddings.Embedding(embedding_num, embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(units, return_sequences=False,
stateful=True,
weights=None)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
out1 = model.predict(np.ones((num_samples, timesteps)))
assert(out1.shape == (num_samples, units))
# train once so that the states change
model.train_on_batch(np.ones((num_samples, timesteps)),
np.ones((num_samples, units)))
out2 = model.predict(np.ones((num_samples, timesteps)))
# if the state is not reset, output should be different
assert(out1.max() != out2.max())
# check that output changes after states are reset
# (even though the model itself didn't change)
layer.reset_states()
out3 = model.predict(np.ones((num_samples, timesteps)))
assert(out2.max() != out3.max())
# check that container-level reset_states() works
model.reset_states()
out4 = model.predict(np.ones((num_samples, timesteps)))
assert_allclose(out3, out4, atol=1e-5)
# check that the call to `predict` updated the states
out5 = model.predict(np.ones((num_samples, timesteps)))
assert(out4.max() != out5.max())
@rnn_test
def test_masking_correctness(layer_class):
# Check masking: output with left padding and right padding
# should be the same.
model = Sequential()
model.add(embeddings.Embedding(embedding_num, embedding_dim,
mask_zero=True,
input_length=timesteps,
batch_input_shape=(num_samples, timesteps)))
layer = layer_class(units, return_sequences=False)
model.add(layer)
model.compile(optimizer='sgd', loss='mse')
left_padded_input = np.ones((num_samples, timesteps))
left_padded_input[0, :1] = 0
left_padded_input[1, :2] = 0
out6 = model.predict(left_padded_input)
right_padded_input = np.ones((num_samples, timesteps))
right_padded_input[0, -1:] = 0
right_padded_input[1, -2:] = 0
out7 = model.predict(right_padded_input)
assert_allclose(out7, out6, atol=1e-5)
@rnn_test
def test_implementation_mode(layer_class):
for mode in [1, 2]:
# Without dropout
layer_test(layer_class,
kwargs={'units': units,
'implementation': mode},
input_shape=(num_samples, timesteps, embedding_dim))
# With dropout
layer_test(layer_class,
kwargs={'units': units,
'implementation': mode,
'dropout': 0.1,
'recurrent_dropout': 0.1},
input_shape=(num_samples, timesteps, embedding_dim))
# Without bias
layer_test(layer_class,
kwargs={'units': units,
'implementation': mode,
'use_bias': False},
input_shape=(num_samples, timesteps, embedding_dim))
@rnn_test
def test_regularizer(layer_class):
layer = layer_class(units, return_sequences=False, weights=None,
input_shape=(timesteps, embedding_dim),
kernel_regularizer=regularizers.l1(0.01),
recurrent_regularizer=regularizers.l1(0.01),
bias_regularizer='l2')
layer.build((None, None, embedding_dim))
assert len(layer.losses) == 3
assert len(layer.cell.losses) == 3
layer = layer_class(units, return_sequences=False, weights=None,
input_shape=(timesteps, embedding_dim),
activity_regularizer='l2')
assert layer.activity_regularizer
x = K.variable(np.ones((num_samples, timesteps, embedding_dim)))
layer(x)
assert len(layer.cell.get_losses_for(x)) == 0
assert len(layer.get_losses_for(x)) == 1
@rnn_test
def test_trainability(layer_class):
layer = layer_class(units)
layer.build((None, None, embedding_dim))
assert len(layer.weights) == 3
assert len(layer.trainable_weights) == 3
assert len(layer.non_trainable_weights) == 0
layer.trainable = False
assert len(layer.weights) == 3
assert len(layer.trainable_weights) == 0
assert len(layer.non_trainable_weights) == 3
layer.trainable = True
assert len(layer.weights) == 3
assert len(layer.trainable_weights) == 3
assert len(layer.non_trainable_weights) == 0
@keras_test
def test_masking_layer():
''' This test based on a previously failing issue here:
https://github.com/fchollet/keras/issues/1567
'''
inputs = np.random.random((6, 3, 4))
targets = np.abs(np.random.random((6, 3, 5)))
targets /= targets.sum(axis=-1, keepdims=True)
model = Sequential()
model.add(Masking(input_shape=(3, 4)))
model.add(recurrent.SimpleRNN(units=5, return_sequences=True, unroll=False))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(inputs, targets, epochs=1, batch_size=100, verbose=1)
model = Sequential()
model.add(Masking(input_shape=(3, 4)))
model.add(recurrent.SimpleRNN(units=5, return_sequences=True, unroll=True))
model.compile(loss='categorical_crossentropy', optimizer='adam')
model.fit(inputs, targets, epochs=1, batch_size=100, verbose=1)
@rnn_test
def test_from_config(layer_class):
stateful_flags = (False, True)
for stateful in stateful_flags:
l1 = layer_class(units=1, stateful=stateful)
l2 = layer_class.from_config(l1.get_config())
assert l1.get_config() == l2.get_config()
@rnn_test
def test_specify_initial_state_keras_tensor(layer_class):
num_states = 2 if layer_class is recurrent.LSTM else 1
# Test with Keras tensor
inputs = Input((timesteps, embedding_dim))
initial_state = [Input((units,)) for _ in range(num_states)]
layer = layer_class(units)
if len(initial_state) == 1:
output = layer(inputs, initial_state=initial_state[0])
else:
output = layer(inputs, initial_state=initial_state)
assert initial_state[0] in layer.inbound_nodes[0].input_tensors
model = Model([inputs] + initial_state, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.fit([inputs] + initial_state, targets)
@rnn_test
def test_specify_initial_state_non_keras_tensor(layer_class):
num_states = 2 if layer_class is recurrent.LSTM else 1
# Test with non-Keras tensor
inputs = Input((timesteps, embedding_dim))
initial_state = [K.random_normal_variable((num_samples, units), 0, 1)
for _ in range(num_states)]
layer = layer_class(units)
output = layer(inputs, initial_state=initial_state)
model = Model(inputs, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, embedding_dim))
targets = np.random.random((num_samples, units))
model.fit(inputs, targets)
@rnn_test
def test_reset_states_with_values(layer_class):
num_states = 2 if layer_class is recurrent.LSTM else 1
layer = layer_class(units, stateful=True)
layer.build((num_samples, timesteps, embedding_dim))
layer.reset_states()
assert len(layer.states) == num_states
assert layer.states[0] is not None
np.testing.assert_allclose(K.eval(layer.states[0]),
np.zeros(K.int_shape(layer.states[0])),
atol=1e-4)
state_shapes = [K.int_shape(state) for state in layer.states]
values = [np.ones(shape) for shape in state_shapes]
if len(values) == 1:
values = values[0]
layer.reset_states(values)
np.testing.assert_allclose(K.eval(layer.states[0]),
np.ones(K.int_shape(layer.states[0])),
atol=1e-4)
# Test fit with invalid data
with pytest.raises(ValueError):
layer.reset_states([1] * (len(layer.states) + 1))
@rnn_test
def test_initial_states_as_other_inputs(layer_class):
num_states = 2 if layer_class is recurrent.LSTM else 1
# Test with Keras tensor
main_inputs = Input((timesteps, embedding_dim))
initial_state = [Input((units,)) for _ in range(num_states)]
inputs = [main_inputs] + initial_state
layer = layer_class(units)
output = layer(inputs)
assert initial_state[0] in layer.inbound_nodes[0].input_tensors
model = Model(inputs, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
main_inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.train_on_batch([main_inputs] + initial_state, targets)
@rnn_test
def test_specify_state_with_masking(layer_class):
''' This test based on a previously failing issue here:
https://github.com/fchollet/keras/issues/1567
'''
num_states = 2 if layer_class is recurrent.LSTM else 1
inputs = Input((timesteps, embedding_dim))
_ = Masking()(inputs)
initial_state = [Input((units,)) for _ in range(num_states)]
output = layer_class(units)(inputs, initial_state=initial_state)
model = Model([inputs] + initial_state, output)
model.compile(loss='categorical_crossentropy', optimizer='adam')
inputs = np.random.random((num_samples, timesteps, embedding_dim))
initial_state = [np.random.random((num_samples, units))
for _ in range(num_states)]
targets = np.random.random((num_samples, units))
model.fit([inputs] + initial_state, targets)
@rnn_test
def test_return_state(layer_class):
num_states = 2 if layer_class is recurrent.LSTM else 1
inputs = Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = layer_class(units, return_state=True, stateful=True)
outputs = layer(inputs)
output, state = outputs[0], outputs[1:]
assert len(state) == num_states
model = Model(inputs, state[0])
inputs = np.random.random((num_samples, timesteps, embedding_dim))
state = model.predict(inputs)
np.testing.assert_allclose(K.eval(layer.states[0]), state, atol=1e-4)
@rnn_test
def test_state_reuse(layer_class):
inputs = Input(batch_shape=(num_samples, timesteps, embedding_dim))
layer = layer_class(units, return_state=True, return_sequences=True)
outputs = layer(inputs)
output, state = outputs[0], outputs[1:]
output = layer_class(units)(output, initial_state=state)
model = Model(inputs, output)
inputs = np.random.random((num_samples, timesteps, embedding_dim))
outputs = model.predict(inputs)
@keras_test
def test_minimal_rnn_cell_non_layer():
class MinimalRNNCell(object):
def __init__(self, units, input_dim):
self.units = units
self.state_size = units
self.kernel = keras.backend.variable(
np.random.random((input_dim, units)))
def call(self, inputs, states):
prev_output = states[0]
output = keras.backend.dot(inputs, self.kernel) + prev_output
return output, [output]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = recurrent.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [MinimalRNNCell(8, 5),
MinimalRNNCell(32, 8),
MinimalRNNCell(32, 32)]
layer = recurrent.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
@keras_test
def test_minimal_rnn_cell_non_layer_multiple_states():
class MinimalRNNCell(object):
def __init__(self, units, input_dim):
self.units = units
self.state_size = (units, units)
self.kernel = keras.backend.variable(
np.random.random((input_dim, units)))
def call(self, inputs, states):
prev_output_1 = states[0]
prev_output_2 = states[1]
output = keras.backend.dot(inputs, self.kernel)
output += prev_output_1
output -= prev_output_2
return output, [output * 2, output * 3]
# Basic test case.
cell = MinimalRNNCell(32, 5)
x = keras.Input((None, 5))
layer = recurrent.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacking.
cells = [MinimalRNNCell(8, 5),
MinimalRNNCell(16, 8),
MinimalRNNCell(32, 16)]
layer = recurrent.RNN(cells)
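    # StackedRNNCells reports per-cell state sizes with the last (topmost)
    # cell first, hence the reversed ordering asserted below.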
assert layer.cell.state_size == (32, 32, 16, 16, 8, 8)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
@keras_test
def test_minimal_rnn_cell_layer():
class MinimalRNNCell(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(MinimalRNNCell, self).__init__(**kwargs)
def build(self, input_shape):
self.kernel = self.add_weight(shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.built = True
def call(self, inputs, states):
prev_output = states[0]
h = keras.backend.dot(inputs, self.kernel)
output = h + keras.backend.dot(prev_output, self.recurrent_kernel)
return output, [output]
def get_config(self):
config = {'units': self.units}
base_config = super(MinimalRNNCell, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = keras.Input((None, 5))
cell = MinimalRNNCell(32)
layer = recurrent.RNN(cell)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
layer = recurrent.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
assert_allclose(y_np, y_np_2, atol=1e-4)
# Test stacking.
cells = [MinimalRNNCell(8),
MinimalRNNCell(12),
MinimalRNNCell(32)]
layer = recurrent.RNN(cells)
y = layer(x)
model = keras.models.Model(x, y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(np.zeros((6, 5, 5)), np.zeros((6, 32)))
# Test stacked RNN serialization.
x_np = np.random.random((6, 5, 5))
y_np = model.predict(x_np)
weights = model.get_weights()
config = layer.get_config()
with keras.utils.CustomObjectScope({'MinimalRNNCell': MinimalRNNCell}):
layer = recurrent.RNN.from_config(config)
y = layer(x)
model = keras.models.Model(x, y)
model.set_weights(weights)
y_np_2 = model.predict(x_np)
assert_allclose(y_np, y_np_2, atol=1e-4)
@keras_test
def test_stacked_rnn_attributes():
cells = [recurrent.LSTMCell(3),
recurrent.LSTMCell(3, kernel_regularizer='l2')]
layer = recurrent.RNN(cells)
layer.build((None, None, 5))
# Test regularization losses
assert len(layer.losses) == 1
# Test weights
assert len(layer.trainable_weights) == 6
cells[0].trainable = False
assert len(layer.trainable_weights) == 3
assert len(layer.non_trainable_weights) == 3
# Test `get_losses_for`
x = keras.Input((None, 5))
y = K.sum(x)
cells[0].add_loss(y, inputs=x)
assert layer.get_losses_for(x) == [y]
@rnn_test
def test_batch_size_equal_one(layer_class):
inputs = Input(batch_shape=(1, timesteps, embedding_dim))
layer = layer_class(units)
outputs = layer(inputs)
model = Model(inputs, outputs)
model.compile('sgd', 'mse')
x = np.random.random((1, timesteps, embedding_dim))
y = np.random.random((1, units))
model.train_on_batch(x, y)
@keras_test
def test_rnn_cell_with_constants_layer():
class RNNCellWithConstants(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(RNNCellWithConstants, self).__init__(**kwargs)
def build(self, input_shape):
if not isinstance(input_shape, list):
raise TypeError('expects constants shape')
[input_shape, constant_shape] = input_shape
# will (and should) raise if more than one constant passed
self.input_kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.constant_kernel = self.add_weight(
shape=(constant_shape[-1], self.units),
initializer='uniform',
name='constant_kernel')
self.built = True
def call(self, inputs, states, constants):
[prev_output] = states
[constant] = constants
h_input = keras.backend.dot(inputs, self.input_kernel)
h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
h_const = keras.backend.dot(constant, self.constant_kernel)
output = h_input + h_state + h_const
return output, [output]
def get_config(self):
config = {'units': self.units}
base_config = super(RNNCellWithConstants, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = keras.Input((None, 5))
c = keras.Input((3,))
cell = RNNCellWithConstants(32)
layer = recurrent.RNN(cell)
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 3))],
np.zeros((6, 32))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, c_np])
weights = model.get_weights()
config = layer.get_config()
custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
with keras.utils.CustomObjectScope(custom_objects):
layer = recurrent.RNN.from_config(config.copy())
y = layer(x, constants=c)
model = keras.models.Model([x, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, c_np])
assert_allclose(y_np, y_np_2, atol=1e-4)
# test flat list inputs
with keras.utils.CustomObjectScope(custom_objects):
layer = recurrent.RNN.from_config(config.copy())
y = layer([x, c])
model = keras.models.Model([x, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, c_np])
assert_allclose(y_np, y_np_3, atol=1e-4)
@keras_test
def test_rnn_cell_with_constants_layer_passing_initial_state():
class RNNCellWithConstants(keras.layers.Layer):
def __init__(self, units, **kwargs):
self.units = units
self.state_size = units
super(RNNCellWithConstants, self).__init__(**kwargs)
def build(self, input_shape):
if not isinstance(input_shape, list):
raise TypeError('expects constants shape')
[input_shape, constant_shape] = input_shape
# will (and should) raise if more than one constant passed
self.input_kernel = self.add_weight(
shape=(input_shape[-1], self.units),
initializer='uniform',
name='kernel')
self.recurrent_kernel = self.add_weight(
shape=(self.units, self.units),
initializer='uniform',
name='recurrent_kernel')
self.constant_kernel = self.add_weight(
shape=(constant_shape[-1], self.units),
initializer='uniform',
name='constant_kernel')
self.built = True
def call(self, inputs, states, constants):
[prev_output] = states
[constant] = constants
h_input = keras.backend.dot(inputs, self.input_kernel)
h_state = keras.backend.dot(prev_output, self.recurrent_kernel)
h_const = keras.backend.dot(constant, self.constant_kernel)
output = h_input + h_state + h_const
return output, [output]
def get_config(self):
config = {'units': self.units}
base_config = super(RNNCellWithConstants, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
# Test basic case.
x = keras.Input((None, 5))
c = keras.Input((3,))
s = keras.Input((32,))
cell = RNNCellWithConstants(32)
layer = recurrent.RNN(cell)
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model([x, s, c], y)
model.compile(optimizer='rmsprop', loss='mse')
model.train_on_batch(
[np.zeros((6, 5, 5)), np.zeros((6, 32)), np.zeros((6, 3))],
np.zeros((6, 32))
)
# Test basic case serialization.
x_np = np.random.random((6, 5, 5))
s_np = np.random.random((6, 32))
c_np = np.random.random((6, 3))
y_np = model.predict([x_np, s_np, c_np])
weights = model.get_weights()
config = layer.get_config()
custom_objects = {'RNNCellWithConstants': RNNCellWithConstants}
with keras.utils.CustomObjectScope(custom_objects):
layer = recurrent.RNN.from_config(config.copy())
y = layer(x, initial_state=s, constants=c)
model = keras.models.Model([x, s, c], y)
model.set_weights(weights)
y_np_2 = model.predict([x_np, s_np, c_np])
assert_allclose(y_np, y_np_2, atol=1e-4)
# verify that state is used
y_np_2_different_s = model.predict([x_np, s_np + 10., c_np])
with pytest.raises(AssertionError):
assert_allclose(y_np, y_np_2_different_s, atol=1e-4)
# test flat list inputs
with keras.utils.CustomObjectScope(custom_objects):
layer = recurrent.RNN.from_config(config.copy())
y = layer([x, s, c])
model = keras.models.Model([x, s, c], y)
model.set_weights(weights)
y_np_3 = model.predict([x_np, s_np, c_np])
assert_allclose(y_np, y_np_3, atol=1e-4)
if __name__ == '__main__':
pytest.main([__file__])
| 35.807843 | 80 | 0.631694 |
f70ddadd8c534ce341100c123ca2bae91c5488da | 3,605 | py | Python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/deploymentscripts/v2019_10_01_preview/aio/_configuration.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/deploymentscripts/v2019_10_01_preview/aio/_configuration.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/deploymentscripts/v2019_10_01_preview/aio/_configuration.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | ["MIT"] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy
from .._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials_async import AsyncTokenCredential
class DeploymentScriptsClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for DeploymentScriptsClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials_async.AsyncTokenCredential
:param subscription_id: Subscription Id which forms part of the URI for every service call.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2019-10-01-preview". Note that overriding
this default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "AsyncTokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(DeploymentScriptsClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "2019-10-01-preview") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs: Any
) -> None:
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.AsyncRetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.AsyncRedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| 49.383562 | 130 | 0.705687 |
f70dddfd53fd99825c38c174683fd5bfb96c6f8f | 15,270 | py | Python | nets/block.py | tarepan/mutated_DVC | 7fbbf4754285944387ec5d5108ed5f3d473d4f81 | ["MIT"] | null | null | null | nets/block.py | tarepan/mutated_DVC | 7fbbf4754285944387ec5d5108ed5f3d473d4f81 | ["MIT"] | null | null | null | nets/block.py | tarepan/mutated_DVC | 7fbbf4754285944387ec5d5108ed5f3d473d4f81 | ["MIT"] | 1 | 2019-06-05T16:03:32.000Z | 2019-06-05T16:03:32.000Z |
import math
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from .sn_convolution_2d import SNConvolution2D, SNDeconvolution2D
from .sn_linear import SNLinear
def _upsample(x):
h, w = x.shape[2:]
return F.unpooling_2d(x, 2, outsize=(h * 2, w * 2))
def _downsample(x):
return F.average_pooling_2d(x, 2)
def upsample_conv(x, conv):
return conv(_upsample(x))
def _upsample_frq(x):
h, w = x.shape[2:]
return F.unpooling_2d(x, (1,2), outsize=(h, w * 2))
def _downsample_frq(x):
return F.average_pooling_2d(x, (1,2))
def upsample_conv_frq(x, conv):
return conv(_upsample_frq(x))
class ResBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, ksize=3, pad=1, activation=F.leaky_relu, mode='none', bn=False, dr=None):
super(ResBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
initializer_sc = chainer.initializers.GlorotUniform()
self.activation = activation
self.mode = _downsample if mode == 'down' else _upsample if mode == 'up' else None
self.learnable_sc = in_channels != out_channels
self.dr = dr
self.bn = bn
with self.init_scope():
self.c1 = L.Convolution2D(in_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
self.c2 = L.Convolution2D(out_channels, out_channels, ksize=ksize, pad=pad, initialW=initializer, nobias=bn)
if bn:
self.b1 = L.BatchNormalization(out_channels)
self.b2 = L.BatchNormalization(out_channels)
if self.learnable_sc:
self.c_sc = L.Convolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
def residual(self, x):
h = x
h = self.c1(h)
if self.bn:
h = self.b1(h)
if self.activation:
h = self.activation(h)
if self.mode:
h = self.mode(h)
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c2(h)
if self.bn:
h = self.b2(h)
if self.activation:
h = self.activation(h)
return h
def shortcut(self, x):
if self.mode:
x = self.mode(x)
if self.learnable_sc:
x = self.c_sc(x)
return x
def __call__(self, x):
return self.residual(x) + self.shortcut(x)
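# Hedged usage sketch (shapes are illustrative, not part of the original
# module): a downsampling residual block that halves spatial resolution
# while mapping 64 channels to 128.
#
# block = ResBlock(64, 128, mode='down')
# y = block(np.zeros((1, 64, 32, 32), dtype=np.float32))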
class ConvBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
super(ConvBlock, self).__init__()
# initializer = chainer.initializers.GlorotUniform()
initializer = chainer.initializers.HeUniform()
self.activation = activation
self.bn = bn
self.dr = dr
with self.init_scope():
if mode == 'none':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
elif mode == 'none-7':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
elif mode == 'down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'up':
self.c = L.Deconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'full-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
elif mode == 'frq':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
elif mode == 'frq-down':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
                self.activation = lambda x: activation(_downsample_frq(x))
elif mode == 'frq-up':
self.c = L.Convolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
                self.activation = lambda x: activation(_upsample_frq(x))
elif mode == 'pad':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=2, initialW=initializer, nobias=bn)
elif mode == 'trim':
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=0, initialW=initializer, nobias=bn)
else:
raise Exception('mode is missing')
if bn:
self.b = L.BatchNormalization(out_channels)
def __call__(self, h):
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class CoPSBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, bn=True):
super(CoPSBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.bn = bn
with self.init_scope():
self.ps = L.Convolution2D(in_channels, in_channels*4, ksize=1, stride=1, initialW=initializer)
self.c = L.Convolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer)
if bn:
self.b = L.BatchNormalization(out_channels)
def pixel_shuffle(self, x):
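        # Sub-pixel convolution (pixel shuffle): self.ps expands channels 4x,
        # then the (b, 4c, h, w) tensor is rearranged into (b, c, 2h, 2w),
        # trading channel depth for spatial resolution.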
out = self.ps(x)
b = out.shape[0]
c = out.shape[1]
h = out.shape[2]
w = out.shape[3]
out = F.reshape(out, (b, 2, 2, c//4, h, w))
out = F.transpose(out, (0, 3, 4, 1, 5, 2))
out = F.reshape(out, (b, c//4, h*2, w*2))
return out
def __call__(self, h):
h = self.pixel_shuffle(h)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class SNResBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, sample='none', dr=None):
super(SNResBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
initializer_sc = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.sample = _downsample if sample == 'down' else _upsample if sample == 'up' else None
self.learnable_sc = in_channels != out_channels or sample == 'down' or sample == 'up'
with self.init_scope():
self.c1 = SNConvolution2D(in_channels, out_channels, ksize=3, pad=1, initialW=initializer)
self.c2 = SNConvolution2D(out_channels, out_channels, ksize=3, pad=1, initialW=initializer)
if self.learnable_sc:
self.c_sc = SNConvolution2D(in_channels, out_channels, ksize=1, pad=0, initialW=initializer_sc)
def residual(self, x):
h = x
h = self.activation(h)
h = self.c1(h)
if self.sample:
h = self.sample(h)
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.activation(h)
h = self.c2(h)
return h
def shortcut(self, x):
if self.learnable_sc:
x = self.c_sc(x)
if self.sample:
return self.sample(x)
else:
return x
else:
return x
def __call__(self, x):
return self.residual(x) + self.shortcut(x)
class SNConvBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, mode='none', activation=F.leaky_relu, bn=False, dr=None):
super(SNConvBlock, self).__init__()
# initializer = chainer.initializers.GlorotUniform()
initializer = chainer.initializers.HeUniform()
self.activation = activation
self.bn = bn
self.dr = dr
with self.init_scope():
if mode == 'none':
self.c = SNConvolution2D(in_channels, out_channels, ksize=3, stride=1, pad=1, initialW=initializer, nobias=bn)
elif mode == 'none-7':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(7,7), stride=1, pad=(3,3), initialW=initializer, nobias=bn)
elif mode == 'down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'up':
self.c = SNDeconvolution2D(in_channels, out_channels, ksize=4, stride=2, pad=1, initialW=initializer, nobias=bn)
elif mode == 'full-down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=4, stride=1, pad=0, initialW=initializer, nobias=bn)
elif mode == 'frq':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
elif mode == 'frq-down':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
                self.activation = lambda x: activation(_downsample_frq(x))
elif mode == 'frq-up':
self.c = SNConvolution2D(in_channels, out_channels, ksize=(1,9), stride=1, pad=(0,4), initialW=initializer, nobias=bn)
                self.activation = lambda x: activation(_upsample_frq(x))
else:
raise Exception('mode is missing')
if bn:
self.b = L.BatchNormalization(out_channels)
def __call__(self, h):
if self.dr:
with chainer.using_config('train', True):
h = F.dropout(h, self.dr)
h = self.c(h)
if self.bn:
h = self.b(h)
if self.activation:
h = self.activation(h)
return h
class SNLinearBlock(chainer.Chain):
def __init__(self, in_channels, out_channels, activation=F.leaky_relu, dr=None):
super(SNLinearBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
if type(out_channels) is tuple:
self.out_shape = (-1,)+out_channels
else:
self.out_shape = None
with self.init_scope():
self.l = SNLinear(in_channels, np.prod(out_channels), initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = self.l(x)
x = self.activation(x)
if self.out_shape:
x = F.reshape(x, self.out_shape)
return x
class SNMDBlock(chainer.Chain):
def __init__(self, in_channels, in_size=4, B=100, C=5, gap=True, dr=None):
super(SNMDBlock, self).__init__()
# initializer = chainer.initializers.GlorotUniform()
initializer = chainer.initializers.HeUniform()
self.B = B
self.C = C
self.dr = dr
self.gap = gap
if gap:
in_size = 1
if type(in_size) is int:
in_size = (in_size, in_size)
with self.init_scope():
self.l = SNLinear(in_size[0] * in_size[1] * in_channels + B, 1, initialW=initializer)
self.md = SNLinear(in_size[0] * in_size[1] * in_channels, B * C, initialW=initializer)
def __call__(self, x):
if self.dr:
with chainer.using_config('train', True):
x = F.dropout(x, self.dr)
if self.gap:
x = F.sum(x, axis=(2,3))
N = x.shape[0]
        # Below code copied from https://github.com/pfnet-research/chainer-gan-lib/blob/master/minibatch_discrimination/net.py
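        # Minibatch discrimination: project each sample's features to B*C
        # dimensions, compute L1 distances between every pair of samples in
        # the batch, and append exp(-distance) similarity sums so the
        # discriminator can detect low-diversity (mode-collapsed) batches.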
feature = F.reshape(F.leaky_relu(x), (N, -1))
m = F.reshape(self.md(feature), (N, self.B * self.C, 1))
m0 = F.broadcast_to(m, (N, self.B * self.C, N))
m1 = F.transpose(m0, (2, 1, 0))
d = F.absolute(F.reshape(m0 - m1, (N, self.B, self.C, N)))
d = F.sum(F.exp(-F.sum(d, axis=2)), axis=2) - 1
h = F.concat([feature, d])
h = self.l(h)
return h
class SNL1DBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(SNL1DBlock, self).__init__()
initializer = chainer.initializers.GlorotUniform()
self.activation = activation
self.dr = dr
self.out_ch = out_ch
with self.init_scope():
self.l = SNLinear(in_ch*width, out_ch*width, initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = F.transpose(x, (0, 2, 1, 3))
out_shape = list(x.shape)
x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
x = self.l(x)
x = self.activation(x)
out_shape[2] = self.out_ch
x = F.reshape(x, out_shape)
x = F.transpose(x, (0, 2, 1, 3))
return x
class L1DBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(L1DBlock, self).__init__()
# initializer = chainer.initializers.GlorotUniform()
initializer = chainer.initializers.HeUniform()
self.activation = activation
self.dr = dr
self.out_ch = out_ch
with self.init_scope():
self.l = L.Linear(in_ch*width, out_ch*width, initialW=initializer)
def __call__(self, x):
if self.dr:
x = F.dropout(x, self.dr)
x = F.transpose(x, (0, 2, 1, 3))
out_shape = list(x.shape)
x = F.reshape(x, (-1, x.shape[2]*x.shape[3]))
x = self.l(x)
x = self.activation(x)
out_shape[2] = self.out_ch
x = F.reshape(x, out_shape)
x = F.transpose(x, (0, 2, 1, 3))
return x
class CLBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, liner_out_ch=1, dr=None):
super(CLBlock, self).__init__()
self.dr = dr
if out_ch - liner_out_ch <= 0:
raise Exception('out_ch <= liner_out_ch!')
with self.init_scope():
self.c = ConvBlock(in_ch, out_ch-liner_out_ch, activation=activation)
self.l = L1DBlock(in_ch, liner_out_ch, width, activation)
def __call__(self, x):
h = x
if self.dr:
h = F.dropout(h, self.dr)
h1 = self.c(h)
h2 = self.l(h)
h = F.concat([h1,h2])
return h
class SNCLBlock(chainer.Chain):
def __init__(self, in_ch, out_ch, width, activation=F.leaky_relu, dr=None):
super(SNCLBlock, self).__init__()
self.dr = dr
with self.init_scope():
self.c = SNConvBlock(in_ch, out_ch-1, activation=activation)
self.l = SNL1DBlock(in_ch, 1, width, activation)
def __call__(self, x):
h = x
if self.dr:
h = F.dropout(h, self.dr)
h1 = self.c(h)
h2 = self.l(h)
h = F.concat([h1,h2])
return h
| 40.07874 | 134 | 0.583628 |
f70dfdbde3e077aab6bf814a03c39fed976f228e | 463 | py | Python | examples/aditi/aniket/sister.py | FlaskAio/navycut | 40f378f1710a26645df8d726c4d1caf33097da50 | [
"MIT"
] | 4 | 2021-09-22T09:23:04.000Z | 2022-03-05T05:58:46.000Z | examples/aditi/aniket/sister.py | FlaskAio/navycut | 40f378f1710a26645df8d726c4d1caf33097da50 | [
"MIT"
] | 21 | 2021-09-27T03:19:21.000Z | 2022-03-31T03:20:59.000Z | examples/aditi/aniket/sister.py | FlaskAio/navycut | 40f378f1710a26645df8d726c4d1caf33097da50 | [
"MIT"
] | null | null | null | """
Do not change anything unless you have enough knowledge of
how to handle it; otherwise it may mess up the server.
"""
from navycut.core import AppSister
from navycut.utils import path
__basedir__ = path.abspath(__file__).parent
class AniketSister(AppSister):
name = "aniket"
template_folder = __basedir__ / "templates"
static_folder = __basedir__ / "static"
static_url_path = "/static"
url_prefix = "/aniket"
    import_app_feature = True
| 24.368421 | 57 | 0.732181 |
f70e72a99e7f795d6d8542bc5c90c038e0e7a260 | 3,377 | py | Python | predict_functions.py | xXEminenTXx/ImageClassifier | e0e63e12108b523270ea7d615afcbfc696b07996 | ["MIT"] | null | null | null | predict_functions.py | xXEminenTXx/ImageClassifier | e0e63e12108b523270ea7d615afcbfc696b07996 | ["MIT"] | null | null | null | predict_functions.py | xXEminenTXx/ImageClassifier | e0e63e12108b523270ea7d615afcbfc696b07996 | ["MIT"] | null | null | null |
# python imports
import numpy as np
from PIL import Image
import torch
from torch import nn, optim
import torch.nn.functional as F
from torchvision import datasets, transforms, models
from collections import OrderedDict
from sys import exit
# File containing all of the functions used in the predict program
def load_checkpoint(filepath):
checkpoint = torch.load(filepath)
if checkpoint["arch"] == 'VGG':
model = models.vgg16(pretrained=True)
elif checkpoint["arch"] == 'Densenet':
model = models.densenet121(pretrained=True)
else:
print("Unsupported arch used in checkpoint")
exit(1)
for param in model.parameters():
param.requires_grad = False
model.class_to_idx = checkpoint['class_to_idx']
# Load classifier from checkpoint
classifier = checkpoint['classifier']
model.classifier = classifier
model.load_state_dict(checkpoint['model_state_dict'])
return model
def process_image(image_path):
''' Scales, crops, and normalizes a PIL image for a PyTorch model,
returns an Numpy array
'''
# Process a PIL image for use in a PyTorch model
pil_image = Image.open(image_path)
# Resize
if pil_image.size[0] > pil_image.size[1]:
pil_image.thumbnail((5000, 256))
else:
pil_image.thumbnail((256, 5000))
# Crop
left_margin = (pil_image.width-224)/2
bottom_margin = (pil_image.height-224)/2
right_margin = left_margin + 224
top_margin = bottom_margin + 224
pil_image = pil_image.crop((left_margin, bottom_margin, right_margin, top_margin))
# Normalize
np_image = np.array(pil_image)/255
mean = np.array([0.485, 0.456, 0.406])
std = np.array([0.229, 0.224, 0.225])
np_image = (np_image - mean) / std
# PyTorch expects the color channel to be the first dimension but it's the third dimension in the PIL image and Numpy array
# Color channel needs to be first; retain the order of the other two dimensions.
np_image = np_image.transpose((2, 0, 1))
return np_image
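# Hedged usage sketch (the image path is illustrative, not part of this
# module): process_image returns a (3, 224, 224) float array, e.g.
#
# np_image = process_image('flowers/test/1/image_06743.jpg')
# tensor = torch.from_numpy(np_image).type(torch.FloatTensor)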
def predict(image_path, model, topk, gpu):
''' Predict the class (or classes) of an image using a trained deep learning model.
'''
image = process_image(image_path)
if gpu:
model.to('cuda')
image = torch.from_numpy(image).type(torch.cuda.FloatTensor)
else:
model.to('cpu')
image = torch.from_numpy(image).type(torch.FloatTensor)
# Returns a new tensor with a dimension of size one inserted at the specified position.
image = image.unsqueeze(0)
output = model.forward(image)
probabilities = torch.exp(output)
# Probabilities and the indices of those probabilities corresponding to the classes
top_probabilities, top_indices = probabilities.topk(topk)
# Convert to lists
top_probabilities = top_probabilities.detach().type(torch.FloatTensor).numpy().tolist()[0]
top_indices = top_indices.detach().type(torch.FloatTensor).numpy().tolist()[0]
# Convert topk_indices to the actual class labels using class_to_idx
# Invert the dictionary so you get a mapping from index to class.
idx_to_class = {value: key for key, value in model.class_to_idx.items()}
#print(idx_to_class)
top_classes = [idx_to_class[index] for index in top_indices]
return top_probabilities, top_classes
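# Hedged usage sketch (file names are illustrative, not part of this module):
#
# model = load_checkpoint('checkpoint.pth')
# probs, classes = predict('flowers/test/1/image_06743.jpg', model,
#                          topk=5, gpu=False)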
| 30.423423 | 127 | 0.695292 |
f70ea910deb64c851f94887e577c45916aab7cf2 | 2,287 | py | Python | python/app/plugins/http/Spring/CVE_2017_8046.py | taomujian/linbing | fe772a58f41e3b046b51a866bdb7e4655abaf51a | ["MIT"] | 351 | 2020-02-26T05:23:26.000Z | 2022-03-26T12:39:19.000Z | python/app/plugins/http/Spring/CVE_2017_8046.py | taomujian/linbing | fe772a58f41e3b046b51a866bdb7e4655abaf51a | ["MIT"] | 15 | 2020-03-26T07:31:49.000Z | 2022-03-09T02:12:17.000Z | python/app/plugins/http/Spring/CVE_2017_8046.py | taomujian/linbing | fe772a58f41e3b046b51a866bdb7e4655abaf51a | ["MIT"] | 99 | 2020-02-28T07:30:46.000Z | 2022-03-16T16:41:09.000Z |
#!/usr/bin/env python3
import json
from app.lib.utils.request import request
from app.lib.utils.common import get_useragent
class CVE_2017_8046_BaseVerify:
def __init__(self, url):
self.info = {
            'name': 'CVE-2017-8046 vulnerability',
            'description': 'The CVE-2017-8046 vulnerability allows arbitrary command execution. The command executed here is /usr/bin/touch ./test.jsp, converted (via the XiaoKui tool) to the ASCII byte sequence 47,117,115,114,47,98,105,110,47,116,111,117,99,104,32,46,47,116,101,115,116,46,106,115,112. Affected versions: Spring Data REST versions prior to 2.6.9 (Ingalls SR9), versions prior to 3.0.1 (Kay SR1)',
'date': '2017-04-21',
'exptype': 'check',
'type': 'RCE'
}
self.url = url
if not self.url.startswith("http") and not self.url.startswith("https"):
self.url = "http://" + self.url
self.headers1 = {
"User-Agent": get_useragent(),
"Content-Type": "application/json",
"Cache-Control": "no-cache"
}
self.headers2 = {
"User-Agent": get_useragent(),
"Content-Type": "application/json-patch+json",
"Cache-Control": "no-cache"
}
self.data1 = {
"firstName": "VulApps",
"lastName": "VulApps"
}
self.data2 = [{ "op": "replace", "path": "T(java.lang.Runtime).getRuntime().exec(new java.lang.String(new byte[]{47,117,115,114,47,98,105,110,47,116,111,117,99,104,32,46,47,116,101,115,116,46,106,115,112}))/lastName", "value": "vulapps-demo" }]
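        # The JSON PATCH "path" above embeds a SpEL expression; vulnerable
        # Spring Data REST servers evaluate it, running Runtime.exec on a
        # byte array that decodes to "/usr/bin/touch ./test.jsp".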
def check(self):
"""
        Check whether the vulnerability exists
        :param:
        :return bool True or False: whether the vulnerability exists
"""
try:
response1 = request.post(self.url + '/customers', headers = self.headers1, data = json.dumps(self.data1))
response2 = request.patch(self.url + '/customers/1', headers = self.headers2, data = json.dumps(self.data2))
content2 = response2.text
if 'maybe not public' in content2:
return True
else:
return False
except Exception as e:
print(e)
return False
finally:
pass
if __name__ == '__main__':
CVE_2017_8046 = CVE_2017_8046_BaseVerify('http://192.168.30.242:8086')
    CVE_2017_8046.check()
| 37.491803 | 285 | 0.561434 |
f70e9c0b8bac3d670748990df3ff0e02f0a7f8ad | 15,619 | py | Python | zerver/lib/cache.py | dehnert/zulip | f5935e81c7cf2f11ff4ccfcd31d2a1061b8d7ff5 | ["Apache-2.0"] | null | null | null | zerver/lib/cache.py | dehnert/zulip | f5935e81c7cf2f11ff4ccfcd31d2a1061b8d7ff5 | ["Apache-2.0"] | null | null | null | zerver/lib/cache.py | dehnert/zulip | f5935e81c7cf2f11ff4ccfcd31d2a1061b8d7ff5 | ["Apache-2.0"] | null | null | null |
from __future__ import absolute_import
from __future__ import print_function
from functools import wraps
from django.core.cache import cache as djcache
from django.core.cache import caches
from django.conf import settings
from django.db.models import Q
from django.core.cache.backends.base import BaseCache
from typing import Any, Callable, Dict, Iterable, List, Optional, Union, TypeVar
from zerver.lib.utils import statsd, statsd_key, make_safe_digest
import subprocess
import time
import base64
import random
import sys
import os
import os.path
import hashlib
import six
from six import text_type
if False:
from zerver.models import UserProfile, Realm, Message
# These modules have to be imported for type annotations but
# they cannot be imported at runtime due to cyclic dependency.
FuncT = TypeVar('FuncT', bound=Callable[..., Any])
remote_cache_time_start = 0.0
remote_cache_total_time = 0.0
remote_cache_total_requests = 0
def get_remote_cache_time():
# type: () -> float
return remote_cache_total_time
def get_remote_cache_requests():
# type: () -> int
return remote_cache_total_requests
def remote_cache_stats_start():
# type: () -> None
global remote_cache_time_start
remote_cache_time_start = time.time()
def remote_cache_stats_finish():
# type: () -> None
global remote_cache_total_time
global remote_cache_total_requests
global remote_cache_time_start
remote_cache_total_requests += 1
remote_cache_total_time += (time.time() - remote_cache_time_start)
def get_or_create_key_prefix():
# type: () -> text_type
if settings.TEST_SUITE:
# This sets the prefix mostly for the benefit of the JS tests.
# The Python tests overwrite KEY_PREFIX on each test.
return u'test_suite:%s:' % (text_type(os.getpid()),)
# directory `var` should exist in production
subprocess.check_call(["mkdir", "-p", os.path.join(settings.DEPLOY_ROOT, "var")])
filename = os.path.join(settings.DEPLOY_ROOT, "var", "remote_cache_prefix")
try:
fd = os.open(filename, os.O_CREAT | os.O_EXCL | os.O_RDWR, 0o444)
random_hash = hashlib.sha256(text_type(random.getrandbits(256)).encode('utf-8')).digest()
prefix = base64.b16encode(random_hash)[:32].decode('utf-8').lower() + ':'
# This does close the underlying file
with os.fdopen(fd, 'w') as f:
f.write(prefix + "\n")
except OSError:
# The file already exists
tries = 1
while tries < 10:
with open(filename, 'r') as f:
prefix = f.readline()[:-1]
if len(prefix) == 33:
break
tries += 1
prefix = ''
time.sleep(0.5)
if not prefix:
print("Could not read remote cache key prefix file")
sys.exit(1)
return prefix
KEY_PREFIX = get_or_create_key_prefix() # type: text_type
def bounce_key_prefix_for_testing(test_name):
# type: (text_type) -> None
global KEY_PREFIX
KEY_PREFIX = test_name + u':' + text_type(os.getpid()) + u':'
def get_cache_backend(cache_name):
# type: (Optional[str]) -> BaseCache
if cache_name is None:
return djcache
return caches[cache_name]
def cache_with_key(keyfunc, cache_name=None, timeout=None, with_statsd_key=None):
# type: (Any, Optional[str], Optional[int], Optional[str]) -> Any
# This function can't be typed perfectly because returning a generic function
# isn't supported in mypy - https://github.com/python/mypy/issues/1551.
"""Decorator which applies Django caching to a function.
Decorator argument is a function which computes a cache key
from the original function's arguments. You are responsible
for avoiding collisions with other uses of this decorator or
other uses of caching."""
def decorator(func):
# type: (Callable[..., Any]) -> (Callable[..., Any])
@wraps(func)
def func_with_caching(*args, **kwargs):
# type: (*Any, **Any) -> Callable[..., Any]
key = keyfunc(*args, **kwargs)
val = cache_get(key, cache_name=cache_name)
extra = ""
if cache_name == 'database':
extra = ".dbcache"
if with_statsd_key is not None:
metric_key = with_statsd_key
else:
metric_key = statsd_key(key)
status = "hit" if val is not None else "miss"
statsd.incr("cache%s.%s.%s" % (extra, metric_key, status))
# Values are singleton tuples so that we can distinguish
# a result of None from a missing key.
if val is not None:
return val[0]
val = func(*args, **kwargs)
cache_set(key, val, cache_name=cache_name, timeout=timeout)
return val
return func_with_caching
return decorator
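# Illustrative sketch, not part of the original module: a typical application
# of cache_with_key. The key function and cached function are hypothetical.
#
# @cache_with_key(lambda user_id: u"example_settings:%s" % (user_id,),
#                 timeout=3600)
# def get_example_settings(user_id):
#     # type: (int) -> Dict[text_type, Any]
#     return expensive_lookup(user_id)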
def cache_set(key, val, cache_name=None, timeout=None):
# type: (text_type, Any, Optional[str], Optional[int]) -> None
remote_cache_stats_start()
cache_backend = get_cache_backend(cache_name)
cache_backend.set(KEY_PREFIX + key, (val,), timeout=timeout)
remote_cache_stats_finish()
def cache_get(key, cache_name=None):
# type: (text_type, Optional[str]) -> Any
remote_cache_stats_start()
cache_backend = get_cache_backend(cache_name)
ret = cache_backend.get(KEY_PREFIX + key)
remote_cache_stats_finish()
return ret
def cache_get_many(keys, cache_name=None):
# type: (List[text_type], Optional[str]) -> Dict[text_type, Any]
keys = [KEY_PREFIX + key for key in keys]
remote_cache_stats_start()
ret = get_cache_backend(cache_name).get_many(keys)
remote_cache_stats_finish()
return dict([(key[len(KEY_PREFIX):], value) for key, value in ret.items()])
def cache_set_many(items, cache_name=None, timeout=None):
# type: (Dict[text_type, Any], Optional[str], Optional[int]) -> None
new_items = {}
for key in items:
new_items[KEY_PREFIX + key] = items[key]
items = new_items
remote_cache_stats_start()
get_cache_backend(cache_name).set_many(items, timeout=timeout)
remote_cache_stats_finish()
def cache_delete(key, cache_name=None):
# type: (text_type, Optional[str]) -> None
remote_cache_stats_start()
get_cache_backend(cache_name).delete(KEY_PREFIX + key)
remote_cache_stats_finish()
def cache_delete_many(items, cache_name=None):
# type: (Iterable[text_type], Optional[str]) -> None
remote_cache_stats_start()
get_cache_backend(cache_name).delete_many(
KEY_PREFIX + item for item in items)
remote_cache_stats_finish()
# Required Arguments are as follows:
# * object_ids: The list of object ids to look up
# * cache_key_function: object_id => cache key
# * query_function: [object_ids] => [objects from database]
# Optional keyword arguments:
# * setter: Function to call before storing items to cache (e.g. compression)
# * extractor: Function to call on items returned from cache
# (e.g. decompression). Should be the inverse of the setter
# function.
# * id_fetcher: Function mapping an object from database => object_id
# (in case we're using a key more complex than obj.id)
# * cache_transformer: Function mapping an object from database =>
# value for cache (in case the values that we're caching are some
# function of the objects, not the objects themselves)
ObjKT = TypeVar('ObjKT', int, text_type)
ItemT = Any # https://github.com/python/mypy/issues/1721
CompressedItemT = Any # https://github.com/python/mypy/issues/1721
def generic_bulk_cached_fetch(cache_key_function, # type: Callable[[ObjKT], text_type]
query_function, # type: Callable[[List[ObjKT]], Iterable[Any]]
object_ids, # type: Iterable[ObjKT]
extractor=lambda obj: obj, # type: Callable[[CompressedItemT], ItemT]
setter=lambda obj: obj, # type: Callable[[ItemT], CompressedItemT]
id_fetcher=lambda obj: obj.id, # type: Callable[[Any], ObjKT]
cache_transformer=lambda obj: obj # type: Callable[[Any], ItemT]
):
# type: (...) -> Dict[ObjKT, Any]
cache_keys = {} # type: Dict[ObjKT, text_type]
for object_id in object_ids:
cache_keys[object_id] = cache_key_function(object_id)
cached_objects = cache_get_many([cache_keys[object_id]
for object_id in object_ids])
for (key, val) in cached_objects.items():
cached_objects[key] = extractor(cached_objects[key][0])
needed_ids = [object_id for object_id in object_ids if
cache_keys[object_id] not in cached_objects]
db_objects = query_function(needed_ids)
items_for_remote_cache = {} # type: Dict[text_type, Any]
for obj in db_objects:
key = cache_keys[id_fetcher(obj)]
item = cache_transformer(obj)
items_for_remote_cache[key] = (setter(item),)
cached_objects[key] = item
if len(items_for_remote_cache) > 0:
cache_set_many(items_for_remote_cache)
return dict((object_id, cached_objects[cache_keys[object_id]]) for object_id in object_ids
if cache_keys[object_id] in cached_objects)
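# Illustrative sketch, not part of the original module: bulk-fetching user
# profiles by id with the helper above (the query_function is hypothetical).
#
# profiles = generic_bulk_cached_fetch(
#     cache_key_function=user_profile_by_id_cache_key,
#     query_function=lambda ids: UserProfile.objects.filter(id__in=ids),
#     object_ids=[1, 2, 3])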
def cache(func):
# type: (FuncT) -> FuncT
"""Decorator which applies Django caching to a function.
Uses a key based on the function's name, filename, and
the repr() of its arguments."""
func_uniqifier = '%s-%s' % (func.__code__.co_filename, func.__name__) # type: ignore # https://github.com/python/mypy/issues/1923
@wraps(func)
def keyfunc(*args, **kwargs):
# type: (*Any, **Any) -> str
# Django complains about spaces because memcached rejects them
key = func_uniqifier + repr((args, kwargs))
return key.replace('-', '--').replace(' ', '-s')
return cache_with_key(keyfunc)(func)
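# Hedged sketch: `cache` derives the key automatically from the function's
# name, file, and arguments, e.g.
#
# @cache
# def fib(n):
#     return n if n < 2 else fib(n - 1) + fib(n - 2)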
def display_recipient_cache_key(recipient_id):
# type: (int) -> text_type
return u"display_recipient_dict:%d" % (recipient_id,)
def user_profile_by_email_cache_key(email):
# type: (text_type) -> text_type
# See the comment in zerver/lib/avatar_hash.py:gravatar_hash for why we
# are proactively encoding email addresses even though they will
# with high likelihood be ASCII-only for the foreseeable future.
return u'user_profile_by_email:%s' % (make_safe_digest(email.strip()),)
def user_profile_by_id_cache_key(user_profile_id):
# type: (int) -> text_type
return u"user_profile_by_id:%s" % (user_profile_id,)
# TODO: Refactor these cache helpers into another file that can import
# models.py so that python3-style type annotations can also work.
def cache_save_user_profile(user_profile):
# type: (UserProfile) -> None
cache_set(user_profile_by_id_cache_key(user_profile.id), user_profile, timeout=3600*24*7)
active_user_dict_fields = ['id', 'full_name', 'short_name', 'email', 'is_realm_admin', 'is_bot'] # type: List[str]
def active_user_dicts_in_realm_cache_key(realm):
# type: (Realm) -> text_type
return u"active_user_dicts_in_realm:%s" % (realm.id,)
active_bot_dict_fields = ['id', 'full_name', 'short_name',
'email', 'default_sending_stream__name',
'default_events_register_stream__name',
'default_all_public_streams', 'api_key',
'bot_owner__email', 'avatar_source'] # type: List[str]
def active_bot_dicts_in_realm_cache_key(realm):
# type: (Realm) -> text_type
return u"active_bot_dicts_in_realm:%s" % (realm.id,)
def get_stream_cache_key(stream_name, realm):
# type: (text_type, Union[Realm, int]) -> text_type
from zerver.models import Realm
if isinstance(realm, Realm):
realm_id = realm.id
else:
realm_id = realm
return u"stream_by_realm_and_name:%s:%s" % (
realm_id, make_safe_digest(stream_name.strip().lower()))
def delete_user_profile_caches(user_profiles):
# type: (Iterable[UserProfile]) -> None
keys = []
for user_profile in user_profiles:
keys.append(user_profile_by_email_cache_key(user_profile.email))
keys.append(user_profile_by_id_cache_key(user_profile.id))
cache_delete_many(keys)
# Called by models.py to flush the user_profile cache whenever we save
# a user_profile object
def flush_user_profile(sender, **kwargs):
# type: (Any, **Any) -> None
user_profile = kwargs['instance']
delete_user_profile_caches([user_profile])
# Invalidate our active_users_in_realm info dict if any user has changed
# the fields in the dict or become (in)active
if kwargs.get('update_fields') is None or \
len(set(active_user_dict_fields + ['is_active']) & set(kwargs['update_fields'])) > 0:
cache_delete(active_user_dicts_in_realm_cache_key(user_profile.realm))
# Invalidate our active_bots_in_realm info dict if any bot has
# changed the fields in the dict or become (in)active
if user_profile.is_bot and (kwargs['update_fields'] is None or
(set(active_bot_dict_fields + ['is_active']) &
set(kwargs['update_fields']))):
cache_delete(active_bot_dicts_in_realm_cache_key(user_profile.realm))
# Invalidate realm-wide alert words cache if any user in the realm has changed
# alert words
if kwargs.get('update_fields') is None or "alert_words" in kwargs['update_fields']:
cache_delete(realm_alert_words_cache_key(user_profile.realm))
# Called by models.py to flush various caches whenever we save
# a Realm object. The main tricky thing here is that Realm info is
# generally cached indirectly through user_profile objects.
def flush_realm(sender, **kwargs):
# type: (Any, **Any) -> None
realm = kwargs['instance']
users = realm.get_active_users()
delete_user_profile_caches(users)
if realm.deactivated:
cache_delete(active_user_dicts_in_realm_cache_key(realm))
cache_delete(active_bot_dicts_in_realm_cache_key(realm))
cache_delete(realm_alert_words_cache_key(realm))
def realm_alert_words_cache_key(realm):
# type: (Realm) -> text_type
return u"realm_alert_words:%s" % (realm.domain,)
# Called by models.py to flush the stream cache whenever we save a stream
# object.
def flush_stream(sender, **kwargs):
# type: (Any, **Any) -> None
from zerver.models import UserProfile
stream = kwargs['instance']
items_for_remote_cache = {}
items_for_remote_cache[get_stream_cache_key(stream.name, stream.realm)] = (stream,)
cache_set_many(items_for_remote_cache)
if kwargs.get('update_fields') is None or 'name' in kwargs['update_fields'] and \
UserProfile.objects.filter(
Q(default_sending_stream=stream) |
Q(default_events_register_stream=stream)
).exists():
cache_delete(active_bot_dicts_in_realm_cache_key(stream.realm))
# TODO: Rename to_dict_cache_key_id and to_dict_cache_key
def to_dict_cache_key_id(message_id, apply_markdown):
# type: (int, bool) -> text_type
return u'message_dict:%d:%d' % (message_id, apply_markdown)
def to_dict_cache_key(message, apply_markdown):
# type: (Message, bool) -> text_type
return to_dict_cache_key_id(message.id, apply_markdown)
def flush_message(sender, **kwargs):
# type: (Any, **Any) -> None
message = kwargs['instance']
cache_delete(to_dict_cache_key(message, False))
cache_delete(to_dict_cache_key(message, True))
| 39.541772 | 133 | 0.681414 |
f70ea910deb64c851f94887e577c45916aab7cf2 | 12,652 | py | Python | doc/conf.py | gitter-badger/SoCo | 65977466057748ea522a6d8b7f2a649091485a07 | ["MIT"] | 1 | 2019-03-09T14:23:48.000Z | 2019-03-09T14:23:48.000Z | doc/conf.py | gitter-badger/SoCo | 65977466057748ea522a6d8b7f2a649091485a07 | ["MIT"] | null | null | null | doc/conf.py | gitter-badger/SoCo | 65977466057748ea522a6d8b7f2a649091485a07 | ["MIT"] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# soco documentation build configuration file, created by
# sphinx-quickstart on Mon Sep 14 08:03:37 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
import shlex
sys.path.insert(0, os.path.abspath('..'))
import soco
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
needs_sphinx = '1.3'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.extlinks',
'sphinx.ext.inheritance_diagram',
'sphinx.ext.intersphinx',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
'sphinx.ext.napoleon',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'SoCo'
copyright = '2015, The SoCo Team'
author = 'The SoCo Team'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = soco.__version__
# The full version, including alpha/beta/rc tags.
release = soco.__version__
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
default_role = 'any'
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
modindex_common_prefix = ['soco.', 'soco.music_services.']
# If true, keep warnings as "system message" paragraphs in the built documents.
keep_warnings = True
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# Allow auto links into the Python and Requests docs
intersphinx_mapping = {
'python': ('https://docs.python.org/3', None),
'requests': ('http://www.python-requests.org/en/latest/', None)
}
# Shortcuts to Github Issues etc. Use them like this:
# :issue:`123` (which will generate a link to issue 123)
extlinks = {
'issue': ('https://github.com/SoCo/SoCo/issues/%s', '#'),
'PR': ('https://github.com/SoCo/SoCo/pull/%s', '#')
}
# Document members by default, and in source order. This allows the stub files
# in the api directory to be much shorter.
autodoc_default_flags = ['members']
autodoc_member_order = 'bysource'
# Concatenate the class and __init__ docstrings
autoclass_content = 'both'
# Nicer inheritance graphs for RTD theme. NB the image map does not rescale
# properly, so we have had to add some javascript to handle it. See
# _templates and _static
inheritance_node_attrs = dict(
fontsize=14, height=0.75, color='dodgerblue', style='rounded',
)
inheritance_graph_attrs = dict(
rankdir="LR", size='""',
)
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Language to be used for generating the HTML full-text search index.
# Sphinx supports the following languages:
# 'da', 'de', 'en', 'es', 'fi', 'fr', 'h', 'it', 'ja'
# 'nl', 'no', 'pt', 'ro', 'r', 'sv', 'tr'
# html_search_language = 'en'
# A dictionary with options for the search language support, empty by default.
# Now only 'ja' uses this config value
# html_search_options = {'type': 'default'}
# The name of a javascript file (relative to the configuration directory) that
# implements a search results scorer. If empty, the default will be used.
# html_search_scorer = 'scorer.js'
# Output file base name for HTML help builder.
htmlhelp_basename = 'socodoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
# 'preamble': '',
# Latex figure (float) alignment
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'soco.tex', 'soco Documentation',
'Author', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
# latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
# latex_use_parts = False
# If true, show page references after internal links.
# latex_show_pagerefs = False
# If true, show URL addresses after external links.
# latex_show_urls = False
# Documents to append as an appendix to all manuals.
# latex_appendices = []
# If false, no module index is generated.
# latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'soco', 'soco Documentation',
[author], 1)
]
# If true, show URL addresses after external links.
# man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'soco', 'soco Documentation',
author, 'soco', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
# texinfo_appendices = []
# If false, no module index is generated.
# texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
# texinfo_no_detailmenu = False
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The basename for the epub file. It defaults to the project name.
# epub_basename = project
# The HTML theme for the epub output. Since the default themes are not
# optimized for small screen space, using the same theme for HTML and epub
# output is usually not wise. This defaults to 'epub', a theme designed to
# save visual space.
# epub_theme = 'epub'
# The language of the text. It defaults to the language option
# or 'en' if the language is not set.
# epub_language = ''
# The scheme of the identifier. Typical schemes are ISBN or URL.
# epub_scheme = ''
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
# epub_identifier = ''
# A unique identification for the text.
# epub_uid = ''
# A tuple containing the cover image and cover page html template filenames.
# epub_cover = ()
# A sequence of (type, uri, title) tuples for the guide element of content.opf.
# epub_guide = ()
# HTML files that should be inserted before the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_pre_files = []
# HTML files shat should be inserted after the pages created by sphinx.
# The format is a list of tuples containing the path and title.
# epub_post_files = []
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# The depth of the table of contents in toc.ncx.
# epub_tocdepth = 3
# Allow duplicate toc entries.
# epub_tocdup = True
# Choose between 'default' and 'includehidden'.
# epub_tocscope = 'default'
# Fix unsupported image types using the Pillow.
# epub_fix_images = False
# Scale large images.
# epub_max_image_width = 0
# How to display URL addresses: 'footnote', 'no', or 'inline'.
# epub_show_urls = 'inline'
# If false, no index is generated.
# epub_use_index = True
| 31.788945 | 79 | 0.710639 |
f70eb1feadb7d4ea843f887e4935a9b29163f734 | 1,071 | py | Python | python/src/aoc/year2015/day6.py | ocirne/adventofcode | ea9b5f1b48a04284521e85c96b420ed54adf55f0 | ["Unlicense"] | 1 | 2021-02-16T21:30:04.000Z | 2021-02-16T21:30:04.000Z | python/src/aoc/year2015/day6.py | ocirne/adventofcode | ea9b5f1b48a04284521e85c96b420ed54adf55f0 | ["Unlicense"] | null | null | null | python/src/aoc/year2015/day6.py | ocirne/adventofcode | ea9b5f1b48a04284521e85c96b420ed54adf55f0 | ["Unlicense"] | null | null | null |
from collections import defaultdict

from aoc.util import load_input


def turn(d, fun, sxy, exy):
    # Apply `fun` to every light in the inclusive rectangle spanned by
    # the corner coordinates sxy ("x,y") and exy ("x,y").
    sx, sy = map(int, sxy.split(","))
    ex, ey = map(int, exy.split(","))
    for x in range(sx, ex + 1):
        for y in range(sy, ey + 1):
            d[(x, y)] = fun(d[(x, y)])


def run(data, toggle, turn_on, turn_off):
    grid = defaultdict(lambda: 0)
    for line in data:
        token = line.split()
        if line.startswith("toggle"):
            turn(grid, toggle, token[1], token[3])
        elif line.startswith("turn on"):
            turn(grid, turn_on, token[2], token[4])
        elif line.startswith("turn off"):
            turn(grid, turn_off, token[2], token[4])
        else:
            raise Exception
    return sum(grid.values())


def part1(lines):
    # Part 1: boolean lights -- toggle flips, "turn on"/"turn off" set the state.
    return run(lines, lambda v: not v, lambda _: True, lambda _: False)


def part2(lines):
    # Part 2: brightness -- toggle adds 2, "turn on" adds 1,
    # "turn off" subtracts 1 but never drops below zero.
    return run(lines, lambda x: x + 2, lambda x: x + 1, lambda x: max(0, x - 1))


if __name__ == "__main__":
    data = load_input(__file__, 2015, "6")
    print(part1(data))
    print(part2(data))
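
# Editor's sketch: sample instruction lines this parser understands (the
# coordinate values are illustrative, not from the original file):
#
#   turn on 0,0 through 999,999
#   toggle 0,0 through 999,0
#   turn off 499,499 through 500,500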
| 26.121951 | 80 | 0.573296 |
f70eb2f3401ca84927cd6ad2318eeb8439e46c72 | 2,814 | py | Python | core/queue/views.py | lottspot/prevention-point | e4d5eaa437c3e979e8585bdada4efd33e995e39e | ["MIT"] | 35 | 2019-03-12T23:59:10.000Z | 2021-04-05T15:07:38.000Z | core/queue/views.py | lottspot/prevention-point | e4d5eaa437c3e979e8585bdada4efd33e995e39e | ["MIT"] | 365 | 2019-03-12T23:40:39.000Z | 2022-02-10T11:07:26.000Z | core/queue/views.py | lottspot/prevention-point | e4d5eaa437c3e979e8585bdada4efd33e995e39e | ["MIT"] | 20 | 2019-03-12T23:36:25.000Z | 2021-12-30T00:05:42.000Z |
import datetime

from rest_framework import viewsets, status
from rest_framework.response import Response
from rest_framework.permissions import IsAuthenticated
from core.permissions import DjangoModelPermissions
from core.visits.serializer import PopulatedVisitSerializer
from core.models import Visit, FrontDeskEvent, FrontDeskEventType
from core.front_desk_events.serializer import FrontDeskEventForQueueSerializer
from django.contrib.auth.models import User


class QueueViewSet(viewsets.ViewSet):
    """
    API endpoint that displays the queue.
    Uses a regular ViewSet to be able to display adjacent model responses in
    one view, hence the permission classes being repeated here instead of
    using the viewsets.py prototype.
    """

    # DjangoModelPermissions requires a queryset to function;
    # the next line is what the docs suggest as a 'sentinel queryset'
    queryset = FrontDeskEvent.objects.none()
    permission_classes = [DjangoModelPermissions, IsAuthenticated]

    def retrieve(self, request, program_id=None):
        """
        Retrieve the most recent front desk event for each
        visit that is happening today, filtered by program.
        """
        # filter by visits that are happening today in a certain program
        visits_queryset = (
            Visit.objects.select_related("participant", "program")
            .filter(
                program=program_id,
                created_at__date=datetime.date.today(),
            )
            .order_by("urgency", "-created_at")
        )
        todays_visit_data = PopulatedVisitSerializer(
            visits_queryset, many=True, context={"request": request}
        ).data
        active_visits_queue = []
        front_desk_events = FrontDeskEvent.objects.select_related("visit").filter(
            visit__in=[dict(x)["id"] for x in todays_visit_data]
        ).order_by("-created_at").values("id", "visit", "event_type", "created_at")
        # for each visit, get the most recent front desk event, to glean the
        # current visit status
        for visit in todays_visit_data:
            events = list(
                # value equality (`==`), not identity (`is`): comparing ints
                # with `is` only works by accident for small interned values
                filter(lambda x: x.get("visit") == visit.get("id"), front_desk_events)
            )
            if events:
                event = events[0]
                event_type = event.get("event_type")
                if event_type in [
                    FrontDeskEventType.ARRIVED.name,
                    FrontDeskEventType.STEPPED_OUT.name,
                    FrontDeskEventType.CAME_BACK.name,
                ]:
                    # if the most recent front desk event is an 'active'
                    # status, add it to the visit object
                    visit["status"] = event
                    # then add it to the 'active visits queue'
                    active_visits_queue.append(visit)
        return Response(active_visits_queue)
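

# --- Editor's sketch, not part of the original module ---
# A ViewSet whose `retrieve` takes a custom `program_id` kwarg is typically
# wired up with an explicit URL pattern rather than a router; the path name
# below is an assumption for illustration only.
#
# from django.urls import path
# queue_detail = QueueViewSet.as_view({"get": "retrieve"})
# urlpatterns = [path("queue/<int:program_id>/", queue_detail, name="queue-detail")]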
| 40.2 | 98 | 0.657783 |
f70eba7ac72db4241c482950c5d46e65d867d233 | 2,161 | py | Python | apps/erms/api.py | remocrevo/celus | 682b13168eb475d7f970502113e756e40a899877 | ["MIT"] | 7 | 2020-02-20T13:24:40.000Z | 2022-01-28T19:36:04.000Z | apps/erms/api.py | remocrevo/celus | 682b13168eb475d7f970502113e756e40a899877 | ["MIT"] | 15 | 2020-04-28T13:09:02.000Z | 2021-11-03T15:21:24.000Z | apps/erms/api.py | remocrevo/celus | 682b13168eb475d7f970502113e756e40a899877 | ["MIT"] | 4 | 2020-02-20T13:48:30.000Z | 2021-03-19T00:33:34.000Z |
import urllib.parse

import requests


class ERMSError(Exception):
    pass


class ERMS(object):
    """
    Possible queries:
    /object?id=eq.574
    /object?id=in.(574,575)
    """

    # endpoints
    EP_OBJECT = 'object'
    EP_IDENTITY = 'identity'
    EP_CONSORTIUM = 'consortium'
    EP_CONSORTIUM_MEMBER = 'consortium_member'
    EP_ACQUISITION = 'acquisition'
    EP_PROCUREMENT = 'procurement'
    EP_OFFER = 'offer'
    EP_OFFER_SPLIT = 'offer_split'

    # object classes
    CLS_PERSON = 'Person'
    CLS_ORGANIZATION = 'Organization'
    CLS_PLATFORM = 'Platform'

    def __init__(self, base_url="https://erms.czechelib.cz/api/"):
        self.base_url = base_url.rstrip('/')
        self.session = requests.Session()

    @classmethod
    def _construct_query_string(cls, value):
        if type(value) in (list, tuple, set):
            return 'in.({})'.format(','.join(str(_id) for _id in value))
        return f'eq.{value}'

    def construct_object_url(self, cls=None, object_id=None):
        params = {}
        if cls:
            params['class'] = self._construct_query_string(cls)
        if object_id:
            params['id'] = self._construct_query_string(object_id)
        else:
            params['order'] = 'id'
        query = urllib.parse.urlencode(params)
        return f'{self.base_url}/{self.EP_OBJECT}?{query}'

    def fetch_url(self, url):
        response = self.session.get(url)
        if response.status_code == 200:
            return response.json()
        raise ERMSError(response)

    def fetch_objects(self, cls=None, object_id=None):
        url = self.construct_object_url(cls=cls, object_id=object_id)
        data = self.fetch_url(url)
        return data

    def fetch_endpoint(self, endpoint, object_id=None, **kwargs):
        url = f'{self.base_url}/{endpoint}'
        params = {}
        if object_id:
            params['id'] = self._construct_query_string(object_id)
        for key, value in kwargs.items():
            params[key] = self._construct_query_string(value)
        if params:
            url += '?{}'.format(urllib.parse.urlencode(params))
        return self.fetch_url(url)
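

if __name__ == "__main__":
    # Editor's sketch: a minimal smoke test. Building the URL needs no
    # network access; the object ids used here are illustrative assumptions.
    erms = ERMS()
    print(erms.construct_object_url(cls=ERMS.CLS_PLATFORM, object_id=[574, 575]))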
| 28.064935 | 72 | 0.61777 |
f70ec8116b154a5c5324c8498dcdda97090753ab | 9,785 | py | Python | habitat/tasks/nav/object_nav_task.py | Ram81/habitat-imitation-baselines | c6e11c8ebadbf1260e1bed58a5b8dfb7faf6a505 | ["MIT"] | null | null | null | habitat/tasks/nav/object_nav_task.py | Ram81/habitat-imitation-baselines | c6e11c8ebadbf1260e1bed58a5b8dfb7faf6a505 | ["MIT"] | null | null | null | habitat/tasks/nav/object_nav_task.py | Ram81/habitat-imitation-baselines | c6e11c8ebadbf1260e1bed58a5b8dfb7faf6a505 | ["MIT"] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from typing import Any, List, Optional
import attr
import numpy as np
from gym import spaces
from habitat.config import Config
from habitat.core.dataset import SceneState
from habitat.core.logging import logger
from habitat.core.registry import registry
from habitat.core.simulator import AgentState, Sensor, SensorTypes
from habitat.core.utils import not_none_validator
from habitat.tasks.nav.nav import (
NavigationEpisode,
NavigationGoal,
NavigationTask
)
try:
from habitat.datasets.object_nav.object_nav_dataset import (
ObjectNavDatasetV1,
)
except ImportError:
pass
task_cat2mpcat40 = [
3, # ('chair', 2, 0)
5, # ('table', 4, 1)
6, # ('picture', 5, 2)
7, # ('cabinet', 6, 3)
8, # ('cushion', 7, 4)
10, # ('sofa', 9, 5),
11, # ('bed', 10, 6)
13, # ('chest_of_drawers', 12, 7),
14, # ('plant', 13, 8)
15, # ('sink', 14, 9)
18, # ('toilet', 17, 10),
19, # ('stool', 18, 11),
20, # ('towel', 19, 12)
22, # ('tv_monitor', 21, 13)
23, # ('shower', 22, 14)
25, # ('bathtub', 24, 15)
26, # ('counter', 25, 16),
27, # ('fireplace', 26, 17),
33, # ('gym_equipment', 32, 18),
34, # ('seating', 33, 19),
38, # ('clothes', 37, 20),
43, # ('foodstuff', 42, 21),
44, # ('stationery', 43, 22),
45, # ('fruit', 44, 23),
46, # ('plaything', 45, 24),
47, # ('hand_tool', 46, 25),
48, # ('game_equipment', 47, 26),
49, # ('kitchenware', 48, 27)
]
mapping_mpcat40_to_goal21 = {
3: 1,
5: 2,
6: 3,
7: 4,
8: 5,
10: 6,
11: 7,
13: 8,
14: 9,
15: 10,
18: 11,
19: 12,
20: 13,
22: 14,
23: 15,
25: 16,
26: 17,
27: 18,
33: 19,
34: 20,
38: 21,
43: 22, # ('foodstuff', 42, task_cat: 21)
44: 28, # ('stationery', 43, task_cat: 22)
45: 26, # ('fruit', 44, task_cat: 23)
46: 25, # ('plaything', 45, task_cat: 24)
47: 24, # ('hand_tool', 46, task_cat: 25)
48: 23, # ('game_equipment', 47, task_cat: 26)
49: 27, # ('kitchenware', 48, task_cat: 27)
}
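

# Editor's sketch: how the two lookup tables above compose. Given one of the
# dataset's task-category indices (0..27), `task_cat2mpcat40` yields the
# mpcat40 id, and `mapping_mpcat40_to_goal21` maps that onto the goal-21
# label space. Illustrative only, not part of the original module:
#
# def task_cat_to_goal21(task_cat: int) -> int:
#     return mapping_mpcat40_to_goal21[task_cat2mpcat40[task_cat]]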
@attr.s(auto_attribs=True, kw_only=True)
class AgentStateSpec:
r"""Agent data specifications that capture states of agent and sensor in replay state.
"""
position: Optional[List[float]] = attr.ib(default=None)
rotation: Optional[List[float]] = attr.ib(default=None)
sensor_data: Optional[dict] = attr.ib(default=None)
@attr.s(auto_attribs=True, kw_only=True)
class ReplayActionSpec:
r"""Replay specifications that capture metadata associated with action.
"""
action: str = attr.ib(default=None, validator=not_none_validator)
agent_state: Optional[AgentStateSpec] = attr.ib(default=None)
@attr.s(auto_attribs=True, kw_only=True)
class ObjectGoalNavEpisode(NavigationEpisode):
r"""ObjectGoal Navigation Episode
    :param object_category: Category of the object
"""
object_category: Optional[str] = None
reference_replay: Optional[List[ReplayActionSpec]] = None
scene_state: Optional[List[SceneState]] = None
is_thda: Optional[bool] = False
scene_dataset: Optional[str] = "mp3d"
@property
def goals_key(self) -> str:
r"""The key to retrieve the goals"""
return f"{os.path.basename(self.scene_id)}_{self.object_category}"
@attr.s(auto_attribs=True)
class ObjectViewLocation:
    r"""ObjectViewLocation provides information about a position around an
    object goal that is navigable and from which the object is visible with
    the specific agent configuration the episode's dataset was created with.

    Args:
        agent_state: navigable AgentState with a position and a rotation where
            the object is visible.
        iou: intersection over union of the object and a rectangle in the
            center of the view, used to evaluate how good the object view is
            from the current position. A higher iou means a better view; iou
            equals 1.0 if the whole object is inside the rectangle and no
            pixel inside the rectangle belongs to anything except the object.
    """
agent_state: AgentState
iou: Optional[float]
@attr.s(auto_attribs=True, kw_only=True)
class ObjectGoal(NavigationGoal):
    r"""Object goal provides information about an object that is the target
    for navigation. It can specify object_id, position and object category.
    An important part for metrics calculation are the view points that
    describe the success area for the navigation.
Args:
object_id: id that can be used to retrieve object from the semantic
scene annotation
object_name: name of the object
object_category: object category name usually similar to scene semantic
categories
room_id: id of a room where object is located, can be used to retrieve
room from the semantic scene annotation
room_name: name of the room, where object is located
view_points: navigable positions around the object with specified
proximity of the object surface used for navigation metrics calculation.
The object is visible from these positions.
"""
object_id: str = attr.ib(default=None, validator=not_none_validator)
object_name: Optional[str] = None
object_name_id: Optional[int] = None
object_category: Optional[str] = None
room_id: Optional[str] = None
room_name: Optional[str] = None
view_points: Optional[List[ObjectViewLocation]] = None
@registry.register_sensor
class ObjectGoalSensor(Sensor):
r"""A sensor for Object Goal specification as observations which is used in
ObjectGoal Navigation. The goal is expected to be specified by object_id or
semantic category id.
For the agent in simulator the forward direction is along negative-z.
In polar coordinate format the angle returned is azimuth to the goal.
Args:
sim: a reference to the simulator for calculating task observations.
config: a config for the ObjectGoalSensor sensor. Can contain field
GOAL_SPEC that specifies which id use for goal specification,
GOAL_SPEC_MAX_VAL the maximum object_id possible used for
observation space definition.
dataset: a Object Goal navigation dataset that contains dictionaries
of categories id to text mapping.
"""
cls_uuid: str = "objectgoal"
def __init__(
self,
sim,
config: Config,
dataset: "ObjectNavDatasetV1",
*args: Any,
**kwargs: Any,
):
self._sim = sim
self._dataset = dataset
super().__init__(config=config)
def _get_uuid(self, *args: Any, **kwargs: Any) -> str:
return self.cls_uuid
def _get_sensor_type(self, *args: Any, **kwargs: Any):
return SensorTypes.SEMANTIC
def _get_observation_space(self, *args: Any, **kwargs: Any):
sensor_shape = (1,)
max_value = self.config.GOAL_SPEC_MAX_VAL - 1
if self.config.GOAL_SPEC == "TASK_CATEGORY_ID":
max_value = max(
self._dataset.category_to_task_category_id.values()
)
logger.info("max object cat: {}".format(max_value))
logger.info("cats: {}".format(self._dataset.category_to_task_category_id.values()))
return spaces.Box(
low=0, high=max_value, shape=sensor_shape, dtype=np.int64
)
def get_observation(
self,
observations,
*args: Any,
episode: ObjectGoalNavEpisode,
**kwargs: Any,
) -> Optional[int]:
if len(episode.goals) == 0:
logger.error(
f"No goal specified for episode {episode.episode_id}."
)
return None
if not isinstance(episode.goals[0], ObjectGoal):
logger.error(
f"First goal should be ObjectGoal, episode {episode.episode_id}."
)
return None
category_name = episode.object_category
if self.config.GOAL_SPEC == "TASK_CATEGORY_ID":
return np.array(
[self._dataset.category_to_task_category_id[category_name]],
dtype=np.int64,
)
elif self.config.GOAL_SPEC == "OBJECT_ID":
obj_goal = episode.goals[0]
assert isinstance(obj_goal, ObjectGoal) # for type checking
return np.array([obj_goal.object_name_id], dtype=np.int64)
else:
raise RuntimeError(
"Wrong GOAL_SPEC specified for ObjectGoalSensor."
)
@registry.register_task(name="ObjectNav-v1")
class ObjectNavigationTask(NavigationTask):
r"""An Object Navigation Task class for a task specific methods.
Used to explicitly state a type of the task in config.
"""
_is_episode_active: bool
_prev_action: int
def __init__(self, **kwargs) -> None:
super().__init__(**kwargs)
self._is_episode_active = False
def overwrite_sim_config(self, sim_config, episode):
super().overwrite_sim_config(sim_config, episode)
sim_config.defrost()
sim_config.scene_state = episode.scene_state
sim_config.freeze()
return sim_config
def _check_episode_is_active(self, action, *args: Any, **kwargs: Any) -> bool:
return not getattr(self, "is_stop_called", False)
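

# Editor's note (assumption about typical habitat-lab wiring, not stated in
# this file): the registrations above mean the task is selected from a config
# via `TASK.TYPE = "ObjectNav-v1"`, and the goal sensor by listing it under
# the task's sensors with GOAL_SPEC / GOAL_SPEC_MAX_VAL set on its config node.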
| 33.62543 | 95 | 0.6465 |
f70ef0ac0372c717352edce2b5da38e908ee6060 | 31,508 | py | Python | Keras_tensorflow/source/tensorflow/core/protobuf/config_pb2.py | Con-Mi/lambda-packs | b23a8464abdd88050b83310e1d0e99c54dac28ab | ["MIT"] | 60 | 2017-08-05T21:47:56.000Z | 2022-03-08T21:46:29.000Z | Keras_tensorflow/source/tensorflow/core/protobuf/config_pb2.py | Con-Mi/lambda-packs | b23a8464abdd88050b83310e1d0e99c54dac28ab | ["MIT"] | 1 | 2017-08-22T07:17:47.000Z | 2017-09-24T22:04:19.000Z | Keras_tensorflow/source/tensorflow/core/protobuf/config_pb2.py | Con-Mi/lambda-packs | b23a8464abdd88050b83310e1d0e99c54dac28ab | ["MIT"] | 11 | 2017-09-10T16:22:21.000Z | 2021-08-09T09:24:50.000Z |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/protobuf/config.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import cost_graph_pb2 as tensorflow_dot_core_dot_framework_dot_cost__graph__pb2
from tensorflow.core.framework import graph_pb2 as tensorflow_dot_core_dot_framework_dot_graph__pb2
from tensorflow.core.framework import step_stats_pb2 as tensorflow_dot_core_dot_framework_dot_step__stats__pb2
from tensorflow.core.protobuf import debug_pb2 as tensorflow_dot_core_dot_protobuf_dot_debug__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='tensorflow/core/protobuf/config.proto',
package='tensorflow',
syntax='proto3',
  serialized_pb=_b('\n%tensorflow/core/protobuf/config.proto\x12\ntensorflow\x1a*tensorflow/core/framework/cost_graph.proto\x1a%tensorflow/core/framework/graph.proto\x1a*tensorflow/core/framework/step_stats.proto\x1a$tensorflow/core/protobuf/debug.proto\"\xa1\x01\n\nGPUOptions\x12\'\n\x1fper_process_gpu_memory_fraction\x18\x01 \x01(\x01\x12\x16\n\x0e\x61llocator_type\x18\x02 \x01(\t\x12\x1f\n\x17\x64\x65\x66\x65rred_deletion_bytes\x18\x03 \x01(\x03\x12\x14\n\x0c\x61llow_growth\x18\x04 \x01(\x08\x12\x1b\n\x13visible_device_list\x18\x05 \x01(\t\"\xdf\x02\n\x10OptimizerOptions\x12+\n#do_common_subexpression_elimination\x18\x01 \x01(\x08\x12\x1b\n\x13\x64o_constant_folding\x18\x02 \x01(\x08\x12\x1c\n\x14\x64o_function_inlining\x18\x04 \x01(\x08\x12\x35\n\topt_level\x18\x03 \x01(\x0e\x32\".tensorflow.OptimizerOptions.Level\x12\x45\n\x10global_jit_level\x18\x05 \x01(\x0e\x32+.tensorflow.OptimizerOptions.GlobalJitLevel\" \n\x05Level\x12\x06\n\x02L1\x10\x00\x12\x0f\n\x02L0\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\"C\n\x0eGlobalJitLevel\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\x10\n\x03OFF\x10\xff\xff\xff\xff\xff\xff\xff\xff\xff\x01\x12\x08\n\x04ON_1\x10\x01\x12\x08\n\x04ON_2\x10\x02\"\xb9\x02\n\x0cGraphOptions\x12\x1e\n\x16\x65nable_recv_scheduling\x18\x02 \x01(\x08\x12\x37\n\x11optimizer_options\x18\x03 \x01(\x0b\x32\x1c.tensorflow.OptimizerOptions\x12\x18\n\x10\x62uild_cost_model\x18\x04 \x01(\x03\x12\x1e\n\x16\x62uild_cost_model_after\x18\t \x01(\x03\x12\x14\n\x0cinfer_shapes\x18\x05 \x01(\x08\x12\x1a\n\x12place_pruned_graph\x18\x06 \x01(\x08\x12 \n\x18\x65nable_bfloat16_sendrecv\x18\x07 \x01(\x08\x12\x15\n\rtimeline_step\x18\x08 \x01(\x05J\x04\x08\x01\x10\x02R%skip_common_subexpression_elimination\",\n\x15ThreadPoolOptionProto\x12\x13\n\x0bnum_threads\x18\x01 \x01(\x05\"2\n\nRPCOptions\x12$\n\x1cuse_rpc_for_inprocess_master\x18\x01 \x01(\x08\"\xd1\x04\n\x0b\x43onfigProto\x12>\n\x0c\x64\x65vice_count\x18\x01 \x03(\x0b\x32(.tensorflow.ConfigProto.DeviceCountEntry\x12$\n\x1cintra_op_parallelism_threads\x18\x02 \x01(\x05\x12$\n\x1cinter_op_parallelism_threads\x18\x05 \x01(\x05\x12\x1f\n\x17use_per_session_threads\x18\t \x01(\x08\x12G\n\x1csession_inter_op_thread_pool\x18\x0c \x03(\x0b\x32!.tensorflow.ThreadPoolOptionProto\x12\x18\n\x10placement_period\x18\x03 \x01(\x05\x12\x16\n\x0e\x64\x65vice_filters\x18\x04 \x03(\t\x12+\n\x0bgpu_options\x18\x06 \x01(\x0b\x32\x16.tensorflow.GPUOptions\x12\x1c\n\x14\x61llow_soft_placement\x18\x07 \x01(\x08\x12\x1c\n\x14log_device_placement\x18\x08 \x01(\x08\x12/\n\rgraph_options\x18\n \x01(\x0b\x32\x18.tensorflow.GraphOptions\x12\x1f\n\x17operation_timeout_in_ms\x18\x0b \x01(\x03\x12+\n\x0brpc_options\x18\r \x01(\x0b\x32\x16.tensorflow.RPCOptions\x1a\x32\n\x10\x44\x65viceCountEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x05:\x02\x38\x01\"\xa5\x02\n\nRunOptions\x12\x36\n\x0btrace_level\x18\x01 \x01(\x0e\x32!.tensorflow.RunOptions.TraceLevel\x12\x15\n\rtimeout_in_ms\x18\x02 \x01(\x03\x12\x1c\n\x14inter_op_thread_pool\x18\x03 \x01(\x05\x12\x1f\n\x17output_partition_graphs\x18\x05 \x01(\x08\x12/\n\rdebug_options\x18\x06 \x01(\x0b\x32\x18.tensorflow.DebugOptions\"R\n\nTraceLevel\x12\x0c\n\x08NO_TRACE\x10\x00\x12\x12\n\x0eSOFTWARE_TRACE\x10\x01\x12\x12\n\x0eHARDWARE_TRACE\x10\x02\x12\x0e\n\nFULL_TRACE\x10\x03J\x04\x08\x04\x10\x05\"\x96\x01\n\x0bRunMetadata\x12)\n\nstep_stats\x18\x01 \x01(\x0b\x32\x15.tensorflow.StepStats\x12,\n\ncost_graph\x18\x02 \x01(\x0b\x32\x18.tensorflow.CostGraphDef\x12.\n\x10partition_graphs\x18\x03 '
                   '\x03(\x0b\x32\x14.tensorflow.GraphDefB-\n\x18org.tensorflow.frameworkB\x0c\x43onfigProtosP\x01\xf8\x01\x01\x62\x06proto3')
,
dependencies=[tensorflow_dot_core_dot_framework_dot_cost__graph__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_graph__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_step__stats__pb2.DESCRIPTOR,tensorflow_dot_core_dot_protobuf_dot_debug__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_OPTIMIZEROPTIONS_LEVEL = _descriptor.EnumDescriptor(
name='Level',
full_name='tensorflow.OptimizerOptions.Level',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='L1', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='L0', index=1, number=-1,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=633,
serialized_end=665,
)
_sym_db.RegisterEnumDescriptor(_OPTIMIZEROPTIONS_LEVEL)
_OPTIMIZEROPTIONS_GLOBALJITLEVEL = _descriptor.EnumDescriptor(
name='GlobalJitLevel',
full_name='tensorflow.OptimizerOptions.GlobalJitLevel',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='DEFAULT', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='OFF', index=1, number=-1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ON_1', index=2, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ON_2', index=3, number=2,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=667,
serialized_end=734,
)
_sym_db.RegisterEnumDescriptor(_OPTIMIZEROPTIONS_GLOBALJITLEVEL)
_RUNOPTIONS_TRACELEVEL = _descriptor.EnumDescriptor(
name='TraceLevel',
full_name='tensorflow.RunOptions.TraceLevel',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='NO_TRACE', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SOFTWARE_TRACE', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='HARDWARE_TRACE', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='FULL_TRACE', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=1952,
serialized_end=2034,
)
_sym_db.RegisterEnumDescriptor(_RUNOPTIONS_TRACELEVEL)
_GPUOPTIONS = _descriptor.Descriptor(
name='GPUOptions',
full_name='tensorflow.GPUOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='per_process_gpu_memory_fraction', full_name='tensorflow.GPUOptions.per_process_gpu_memory_fraction', index=0,
number=1, type=1, cpp_type=5, label=1,
has_default_value=False, default_value=float(0),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allocator_type', full_name='tensorflow.GPUOptions.allocator_type', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='deferred_deletion_bytes', full_name='tensorflow.GPUOptions.deferred_deletion_bytes', index=2,
number=3, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allow_growth', full_name='tensorflow.GPUOptions.allow_growth', index=3,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='visible_device_list', full_name='tensorflow.GPUOptions.visible_device_list', index=4,
number=5, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=219,
serialized_end=380,
)
_OPTIMIZEROPTIONS = _descriptor.Descriptor(
name='OptimizerOptions',
full_name='tensorflow.OptimizerOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='do_common_subexpression_elimination', full_name='tensorflow.OptimizerOptions.do_common_subexpression_elimination', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='do_constant_folding', full_name='tensorflow.OptimizerOptions.do_constant_folding', index=1,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='do_function_inlining', full_name='tensorflow.OptimizerOptions.do_function_inlining', index=2,
number=4, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='opt_level', full_name='tensorflow.OptimizerOptions.opt_level', index=3,
number=3, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='global_jit_level', full_name='tensorflow.OptimizerOptions.global_jit_level', index=4,
number=5, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_OPTIMIZEROPTIONS_LEVEL,
_OPTIMIZEROPTIONS_GLOBALJITLEVEL,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=383,
serialized_end=734,
)
_GRAPHOPTIONS = _descriptor.Descriptor(
name='GraphOptions',
full_name='tensorflow.GraphOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='enable_recv_scheduling', full_name='tensorflow.GraphOptions.enable_recv_scheduling', index=0,
number=2, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='optimizer_options', full_name='tensorflow.GraphOptions.optimizer_options', index=1,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='build_cost_model', full_name='tensorflow.GraphOptions.build_cost_model', index=2,
number=4, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='build_cost_model_after', full_name='tensorflow.GraphOptions.build_cost_model_after', index=3,
number=9, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='infer_shapes', full_name='tensorflow.GraphOptions.infer_shapes', index=4,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='place_pruned_graph', full_name='tensorflow.GraphOptions.place_pruned_graph', index=5,
number=6, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='enable_bfloat16_sendrecv', full_name='tensorflow.GraphOptions.enable_bfloat16_sendrecv', index=6,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timeline_step', full_name='tensorflow.GraphOptions.timeline_step', index=7,
number=8, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=737,
serialized_end=1050,
)
_THREADPOOLOPTIONPROTO = _descriptor.Descriptor(
name='ThreadPoolOptionProto',
full_name='tensorflow.ThreadPoolOptionProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='num_threads', full_name='tensorflow.ThreadPoolOptionProto.num_threads', index=0,
number=1, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1052,
serialized_end=1096,
)
_RPCOPTIONS = _descriptor.Descriptor(
name='RPCOptions',
full_name='tensorflow.RPCOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='use_rpc_for_inprocess_master', full_name='tensorflow.RPCOptions.use_rpc_for_inprocess_master', index=0,
number=1, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1098,
serialized_end=1148,
)
_CONFIGPROTO_DEVICECOUNTENTRY = _descriptor.Descriptor(
name='DeviceCountEntry',
full_name='tensorflow.ConfigProto.DeviceCountEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='key', full_name='tensorflow.ConfigProto.DeviceCountEntry.key', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='value', full_name='tensorflow.ConfigProto.DeviceCountEntry.value', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=_descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001')),
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1694,
serialized_end=1744,
)
_CONFIGPROTO = _descriptor.Descriptor(
name='ConfigProto',
full_name='tensorflow.ConfigProto',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='device_count', full_name='tensorflow.ConfigProto.device_count', index=0,
number=1, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='intra_op_parallelism_threads', full_name='tensorflow.ConfigProto.intra_op_parallelism_threads', index=1,
number=2, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inter_op_parallelism_threads', full_name='tensorflow.ConfigProto.inter_op_parallelism_threads', index=2,
number=5, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='use_per_session_threads', full_name='tensorflow.ConfigProto.use_per_session_threads', index=3,
number=9, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='session_inter_op_thread_pool', full_name='tensorflow.ConfigProto.session_inter_op_thread_pool', index=4,
number=12, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='placement_period', full_name='tensorflow.ConfigProto.placement_period', index=5,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='device_filters', full_name='tensorflow.ConfigProto.device_filters', index=6,
number=4, type=9, cpp_type=9, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='gpu_options', full_name='tensorflow.ConfigProto.gpu_options', index=7,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='allow_soft_placement', full_name='tensorflow.ConfigProto.allow_soft_placement', index=8,
number=7, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='log_device_placement', full_name='tensorflow.ConfigProto.log_device_placement', index=9,
number=8, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='graph_options', full_name='tensorflow.ConfigProto.graph_options', index=10,
number=10, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='operation_timeout_in_ms', full_name='tensorflow.ConfigProto.operation_timeout_in_ms', index=11,
number=11, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='rpc_options', full_name='tensorflow.ConfigProto.rpc_options', index=12,
number=13, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[_CONFIGPROTO_DEVICECOUNTENTRY, ],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1151,
serialized_end=1744,
)
_RUNOPTIONS = _descriptor.Descriptor(
name='RunOptions',
full_name='tensorflow.RunOptions',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='trace_level', full_name='tensorflow.RunOptions.trace_level', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='timeout_in_ms', full_name='tensorflow.RunOptions.timeout_in_ms', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='inter_op_thread_pool', full_name='tensorflow.RunOptions.inter_op_thread_pool', index=2,
number=3, type=5, cpp_type=1, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='output_partition_graphs', full_name='tensorflow.RunOptions.output_partition_graphs', index=3,
number=5, type=8, cpp_type=7, label=1,
has_default_value=False, default_value=False,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='debug_options', full_name='tensorflow.RunOptions.debug_options', index=4,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_RUNOPTIONS_TRACELEVEL,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1747,
serialized_end=2040,
)
_RUNMETADATA = _descriptor.Descriptor(
name='RunMetadata',
full_name='tensorflow.RunMetadata',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='step_stats', full_name='tensorflow.RunMetadata.step_stats', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='cost_graph', full_name='tensorflow.RunMetadata.cost_graph', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='partition_graphs', full_name='tensorflow.RunMetadata.partition_graphs', index=2,
number=3, type=11, cpp_type=10, label=3,
has_default_value=False, default_value=[],
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2043,
serialized_end=2193,
)
_OPTIMIZEROPTIONS.fields_by_name['opt_level'].enum_type = _OPTIMIZEROPTIONS_LEVEL
_OPTIMIZEROPTIONS.fields_by_name['global_jit_level'].enum_type = _OPTIMIZEROPTIONS_GLOBALJITLEVEL
_OPTIMIZEROPTIONS_LEVEL.containing_type = _OPTIMIZEROPTIONS
_OPTIMIZEROPTIONS_GLOBALJITLEVEL.containing_type = _OPTIMIZEROPTIONS
_GRAPHOPTIONS.fields_by_name['optimizer_options'].message_type = _OPTIMIZEROPTIONS
_CONFIGPROTO_DEVICECOUNTENTRY.containing_type = _CONFIGPROTO
_CONFIGPROTO.fields_by_name['device_count'].message_type = _CONFIGPROTO_DEVICECOUNTENTRY
_CONFIGPROTO.fields_by_name['session_inter_op_thread_pool'].message_type = _THREADPOOLOPTIONPROTO
_CONFIGPROTO.fields_by_name['gpu_options'].message_type = _GPUOPTIONS
_CONFIGPROTO.fields_by_name['graph_options'].message_type = _GRAPHOPTIONS
_CONFIGPROTO.fields_by_name['rpc_options'].message_type = _RPCOPTIONS
_RUNOPTIONS.fields_by_name['trace_level'].enum_type = _RUNOPTIONS_TRACELEVEL
_RUNOPTIONS.fields_by_name['debug_options'].message_type = tensorflow_dot_core_dot_protobuf_dot_debug__pb2._DEBUGOPTIONS
_RUNOPTIONS_TRACELEVEL.containing_type = _RUNOPTIONS
_RUNMETADATA.fields_by_name['step_stats'].message_type = tensorflow_dot_core_dot_framework_dot_step__stats__pb2._STEPSTATS
_RUNMETADATA.fields_by_name['cost_graph'].message_type = tensorflow_dot_core_dot_framework_dot_cost__graph__pb2._COSTGRAPHDEF
_RUNMETADATA.fields_by_name['partition_graphs'].message_type = tensorflow_dot_core_dot_framework_dot_graph__pb2._GRAPHDEF
DESCRIPTOR.message_types_by_name['GPUOptions'] = _GPUOPTIONS
DESCRIPTOR.message_types_by_name['OptimizerOptions'] = _OPTIMIZEROPTIONS
DESCRIPTOR.message_types_by_name['GraphOptions'] = _GRAPHOPTIONS
DESCRIPTOR.message_types_by_name['ThreadPoolOptionProto'] = _THREADPOOLOPTIONPROTO
DESCRIPTOR.message_types_by_name['RPCOptions'] = _RPCOPTIONS
DESCRIPTOR.message_types_by_name['ConfigProto'] = _CONFIGPROTO
DESCRIPTOR.message_types_by_name['RunOptions'] = _RUNOPTIONS
DESCRIPTOR.message_types_by_name['RunMetadata'] = _RUNMETADATA
GPUOptions = _reflection.GeneratedProtocolMessageType('GPUOptions', (_message.Message,), dict(
DESCRIPTOR = _GPUOPTIONS,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GPUOptions)
))
_sym_db.RegisterMessage(GPUOptions)
OptimizerOptions = _reflection.GeneratedProtocolMessageType('OptimizerOptions', (_message.Message,), dict(
DESCRIPTOR = _OPTIMIZEROPTIONS,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.OptimizerOptions)
))
_sym_db.RegisterMessage(OptimizerOptions)
GraphOptions = _reflection.GeneratedProtocolMessageType('GraphOptions', (_message.Message,), dict(
DESCRIPTOR = _GRAPHOPTIONS,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.GraphOptions)
))
_sym_db.RegisterMessage(GraphOptions)
ThreadPoolOptionProto = _reflection.GeneratedProtocolMessageType('ThreadPoolOptionProto', (_message.Message,), dict(
DESCRIPTOR = _THREADPOOLOPTIONPROTO,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.ThreadPoolOptionProto)
))
_sym_db.RegisterMessage(ThreadPoolOptionProto)
RPCOptions = _reflection.GeneratedProtocolMessageType('RPCOptions', (_message.Message,), dict(
DESCRIPTOR = _RPCOPTIONS,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.RPCOptions)
))
_sym_db.RegisterMessage(RPCOptions)
ConfigProto = _reflection.GeneratedProtocolMessageType('ConfigProto', (_message.Message,), dict(
DeviceCountEntry = _reflection.GeneratedProtocolMessageType('DeviceCountEntry', (_message.Message,), dict(
DESCRIPTOR = _CONFIGPROTO_DEVICECOUNTENTRY,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.ConfigProto.DeviceCountEntry)
))
,
DESCRIPTOR = _CONFIGPROTO,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.ConfigProto)
))
_sym_db.RegisterMessage(ConfigProto)
_sym_db.RegisterMessage(ConfigProto.DeviceCountEntry)
RunOptions = _reflection.GeneratedProtocolMessageType('RunOptions', (_message.Message,), dict(
DESCRIPTOR = _RUNOPTIONS,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.RunOptions)
))
_sym_db.RegisterMessage(RunOptions)
RunMetadata = _reflection.GeneratedProtocolMessageType('RunMetadata', (_message.Message,), dict(
DESCRIPTOR = _RUNMETADATA,
__module__ = 'tensorflow.core.protobuf.config_pb2'
# @@protoc_insertion_point(class_scope:tensorflow.RunMetadata)
))
_sym_db.RegisterMessage(RunMetadata)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\014ConfigProtosP\001\370\001\001'))
_CONFIGPROTO_DEVICECOUNTENTRY.has_options = True
_CONFIGPROTO_DEVICECOUNTENTRY._options = _descriptor._ParseOptions(descriptor_pb2.MessageOptions(), _b('8\001'))
# @@protoc_insertion_point(module_scope)
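

# Editor's sketch (not generated code): typical use of the messages defined
# above when configuring a TF1-style session; the field names come from the
# descriptors in this module.
#
# config = ConfigProto()
# config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = 0.5
# config.log_device_placement = True
# serialized = config.SerializeToString()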
| 43.161644 | 3,662 | 0.758601 |
f70f2240675cbbff5008d275512cd6a5bb90088b | 2,087 | py | Python | dvc/dependency/repo.py | drorata/dvc | b6bc65fcbf269b94b7c1ce2d9dff641dedb039b0 | ["Apache-2.0"] | null | null | null | dvc/dependency/repo.py | drorata/dvc | b6bc65fcbf269b94b7c1ce2d9dff641dedb039b0 | ["Apache-2.0"] | null | null | null | dvc/dependency/repo.py | drorata/dvc | b6bc65fcbf269b94b7c1ce2d9dff641dedb039b0 | ["Apache-2.0"] | null | null | null |
from __future__ import unicode_literals

import copy

from funcy import merge
from schema import Optional
from contextlib import contextmanager

from dvc.external_repo import external_repo
from dvc.utils.compat import str

from .local import DependencyLOCAL


class DependencyREPO(DependencyLOCAL):
    PARAM_REPO = "repo"
    PARAM_URL = "url"
    PARAM_REV = "rev"
    PARAM_REV_LOCK = "rev_lock"

    REPO_SCHEMA = {
        Optional(PARAM_URL): str,
        Optional(PARAM_REV): str,
        Optional(PARAM_REV_LOCK): str,
    }

    def __init__(self, def_repo, stage, *args, **kwargs):
        self.def_repo = def_repo
        super(DependencyREPO, self).__init__(stage, *args, **kwargs)

    def _parse_path(self, remote, path):
        return None

    @property
    def is_in_repo(self):
        return False

    def __str__(self):
        return "{} ({})".format(self.def_path, self.def_repo[self.PARAM_URL])

    @contextmanager
    def _make_repo(self, **overrides):
        with external_repo(**merge(self.def_repo, overrides)) as repo:
            yield repo

    def status(self):
        with self._make_repo() as repo:
            current = repo.find_out_by_relpath(self.def_path).info

        with self._make_repo(rev_lock=None) as repo:
            updated = repo.find_out_by_relpath(self.def_path).info

        if current != updated:
            return {str(self): "update available"}

        return {}

    def save(self):
        pass

    def dumpd(self):
        return {self.PARAM_PATH: self.def_path, self.PARAM_REPO: self.def_repo}

    def download(self, to, resume=False):
        with self._make_repo(
            cache_dir=self.repo.cache.local.cache_dir
        ) as repo:
            self.def_repo[self.PARAM_REV_LOCK] = repo.scm.get_rev()
            out = repo.find_out_by_relpath(self.def_path)
            repo.fetch(out.stage.path)
            to.info = copy.copy(out.info)
            to.checkout()

    def update(self):
        with self._make_repo(rev_lock=None) as repo:
            self.def_repo[self.PARAM_REV_LOCK] = repo.scm.get_rev()
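

# Editor's sketch (assumption, for illustration only): the `def_repo` dict
# this dependency is built from mirrors REPO_SCHEMA, e.g.
#
#   def_repo = {"url": "https://github.com/example/registry.git",  # hypothetical
#               "rev": "master"}
#   dep = DependencyREPO(def_repo, stage, "data/model.pkl")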
| 26.75641 | 79 | 0.643987 |
f70f2adaa945474149079545ea89047b44947487 | 10,636 | py | Python | carla_python_api_recorder.py | t27/carla-scenic-data-collector | 3f38fa0e23a9f0ed85726292c5703c8505330870 | ["MIT"] | 1 | 2022-03-30T07:30:51.000Z | 2022-03-30T07:30:51.000Z | carla_python_api_recorder.py | t27/carla-scenic-data-collector | 3f38fa0e23a9f0ed85726292c5703c8505330870 | ["MIT"] | 1 | 2021-03-15T03:48:28.000Z | 2021-03-15T03:48:28.000Z | carla_python_api_recorder.py | t27/carla-scenic-data-collector | 3f38fa0e23a9f0ed85726292c5703c8505330870 | ["MIT"] | 5 | 2021-03-14T22:19:53.000Z | 2021-11-11T15:28:05.000Z |
# Recorder that records agent states as dataframes and also stores a carla recording, in synchronous mode
#!/usr/bin/env python
# Copyright (c) 2019 Computer Vision Center (CVC) at the Universitat Autonoma de
# Barcelona (UAB).
#
# This work is licensed under the terms of the MIT license.
# For a copy, see <https://opensource.org/licenses/MIT>.
import glob
import os
import sys
import pandas as pd
from tqdm import tqdm
import math
CARLA_VERSION = "0.9.11"
try:
# sys.path.append("./libs/carla-0.9.9-py3.7-linux-x86_64.egg")
if CARLA_VERSION == "0.9.9":
sys.path.append("./libs/carla-0.9.9-py3.7-linux-x86_64.egg")
elif CARLA_VERSION == "0.9.11":
sys.path.append("./libs/carla-0.9.11-py3.7-linux-x86_64.egg")
except IndexError:
pass
import carla
import argparse
import random
import time
import logging
import click
import pathlib
import spawn
current_dir = pathlib.Path(__file__).parent.absolute()
SEED = 27
random.seed(SEED)
def get_metadata(actor, frame_id):
type_id = actor.type_id
def splitCarlaVec(vect):
return vect.x, vect.y, vect.z
id = actor.id
# clsname = ClientSideBoundingBoxes.get_class_name(actor)
tf = actor.get_transform()
roll, pitch, yaw = tf.rotation.roll, tf.rotation.pitch, tf.rotation.yaw
loc = actor.get_location()
pos_x, pos_y, pos_z = splitCarlaVec(loc)
try:
bbox3d = actor.bounding_box
bbox3d_offset_x, bbox3d_offset_y, bbox3d_offset_z = splitCarlaVec(
bbox3d.location
)
bbox3d_extent_x, bbox3d_extent_y, bbox3d_extent_z = splitCarlaVec(bbox3d.extent)
except:
bbox3d_offset_x, bbox3d_offset_y, bbox3d_offset_z = None, None, None
bbox3d_extent_x, bbox3d_extent_y, bbox3d_extent_z = None, None, None
velocity_x, velocity_y, velocity_z = splitCarlaVec(actor.get_velocity())
acc_x, acc_y, acc_z = splitCarlaVec(actor.get_acceleration())
angular_vel_x, angular_vel_y, angular_vel_z = splitCarlaVec(
actor.get_angular_velocity()
)
try:
# need to do this because Carla's Actor object doesnt support getattr
traffic_light_state = actor.state.name
except:
traffic_light_state = None
return (
frame_id,
id,
type_id,
pos_x,
pos_y,
pos_z,
roll,
pitch,
yaw,
velocity_x,
velocity_y,
velocity_z,
acc_x,
acc_y,
acc_z,
angular_vel_x,
angular_vel_y,
angular_vel_z,
bbox3d_offset_x,
bbox3d_offset_y,
bbox3d_offset_z,
bbox3d_extent_x,
bbox3d_extent_y,
bbox3d_extent_z,
traffic_light_state,
)
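
# Editor's note: the tuple returned above is ordered to match `df_columns`
# defined in run() below -- keep the two in sync when adding fields.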
# Module-level flag set by the collision sensor callback; run() polls it to
# end a session early once two vehicles collide.
global_collision = False


def collision_detect_callback(event):
    actor_we_collide_against = event.other_actor
    impulse = event.normal_impulse
    # collision strength; computed for potential filtering but currently unused
    intensity = math.sqrt(impulse.x ** 2 + impulse.y ** 2 + impulse.z ** 2)
    if "vehicle." in actor_we_collide_against.type_id:
        global global_collision
        global_collision = True
def attach_collision_sensor(actor, world):
blueprint_library = world.get_blueprint_library()
collision_sensor = world.spawn_actor(
blueprint_library.find("sensor.other.collision"),
carla.Transform(),
attach_to=actor,
)
collision_sensor.listen(lambda event: collision_detect_callback(event))
return collision_sensor
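
# Editor's sketch (illustrative; run() below does this as part of its loop):
# attach a collision sensor to every vehicle currently in the world.
#
# sensors = [attach_collision_sensor(actor, world)
#            for actor in world.get_actors() if "vehicle." in actor.type_id]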
def run(
client,
round_name,
recording_dir,
speed_violation_prob=60,
tl_violation_prob=70,
perc_speed_diff=-30,
num_vehicles=25,
SESSION_DURATION=60,
):
safe = True # avoid spawning vehicles whose geometry is not ideal for carla
actor_list = []
sensors = []
logging.basicConfig(format="%(levelname)s: %(message)s", level=logging.INFO)
try:
FPS = 5
DELTA_T = 1 / FPS
world = client.get_world()
blueprints = world.get_blueprint_library().filter("vehicle.*")
traffic_manager = client.get_trafficmanager()
traffic_manager.set_global_distance_to_leading_vehicle(2.0)
if CARLA_VERSION == "0.9.11":
print("Using deterministic Traffic Manager")
traffic_manager.set_random_device_seed(SEED)
settings = client.get_world().get_settings()
if not settings.synchronous_mode:
traffic_manager.set_synchronous_mode(True)
synchronous_master = True
settings.synchronous_mode = True
settings.fixed_delta_seconds = DELTA_T
client.get_world().apply_settings(settings)
else:
synchronous_master = False
recording_dir_path = pathlib.Path(recording_dir)
recording_dir_path.mkdir(exist_ok=True)
session_recording = str(recording_dir_path / f"{round_name}.csv")
carla_session_recording = str(
recording_dir_path.absolute() / f"{round_name}_carla_recording"
)
print("Recording on file: %s" % client.start_recorder(carla_session_recording))
vehicles_list, walkers_list, all_actors = spawn.spawn(
client, world, num_vehicles, 0, safe
)
world.tick()
print("spawned %d vehicles, press Ctrl+C to exit." % len(actor_list))
# fmt: off
df_columns = [
"frame_id", "id", "type_id", "pos_x", "pos_y", "pos_z", "roll", "pitch", "yaw",
"velocity_x", "velocity_y", "velocity_z", "acc_x", "acc_y", "acc_z",
"angular_vel_x", "angular_vel_y", "angular_vel_z",
"bbox3d_offset_x", "bbox3d_offset_y", "bbox3d_offset_z",
"bbox3d_extent_x", "bbox3d_extent_y", "bbox3d_extent_z", "traffic_light_color",
]
# fmt: on
# get all non vehicle agents
global global_collision
global_collision = False
actors = world.get_actors()
for actor in actors:
if "vehicle." in actor.type_id:
sensors.append(attach_collision_sensor(actor, world))
non_vehicles = [
x
for x in actors
if ("vehicle" not in x.type_id and "traffic_light" not in x.type_id)
        ]  # signs etc. (vehicles and traffic lights are handled separately)
frame_id = 0
df_arr = []
non_vehicle_arr = [get_metadata(actor, frame_id) for actor in non_vehicles]
df_arr += non_vehicle_arr
pbar = tqdm(total=FPS * SESSION_DURATION)
max_frames = FPS * SESSION_DURATION
collision_detected_once = False
while frame_id < max_frames:
if global_collision and not collision_detected_once:
            # collision detected: allow 5 more frames below, then the loop exits
print("detected collision, exiting!")
collision_detected_once = True
max_frames = frame_id + 5
# continue
actors = world.get_actors()
for actor in actors:
if "vehicle." in actor.type_id:
# print(actor.type_id)
tm_port = traffic_manager.get_port()
actor.set_autopilot(True, tm_port)
traffic_manager.ignore_lights_percentage(actor, tl_violation_prob)
traffic_manager.distance_to_leading_vehicle(actor, 3)
if random.random() * 100 < speed_violation_prob:
traffic_manager.vehicle_percentage_speed_difference(
actor, perc_speed_diff
)
vehicles_and_lights = [
x
for x in actors
if "vehicle" in x.type_id or "traffic_light" in x.type_id
]
metadata_arr = [
get_metadata(actor, frame_id) for actor in vehicles_and_lights
]
df_arr += metadata_arr
frame_id += 1
pbar.update(1)
world.tick()
df = pd.DataFrame(df_arr, columns=df_columns)
pbar.close()
print(f"Saving CSV({len(df.frame_id.unique())} frames)")
# df.to_parquet(f"session_data.parquet")
df.to_csv(session_recording, index=False)
world.tick()
# if args.recorder_time > 0:
# time.sleep(args.recorder_time)
# else:
# while True:
# world.wait_for_tick()
# # time.sleep(0.1)
finally:
if synchronous_master:
settings = world.get_settings()
settings.synchronous_mode = False
settings.fixed_delta_seconds = None
world.apply_settings(settings)
print("\ndestroying %d actors" % (len(sensors) + len(vehicles_list)))
# all_agents = sensors + vehicles_list
for s in sensors:
s.destroy()
client.apply_batch_sync([carla.command.DestroyActor(x) for x in vehicles_list])
print("Stop recording")
client.stop_recorder()
@click.command()
@click.option(
"-s",
"--scenario_type",
type=click.Choice(["tl_sl", "nominal"], case_sensitive=False),
required=True,
)
@click.option("-n", "--num_rounds", default=100)
@click.option("--test", is_flag=True)
def main(scenario_type, num_rounds, test):
# print(scenario_type, test, num_rounds)
if test:
random.seed(72)
if scenario_type.lower() == "tl_sl":
SPEED_VIOLATION_PROB = 60
TL_VIOLATION_PROB = 70
PERC_SPEED_DIFF = -30
SCENARIO_NAME = "tl_sl"
# NUM_ROUNDS = 100
elif scenario_type.lower() == "nominal":
SPEED_VIOLATION_PROB = 0
TL_VIOLATION_PROB = 0
PERC_SPEED_DIFF = 0
SCENARIO_NAME = "nominal"
# NUM_ROUNDS = 200
NUM_ROUNDS = num_rounds
print(f"Recording {SCENARIO_NAME} data")
try:
host = "127.0.0.1" # IP of the host server (default: 127.0.0.1)
port = 2000 # TCP port to listen to (default: 2000)",
client = carla.Client(host, port)
if test:
scenario_dir = f"test_{SCENARIO_NAME}_recordings"
else:
scenario_dir = f"{SCENARIO_NAME}_recordings"
round_names = []
for i in range(NUM_ROUNDS):
run(
client,
f"{scenario_type}_round_{i}",
scenario_dir,
SPEED_VIOLATION_PROB,
TL_VIOLATION_PROB,
PERC_SPEED_DIFF,
)
round_names.append(f"{scenario_type}_round_{i}")
# client.reload_world()
except KeyboardInterrupt:
pass
finally:
print("\ndone.")
if __name__ == "__main__":
main()
| 32.036145 | 105 | 0.615269 |
f70fa0e5384e444c718b501e35ff39db46d5b99a | 6,872 | py | Python | pennylane/transforms/__init__.py | XanaduAI/pennylane | 0620b8a8bb56ff55bfc2130619fa0a5a1af2b2a4 | [
"Apache-2.0"
] | 539 | 2018-11-13T08:45:42.000Z | 2020-07-27T18:17:16.000Z | pennylane/transforms/__init__.py | XanaduAI/pennylane | 0620b8a8bb56ff55bfc2130619fa0a5a1af2b2a4 | [
"Apache-2.0"
] | 588 | 2018-11-14T10:21:47.000Z | 2020-07-28T06:27:14.000Z | pennylane/transforms/__init__.py | XanaduAI/pennylane | 0620b8a8bb56ff55bfc2130619fa0a5a1af2b2a4 | [
"Apache-2.0"
] | 165 | 2018-11-13T18:58:56.000Z | 2020-07-27T17:18:17.000Z | # Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This subpackage contains QNode, quantum function, device, and tape transforms.
.. currentmodule:: pennylane
Transforms
----------
Transforms that act on QNodes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These transforms accept QNodes, and return new transformed functions
that compute the desired quantity.
.. autosummary::
:toctree: api
~transforms.classical_jacobian
~batch_params
~batch_input
~metric_tensor
~adjoint_metric_tensor
~specs
~transforms.mitigate_with_zne
~transforms.split_non_commuting
Transforms that act on quantum functions
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These transforms accept quantum functions (Python functions
containing quantum operations) that are used to construct QNodes.
.. autosummary::
:toctree: api
~adjoint
~ctrl
~transforms.cond
~defer_measurements
~apply_controlled_Q
~quantum_monte_carlo
~transforms.insert
Transforms for circuit compilation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This set of transforms accept quantum functions, and perform basic circuit compilation tasks.
.. autosummary::
:toctree: api
~compile
~transforms.cancel_inverses
~transforms.commute_controlled
~transforms.merge_rotations
~transforms.single_qubit_fusion
~transforms.unitary_to_rot
~transforms.merge_amplitude_embedding
~transforms.remove_barrier
~transforms.undo_swaps
~transforms.pattern_matching_optimization
~transforms.transpile
There are also utility functions and decompositions available that assist with
both transforms, and decompositions within the larger PennyLane codebase.
.. autosummary::
:toctree: api
~transforms.zyz_decomposition
~transforms.two_qubit_decomposition
~transforms.set_decomposition
~transforms.simplify
~transforms.pattern_matching
There are also utility functions that take a circuit and return a DAG.
.. autosummary::
:toctree: api
~transforms.commutation_dag
~transforms.CommutationDAG
~transforms.CommutationDAGNode
Transform for circuit cutting
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
The :func:`~.cut_circuit` transform accepts a QNode and returns a new function that cuts the original circuit,
allowing larger circuits to be split into smaller circuits that are compatible with devices that
have a restricted number of qubits.
.. autosummary::
:toctree: api
~cut_circuit
The :func:`~.cut_circuit_mc` transform is designed to be used for cutting circuits which contain :func:`~.sample`
measurements and is implemented using a Monte Carlo method. Similarly to the :func:`~.cut_circuit`
transform, this transform accepts a QNode and returns a new function that cuts the original circuit.
This transform can also accept an optional classical processing function to calculate an
expectation value.
.. autosummary::
:toctree: api
~cut_circuit_mc
There are also low-level functions that can be used to build up the circuit cutting functionalities:
.. autosummary::
:toctree: api
~transforms.qcut.tape_to_graph
~transforms.qcut.replace_wire_cut_nodes
~transforms.qcut.fragment_graph
~transforms.qcut.graph_to_tape
~transforms.qcut.remap_tape_wires
~transforms.qcut.expand_fragment_tape
~transforms.qcut.expand_fragment_tapes_mc
~transforms.qcut.qcut_processing_fn
~transforms.qcut.qcut_processing_fn_sample
~transforms.qcut.qcut_processing_fn_mc
~transforms.qcut.CutStrategy
~transforms.qcut.kahypar_cut
~transforms.qcut.place_wire_cuts
~transforms.qcut.find_and_place_cuts
Transforms that act on tapes
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
These transforms accept quantum tapes, and return one or
more tapes as well as a classical processing function.
.. autosummary::
:toctree: api
~transforms.measurement_grouping
~transforms.hamiltonian_expand
Decorators and utility functions
--------------------------------
The following decorators and convenience functions are provided
to help build custom QNode, quantum function, and tape transforms:
.. autosummary::
:toctree: api
~single_tape_transform
~batch_transform
~qfunc_transform
~op_transform
~transforms.make_tape
~transforms.map_batch_transform
~transforms.create_expand_fn
~transforms.create_decomp_expand_fn
~transforms.expand_invalid_trainable
~transforms.expand_multipar
~transforms.expand_trainable_multipar
~transforms.expand_nonunitary_gen
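For example, :func:`~pennylane.qfunc_transform` turns a function that acts
on a tape into a quantum function transform (a minimal sketch; the gate
choices below are illustrative only, not a fixed recipe):
.. code-block:: python
    @qml.qfunc_transform
    def rotate_ends(tape, angle):
        # requeue the original operations, then rotate every wire
        for op in tape.operations:
            qml.apply(op)
        for wire in tape.wires:
            qml.RZ(angle, wires=wire)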
"""
# Import the decorators first to prevent circular imports when used in other transforms
from .batch_transform import batch_transform, map_batch_transform
from .qfunc_transforms import make_tape, single_tape_transform, qfunc_transform
from .op_transforms import op_transform
from .adjoint import adjoint
from .batch_params import batch_params
from .batch_input import batch_input
from .classical_jacobian import classical_jacobian
from .condition import cond, Conditional
from .compile import compile
from .control import ControlledOperation, ctrl
from .decompositions import zyz_decomposition, two_qubit_decomposition
from .defer_measurements import defer_measurements
from .hamiltonian_expand import hamiltonian_expand
from .split_non_commuting import split_non_commuting
from .measurement_grouping import measurement_grouping
from .metric_tensor import metric_tensor
from .adjoint_metric_tensor import adjoint_metric_tensor
from .insert_ops import insert
from .mitigate import mitigate_with_zne
from .optimization import (
cancel_inverses,
commute_controlled,
merge_rotations,
single_qubit_fusion,
merge_amplitude_embedding,
remove_barrier,
undo_swaps,
pattern_matching,
pattern_matching_optimization,
)
from .specs import specs
from .qmc import apply_controlled_Q, quantum_monte_carlo
from .unitary_to_rot import unitary_to_rot
from .commutation_dag import (
commutation_dag,
is_commuting,
CommutationDAG,
CommutationDAGNode,
simplify,
)
from .tape_expand import (
expand_invalid_trainable,
expand_multipar,
expand_nonunitary_gen,
expand_trainable_multipar,
create_expand_fn,
create_decomp_expand_fn,
set_decomposition,
)
from .transpile import transpile
from . import qcut
from .qcut import cut_circuit, cut_circuit_mc
| 30.008734 | 113 | 0.766589 |
f70fad449b902120499a1dec1a4d6c495074a31f | 607 | py | Python | venv/Lib/site-packages/tensorflow/python/keras/api/_v2/keras/applications/resnet/__init__.py | rexliu3/StockTradingBotCloud | 46b732b9c05f73bc0e856a3c4a16854b6d12e18e | [
"MIT"
] | null | null | null | venv/Lib/site-packages/tensorflow/python/keras/api/_v2/keras/applications/resnet/__init__.py | rexliu3/StockTradingBotCloud | 46b732b9c05f73bc0e856a3c4a16854b6d12e18e | [
"MIT"
] | null | null | null | venv/Lib/site-packages/tensorflow/python/keras/api/_v2/keras/applications/resnet/__init__.py | rexliu3/StockTradingBotCloud | 46b732b9c05f73bc0e856a3c4a16854b6d12e18e | [
"MIT"
] | 1 | 2020-06-28T11:47:47.000Z | 2020-06-28T11:47:47.000Z | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""ResNet models for Keras.
"""
from __future__ import print_function as _print_function
import sys as _sys
from tensorflow.python.keras.applications.resnet import ResNet101
from tensorflow.python.keras.applications.resnet import ResNet152
from tensorflow.python.keras.applications.resnet import ResNet50
from tensorflow.python.keras.applications.resnet import decode_predictions
from tensorflow.python.keras.applications.resnet import preprocess_input
del _print_function
| 35.705882 | 82 | 0.84514 |
f70fbff6ebbce086af0b72f88cdfefd2aaa4e033 | 2,561 | py | Python | fieldbook/client.py | CSIS-iLab/fieldbook-python | 7dc5c26eab9675b4b3421ef1c943668d0616372e | [
"0BSD"
] | null | null | null | fieldbook/client.py | CSIS-iLab/fieldbook-python | 7dc5c26eab9675b4b3421ef1c943668d0616372e | [
"0BSD"
] | null | null | null | fieldbook/client.py | CSIS-iLab/fieldbook-python | 7dc5c26eab9675b4b3421ef1c943668d0616372e | [
"0BSD"
] | 1 | 2021-04-15T17:14:19.000Z | 2021-04-15T17:14:19.000Z | # -*- coding: utf-8 -*-
import requests
from urllib.parse import urljoin
from os import getenv
import types
class Fieldbook(object):
"""
Client for Fieldbook API: https://github.com/fieldbook/api-docs
    Initialize with a book_id and optionally the API key (name) and secret.
"""
BASE_URL = "https://api.fieldbook.com"
API_VERSION = "v1"
def __init__(self, book_id, key=None, secret=None):
super(Fieldbook, self).__init__()
self._key = key if key else getenv('FIELDBOOK_API_KEY', None)
self._secret = secret if secret else getenv('FIELDBOOK_API_SECRET', None)
self.book_id = book_id
self.session = requests.Session()
if self._key and self._secret:
self.set_auth(self._key, self._secret)
def set_auth(self, key, secret):
self._key = key
self._secret = secret
self.session.auth = (self._key, self._secret)
def _make_sheet_endpoints(self, endpoint_names):
def make_endpoint(name):
def sheet_endpoint(self, **kwargs):
return self._get(name, **kwargs)
return sheet_endpoint
for name in endpoint_names:
endpoint = make_endpoint(name)
endpoint.__doc__ = "Query '{}' sheet.".format(name)
setattr(self, name, types.MethodType(endpoint, self))
def _make_url(self, sheet_name=None):
return urljoin(Fieldbook.BASE_URL, "/".join((Fieldbook.API_VERSION, self.book_id, sheet_name or '')))
def _get(self, sheet_name=None, **kwargs):
if not self.session.auth and self._key and self._secret:
self.set_auth(self._key, self._secret)
url = self._make_url(sheet_name=sheet_name)
if 'row_id' in kwargs:
row_id = str(kwargs.pop('row_id'))
url = '{}/{}'.format(url, row_id)
resp = self.session.get(url, params=kwargs)
if not resp.ok:
            resp.raise_for_status()
return resp.json()
def sheets(self, make_endpoints=False):
"""Returns a list of sheets associated with a book"""
sheets = self._get()
if make_endpoints:
self._make_sheet_endpoints(sheets)
return sheets
def list(self, sheet_name, **kwargs):
"""Query a named sheet"""
return self._get(sheet_name=sheet_name, **kwargs)
def get(self, sheet_name, row_id, **kwargs):
"""Retrieve a row from a sheet by its id"""
kwargs['row_id'] = row_id
return self._get(sheet_name=sheet_name, **kwargs)
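# Minimal usage sketch (book id, sheet name, and credentials are
# placeholders, not real values):
#
#     book = Fieldbook('5abc123', key='key-name', secret='key-secret')
#     print(book.sheets())          # sheet names for the book
#     rows = book.list('people')    # all rows of the 'people' sheet
#     row = book.get('people', 1)   # a single row by id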
| 36.070423 | 109 | 0.627099 |
f70fcdc4a7591387a0e661b787c802ae0ddafa4c | 3,137 | py | Python | mir/qualia/comment.py | darkfeline/qualia | 28ccb419dd82b75878c2f52227f291b249b489d7 | [
"Apache-2.0"
] | 23 | 2017-01-18T13:53:05.000Z | 2020-05-30T10:41:56.000Z | mir/qualia/comment.py | project-mir/mir.qualia | 28ccb419dd82b75878c2f52227f291b249b489d7 | [
"Apache-2.0"
] | 4 | 2016-10-16T00:19:15.000Z | 2017-10-25T13:28:05.000Z | mir/qualia/comment.py | project-mir/mir.qualia | 28ccb419dd82b75878c2f52227f291b249b489d7 | [
"Apache-2.0"
] | 5 | 2016-10-16T00:07:38.000Z | 2022-03-30T13:11:30.000Z | # Copyright (C) 2016 Allen Li
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Comment and uncomment lines.
Classes:
CommentPrefix
"""
import re
from mir.qualia.indent import common_indent
class CommentPrefix:
r"""Comments and uncomments lines, given a prefix.
>>> prefix = CommentPrefix('#')
>>> prefix.uncomment(['#export EDITOR=vi\n'])
['export EDITOR=vi\n']
>>> prefix.comment(['export EDITOR=vi\n'])
['#export EDITOR=vi\n']
>>> prefix.is_commented(['export EDITOR=vi\n'])
False
Do not modify the comment_prefix attribute on an instance.
"""
def __init__(self, comment_prefix):
self._comment_prefix = comment_prefix
self._prefix_pattern = re.compile(
fr'^(?P<indent>\s*){re.escape(comment_prefix)}')
def __repr__(self):
cls = type(self).__qualname__
return f'{cls}({self._comment_prefix!r})'
def is_commented(self, lines):
"""Return True if all lines are commented."""
pattern = self._prefix_pattern
return all(pattern.search(line) for line in lines)
def uncomment(self, lines):
r"""Uncomment a sequence of lines.
This will keep uncommenting so long as the lines are all commented.
This is so that uncommenting is an idempotent operation.
>>> prefix = CommentPrefix('#')
>>> prefix.uncomment(['##foo\n', '##bar\n'])
['foo\n', 'bar\n']
>>> prefix.uncomment(prefix.uncomment(['##foo\n', '##bar\n']))
['foo\n', 'bar\n']
In almost all cases, this is desired behavior, but if you need to
preserve levels of commenting, include a line to protect them:
>>> prefix = CommentPrefix('#')
>>> prefix.uncomment(['##foo\n', '##bar\n', '#\n'])
['#foo\n', '#bar\n', '\n']
"""
if not lines:
return []
while self.is_commented(lines):
lines = self._force_uncomment(lines)
return lines
def _force_uncomment(self, lines):
"""Unconditionally uncomment a sequence of lines once."""
return [self._prefix_pattern.sub(r'\g<indent>', line)
for line in lines]
def comment(self, lines):
"""Comment a sequence of lines."""
if not self.is_commented(lines):
return self._force_comment(lines)
return lines
def _force_comment(self, lines):
"""Unconditionally comment a sequence of lines."""
indent = common_indent(lines)
indent_len = len(indent)
prefix = self._comment_prefix
return [f'{indent}{prefix}{line[indent_len:]}' for line in lines]
| 32.340206 | 75 | 0.63277 |
f70fe8e5aa412881ec6f288fac376593ff84e297 | 74 | py | Python | tests/unit/test_version.py | HoverHell/python-gron | 21977c36b5fafde6be351b5488673e97a7cb4aeb | [
"MIT"
] | 10 | 2018-06-23T11:32:14.000Z | 2021-12-15T09:45:53.000Z | tests/unit/test_version.py | HoverHell/python-gron | 21977c36b5fafde6be351b5488673e97a7cb4aeb | [
"MIT"
] | null | null | null | tests/unit/test_version.py | HoverHell/python-gron | 21977c36b5fafde6be351b5488673e97a7cb4aeb | [
"MIT"
] | 1 | 2021-04-06T10:56:37.000Z | 2021-04-06T10:56:37.000Z | import gron
def test_version():
assert hasattr(gron, '__VERSION__')
| 12.333333 | 39 | 0.716216 |
f70ff08456cae98acf4a012e994d88495cb533a7 | 2,886 | py | Python | nova/api/openstack/compute/schemas/volumes.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | [
"Apache-2.0"
] | 5 | 2016-04-28T16:20:38.000Z | 2021-04-25T11:19:03.000Z | nova/api/openstack/compute/schemas/volumes.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | [
"Apache-2.0"
] | 11 | 2017-06-19T01:28:55.000Z | 2017-06-23T02:01:47.000Z | nova/api/openstack/compute/schemas/volumes.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | [
"Apache-2.0"
] | 5 | 2020-04-08T20:24:45.000Z | 2020-10-05T19:02:13.000Z | # Copyright 2014 IBM Corporation. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from nova.api.validation import parameter_types
create = {
'type': 'object',
'properties': {
'volume': {
'type': 'object',
'properties': {
'volume_type': {'type': 'string'},
'metadata': {'type': 'object'},
'snapshot_id': {'type': 'string'},
'size': {
'type': ['integer', 'string'],
'pattern': '^[0-9]+$',
'minimum': 1
},
'availability_zone': {'type': 'string'},
'display_name': {'type': 'string'},
'display_description': {'type': 'string'},
},
'required': ['size'],
'additionalProperties': False,
},
},
'required': ['volume'],
'additionalProperties': False,
}
snapshot_create = {
'type': 'object',
'properties': {
'snapshot': {
'type': 'object',
'properties': {
'volume_id': {'type': 'string'},
'force': parameter_types.boolean,
'display_name': {'type': 'string'},
'display_description': {'type': 'string'},
},
'required': ['volume_id'],
'additionalProperties': False,
},
},
'required': ['snapshot'],
'additionalProperties': False,
}
create_volume_attachment = {
'type': 'object',
'properties': {
'volumeAttachment': {
'type': 'object',
'properties': {
'volumeId': parameter_types.volume_id,
'device': {
'type': ['string', 'null'],
# NOTE: The validation pattern from match_device() in
# nova/block_device.py.
'pattern': '(^/dev/x{0,1}[a-z]{0,1}d{0,1})([a-z]+)[0-9]*$'
}
},
'required': ['volumeId'],
'additionalProperties': False,
},
},
'required': ['volumeAttachment'],
'additionalProperties': False,
}
update_volume_attachment = copy.deepcopy(create_volume_attachment)
del update_volume_attachment['properties']['volumeAttachment'][
'properties']['device']
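# Example request body that validates against the ``create`` schema above
# (all values are illustrative):
#
#     {"volume": {"size": 1,
#                 "display_name": "vol-1",
#                 "availability_zone": "nova"}}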
| 32.066667 | 78 | 0.517672 |
f7101e05d6aa3f6fae0e0f1f853fa0dab34e1ab0 | 8,060 | py | Python | sdk/storage/azure-storage-blob/azure/storage/blob/_deserialize.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 2,728 | 2015-01-09T10:19:32.000Z | 2022-03-31T14:50:33.000Z | sdk/storage/azure-storage-blob/azure/storage/blob/_deserialize.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 17,773 | 2015-01-05T15:57:17.000Z | 2022-03-31T23:50:25.000Z | sdk/storage/azure-storage-blob/azure/storage/blob/_deserialize.py | v-xuto/azure-sdk-for-python | 9c6296d22094c5ede410bc83749e8df8694ccacc | [
"MIT"
] | 1,916 | 2015-01-19T05:05:41.000Z | 2022-03-31T19:36:44.000Z | # -------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
# --------------------------------------------------------------------------
# pylint: disable=no-self-use
from typing import ( # pylint: disable=unused-import
Tuple, Dict, List,
TYPE_CHECKING
)
try:
from urllib.parse import unquote
except ImportError:
from urllib import unquote
from ._models import BlobType, CopyProperties, ContentSettings, LeaseProperties, BlobProperties, ImmutabilityPolicy
from ._shared.models import get_enum_value
from ._shared.response_handlers import deserialize_metadata
from ._models import ContainerProperties, BlobAnalyticsLogging, Metrics, CorsRule, RetentionPolicy, \
StaticWebsite, ObjectReplicationPolicy, ObjectReplicationRule
if TYPE_CHECKING:
from ._generated.models import PageList
def deserialize_pipeline_response_into_cls(cls_method, response, obj, headers):
try:
deserialized_response = response.http_response
except AttributeError:
deserialized_response = response
return cls_method(deserialized_response, obj, headers)
def deserialize_blob_properties(response, obj, headers):
blob_properties = BlobProperties(
metadata=deserialize_metadata(response, obj, headers),
object_replication_source_properties=deserialize_ors_policies(response.http_response.headers),
**headers
)
if 'Content-Range' in headers:
if 'x-ms-blob-content-md5' in headers:
blob_properties.content_settings.content_md5 = headers['x-ms-blob-content-md5']
else:
blob_properties.content_settings.content_md5 = None
return blob_properties
def deserialize_ors_policies(policy_dictionary):
if policy_dictionary is None:
return None
# For source blobs (blobs that have policy ids and rule ids applied to them),
# the header will be formatted as "x-ms-or-<policy_id>_<rule_id>: {Complete, Failed}".
# The value of this header is the status of the replication.
or_policy_status_headers = {key: val for key, val in policy_dictionary.items()
if 'or-' in key and key != 'x-ms-or-policy-id'}
parsed_result = {}
for key, val in or_policy_status_headers.items():
# list blobs gives or-policy_rule and get blob properties gives x-ms-or-policy_rule
policy_and_rule_ids = key.split('or-')[1].split('_')
policy_id = policy_and_rule_ids[0]
rule_id = policy_and_rule_ids[1]
# If we are seeing this policy for the first time, create a new list to store rule_id -> result
parsed_result[policy_id] = parsed_result.get(policy_id) or list()
parsed_result[policy_id].append(ObjectReplicationRule(rule_id=rule_id, status=val))
result_list = [ObjectReplicationPolicy(policy_id=k, rules=v) for k, v in parsed_result.items()]
return result_list
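# For example, a header dict like {'x-ms-or-111_222': 'Complete'} (policy and
# rule ids are placeholders) parses into one ObjectReplicationPolicy with
# policy_id '111' containing ObjectReplicationRule(rule_id='222',
# status='Complete').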
def deserialize_blob_stream(response, obj, headers):
blob_properties = deserialize_blob_properties(response, obj, headers)
obj.properties = blob_properties
return response.http_response.location_mode, obj
def deserialize_container_properties(response, obj, headers):
metadata = deserialize_metadata(response, obj, headers)
container_properties = ContainerProperties(
metadata=metadata,
**headers
)
return container_properties
def get_page_ranges_result(ranges):
# type: (PageList) -> Tuple[List[Dict[str, int]], List[Dict[str, int]]]
page_range = [] # type: ignore
clear_range = [] # type: List
if ranges.page_range:
page_range = [{'start': b.start, 'end': b.end} for b in ranges.page_range] # type: ignore
if ranges.clear_range:
clear_range = [{'start': b.start, 'end': b.end} for b in ranges.clear_range]
return page_range, clear_range # type: ignore
def service_stats_deserialize(generated):
"""Deserialize a ServiceStats objects into a dict.
"""
return {
'geo_replication': {
'status': generated.geo_replication.status,
'last_sync_time': generated.geo_replication.last_sync_time,
}
}
def service_properties_deserialize(generated):
"""Deserialize a ServiceProperties objects into a dict.
"""
return {
'analytics_logging': BlobAnalyticsLogging._from_generated(generated.logging), # pylint: disable=protected-access
'hour_metrics': Metrics._from_generated(generated.hour_metrics), # pylint: disable=protected-access
'minute_metrics': Metrics._from_generated(generated.minute_metrics), # pylint: disable=protected-access
'cors': [CorsRule._from_generated(cors) for cors in generated.cors], # pylint: disable=protected-access
'target_version': generated.default_service_version, # pylint: disable=protected-access
'delete_retention_policy': RetentionPolicy._from_generated(generated.delete_retention_policy), # pylint: disable=protected-access
'static_website': StaticWebsite._from_generated(generated.static_website), # pylint: disable=protected-access
}
def get_blob_properties_from_generated_code(generated):
blob = BlobProperties()
if generated.name.encoded:
blob.name = unquote(generated.name.content)
else:
blob.name = generated.name.content
blob_type = get_enum_value(generated.properties.blob_type)
blob.blob_type = BlobType(blob_type) if blob_type else None
blob.etag = generated.properties.etag
blob.deleted = generated.deleted
blob.snapshot = generated.snapshot
blob.is_append_blob_sealed = generated.properties.is_sealed
blob.metadata = generated.metadata.additional_properties if generated.metadata else {}
blob.encrypted_metadata = generated.metadata.encrypted if generated.metadata else None
blob.lease = LeaseProperties._from_generated(generated) # pylint: disable=protected-access
blob.copy = CopyProperties._from_generated(generated) # pylint: disable=protected-access
blob.last_modified = generated.properties.last_modified
blob.creation_time = generated.properties.creation_time
blob.content_settings = ContentSettings._from_generated(generated) # pylint: disable=protected-access
blob.size = generated.properties.content_length
blob.page_blob_sequence_number = generated.properties.blob_sequence_number
blob.server_encrypted = generated.properties.server_encrypted
blob.encryption_scope = generated.properties.encryption_scope
blob.deleted_time = generated.properties.deleted_time
blob.remaining_retention_days = generated.properties.remaining_retention_days
blob.blob_tier = generated.properties.access_tier
blob.rehydrate_priority = generated.properties.rehydrate_priority
blob.blob_tier_inferred = generated.properties.access_tier_inferred
blob.archive_status = generated.properties.archive_status
blob.blob_tier_change_time = generated.properties.access_tier_change_time
blob.version_id = generated.version_id
blob.is_current_version = generated.is_current_version
blob.tag_count = generated.properties.tag_count
blob.tags = parse_tags(generated.blob_tags) # pylint: disable=protected-access
blob.object_replication_source_properties = deserialize_ors_policies(generated.object_replication_metadata)
blob.last_accessed_on = generated.properties.last_accessed_on
blob.immutability_policy = ImmutabilityPolicy._from_generated(generated) # pylint: disable=protected-access
blob.has_legal_hold = generated.properties.legal_hold
blob.has_versions_only = generated.has_versions_only
return blob
def parse_tags(generated_tags):
# type: (Optional[List[BlobTag]]) -> Union[Dict[str, str], None]
"""Deserialize a list of BlobTag objects into a dict.
"""
if generated_tags:
tag_dict = {t.key: t.value for t in generated_tags.blob_tag_set}
return tag_dict
return None
| 46.057143 | 138 | 0.735732 |
f710381da3f755d00f1686fe84e2e0bb0f62b4dc | 1,215 | py | Python | pyIsoDep/tests/read_csv.py | MattKrecicki/PYTHON-ISOTOPIC-DEPLETION-PACKAGE | ccad214de8721aa9b499ef70cd39966f18bceb76 | [
"MIT"
] | 1 | 2022-01-04T22:21:18.000Z | 2022-01-04T22:21:18.000Z | pyIsoDep/tests/read_csv.py | DanKotlyar/PYTHON-ISOTOPIC-DEPLETION-PACKAGE | d9da8be6eff4ba301f9689ce5c38a5e50856d033 | [
"MIT"
] | null | null | null | pyIsoDep/tests/read_csv.py | DanKotlyar/PYTHON-ISOTOPIC-DEPLETION-PACKAGE | d9da8be6eff4ba301f9689ce5c38a5e50856d033 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""read_csv
Read the different csv files
Created on Mon Oct 11 21:30:00 2021 @author: Dan Kotlyar
Last updated on Mon Oct 11 21:45:00 2021 @author: Dan Kotlyar
"""
import numpy as np
import pandas as pd
def ReadCsv(csvFile):
    data = pd.read_csv(csvFile)
ID = np.array(data['ZAID'], dtype=int)
xsTypes = np.array(data['MT'], dtype=int)
xsVals = np.array(data["XS [barns]"], dtype=float)
N0 = np.array(data["N0 [atoms/b-cm]"], dtype=float)
fullID = np.unique(ID) # unique isotopes
nIsotopes = len(fullID)
    # columns (1-based): 1-ID, 2-N0, 3-capture, 4-fission, 5-(n,alpha)
xsTable = np.zeros((nIsotopes, 5))
xsTable[:, 0] = fullID
# obtain all the cross section types
numMTs = np.array([102, 18, 107])
for idx, numMT in enumerate(numMTs):
vals, idxFull, idx0 =\
np.intersect1d(fullID, ID[xsTypes == numMT], assume_unique=False,
return_indices=True)
if idx == 0:
xsTable[idxFull, 1] = N0[xsTypes == numMT][idx0]
xsTable[idxFull, idx+2] = xsVals[xsTypes == numMT][idx0]
idxFields = {"ID": 0, "N0": 1, "sig_c": 2, "sig_alpha": 3, "sig_f": 4}
return xsTable, idxFields
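# Usage sketch (assumes a CSV with ZAID / MT / "XS [barns]" / "N0 [atoms/b-cm]"
# columns, as parsed above):
#
#     table, fields = ReadCsv("bootstrap.csv")
#     capture_xs = table[:, fields["sig_c"]]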
| 28.255814 | 77 | 0.604115 |
f710383da7cf5e7b2bacdc981bb14cc2aeedc558 | 6,434 | py | Python | components/start_page.py | SrGambiarra/KivyStudioDesigner | 7f617b60aef3d5e99865cb559b9b5ee93a1988f5 | [
"MIT"
] | 3 | 2022-03-05T21:54:34.000Z | 2022-03-15T12:55:45.000Z | components/start_page.py | SrGambiarra/KivyStudioDesigner | 7f617b60aef3d5e99865cb559b9b5ee93a1988f5 | [
"MIT"
] | 2 | 2022-03-13T04:15:47.000Z | 2022-03-30T11:51:41.000Z | components/start_page.py | SrGambiarra/KivyStudioDesigner | 7f617b60aef3d5e99865cb559b9b5ee93a1988f5 | [
"MIT"
] | null | null | null | __all__ = [
'DesignerLinkLabel', 'RecentItem',
'RecentFilesBox' 'DesignerStartPage']
from utils.utils import get_designer, get_fs_encoding
from kivy.properties import ObjectProperty, StringProperty
from kivy.uix.scrollview import ScrollView
from kivy.uix.boxlayout import BoxLayout
from kivy.lang.builder import Builder
from kivy.uix.button import Button
import webbrowser
Builder.load_string("""
#: import theme_atlas utils.utils.theme_atlas
<DesignerButtonFit@DesignerButton>
size_hint_x: None
width: (self.texture_size[0]+sp(32))
<DesignerStartPage>:
btn_open: btn_open
btn_new: btn_new
recent_files_box: recent_files_box
orientation: 'vertical'
padding: (0, 0, 0, dp(20))
Label:
text: 'Kivy Designer'
font_size: '26pt'
size_hint_y: None
height: '40pt'
Label:
markup: True
text: '[i]Innovative User Interfaces, Desktop, and Mobile Development Made Easy.[/i]'
font_size: pt(12)
halign: 'center'
size_hint_y: None
height: '15pt'
GridLayout:
cols: 2
size_hint: None, None
height: self.minimum_height
width: self.minimum_width
pos_hint: {'center_x': 0.5}
padding: (0, pt(15), 0, 0)
spacing: '4sp'
DesignerButtonFit:
id: btn_open
text: 'Open Project'
on_release: root.dispatch('on_open_down')
DesignerButtonFit:
id: btn_new
text: 'New Project'
on_release: root.dispatch('on_new_down')
Label:
text: 'Getting Started'
font_size: '16pt'
bold: True
size_hint_y: None
height: '30pt'
GridLayout:
kivy_label: kivy_label
cols: 2
size_hint: None, None
height: self.minimum_height
width: '450dp'
pos_hint: {'center_x': 0.5}
row_force_default: True
row_default_height: '40sp'
spacing: '4sp'
padding: '16sp', '0sp'
DesignerLinkLabel:
id: kivy_label
text: ' Kivy'
link: 'http://kivy.org'
DesignerLinkLabel:
text: ' Kivy Designer Help'
on_release: root.dispatch('on_help')
DesignerLinkLabel:
id: kivy_label
text: ' Kivy Documentation'
link: 'http://kivy.org/docs'
DesignerLinkLabel:
text: ' Kivy Designer Documentation'
link: 'http://kivy-designer.readthedocs.org/'
Label:
text: 'Recent Projects'
font_size: '16pt'
bold: True
size_hint_y: None
height: '30pt'
RecentFilesBox:
id: recent_files_box
pos_hint: {'center_x': 0.5}
size_hint_x: None
width: '600dp'
canvas.before:
Color:
rgba: (1, 1, 1, 0.05)
Rectangle:
pos: self.pos
size: self.size
<DesignerLinkLabel>:
color: (0, 0, 1, 1)
background_normal: theme_atlas('action_item')
background_disabled_normal: theme_atlas('action_item_disabled')
text_size: self.width, None
<RecentFilesBox>:
grid: grid
cols: 1
padding: '2sp'
size_hint_x: None
bar_width: '10dp'
scroll_type: ['bars', 'content']
GridLayout:
id: grid
cols: 1
size_hint_y: None
height: '1dp'
<RecentItem>:
orientation: 'vertical'
size_hint: 1, None
height: '40dp'
on_touch_down: if self.collide_point(*args[1].pos): root.dispatch('on_press')
canvas.after:
Color:
rgb: (0.2, 0.2, 0.2)
Rectangle:
pos: ((self.x+dp(25)), self.y)
size: ((self.width-dp(50)), dp(1))
Label:
text: root.path
text_size: self.size
valign: 'middle'
shorten: True
padding_x: '20dp'
""")
class DesignerLinkLabel(Button):
'''DesignerLinkLabel displays a http link and opens it in a browser window
when clicked.
'''
link = StringProperty(None)
'''Contains the http link to be opened.
:data:`link` is a :class:`~kivy.properties.StringProperty`
'''
def on_release(self, *args):
'''Default event handler for 'on_release' event.
'''
if self.link:
webbrowser.open(self.link)
class RecentItem(BoxLayout):
path = StringProperty('')
'''Contains the application path
:data:`path` is a :class:`~kivy.properties.StringProperty`
'''
__events__ = ('on_press', )
def on_press(self, *args):
'''Item pressed
'''
class RecentFilesBox(ScrollView):
    '''Container consisting of buttons whose names specify
    the recent files.
'''
grid = ObjectProperty(None)
'''The grid layout consisting of all buttons.
This property is an instance of :class:`~kivy.uix.gridlayout`
:data:`grid` is a :class:`~kivy.properties.ObjectProperty`
'''
def __init__(self, **kwargs):
super(RecentFilesBox, self).__init__(**kwargs)
def add_recent(self, list_files):
'''To add buttons representing Recent Files.
:param list_files: array of paths
'''
for p in list_files:
if isinstance(p, bytes):
p = p.decode(get_fs_encoding())
recent_item = RecentItem(path=p)
self.grid.add_widget(recent_item)
recent_item.bind(on_press=self.btn_release)
self.grid.height += recent_item.height
self.grid.height = max(self.grid.height, self.height)
def btn_release(self, instance):
        '''Event handler for 'on_press' of a RecentItem.
'''
d = get_designer()
d.ids.toll_bar_top._perform_open(instance.path)
class DesignerStartPage(BoxLayout):
recent_files_box = ObjectProperty(None)
'''This property is an instance
of :class:`~designer.components.start_page.RecentFilesBox`
:data:`recent_files_box` is a :class:`~kivy.properties.ObjectProperty`
'''
__events__ = ('on_open_down', 'on_new_down', 'on_help')
def on_open_down(self, *args):
'''Default Event Handler for 'on_open_down'
'''
pass
def on_new_down(self, *args):
'''Default Event Handler for 'on_new_down'
'''
pass
def on_help(self, *args):
'''Default Event Handler for 'on_help'
'''
pass
| 27.495726 | 93 | 0.594809 |
f7104f79652ee5e3c7047f0cf3b972ab698cbea7 | 6,962 | py | Python | nova/console/websocketproxy.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | [
"Apache-2.0"
] | null | null | null | nova/console/websocketproxy.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | [
"Apache-2.0"
] | null | null | null | nova/console/websocketproxy.py | ebalduf/nova-backports | 6bf97ec73467de522d34ab7a17ca0e0874baa7f9 | [
"Apache-2.0"
] | 1 | 2020-07-24T00:41:18.000Z | 2020-07-24T00:41:18.000Z | # Copyright (c) 2012 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
Websocket proxy that is compatible with OpenStack Nova.
Leverages websockify.py by Joel Martin
'''
import socket
import sys
from oslo_log import log as logging
from six.moves import http_cookies as Cookie
import six.moves.urllib.parse as urlparse
import websockify
import nova.conf
from nova.consoleauth import rpcapi as consoleauth_rpcapi
from nova import context
from nova import exception
from nova.i18n import _
LOG = logging.getLogger(__name__)
CONF = nova.conf.CONF
class NovaProxyRequestHandlerBase(object):
def address_string(self):
# NOTE(rpodolyaka): override the superclass implementation here and
# explicitly disable the reverse DNS lookup, which might fail on some
# deployments due to DNS configuration and break VNC access completely
return str(self.client_address[0])
def verify_origin_proto(self, connection_info, origin_proto):
access_url = connection_info.get('access_url')
if not access_url:
detail = _("No access_url in connection_info. "
"Cannot validate protocol")
raise exception.ValidationError(detail=detail)
expected_protos = [urlparse.urlparse(access_url).scheme]
# NOTE: For serial consoles the expected protocol could be ws or
# wss which correspond to http and https respectively in terms of
# security.
if 'ws' in expected_protos:
expected_protos.append('http')
if 'wss' in expected_protos:
expected_protos.append('https')
return origin_proto in expected_protos
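    # For instance, an access_url of 'wss://host:6080/' (placeholder) yields
    # expected protocols ['wss', 'https'], so both secure-websocket and HTTPS
    # origins are accepted.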
def new_websocket_client(self):
"""Called after a new WebSocket connection has been established."""
# Reopen the eventlet hub to make sure we don't share an epoll
# fd with parent and/or siblings, which would be bad
from eventlet import hubs
hubs.use_hub()
# The nova expected behavior is to have token
# passed to the method GET of the request
parse = urlparse.urlparse(self.path)
if parse.scheme not in ('http', 'https'):
# From a bug in urlparse in Python < 2.7.4 we cannot support
# special schemes (cf: http://bugs.python.org/issue9374)
if sys.version_info < (2, 7, 4):
raise exception.NovaException(
_("We do not support scheme '%s' under Python < 2.7.4, "
"please use http or https") % parse.scheme)
query = parse.query
token = urlparse.parse_qs(query).get("token", [""]).pop()
if not token:
# NoVNC uses it's own convention that forward token
# from the request to a cookie header, we should check
# also for this behavior
hcookie = self.headers.getheader('cookie')
if hcookie:
cookie = Cookie.SimpleCookie()
cookie.load(hcookie)
if 'token' in cookie:
token = cookie['token'].value
ctxt = context.get_admin_context()
rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
connect_info = rpcapi.check_token(ctxt, token=token)
if not connect_info:
raise exception.InvalidToken(token=token)
# Verify Origin
expected_origin_hostname = self.headers.getheader('Host')
if ':' in expected_origin_hostname:
e = expected_origin_hostname
if '[' in e and ']' in e:
expected_origin_hostname = e.split(']')[0][1:]
else:
expected_origin_hostname = e.split(':')[0]
expected_origin_hostnames = CONF.console_allowed_origins
expected_origin_hostnames.append(expected_origin_hostname)
origin_url = self.headers.getheader('Origin')
# missing origin header indicates non-browser client which is OK
if origin_url is not None:
origin = urlparse.urlparse(origin_url)
origin_hostname = origin.hostname
origin_scheme = origin.scheme
if origin_hostname == '' or origin_scheme == '':
detail = _("Origin header not valid.")
raise exception.ValidationError(detail=detail)
if origin_hostname not in expected_origin_hostnames:
detail = _("Origin header does not match this host.")
raise exception.ValidationError(detail=detail)
if not self.verify_origin_proto(connect_info, origin_scheme):
detail = _("Origin header protocol does not match this host.")
raise exception.ValidationError(detail=detail)
self.msg(_('connect info: %s'), str(connect_info))
host = connect_info['host']
port = int(connect_info['port'])
# Connect to the target
self.msg(_("connecting to: %(host)s:%(port)s") % {'host': host,
'port': port})
tsock = self.socket(host, port, connect=True)
# Handshake as necessary
if connect_info.get('internal_access_path'):
tsock.send("CONNECT %s HTTP/1.1\r\n\r\n" %
connect_info['internal_access_path'])
while True:
data = tsock.recv(4096, socket.MSG_PEEK)
if data.find("\r\n\r\n") != -1:
if data.split("\r\n")[0].find("200") == -1:
raise exception.InvalidConnectionInfo()
tsock.recv(len(data))
break
# Start proxying
try:
self.do_proxy(tsock)
except Exception:
if tsock:
tsock.shutdown(socket.SHUT_RDWR)
tsock.close()
self.vmsg(_("%(host)s:%(port)s: Target closed") %
{'host': host, 'port': port})
raise
class NovaProxyRequestHandler(NovaProxyRequestHandlerBase,
websockify.ProxyRequestHandler):
def __init__(self, *args, **kwargs):
websockify.ProxyRequestHandler.__init__(self, *args, **kwargs)
def socket(self, *args, **kwargs):
return websockify.WebSocketServer.socket(*args, **kwargs)
class NovaWebSocketProxy(websockify.WebSocketProxy):
@staticmethod
def get_logger():
return LOG
| 40.011494 | 78 | 0.620511 |
f710765d0c0048687b632aaad0876e54da59b574 | 2,249 | py | Python | lib/models/resnet_trans_head.py | hz-ants/CDPN-source- | 625f9a80858f8a2fb9e74f88ea83073495141693 | [
"Apache-2.0"
] | 31 | 2020-12-21T09:36:30.000Z | 2022-03-04T03:27:48.000Z | lib/models/resnet_trans_head.py | hz-ants/CDPN-source- | 625f9a80858f8a2fb9e74f88ea83073495141693 | [
"Apache-2.0"
] | 3 | 2021-03-29T10:54:41.000Z | 2021-04-28T08:33:48.000Z | lib/models/resnet_trans_head.py | hz-ants/CDPN-source- | 625f9a80858f8a2fb9e74f88ea83073495141693 | [
"Apache-2.0"
] | 13 | 2020-12-21T09:42:05.000Z | 2022-03-25T06:04:24.000Z | import torch.nn as nn
import torch
class TransHeadNet(nn.Module):
def __init__(self, in_channels, num_layers=3, num_filters=256, kernel_size=3, output_dim=3, freeze=False,
with_bias_end=True):
super(TransHeadNet, self).__init__()
self.freeze = freeze
        if kernel_size == 3:
            padding = 1
        elif kernel_size == 2:
            padding = 0
        else:
            raise ValueError('kernel_size must be 2 or 3, got {}'.format(kernel_size))
self.features = nn.ModuleList()
for i in range(num_layers):
_in_channels = in_channels if i == 0 else num_filters
self.features.append(nn.Conv2d(_in_channels, num_filters, kernel_size=kernel_size, stride=1, padding=padding, bias=False))
self.features.append(nn.BatchNorm2d(num_filters))
self.features.append(nn.ReLU(inplace=True))
self.linears = nn.ModuleList()
self.linears.append(nn.Linear(256 * 8 * 8, 4096))
self.linears.append(nn.ReLU(inplace=True))
self.linears.append(nn.Linear(4096, 4096))
self.linears.append(nn.ReLU(inplace=True))
self.linears.append(nn.Linear(4096, output_dim))
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.normal_(m.weight, mean=0, std=0.001)
if with_bias_end and (m.bias is not None):
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
nn.init.normal_(m.weight, mean=0, std=0.001)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, mean=0, std=0.001)
def forward(self, x):
if self.freeze:
with torch.no_grad():
for i, l in enumerate(self.features):
x = l(x)
x = x.view(-1, 256*8*8)
for i, l in enumerate(self.linears):
x = l(x)
return x.detach()
else:
for i, l in enumerate(self.features):
x = l(x)
x = x.view(-1, 256*8*8)
for i, l in enumerate(self.linears):
x = l(x)
return x
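# Shape sanity sketch: with the default arguments, the flatten above requires
# 256-channel 8x8 feature maps:
#
#     head = TransHeadNet(in_channels=256)
#     out = head(torch.randn(2, 256, 8, 8))  # -> torch.Size([2, 3])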
| 37.483333 | 134 | 0.549133 |
f710769b0ae8210f3a325400b468f41629c87e45 | 4,382 | py | Python | pipelines/cont_pipeline.py | SurvivorT/SRTP | 1ddc0c4ec31d61daf9f4292c533722e61818eb51 | [
"MIT"
] | 489 | 2017-02-21T21:40:22.000Z | 2022-03-31T08:01:30.000Z | pipelines/cont_pipeline.py | AliBeikmohammadi/MADRL | 3156eb6d6a1e8a4c91ff1dce9f5fc565b2c25c94 | [
"MIT"
] | 35 | 2017-03-10T12:28:11.000Z | 2022-02-14T14:58:21.000Z | pipelines/cont_pipeline.py | AliBeikmohammadi/MADRL | 3156eb6d6a1e8a4c91ff1dce9f5fc565b2c25c94 | [
"MIT"
] | 121 | 2017-02-24T20:13:53.000Z | 2022-03-08T08:56:32.000Z | #!/usr/bin/env python
#
# File: cont_pipeline.py
#
# Created: Friday, July 15 2016 by rejuvyesh <mail@rejuvyesh.com>
#
import argparse
import os
import yaml
import shutil
import sys
import rltools
from pipelines import pipeline
# Fix python 2.x
try:
input = raw_input
except NameError:
pass
def phase_train(spec, spec_file):
rltools.util.header('=== Running {} ==='.format(spec_file))
# Make checkpoint dir. All outputs go here
storagedir = spec['options']['storagedir']
n_workers = spec['options']['n_workers']
checkptdir = os.path.join(spec['options']['storagedir'], spec['options']['checkpt_subdir'])
rltools.util.mkdir_p(checkptdir)
assert not os.listdir(checkptdir), 'Checkpoint directory {} is not empty!'.format(checkptdir)
cmd_templates, output_filenames, argdicts = [], [], []
for alg in spec['training']['algorithms']:
for bline in spec['training']['baselines']:
for n_ev in spec['n_evaders']:
for n_pu in spec['n_pursuers']:
for n_se in spec['n_sensors']:
for n_co in spec['n_coop']:
# Number of cooperating agents can't be greater than pursuers
if n_co > n_pu:
continue
for f_rew in spec['food_reward']:
for p_rew in spec['poison_reward']:
for e_rew in spec['encounter_reward']:
for disc in spec['discounts']:
for gae in spec['gae_lambdas']:
for run in range(spec['training']['runs']):
strid = 'alg={},bline={},n_ev={},n_pu={},n_se={},n_co={},f_rew={},p_rew={},e_rew={},disc={},gae={},run={}'.format(
alg['name'], bline, n_ev, n_pu, n_se, n_co,
f_rew, p_rew, e_rew, disc, gae, run)
cmd_templates.append(alg['cmd'].replace(
'\n', ' ').strip())
output_filenames.append(strid + '.txt')
argdicts.append({
'baseline_type': bline,
'n_evaders': n_ev,
'n_pursuers': n_pu,
'n_sensors': n_se,
'n_coop': n_co,
'discount': disc,
'food_reward': f_rew,
'poison_reward': p_rew,
'encounter_reward': e_rew,
'gae_lambda': gae,
'log': os.path.join(checkptdir,
strid + '.h5')
})
rltools.util.ok('{} jobs to run...'.format(len(cmd_templates)))
rltools.util.warn('Continue? y/n')
if input() == 'y':
pipeline.run_jobs(cmd_templates, output_filenames, argdicts, storagedir,
n_workers=n_workers)
else:
rltools.util.failure('Canceled.')
sys.exit(1)
# Copy the pipeline yaml file to the output dir too
shutil.copyfile(spec_file, os.path.join(checkptdir, 'pipeline.yaml'))
# Keep git commit
import subprocess
git_hash = subprocess.check_output('git rev-parse HEAD', shell=True).strip()
with open(os.path.join(checkptdir, 'git_hash.txt'), 'w') as f:
f.write(git_hash + '\n')
def main():
parser = argparse.ArgumentParser()
parser.add_argument('spec', type=str)
args = parser.parse_args()
with open(args.spec, 'r') as f:
        spec = yaml.safe_load(f)
phase_train(spec, args.spec)
if __name__ == '__main__':
main()
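# Minimal spec sketch showing the keys phase_train() expects (every value is
# a placeholder):
#
#     options: {storagedir: /tmp/runs, checkpt_subdir: ckpts, n_workers: 2}
#     training:
#       runs: 1
#       baselines: [linear]
#       algorithms: [{name: trpo, cmd: python train.py --log {log}}]
#     n_evaders: [10]
#     n_pursuers: [8]
#     n_sensors: [30]
#     n_coop: [4]
#     food_reward: [10]
#     poison_reward: [-1]
#     encounter_reward: [0.01]
#     discounts: [0.95]
#     gae_lambdas: [0.95]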
| 43.82 | 166 | 0.438384 |
f7109e5f329e34712969175bcdd6c832599f7ef5 | 1,218 | py | Python | mandala/tests/test_call_graph.py | amakelov/mandala | a9ec051ef730ada4eed216c62a07b033126e78d5 | [
"Apache-2.0"
] | 9 | 2022-02-22T19:24:01.000Z | 2022-03-23T04:46:41.000Z | mandala/tests/test_call_graph.py | amakelov/mandala | a9ec051ef730ada4eed216c62a07b033126e78d5 | [
"Apache-2.0"
] | null | null | null | mandala/tests/test_call_graph.py | amakelov/mandala | a9ec051ef730ada4eed216c62a07b033126e78d5 | [
"Apache-2.0"
] | null | null | null | from .utils import *
from .funcs import *
def test_unit():
storage = Storage()
@op(storage)
    def f(x: int) -> int:
return x + 1
@superop(storage)
    def f_twice(x: int) -> int:
return f(f(x))
with run(storage, autocommit=True):
f_twice(42)
cg = storage.call_graph_st
nodes = cg.get_nodes()
assert nodes == [f_twice.op.qualified_name, f.op.qualified_name]
assert cg.get_neighbors(node=nodes[0]) == [f.op.qualified_name]
assert cg.get_callers(node=f.op.qualified_name) == [f_twice.op.qualified_name]
### now, check that we detect invalidation of previous version of calling superop
@op(storage, version='1')
    def f(x: int) -> int:
return x - 1
# this should not work
try:
@superop(storage)
        def f_twice(x: int) -> int:
return f(f(x))
assert False
except SynchronizationError:
assert True
    except Exception:
assert False
# this should work
try:
@superop(storage, version='1')
        def f_twice(x: int) -> int:
return f(f(x))
assert True
except SynchronizationError:
assert False
    except Exception:
        assert False
 | 24.857143 | 85 | 0.587028 |
f710a6c24308bd6ba7693092f6d121cecdb9b7b8 | 1,607 | py | Python | inaccel/keras/applications/imagenet_utils.py | inaccel/keras | bebd0ca930b9e2c2aee320e2e40b3d00cd15e46a | [
"Apache-2.0"
] | 1 | 2021-01-27T12:20:35.000Z | 2021-01-27T12:20:35.000Z | inaccel/keras/applications/imagenet_utils.py | inaccel/keras | bebd0ca930b9e2c2aee320e2e40b3d00cd15e46a | [
"Apache-2.0"
] | null | null | null | inaccel/keras/applications/imagenet_utils.py | inaccel/keras | bebd0ca930b9e2c2aee320e2e40b3d00cd15e46a | [
"Apache-2.0"
] | null | null | null | """Utilities for ImageNet data preprocessing & prediction decoding.
"""
import json
import keras.utils.data_utils as data_utils
CLASS_INDEX = None
CLASS_INDEX_PATH = ('https://storage.googleapis.com/download.tensorflow.org/'
'data/imagenet_class_index.json')
def decode_predictions(preds, top=5):
"""Decodes the prediction of an ImageNet model.
# Arguments
preds: Numpy array encoding a batch of predictions.
top: Integer, how many top-guesses to return.
# Returns
A list of lists of top class prediction tuples
`(class_name, class_description)`.
One list of tuples per sample in batch input.
# Raises
ValueError: In case of invalid shape of the `preds` array
(must be 2D).
"""
global CLASS_INDEX
if len(preds.shape) != 2 or preds.shape[1] != 5:
raise ValueError('`decode_predictions` expects '
'a batch of predictions '
'(i.e. a 2D array of shape (samples, 5)). '
'Found array with shape: ' + str(preds.shape))
if CLASS_INDEX is None:
fpath = data_utils.get_file(
'imagenet_class_index.json',
CLASS_INDEX_PATH,
cache_subdir='models',
file_hash='c2c37ea517e94d9795004a39431a14cb')
with open(fpath) as f:
CLASS_INDEX = json.load(f)
results = []
for pred in preds:
top_indices = pred[:min(top, 5)]
result = [tuple(CLASS_INDEX[str(i)]) for i in top_indices]
results.append(result)
return results
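# Usage sketch: ``preds`` holds top-5 ImageNet class indices per sample (the
# indices below are arbitrary examples):
#
#     preds = np.array([[207, 208, 219, 220, 230]])
#     decode_predictions(preds, top=3)
#     # -> one list of (wnid, human_label) tuples per input sample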
| 33.479167 | 77 | 0.613566 |
f710ac528885b1b93f31c632c55a3507e9b7fd6d | 3,475 | py | Python | pipe-cli/mount/pipefuse/fslock.py | cmbkoko1989/cloud-pipeline | 9af1218151ef02f87915726eb92c0cc626f7ab11 | [
"Apache-2.0"
] | null | null | null | pipe-cli/mount/pipefuse/fslock.py | cmbkoko1989/cloud-pipeline | 9af1218151ef02f87915726eb92c0cc626f7ab11 | [
"Apache-2.0"
] | null | null | null | pipe-cli/mount/pipefuse/fslock.py | cmbkoko1989/cloud-pipeline | 9af1218151ef02f87915726eb92c0cc626f7ab11 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017-2019 EPAM Systems, Inc. (https://www.epam.com/)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import time
from abc import ABCMeta, abstractmethod
from threading import RLock, Thread
from fuse import fuse_get_context
def get_lock(threads, monitoring_delay):
return PathLock(monitoring_delay=monitoring_delay) if threads else DummyLock()
def monitor_locks(monitor_lock, locks, timeout):
while True:
try:
monitor_lock.acquire()
logging.debug('Updating path lock status')
            free_paths = [path for path, lock in locks.items() if lock.acquire(blocking=False)]
logging.debug('Releasing %d locks' % len(free_paths))
for path in free_paths:
del locks[path]
logging.debug('Finished path lock status update')
finally:
monitor_lock.release()
time.sleep(timeout)
class FileSystemLock:
__metaclass__ = ABCMeta
@abstractmethod
def lock(self, path):
pass
@abstractmethod
def unlock(self, path):
pass
class DummyLock(FileSystemLock):
def lock(self, path):
pass
def unlock(self, path):
pass
class PathLock(FileSystemLock):
def __init__(self, monitoring_delay=600):
self._mutex = RLock()
self._monitor_lock = RLock()
self._locks = {}
self._monitor = Thread(target=monitor_locks, args=(self._monitor_lock, self._locks, monitoring_delay,))
self._monitor.daemon = True
self._monitor.start()
def lock(self, path):
try:
self._monitor_lock.acquire()
logging.debug('Locking path %s for %s' % (path, str(fuse_get_context())))
path_lock = self._get_path_lock(path)
self._lock_path(path_lock)
logging.debug('Acquired lock for %s' % path)
finally:
self._monitor_lock.release()
def unlock(self, path):
logging.debug('Unlocking path %s for %s' % (path, str(fuse_get_context())))
self._release_path(path)
def _release_path(self, path):
try:
self._mutex.acquire()
if path not in self._locks:
logging.debug('Cannot release non-existing lock.')
else:
self._locks[path].release()
logging.debug('Released lock for %s' % path)
finally:
self._mutex.release()
logging.debug('Finished unlocking for %s' % path)
def _get_path_lock(self, path):
try:
self._mutex.acquire()
if path not in self._locks:
self._locks[path] = RLock()
logging.debug('Created new lock for %s' % path)
return self._locks[path]
finally:
self._mutex.release()
def _lock_path(self, path_lock):
try:
path_lock.acquire()
except:
path_lock.release()
raise
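# Minimal usage sketch (the path is a placeholder):
#
#     fs_lock = get_lock(threads=True, monitoring_delay=600)
#     fs_lock.lock('/mnt/bucket/file')
#     try:
#         pass  # critical section
#     finally:
#         fs_lock.unlock('/mnt/bucket/file')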
| 30.217391 | 111 | 0.624748 |
f710de970e7fba982966b7b605985bbabc605981 | 447 | py | Python | bibliohub/catalog/urls.py | apjanco/bibliohub | 95da034d2e136bd4ae25a9b6932fd19124dacd9b | [
"MIT"
] | null | null | null | bibliohub/catalog/urls.py | apjanco/bibliohub | 95da034d2e136bd4ae25a9b6932fd19124dacd9b | [
"MIT"
] | null | null | null | bibliohub/catalog/urls.py | apjanco/bibliohub | 95da034d2e136bd4ae25a9b6932fd19124dacd9b | [
"MIT"
] | null | null | null | from django.urls import path
from . import views
from .views import SearchResultsView, HomePageView
urlpatterns = [
path('', views.index, name='index'),
# path('books/', views.BookListView.as_view(), name='books'),
path('search/', SearchResultsView.as_view(), name='search_results'),
path('home/', HomePageView.as_view(),name='home'),
# path('author_search/', AuthorSearchResultsView.as_view(), name='author_search_results'),
]
 | 44.7 | 94 | 0.711409 |
f7112d60b47eb6fb4ad1be2a0ffbcd3b4d41a3f4 | 21,283 | py | Python | reclor_trainer_base_v2.py | SparkJiao/MERIt | e887dd11bd2969345a5fb07c47d49bd0245e41e6 | [
"MIT"
] | 8 | 2022-03-01T09:02:44.000Z | 2022-03-18T14:41:56.000Z | reclor_trainer_base_v2.py | SparkJiao/MERIt | e887dd11bd2969345a5fb07c47d49bd0245e41e6 | [
"MIT"
] | 1 | 2022-03-09T12:12:22.000Z | 2022-03-10T09:08:42.000Z | reclor_trainer_base_v2.py | SparkJiao/MERIt | e887dd11bd2969345a5fb07c47d49bd0245e41e6 | [
"MIT"
] | 2 | 2022-03-02T01:46:52.000Z | 2022-03-02T13:51:53.000Z | # coding=utf-8
#
# Copyright 2020 Heinrich Heine University Duesseldorf
#
# Part of this code is based on the source code of BERT-DST
# (arXiv:1907.03040)
# Part of this code is based on the source code of Transformers
# (arXiv:1910.03771)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import glob
import json
import logging
import os
import sys
from typing import Dict, Union
import hydra
import numpy as np
import torch
import transformers
from fairscale.nn.data_parallel.fully_sharded_data_parallel import FullyShardedDataParallel as FullyShardedDDP
from fairscale.nn.wrap.auto_wrap import auto_wrap
from fairscale.optim.grad_scaler import ShardedGradScaler
from omegaconf import DictConfig, OmegaConf
from torch import distributed as dist
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler, TensorDataset)
from torch.utils.data.distributed import DistributedSampler
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm, trange
from transformers import (get_linear_schedule_with_warmup, AutoTokenizer, PreTrainedTokenizer)
from general_util.logger import setting_logger
from general_util.training_utils import batch_to_device, unwrap_model, set_seed, note_best_checkpoint, initialize_optimizer
logger: logging.Logger
# transformers.logging.set_verbosity_error()
def save_model(model: Union[torch.nn.Module, FullyShardedDDP], cfg: DictConfig, output_dir: str, tokenizer: PreTrainedTokenizer = None):
# Save model checkpoint.
if cfg.local_rank != -1:
state_dict = model.state_dict()
if cfg.local_rank == 0:
unwrap_model(model).save_pretrained(output_dir, state_dict=state_dict)
else:
model.save_pretrained(output_dir)
# Save tokenizer and training args.
if cfg.local_rank in [-1, 0]:
if tokenizer is not None:
tokenizer.save_pretrained(output_dir)
OmegaConf.save(cfg, os.path.join(output_dir, "training_config.yaml"))
logger.info("Saving model checkpoint to %s", output_dir)
def forward_step(model, inputs: Dict[str, torch.Tensor], cfg, scaler):
if cfg.fp16:
with torch.cuda.amp.autocast():
outputs = model(**inputs)
loss = outputs["loss"] # model outputs are always tuple in transformers (see doc)
else:
outputs = model(**inputs)
loss = outputs["loss"] # model outputs are always tuple in pytorch-transformers (see doc)
if cfg.n_gpu > 1:
loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
if cfg.gradient_accumulation_steps > 1:
loss = loss / cfg.gradient_accumulation_steps
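        # e.g. (illustrative numbers) per-GPU batch 4 with 8 accumulation steps acts
        # like batch 32; scaling the loss here keeps the accumulated gradient an
        # average over the micro-batches rather than a sum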
if cfg.fp16:
scaler.scale(loss).backward()
else:
loss.backward()
return loss.item()
def train(cfg, train_dataset, features, model, tokenizer, continue_from_global_step=0):
""" Train the model """
if cfg.local_rank in [-1, 0]:
_dir_splits = cfg.output_dir.split('/')
_log_dir = '/'.join([_dir_splits[0], 'runs'] + _dir_splits[1:])
tb_writer = SummaryWriter(log_dir=_log_dir)
else:
tb_writer = None
cfg.train_batch_size = cfg.per_gpu_train_batch_size * max(1, cfg.n_gpu)
train_sampler = RandomSampler(train_dataset) if cfg.local_rank == -1 else DistributedSampler(train_dataset)
train_collator = hydra.utils.instantiate(cfg.collator) if "collator" in cfg and cfg.collator else None
train_dataloader = DataLoader(dataset=train_dataset, sampler=train_sampler, batch_size=cfg.train_batch_size,
collate_fn=train_collator, num_workers=cfg.num_workers, pin_memory=True,
prefetch_factor=cfg.prefetch_factor)
if "extended_vocab" in cfg and cfg.extended_vocab:
logger.info(f"Extended extra vocab size: {cfg.extended_vocab}")
model.resize_token_embeddings(model.config.vocab_size + cfg.extended_vocab)
if cfg.max_steps > 0:
t_total = cfg.max_steps
cfg.num_train_epochs = cfg.max_steps // (len(train_dataloader) // cfg.gradient_accumulation_steps) + 1
else:
t_total = len(train_dataloader) // cfg.gradient_accumulation_steps * cfg.num_train_epochs
num_warmup_steps = int(t_total * cfg.warmup_proportion) if cfg.warmup_proportion else cfg.warmup_steps
optimizer = scheduler = None
# Prepare optimizer and schedule (linear warmup and decay)
if cfg.local_rank == -1:
no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
optimizer_grouped_parameters = [
{
'params': [p for n, p in model.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad],
'weight_decay': cfg.weight_decay
},
{
'params': [p for n, p in model.named_parameters() if (any(nd in n for nd in no_decay)) and p.requires_grad],
'weight_decay': 0.0
}
]
optimizer = initialize_optimizer(cfg, optimizer_grouped_parameters)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)
if cfg.fp16:
if cfg.local_rank != -1:
scaler = ShardedGradScaler()
else:
from torch.cuda.amp.grad_scaler import GradScaler
scaler = GradScaler()
else:
scaler = None
# multi-gpu training (should be after apex fp16 initialization)
model_single_gpu = model
if cfg.n_gpu > 1:
model = torch.nn.DataParallel(model_single_gpu)
# Distributed training (should be after apex fp16 initialization)
if cfg.local_rank != -1:
model = auto_wrap(model)
model = FullyShardedDDP(model,
mixed_precision=cfg.fp16,
flatten_parameters=getattr(cfg, "flatten_parameters", True),
reshard_after_forward=cfg.reshard_after_forward,
move_grads_to_cpu=cfg.move_grads_to_cpu,
move_params_to_cpu=cfg.move_params_to_cpu)
if not cfg.move_params_to_cpu:
model = model.to(cfg.device)
no_decay = ['bias', 'LayerNorm.weight', 'layer_norm.weight']
optimizer_grouped_parameters = [
{
'params': [p for n, p in model.named_parameters() if (not any(nd in n for nd in no_decay)) and p.requires_grad],
'weight_decay': cfg.weight_decay
},
{
'params': [p for n, p in model.named_parameters() if (any(nd in n for nd in no_decay)) and p.requires_grad],
'weight_decay': 0.0
}
]
optimizer = initialize_optimizer(cfg, optimizer_grouped_parameters)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=t_total)
logger.info(optimizer)
# Train!
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", cfg.num_train_epochs)
logger.info(" Instantaneous batch size per GPU = %d", cfg.per_gpu_train_batch_size)
logger.info(" Total train batch size (w. parallel, distributed & accumulation) = %d",
cfg.train_batch_size * cfg.gradient_accumulation_steps * (dist.get_world_size() if cfg.local_rank != -1 else 1))
logger.info(" Gradient Accumulation steps = %d", cfg.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
logger.info(" Warmup steps = %d", num_warmup_steps)
if continue_from_global_step > 0:
logger.info("Fast forwarding to global step %d to resume training from latest checkpoint...", continue_from_global_step)
global_step = 0
tr_loss, logging_loss = 0.0, 0.0
model.zero_grad()
train_iterator = trange(int(cfg.num_train_epochs), desc="Epoch", disable=cfg.local_rank not in [-1, 0])
set_seed(cfg) # Added here for reproducibility (even between python 2 and 3)
for epoch in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=cfg.local_rank not in [-1, 0], dynamic_ncols=True)
if cfg.local_rank != -1:
train_dataloader.sampler.set_epoch(epoch)
for step, batch in enumerate(epoch_iterator):
# If training is continued from a checkpoint, fast forward
# to the state of that checkpoint.
if global_step < continue_from_global_step:
if (step + 1) % cfg.gradient_accumulation_steps == 0:
scheduler.step() # Update learning rate schedule
global_step += 1
continue
model.train()
batch = batch_to_device(batch, cfg.device)
if (step + 1) % cfg.gradient_accumulation_steps != 0 and cfg.local_rank != -1:
# Avoid unnecessary DDP synchronization since there will be no backward pass on this example.
with model.no_sync():
loss = forward_step(model, batch, cfg, scaler)
else:
loss = forward_step(model, batch, cfg, scaler)
tr_loss += loss
if (step + 1) % cfg.gradient_accumulation_steps == 0:
if cfg.fp16:
scaler.unscale_(optimizer)
if cfg.max_grad_norm:
if hasattr(optimizer, "clip_grad_norm"):
optimizer.clip_grad_norm(cfg.max_grad_norm)
elif hasattr(model, "clip_grad_norm_"):
model.clip_grad_norm_(cfg.max_grad_norm)
else:
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg.max_grad_norm)
if cfg.fp16:
scaler.step(optimizer)
scaler.update()
else:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad(set_to_none=True)
global_step += 1
# Log metrics
if cfg.local_rank in [-1, 0] and cfg.logging_steps > 0 and global_step % cfg.logging_steps == 0:
tb_writer.add_scalar('lr', scheduler.get_lr()[0], global_step)
tb_writer.add_scalar('loss', (tr_loss - logging_loss) / cfg.logging_steps, global_step)
logging_loss = tr_loss
# Save model checkpoint
if cfg.save_steps > 0 and global_step % cfg.save_steps == 0:
output_dir = os.path.join(cfg.output_dir, 'checkpoint-{}'.format(global_step))
if cfg.local_rank in [-1, 0] and not os.path.exists(output_dir):
os.makedirs(output_dir)
save_model(model, cfg, output_dir, tokenizer)
# Evaluation
if cfg.evaluate_during_training and cfg.eval_steps > 0 and global_step % cfg.eval_steps == 0:
state_dict = model.state_dict()
if cfg.local_rank in [-1, 0]:
results = evaluate(cfg, model, tokenizer, prefix=str(global_step), _split="dev")
for key, value in results.items():
tb_writer.add_scalar(f"eval/{key}", value, global_step)
sub_path = os.path.join(cfg.output_dir, 'checkpoint-{}'.format(global_step))
flag = note_best_checkpoint(cfg, results, sub_path)
if cfg.save_best and flag:
if cfg.local_rank == 0:
unwrap_model(model).save_pretrained(cfg.output_dir, state_dict=state_dict)
else:
model.save_pretrained(cfg.output_dir)
tokenizer.save_pretrained(cfg.output_dir)
OmegaConf.save(cfg, os.path.join(cfg.output_dir, "training_config.yaml"))
logger.info("Saving best model checkpoint to %s", cfg.output_dir)
if 0 < cfg.max_steps < global_step:
epoch_iterator.close()
break
if 0 < cfg.max_steps < global_step:
train_iterator.close()
break
if cfg.local_rank in [-1, 0]:
tb_writer.close()
return global_step, tr_loss / global_step
def evaluate(cfg, model, tokenizer: PreTrainedTokenizer, prefix="", _split="dev"):
dataset, features = load_and_cache_examples(cfg, tokenizer, _split=_split)
if not os.path.exists(os.path.join(cfg.output_dir, prefix)):
os.makedirs(os.path.join(cfg.output_dir, prefix))
cfg.eval_batch_size = cfg.per_gpu_eval_batch_size
eval_sampler = SequentialSampler(dataset) # Note that DistributedSampler samples randomly
eval_collator = hydra.utils.instantiate(cfg.collator) if "collator" in cfg and cfg.collator else None
eval_dataloader = DataLoader(dataset, sampler=eval_sampler, batch_size=cfg.eval_batch_size,
collate_fn=eval_collator)
single_model_gpu = unwrap_model(model)
single_model_gpu.get_eval_log(reset=True)
# Eval!
torch.cuda.empty_cache()
logger.info("***** Running evaluation {}.{} *****".format(_split, prefix))
logger.info(" Num examples = %d", len(dataset))
logger.info(" Batch size = %d", cfg.eval_batch_size)
# Seems FSDP does not need to unwrap the model for evaluating.
model.eval()
pred_list = []
prob_list = []
for batch in tqdm(eval_dataloader, desc="Evaluating", dynamic_ncols=True):
batch = batch_to_device(batch, cfg.device)
with torch.cuda.amp.autocast():
with torch.no_grad():
outputs = model(**batch)
probs = outputs["logits"].softmax(dim=-1).detach().float().cpu()
prob, pred = probs.max(dim=-1)
pred_list.extend(pred.tolist())
prob_list.extend(prob.tolist())
metric_log, results = single_model_gpu.get_eval_log(reset=True)
logger.info("****** Evaluation Results ******")
logger.info(f"Global Steps: {prefix}")
logger.info(metric_log)
prediction_file = os.path.join(cfg.output_dir, prefix, "eval_predictions.npy")
np.save(prediction_file, pred_list)
json.dump(prob_list, open(os.path.join(cfg.output_dir, prefix, "eval_probs.json"), "w"))
return results
def load_and_cache_examples(cfg, tokenizer: PreTrainedTokenizer, _split="train"):
if cfg.local_rank not in [-1, 0] and _split == "train":
dist.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
if _split == "train":
input_file = cfg.train_file
elif _split == "dev":
input_file = cfg.dev_file
elif _split == "test":
input_file = cfg.test_file
else:
raise RuntimeError(_split)
examples, features, tensors = hydra.utils.call(cfg.read_tensor, file_path=input_file, tokenizer=tokenizer)
if cfg.local_rank == 0 and _split == "train":
dist.barrier() # Make sure only the first process in distributed training process the dataset, and the others will use the cache
dataset = TensorDataset(*tensors)
return dataset, features
@hydra.main(config_path="conf", config_name="config")
def main(cfg: DictConfig):
if cfg.local_rank == -1 or cfg.no_cuda:
device = str(torch.device("cuda" if torch.cuda.is_available() and not cfg.no_cuda else "cpu"))
cfg.n_gpu = torch.cuda.device_count()
else: # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
torch.cuda.set_device(cfg.local_rank)
device = str(torch.device("cuda", cfg.local_rank))
dist.init_process_group(backend='nccl')
cfg.n_gpu = 1
cfg.world_size = dist.get_world_size()
cfg.device = device
global logger
logger = setting_logger(cfg.output_dir, local_rank=cfg.local_rank)
logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
cfg.local_rank, device, cfg.n_gpu, bool(cfg.local_rank != -1), cfg.fp16)
# Set seed
set_seed(cfg)
# Load pre-trained model and tokenizer
if cfg.local_rank not in [-1, 0]:
dist.barrier() # Make sure only the first process in distributed training will download model & vocab
if cfg.pretrain:
pretrain_state_dict = torch.load(cfg.pretrain, map_location='cpu')
else:
pretrain_state_dict = None
tokenizer = AutoTokenizer.from_pretrained(cfg.model_name_or_path)
model = hydra.utils.call(cfg.model, cfg.model_name_or_path, state_dict=pretrain_state_dict)
if cfg.local_rank == 0:
dist.barrier() # Make sure only the first process in distributed training will download model & vocab
if cfg.local_rank == -1: # For FullyShardedDDP, place the model on cpu first.
model.to(cfg.device)
# logger.info("Training/evaluation parameters %s", OmegaConf.to_yaml(cfg))
if cfg.local_rank in [-1, 0] and cfg.do_train:
if not os.path.exists(cfg.output_dir):
os.makedirs(cfg.output_dir)
OmegaConf.save(cfg, os.path.join(cfg.output_dir, "training_config.yaml"))
# Training
if cfg.do_train:
# TODO: Add option for continuously training from checkpoint.
# The operation should be introduced in ``train`` method since both the state dict
# of schedule and optimizer (and scaler, if any) should be loaded.
# If output files already exists, assume to continue training from latest checkpoint (unless overwrite_output_dir is set)
continue_from_global_step = 0 # If set to 0, start training from the beginning
# if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
# checkpoints = list(os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/*/' + WEIGHTS_NAME, recursive=True)))
# if len(checkpoints) > 0:
# checkpoint = checkpoints[-1]
# logger.info("Resuming training from the latest checkpoint: %s", checkpoint)
# continue_from_global_step = int(checkpoint.split('-')[-1])
# model = model_class.from_pretrained(checkpoint)
# model.to(args.device)
train_dataset, features = load_and_cache_examples(cfg, tokenizer, _split="train")
global_step, tr_loss = train(cfg, train_dataset, features, model, tokenizer, continue_from_global_step)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
# Test
results = {}
if cfg.do_eval and cfg.local_rank in [-1, 0]:
checkpoints = [cfg.output_dir]
if cfg.save_best:
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
elif cfg.prediction_cfg.best_checkpoint and os.path.exists(cfg.prediction_cfg.best_checkpoint):
checkpoints = [cfg.prediction_cfg.best_checkpoint]
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
elif cfg.eval_sub_path:
checkpoints = list(
os.path.dirname(c) for c in
sorted(glob.glob(cfg.output_dir + f"/{cfg.eval_sub_path}/" + "pytorch_model.bin", recursive=True))
)
logging.getLogger("transformers.modeling_utils").setLevel(logging.WARN) # Reduce logging
logger.info(" the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
global_step = checkpoint.split("-")[-1] if len(checkpoints) > 1 else ""
prefix = checkpoint.split("/")[-1] if checkpoint.find("checkpoint") != -1 else ""
split = "dev"
model = hydra.utils.call(cfg.model, checkpoint)
model.to(device)
if cfg.test_file:
prefix = f'test' + (f'-{prefix}' if prefix != "" else "")
split = "test"
result = evaluate(cfg, model, tokenizer, prefix=prefix, _split=split)
result = dict((k + "_{}".format(global_step), v) for k, v in result.items())
results.update(result)
return results
if __name__ == "__main__":
hydra_formatted_args = []
# convert the cli params added by torch.distributed.launch into Hydra format
for arg in sys.argv:
if arg.startswith("--"):
hydra_formatted_args.append(arg[len("--"):])
else:
hydra_formatted_args.append(arg)
sys.argv = hydra_formatted_args
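    # e.g. "--local_rank=0" injected by torch.distributed.launch becomes
    # "local_rank=0", which Hydra treats as a config override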
main()
| 45.091102 | 137 | 0.644881 |
f7115de338f1de8c6d119b74bc44f2877d482c1c | 15511 | py | Python | ibis/backends/clickhouse/tests/test_functions.py | jreback/ibis | fdcca59b085416b1311eb268be3886abad1db230 | ["Apache-2.0"] | 1 | 2020-08-19T03:36:26.000Z | 2020-08-19T03:36:26.000Z | ibis/backends/clickhouse/tests/test_functions.py | jreback/ibis | fdcca59b085416b1311eb268be3886abad1db230 | ["Apache-2.0"] | null | null | null | ibis/backends/clickhouse/tests/test_functions.py | jreback/ibis | fdcca59b085416b1311eb268be3886abad1db230 | ["Apache-2.0"] | 2 | 2020-11-27T22:21:50.000Z | 2021-04-03T09:36:25.000Z |
import math
import operator
from datetime import date, datetime
from operator import methodcaller
import pandas as pd
import pandas.testing as tm
import pytest
from pytest import param
import ibis
import ibis.expr.datatypes as dt
import ibis.expr.types as ir
from ibis import literal as L
clickhouse_driver = pytest.importorskip('clickhouse_driver')
pytestmark = pytest.mark.clickhouse
@pytest.mark.parametrize(
('to_type', 'expected'),
[
('int8', 'CAST(`double_col` AS Int8)'),
('int16', 'CAST(`double_col` AS Int16)'),
('float', 'CAST(`double_col` AS Float32)'),
# alltypes.double_col is non-nullable
(dt.Double(nullable=False), '`double_col`'),
],
)
def test_cast_double_col(alltypes, translate, to_type, expected):
expr = alltypes.double_col.cast(to_type)
assert translate(expr) == expected
@pytest.mark.parametrize(
('to_type', 'expected'),
[
('int8', 'CAST(`string_col` AS Int8)'),
('int16', 'CAST(`string_col` AS Int16)'),
(dt.String(nullable=False), '`string_col`'),
('timestamp', 'CAST(`string_col` AS DateTime)'),
('date', 'CAST(`string_col` AS Date)'),
],
)
def test_cast_string_col(alltypes, translate, to_type, expected):
expr = alltypes.string_col.cast(to_type)
assert translate(expr) == expected
@pytest.mark.xfail(
raises=AssertionError, reason='Clickhouse doesn\'t have decimal type'
)
def test_decimal_cast():
assert False
@pytest.mark.parametrize(
'column',
[
'index',
'Unnamed: 0',
'id',
'bool_col',
'tinyint_col',
'smallint_col',
'int_col',
'bigint_col',
'float_col',
'double_col',
'date_string_col',
'string_col',
'timestamp_col',
'year',
'month',
],
)
def test_noop_cast(alltypes, translate, column):
col = alltypes[column]
result = col.cast(col.type())
assert result.equals(col)
assert translate(result) == '`{}`'.format(column)
def test_timestamp_cast_noop(alltypes, translate):
target = dt.Timestamp(nullable=False)
result1 = alltypes.timestamp_col.cast(target)
result2 = alltypes.int_col.cast(target)
assert isinstance(result1, ir.TimestampColumn)
assert isinstance(result2, ir.TimestampColumn)
assert translate(result1) == '`timestamp_col`'
assert translate(result2) == 'CAST(`int_col` AS DateTime)'
def test_timestamp_now(con, translate):
expr = ibis.now()
# now = datetime.now().strftime('%Y-%m-%d %H:%M:%S')
assert translate(expr) == 'now()'
# assert con.execute(expr) == now
@pytest.mark.parametrize(
('unit', 'expected'),
[
('y', '2009-01-01'),
param('m', '2009-05-01', marks=pytest.mark.xfail),
('d', '2009-05-17'),
('w', '2009-05-11'),
('h', '2009-05-17 12:00:00'),
('minute', '2009-05-17 12:34:00'),
],
)
def test_timestamp_truncate(con, translate, unit, expected):
stamp = ibis.timestamp('2009-05-17 12:34:56')
expr = stamp.truncate(unit)
assert con.execute(expr) == pd.Timestamp(expected)
@pytest.mark.parametrize(
('func', 'expected'),
[
(methodcaller('year'), 2015),
(methodcaller('month'), 9),
(methodcaller('day'), 1),
(methodcaller('hour'), 14),
(methodcaller('minute'), 48),
(methodcaller('second'), 5),
],
)
def test_simple_datetime_operations(con, func, expected):
value = ibis.timestamp('2015-09-01 14:48:05.359')
with pytest.raises(ValueError):
con.execute(func(value))
value = ibis.timestamp('2015-09-01 14:48:05')
    assert con.execute(func(value)) == expected
@pytest.mark.parametrize(('value', 'expected'), [(0, None), (5.5, 5.5)])
def test_nullifzero(con, value, expected):
result = con.execute(L(value).nullifzero())
if expected is None:
assert pd.isnull(result)
else:
assert result == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L(None).isnull(), True),
(L(1).isnull(), False),
(L(None).notnull(), False),
(L(1).notnull(), True),
],
)
def test_isnull_notnull(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(ibis.coalesce(5, None, 4), 5),
(ibis.coalesce(ibis.NA, 4, ibis.NA), 4),
(ibis.coalesce(ibis.NA, ibis.NA, 3.14), 3.14),
],
)
def test_coalesce(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(ibis.NA.fillna(5), 5),
(L(5).fillna(10), 5),
(L(5).nullif(5), None),
(L(10).nullif(5), 10),
],
)
def test_fillna_nullif(con, expr, expected):
result = con.execute(expr)
if expected is None:
assert pd.isnull(result)
else:
assert result == expected
@pytest.mark.parametrize(
('value', 'expected'),
[
(L('foo_bar'), 'String'),
(L(5), 'UInt8'),
(L(1.2345), 'Float64'),
(L(datetime(2015, 9, 1, hour=14, minute=48, second=5)), 'DateTime'),
(L(date(2015, 9, 1)), 'Date'),
param(
ibis.NA,
'Null',
marks=pytest.mark.xfail(
raises=AssertionError,
reason=(
'Client/server version mismatch not handled in the '
'clickhouse driver'
),
),
),
],
)
def test_typeof(con, value, expected):
assert con.execute(value.typeof()) == expected
@pytest.mark.parametrize(('value', 'expected'), [('foo_bar', 7), ('', 0)])
def test_string_length(con, value, expected):
assert con.execute(L(value).length()) == expected
@pytest.mark.parametrize(
('op', 'expected'),
[
(methodcaller('substr', 0, 3), 'foo'),
(methodcaller('substr', 4, 3), 'bar'),
(methodcaller('substr', 1), 'oo_bar'),
],
)
def test_string_substring(con, op, expected):
value = L('foo_bar')
assert con.execute(op(value)) == expected
def test_string_column_substring(con, alltypes, translate):
expr = alltypes.string_col.substr(2)
assert translate(expr) == 'substring(`string_col`, 2 + 1)'
assert len(con.execute(expr))
expr = alltypes.string_col.substr(0, 3)
assert translate(expr) == 'substring(`string_col`, 0 + 1, 3)'
assert len(con.execute(expr))
def test_string_reverse(con):
assert con.execute(L('foo').reverse()) == 'oof'
def test_string_upper(con):
assert con.execute(L('foo').upper()) == 'FOO'
def test_string_lower(con):
assert con.execute(L('FOO').lower()) == 'foo'
def test_string_length_literal(con):
assert con.execute(L('FOO').length()) == 3
@pytest.mark.parametrize(
('value', 'op', 'expected'),
[
(L('foobar'), methodcaller('contains', 'bar'), True),
(L('foobar'), methodcaller('contains', 'foo'), True),
(L('foobar'), methodcaller('contains', 'baz'), False),
(L('100%'), methodcaller('contains', '%'), True),
(L('a_b_c'), methodcaller('contains', '_'), True),
],
)
def test_string_contains(con, op, value, expected):
assert con.execute(op(value)) == expected
# TODO: clickhouse-driver escaping bug
def test_re_replace(con, translate):
expr1 = L('Hello, World!').re_replace('.', '\\\\0\\\\0')
expr2 = L('Hello, World!').re_replace('^', 'here: ')
assert con.execute(expr1) == 'HHeelllloo,, WWoorrlldd!!'
assert con.execute(expr2) == 'here: Hello, World!'
@pytest.mark.parametrize(
('value', 'expected'),
[(L('a'), 0), (L('b'), 1), (L('d'), -1)], # TODO: what's the expected?
)
def test_find_in_set(con, value, expected, translate):
vals = list('abc')
expr = value.find_in_set(vals)
assert con.execute(expr) == expected
def test_string_column_find_in_set(con, alltypes, translate):
s = alltypes.string_col
vals = list('abc')
expr = s.find_in_set(vals)
assert translate(expr) == "indexOf(['a','b','c'], `string_col`) - 1"
assert len(con.execute(expr))
@pytest.mark.parametrize(
('url', 'extract', 'expected'),
[
(L('https://www.cloudera.com'), 'HOST', 'www.cloudera.com'),
(L('https://www.cloudera.com'), 'PROTOCOL', 'https'),
(
L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),
'PATH',
'/watch',
),
(
L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10'),
'QUERY',
'v=kEuEcWfewf8&t=10',
),
],
)
def test_parse_url(con, translate, url, extract, expected):
expr = url.parse_url(extract)
assert con.execute(expr) == expected
def test_parse_url_query_parameter(con, translate):
url = L('https://www.youtube.com/watch?v=kEuEcWfewf8&t=10')
expr = url.parse_url('QUERY', 't')
assert con.execute(expr) == '10'
expr = url.parse_url('QUERY', 'v')
assert con.execute(expr) == 'kEuEcWfewf8'
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L('foobar').find('bar'), 3),
(L('foobar').find('baz'), -1),
(L('foobar').like('%bar'), True),
(L('foobar').like('foo%'), True),
(L('foobar').like('%baz%'), False),
(L('foobar').like(['%bar']), True),
(L('foobar').like(['foo%']), True),
(L('foobar').like(['%baz%']), False),
(L('foobar').like(['%bar', 'foo%']), True),
(L('foobarfoo').replace('foo', 'H'), 'HbarH'),
],
)
def test_string_find_like(con, expr, expected):
assert con.execute(expr) == expected
def test_string_column_like(con, alltypes, translate):
expr = alltypes.string_col.like('foo%')
assert translate(expr) == "`string_col` LIKE 'foo%'"
assert len(con.execute(expr))
expr = alltypes.string_col.like(['foo%', '%bar'])
expected = "`string_col` LIKE 'foo%' OR `string_col` LIKE '%bar'"
assert translate(expr) == expected
assert len(con.execute(expr))
def test_string_column_find(con, alltypes, translate):
s = alltypes.string_col
expr = s.find('a')
assert translate(expr) == "position(`string_col`, 'a') - 1"
assert len(con.execute(expr))
expr = s.find(s)
assert translate(expr) == "position(`string_col`, `string_col`) - 1"
assert len(con.execute(expr))
@pytest.mark.parametrize(
('call', 'expected'),
[
(methodcaller('log'), 'log(`double_col`)'),
(methodcaller('log2'), 'log2(`double_col`)'),
(methodcaller('log10'), 'log10(`double_col`)'),
(methodcaller('round'), 'round(`double_col`)'),
(methodcaller('round', 0), 'round(`double_col`, 0)'),
(methodcaller('round', 2), 'round(`double_col`, 2)'),
(methodcaller('exp'), 'exp(`double_col`)'),
(methodcaller('abs'), 'abs(`double_col`)'),
(methodcaller('ceil'), 'ceil(`double_col`)'),
(methodcaller('floor'), 'floor(`double_col`)'),
(methodcaller('sqrt'), 'sqrt(`double_col`)'),
(
methodcaller('sign'),
'intDivOrZero(`double_col`, abs(`double_col`))',
),
],
)
def test_translate_math_functions(con, alltypes, translate, call, expected):
expr = call(alltypes.double_col)
assert translate(expr) == expected
assert len(con.execute(expr))
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L(-5).abs(), 5),
(L(5).abs(), 5),
(L(5.5).round(), 6.0),
(L(5.556).round(2), 5.56),
(L(5.556).ceil(), 6.0),
(L(5.556).floor(), 5.0),
(L(5.556).exp(), math.exp(5.556)),
(L(5.556).sign(), 1),
(L(-5.556).sign(), -1),
(L(0).sign(), 0),
(L(5.556).sqrt(), math.sqrt(5.556)),
(L(5.556).log(2), math.log(5.556, 2)),
(L(5.556).ln(), math.log(5.556)),
(L(5.556).log2(), math.log(5.556, 2)),
(L(5.556).log10(), math.log10(5.556)),
],
)
def test_math_functions(con, expr, expected, translate):
assert con.execute(expr) == expected
def test_greatest(con, alltypes, translate):
expr = ibis.greatest(alltypes.int_col, 10)
assert translate(expr) == "greatest(`int_col`, 10)"
assert len(con.execute(expr))
expr = ibis.greatest(alltypes.int_col, alltypes.bigint_col)
assert translate(expr) == "greatest(`int_col`, `bigint_col`)"
assert len(con.execute(expr))
def test_least(con, alltypes, translate):
expr = ibis.least(alltypes.int_col, 10)
assert translate(expr) == "least(`int_col`, 10)"
assert len(con.execute(expr))
expr = ibis.least(alltypes.int_col, alltypes.bigint_col)
assert translate(expr) == "least(`int_col`, `bigint_col`)"
assert len(con.execute(expr))
# TODO: clickhouse-driver escaping bug
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L('abcd').re_search('[a-z]'), True),
(L('abcd').re_search(r'[\\d]+'), False),
(L('1222').re_search(r'[\\d]+'), True),
],
)
def test_regexp(con, expr, expected):
assert con.execute(expr) == expected
@pytest.mark.parametrize(
('expr', 'expected'),
[
(L('abcd').re_extract('([a-z]+)', 0), 'abcd'),
# (L('abcd').re_extract('(ab)(cd)', 1), 'cd'),
# valid group number but no match => empty string
(L('abcd').re_extract(r'(\\d)', 0), ''),
# match but not a valid group number => NULL
# (L('abcd').re_extract('abcd', 3), None),
],
)
def test_regexp_extract(con, expr, expected, translate):
assert con.execute(expr) == expected
def test_column_regexp_extract(con, alltypes, translate):
expected = r"extractAll(`string_col`, '[\d]+')[3 + 1]"
expr = alltypes.string_col.re_extract(r'[\d]+', 3)
assert translate(expr) == expected
assert len(con.execute(expr))
def test_column_regexp_replace(con, alltypes, translate):
expected = r"replaceRegexpAll(`string_col`, '[\d]+', 'aaa')"
expr = alltypes.string_col.re_replace(r'[\d]+', 'aaa')
assert translate(expr) == expected
assert len(con.execute(expr))
def test_numeric_builtins_work(con, alltypes, df, translate):
expr = alltypes.double_col
result = expr.execute()
expected = df.double_col.fillna(0)
tm.assert_series_equal(result, expected)
def test_null_column(alltypes, translate):
t = alltypes
nrows = t.count().execute()
expr = t.mutate(na_column=ibis.NA).na_column
result = expr.execute()
expected = pd.Series([None] * nrows, name='na_column')
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize(
('attr', 'expected'),
[
(operator.methodcaller('year'), {2009, 2010}),
(operator.methodcaller('month'), set(range(1, 13))),
(operator.methodcaller('day'), set(range(1, 32))),
],
)
def test_date_extract_field(db, alltypes, attr, expected):
t = alltypes
expr = attr(t.timestamp_col.cast('date')).distinct()
result = expr.execute().astype(int)
assert set(result) == expected
def test_timestamp_from_integer(con, alltypes, translate):
# timestamp_col has datetime type
expr = alltypes.int_col.to_timestamp()
assert translate(expr) == 'toDateTime(`int_col`)'
assert len(con.execute(expr))
def test_count_distinct_with_filter(alltypes):
expr = alltypes.string_col.nunique(
where=alltypes.string_col.cast('int64') > 1
)
result = expr.execute()
expected = alltypes.string_col.execute()
expected = expected[expected.astype('int64') > 1].nunique()
assert result == expected
| 28.938433 | 76 | 0.597318 |
f71183a31b57d8e8d37c461a4adb535c2b4581ed | 931 | py | Python | backend/public_info/migrations/0001_initial.py | StichtingIAPC/swipe | d1ea35a40813d2d5e9cf9edde33148c0a825efc4 | ["BSD-3-Clause-Clear"] | null | null | null | backend/public_info/migrations/0001_initial.py | StichtingIAPC/swipe | d1ea35a40813d2d5e9cf9edde33148c0a825efc4 | ["BSD-3-Clause-Clear"] | null | null | null | backend/public_info/migrations/0001_initial.py | StichtingIAPC/swipe | d1ea35a40813d2d5e9cf9edde33148c0a825efc4 | ["BSD-3-Clause-Clear"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.9 on 2018-05-29 18:22
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import uuid
class Migration(migrations.Migration):
initial = True
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
]
operations = [
migrations.CreateModel(
name='Sharing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('uuid', models.UUIDField(default=uuid.uuid4, editable=False)),
('public', models.BooleanField(default=True)),
('sharing_id', models.PositiveIntegerField()),
('sharing_type', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to='contenttypes.ContentType')),
],
),
]
| 31.033333 | 128 | 0.627282 |
f7119d7c59ca6eac52878c1e416f7ea7327c9543 | 4445 | py | Python | IOTSocket/IOTSocketClient.py | AbhijithAJ/IOTSocket | 1a27c12491edc31b1c4fab8bcda34c643a5ef21c | ["MIT"] | 51 | 2020-02-19T16:46:32.000Z | 2022-03-19T08:51:35.000Z | IOTSocket/IOTSocketClient.py | AbhijithAJ/IOTSocket | 1a27c12491edc31b1c4fab8bcda34c643a5ef21c | ["MIT"] | null | null | null | IOTSocket/IOTSocketClient.py | AbhijithAJ/IOTSocket | 1a27c12491edc31b1c4fab8bcda34c643a5ef21c | ["MIT"] | 6 | 2020-02-19T16:46:43.000Z | 2021-11-23T13:37:03.000Z |
'''
Developed by Abhijith Boppe - linkedin.com/in/abhijith-boppe/
'''
import socket
import ssl
import time
data_maxLength = 65535
fields_maxLength = 1024
sock = ''
device_id = ''
device_key = ''
time_stamps = []
def connectionSet(host, port, id_, key, Encrypt=1, cert_path=None):
    global sock, device_id, device_key, time_stamps
    device_id = id_
    device_key = key
    time_stamps = []
    sock = socket.create_connection((host, port))
    if Encrypt == 1:
        # build a TLS context so the provided CA certificate is actually used for
        # verification (previously it was loaded into a throwaway context and the
        # socket was wrapped with certificate checks disabled)
        context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
        context.load_verify_locations(cert_path)
        sock = context.wrap_socket(sock, server_hostname=host)
    sock.settimeout(1)
def chkTime(server_time, device_time):
    """
    Check that the server time stamp matches the device time and
    that it was not reused (i.e. no replay attacks).

    Illustrative calls (values are made up; acceptance window is 3 sec):
        chkTime('100.0000', '101.5000') -> 1      accepted; stamp recorded
        chkTime('110.0000', '101.5000') -> 0      rejected; outside the window
        chkTime('100.0000', '101.5000') -> raises replayed, already-seen stamp
    """
    global time_stamps
    time_drop_max = 3  # packets with a time difference above 3 sec will not be accepted
    device_time = float(device_time)
    server_time = float(server_time)
    if server_time in time_stamps:
        raise Exception(f"ERROR: Replay attack observed. Time stamps:{time_stamps}, Replayed time: {server_time}")
    else:
        if len(time_stamps) < 100:  # allow at most 100 requests inside the window
            time_diff = abs(device_time - server_time)
            if len(time_stamps) > 1:  # drop stale time stamps (to reduce memory usage)
                if abs(time_stamps[-1] - server_time) > time_drop_max:
                    time_stamps = []
            if time_diff > time_drop_max:
                return 0
            else:
                time_stamps.append(server_time)
                return 1
        else:
            raise Exception(
                "ERROR: DOS attack, more than 100 requests from server within 3 sec")
def recvData():
    time_now = f'{time.time():.4f}'
    try:
        # 65535 max data (including headers)
        data = sock.recv(data_maxLength)
    except socket.timeout:
        data = b''
    except Exception:
        raise Exception("socket closed/refused by server")
    data = data.decode()
    if not data:
        return ''
    data = data.split('|#|')  # split the buffer at the message delimiter
    while '' in data:
        data.remove('')
    if not data:  # buffer held only delimiters
        return ''
    # clear the remaining queue/buffer and read only the first message
    data = data[0]
    # split headers and data
    fields, data = data.split("\r\n\r\n", 1)
    if len(fields) >= fields_maxLength or len(data) >= (data_maxLength - 3000):
        raise Exception("ERROR: Oversized header or data")
    fields, data = fields.strip(), data.strip()
    headers = {}
    for field in fields.split('\r\n'):
        # split each line into an IOT header name and its value
        key, value = field.split(':', 1)
        headers[key] = value
        if len(headers) > 10:
            break
    if len(headers) != 5 or len(data) < 5:
        raise Exception("ERROR: Header length issue")
    if headers['IOT'] == '1.1':
        if chkTime(headers['TIME'], time_now):
            return data
        raise Exception(
            f"ERROR: Incorrect time stamp. server time {headers['TIME']} client time {time_now}")
    raise Exception(
        f"ERROR: Incorrect IOT version detected {headers['IOT']}")
def _headers():
time_now = f'{time.time():.4f}'
headers = '''IOT:1.1
DATE:12/12/2019
TIME:{time_now}
DEVICE:{device_id}
KEY:{device_key}
'''.format(time_now=time_now, device_id=device_id, device_key=device_key)
return headers
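# Example header block produced by _headers() (TIME/DEVICE/KEY values are
# illustrative placeholders; sendData() later converts the newlines to CRLF):
#   IOT:1.1
#   DATE:12/12/2019
#   TIME:1650000000.1234
#   DEVICE:device01
#   KEY:secret-key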
def sendData(data):
    if 5 < len(data) < 60000:
        try:
            headers = _headers()
            data = headers.replace('\n', '\r\n') + data.replace('|#|', '') + '|#|'
            sock.sendall(data.encode())  # sendall avoids partial writes, unlike send
        except socket.timeout:
            raise Exception("Socket time out")
        except Exception:
            # e.g. ConnectionResetError(10054, 'An existing connection was forcibly closed by the remote host', None, 10054, None)
            raise Exception("Socket closed by server")
    else:
        raise Exception("ERROR: data must be between 6 and 59999 characters")
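# Minimal usage sketch (illustrative only; the host, port, device id/key and
# certificate path below are placeholders, not part of this module):
#
#     connectionSet('iot.example.com', 8443, 'device01', 'secret-key',
#                   Encrypt=1, cert_path='server_cert.pem')
#     sendData('{"temperature": 21.5}')  # payload must be 6-59999 characters
#     while True:
#         reply = recvData()             # returns '' if nothing arrives within 1 sec
#         if reply:
#             print(reply)
#             break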
| 36.434426 | 142 | 0.575928 |
f711d576cf9e1b71718444316027ae1001e6df66 | 111408 | py | Python | zerver/tests/test_bugdown.py | networksneaker/zulip | fae365f8c7e2d6ee041024a22b6ba5a64cbffe4e | ["Apache-2.0"] | null | null | null | zerver/tests/test_bugdown.py | networksneaker/zulip | fae365f8c7e2d6ee041024a22b6ba5a64cbffe4e | ["Apache-2.0"] | null | null | null | zerver/tests/test_bugdown.py | networksneaker/zulip | fae365f8c7e2d6ee041024a22b6ba5a64cbffe4e | ["Apache-2.0"] | null | null | null |
import copy
import os
import re
from typing import Any, Dict, List, Optional, Set, Tuple
from unittest import mock
import ujson
from django.conf import settings
from django.test import TestCase, override_settings
from zerver.lib import bugdown, mdiff
from zerver.lib.actions import (
do_add_alert_words,
do_remove_realm_emoji,
do_set_realm_property,
do_set_user_display_setting,
)
from zerver.lib.alert_words import get_alert_word_automaton
from zerver.lib.create_user import create_user
from zerver.lib.emoji import get_emoji_url
from zerver.lib.exceptions import BugdownRenderingException
from zerver.lib.mention import possible_mentions, possible_user_group_mentions
from zerver.lib.message import render_markdown
from zerver.lib.request import JsonableError
from zerver.lib.test_classes import ZulipTestCase
from zerver.lib.test_runner import slow
from zerver.lib.tex import render_tex
from zerver.lib.user_groups import create_user_group
from zerver.models import (
MAX_MESSAGE_LENGTH,
Message,
Realm,
RealmEmoji,
RealmFilter,
Stream,
UserGroup,
UserMessage,
UserProfile,
flush_per_request_caches,
flush_realm_filter,
get_client,
get_realm,
get_stream,
realm_filters_for_realm,
realm_in_local_realm_filters_cache,
)
class FencedBlockPreprocessorTest(TestCase):
def test_simple_quoting(self) -> None:
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
markdown = [
'~~~ quote',
'hi',
'bye',
'',
'',
]
expected = [
'',
'> hi',
'> bye',
'',
'',
'',
]
lines = processor.run(markdown)
self.assertEqual(lines, expected)
def test_serial_quoting(self) -> None:
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
markdown = [
'~~~ quote',
'hi',
'~~~',
'',
'~~~ quote',
'bye',
'',
'',
]
expected = [
'',
'> hi',
'',
'',
'',
'> bye',
'',
'',
'',
]
lines = processor.run(markdown)
self.assertEqual(lines, expected)
def test_serial_code(self) -> None:
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
# Simulate code formatting.
processor.format_code = lambda lang, code: lang + ':' + code # type: ignore[assignment] # mypy doesn't allow monkey-patching functions
processor.placeholder = lambda s: '**' + s.strip('\n') + '**' # type: ignore[assignment] # https://github.com/python/mypy/issues/708
markdown = [
'``` .py',
'hello()',
'```',
'',
'```vb.net',
'goodbye()',
'```',
'',
'```c#',
'weirdchar()',
'```',
'',
'```',
'no-highlight()',
'```',
'',
]
expected = [
'',
'**py:hello()**',
'',
'',
'',
'**vb.net:goodbye()**',
'',
'',
'',
'**c#:weirdchar()**',
'',
'',
'',
'**:no-highlight()**',
'',
'',
]
lines = processor.run(markdown)
self.assertEqual(lines, expected)
def test_nested_code(self) -> None:
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
# Simulate code formatting.
processor.format_code = lambda lang, code: lang + ':' + code # type: ignore[assignment] # mypy doesn't allow monkey-patching functions
processor.placeholder = lambda s: '**' + s.strip('\n') + '**' # type: ignore[assignment] # https://github.com/python/mypy/issues/708
markdown = [
'~~~ quote',
'hi',
'``` .py',
'hello()',
'```',
'',
'',
]
expected = [
'',
'> hi',
'',
'> **py:hello()**',
'',
'',
'',
]
lines = processor.run(markdown)
self.assertEqual(lines, expected)
def bugdown_convert(content: str) -> str:
return bugdown.convert(
content=content,
message_realm=get_realm('zulip'),
)
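# e.g. bugdown_convert('**hi**') would yield '<p><strong>hi</strong></p>'
# (illustrative; standard Markdown strong emphasis rendering)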
class BugdownMiscTest(ZulipTestCase):
def test_diffs_work_as_expected(self) -> None:
str1 = "<p>The quick brown fox jumps over the lazy dog. Animal stories are fun, yeah</p>"
str2 = "<p>The fast fox jumps over the lazy dogs and cats. Animal stories are fun</p>"
expected_diff = "\u001b[34m-\u001b[0m <p>The \u001b[33mquick brown\u001b[0m fox jumps over the lazy dog. Animal stories are fun\u001b[31m, yeah\u001b[0m</p>\n\u001b[34m+\u001b[0m <p>The \u001b[33mfast\u001b[0m fox jumps over the lazy dog\u001b[32ms and cats\u001b[0m. Animal stories are fun</p>\n"
self.assertEqual(mdiff.diff_strings(str1, str2), expected_diff)
def test_get_possible_mentions_info(self) -> None:
realm = get_realm('zulip')
def make_user(email: str, full_name: str) -> UserProfile:
return create_user(
email=email,
password='whatever',
realm=realm,
full_name=full_name,
short_name='whatever',
)
fred1 = make_user('fred1@example.com', 'Fred Flintstone')
fred1.is_active = False
fred1.save()
fred2 = make_user('fred2@example.com', 'Fred Flintstone')
fred3 = make_user('fred3@example.com', 'Fred Flintstone')
fred3.is_active = False
fred3.save()
fred4 = make_user('fred4@example.com', 'Fred Flintstone')
lst = bugdown.get_possible_mentions_info(realm.id, {'Fred Flintstone', 'cordelia LEAR', 'Not A User'})
set_of_names = set(map(lambda x: x['full_name'].lower(), lst))
self.assertEqual(set_of_names, {'fred flintstone', 'cordelia lear'})
by_id = {
row['id']: row
for row in lst
}
self.assertEqual(by_id.get(fred2.id), dict(
email=fred2.email,
full_name='Fred Flintstone',
id=fred2.id,
))
self.assertEqual(by_id.get(fred4.id), dict(
email=fred4.email,
full_name='Fred Flintstone',
id=fred4.id,
))
def test_mention_data(self) -> None:
realm = get_realm('zulip')
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
content = '@**King Hamlet** @**Cordelia lear**'
mention_data = bugdown.MentionData(realm.id, content)
self.assertEqual(mention_data.get_user_ids(), {hamlet.id, cordelia.id})
self.assertEqual(mention_data.get_user_by_id(hamlet.id), dict(
email=hamlet.email,
full_name=hamlet.full_name,
id=hamlet.id,
))
user = mention_data.get_user_by_name('king hamLET')
assert(user is not None)
self.assertEqual(user['email'], hamlet.email)
self.assertFalse(mention_data.message_has_wildcards())
content = '@**King Hamlet** @**Cordelia lear** @**all**'
mention_data = bugdown.MentionData(realm.id, content)
self.assertTrue(mention_data.message_has_wildcards())
def test_invalid_katex_path(self) -> None:
with self.settings(DEPLOY_ROOT="/nonexistent"):
with mock.patch('logging.error') as mock_logger:
render_tex("random text")
mock_logger.assert_called_with("Cannot find KaTeX for latex rendering!")
class BugdownListPreprocessorTest(ZulipTestCase):
# We test that the preprocessor inserts blank lines at correct places.
# We use <> to indicate that we need to insert a blank line here.
def split_message(self, msg: str) -> Tuple[List[str], List[str]]:
original = msg.replace('<>', '').split('\n')
expected = re.split(r'\n|<>', msg)
return original, expected
def test_basic_list(self) -> None:
preprocessor = bugdown.BugdownListPreprocessor()
original, expected = self.split_message('List without a gap\n<>* One\n* Two')
self.assertEqual(preprocessor.run(original), expected)
def test_list_after_quotes(self) -> None:
preprocessor = bugdown.BugdownListPreprocessor()
original, expected = self.split_message('```quote\nSomething\n```\n\nList without a gap\n<>* One\n* Two')
self.assertEqual(preprocessor.run(original), expected)
def test_list_in_code(self) -> None:
preprocessor = bugdown.BugdownListPreprocessor()
original, expected = self.split_message('```\nList without a gap\n* One\n* Two\n```')
self.assertEqual(preprocessor.run(original), expected)
def test_complex_nesting_with_different_fences(self) -> None:
preprocessor = bugdown.BugdownListPreprocessor()
msg = """```quote
In quote. We should convert a list here:<>
* one
* two
~~~
This is a nested code fence, do not make changes here:
* one
* two
````quote
Quote in code fence. Should not convert:
* one
* two
````
~~~
Back in the quote. We should convert:<>
* one
* two
```
Outside. Should convert:<>
* one
* two
"""
original, expected = self.split_message(msg)
self.assertEqual(preprocessor.run(original), expected)
def test_complex_nesting_with_same_fence(self) -> None:
preprocessor = bugdown.BugdownListPreprocessor()
msg = """```quote
In quote. We should convert a list here:<>
* one
* two
```python
This is a nested code fence, do not make changes here:
* one
* two
```quote
Quote in code fence. Should not convert:
* one
* two
```
```
Back in the quote. We should convert:<>
* one
* two
```
Outside. Should convert:<>
* one
* two
"""
original, expected = self.split_message(msg)
self.assertEqual(preprocessor.run(original), expected)
class BugdownTest(ZulipTestCase):
def setUp(self) -> None:
super().setUp()
bugdown.clear_state_for_testing()
def assertEqual(self, first: Any, second: Any, msg: str = "") -> None:
if isinstance(first, str) and isinstance(second, str):
if first != second:
raise AssertionError("Actual and expected outputs do not match; showing diff.\n" +
mdiff.diff_strings(first, second) + msg)
else:
super().assertEqual(first, second)
def load_bugdown_tests(self) -> Tuple[Dict[str, Any], List[List[str]]]:
test_fixtures = {}
with open(os.path.join(os.path.dirname(__file__), 'fixtures/markdown_test_cases.json')) as f:
data = ujson.load(f)
for test in data['regular_tests']:
test_fixtures[test['name']] = test
return test_fixtures, data['linkify_tests']
def test_bugdown_no_ignores(self) -> None:
# We do not want any ignored tests to be committed and merged.
format_tests, linkify_tests = self.load_bugdown_tests()
for name, test in format_tests.items():
message = f'Test "{name}" shouldn\'t be ignored.'
is_ignored = test.get('ignore', False)
self.assertFalse(is_ignored, message)
@slow("Aggregate of runs dozens of individual markdown tests")
def test_bugdown_fixtures(self) -> None:
format_tests, linkify_tests = self.load_bugdown_tests()
valid_keys = {"name", "input", "expected_output",
"backend_only_rendering",
"marked_expected_output", "text_content",
"translate_emoticons", "ignore"}
for name, test in format_tests.items():
with self.subTest(markdown_test_case=name):
# Check that there aren't any unexpected keys as those are often typos
self.assertEqual(len(set(test.keys()) - valid_keys), 0)
# Ignore tests if specified
if test.get('ignore', False):
continue # nocoverage
if test.get('translate_emoticons', False):
# Create a userprofile and send message with it.
user_profile = self.example_user('othello')
do_set_user_display_setting(user_profile, 'translate_emoticons', True)
msg = Message(sender=user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, test['input'])
else:
converted = bugdown_convert(test['input'])
self.assertEqual(converted, test['expected_output'])
def replaced(payload: str, url: str, phrase: str='') -> str:
if url[:4] == 'http':
href = url
elif '@' in url:
href = 'mailto:' + url
else:
href = 'http://' + url
return payload % (f"<a href=\"{href}\">{url}</a>",)
print("Running Bugdown Linkify tests")
with mock.patch('zerver.lib.url_preview.preview.link_embed_data_from_cache', return_value=None):
for inline_url, reference, url in linkify_tests:
try:
match = replaced(reference, url, phrase=inline_url)
except TypeError:
match = reference
converted = bugdown_convert(inline_url)
self.assertEqual(match, converted)
def test_inline_file(self) -> None:
msg = 'Check out this file file:///Volumes/myserver/Users/Shared/pi.py'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Check out this file <a href="file:///Volumes/myserver/Users/Shared/pi.py">file:///Volumes/myserver/Users/Shared/pi.py</a></p>')
bugdown.clear_state_for_testing()
with self.settings(ENABLE_FILE_LINKS=False):
realm = Realm.objects.create(string_id='file_links_test')
bugdown.maybe_update_markdown_engines(realm.id, False)
converted = bugdown.convert(msg, message_realm=realm)
self.assertEqual(converted, '<p>Check out this file file:///Volumes/myserver/Users/Shared/pi.py</p>')
def test_inline_bitcoin(self) -> None:
msg = 'To bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa or not to bitcoin'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>To <a href="bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa">bitcoin:1A1zP1eP5QGefi2DMPTfTL5SLmv7DivfNa</a> or not to bitcoin</p>')
def test_inline_youtube(self) -> None:
msg = 'Check out the debate: http://www.youtube.com/watch?v=hx1mjT73xYE'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Check out the debate: <a href="http://www.youtube.com/watch?v=hx1mjT73xYE">http://www.youtube.com/watch?v=hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="http://www.youtube.com/watch?v=hx1mjT73xYE"><img src="https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg"></a></div>')
msg = 'http://www.youtube.com/watch?v=hx1mjT73xYE'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="http://www.youtube.com/watch?v=hx1mjT73xYE">http://www.youtube.com/watch?v=hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="http://www.youtube.com/watch?v=hx1mjT73xYE"><img src="https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg"></a></div>')
msg = 'https://youtu.be/hx1mjT73xYE'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="https://youtu.be/hx1mjT73xYE">https://youtu.be/hx1mjT73xYE</a></p>\n<div class="youtube-video message_inline_image"><a data-id="hx1mjT73xYE" href="https://youtu.be/hx1mjT73xYE"><img src="https://i.ytimg.com/vi/hx1mjT73xYE/default.jpg"></a></div>')
msg = 'https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo'
not_converted = bugdown_convert(msg)
self.assertEqual(not_converted, '<p><a href="https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo">https://www.youtube.com/playlist?list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo</a></p>')
msg = 'https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo">https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo</a></p>\n<div class="youtube-video message_inline_image"><a data-id="O5nskjZ_GoI" href="https://www.youtube.com/playlist?v=O5nskjZ_GoI&list=PL8dPuuaLjXtNlUrzyH5r6jN9ulIgZBpdo"><img src="https://i.ytimg.com/vi/O5nskjZ_GoI/default.jpg"></a></div>')
msg = 'http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw">http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw</a></p>\n<div class="youtube-video message_inline_image"><a data-id="nOJgD4fcZhI" href="http://www.youtube.com/watch_videos?video_ids=nOJgD4fcZhI,i96UO8-GFvw"><img src="https://i.ytimg.com/vi/nOJgD4fcZhI/default.jpg"></a></div>')
@override_settings(INLINE_URL_EMBED_PREVIEW=False)
def test_inline_vimeo(self) -> None:
msg = 'Check out the debate: https://vimeo.com/246979354'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Check out the debate: <a href="https://vimeo.com/246979354">https://vimeo.com/246979354</a></p>')
msg = 'https://vimeo.com/246979354'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="https://vimeo.com/246979354">https://vimeo.com/246979354</a></p>')
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_thumbnail_url(self) -> None:
realm = get_realm("zephyr")
msg = '[foobar](/user_uploads/{realm_id}/50/w2G6ok9kr8AMCQCTNAUOFMln/IMG_0677.JPG)'
msg = msg.format(realm_id=realm.id)
thumbnail_img = '<img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F50%2Fw2G6ok9kr8AMCQCTNAUOFMln%2FIMG_0677.JPG&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F50%2Fw2G6ok9kr8AMCQCTNAUOFMln%2FIMG_0677.JPG&size=thumbnail"><'
thumbnail_img = thumbnail_img.format(realm_id=realm.id)
converted = bugdown_convert(msg)
self.assertIn(thumbnail_img, converted)
msg = 'https://www.google.com/images/srpr/logo4w.png'
thumbnail_img = '<img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=full" src="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=thumbnail">'
converted = bugdown_convert(msg)
self.assertIn(thumbnail_img, converted)
msg = 'www.google.com/images/srpr/logo4w.png'
thumbnail_img = '<img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=full" src="/thumbnail?url=http%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=thumbnail">'
converted = bugdown_convert(msg)
self.assertIn(thumbnail_img, converted)
msg = 'https://www.google.com/images/srpr/logo4w.png'
thumbnail_img = '<div class="message_inline_image"><a href="https://www.google.com/images/srpr/logo4w.png"><img src="https://www.google.com/images/srpr/logo4w.png"></a></div>'
with self.settings(THUMBNAIL_IMAGES=False):
converted = bugdown_convert(msg)
self.assertIn(thumbnail_img, converted)
# Any url which is not an external link and doesn't start with
# /user_uploads/ is not thumbnailed
msg = '[foobar](/static/images/cute/turtle.png)'
thumbnail_img = '<div class="message_inline_image"><a href="/static/images/cute/turtle.png" title="foobar"><img src="/static/images/cute/turtle.png"></a></div>'
converted = bugdown_convert(msg)
self.assertIn(thumbnail_img, converted)
msg = '[foobar](/user_avatars/{realm_id}/emoji/images/50.png)'
msg = msg.format(realm_id=realm.id)
thumbnail_img = '<div class="message_inline_image"><a href="/user_avatars/{realm_id}/emoji/images/50.png" title="foobar"><img src="/user_avatars/{realm_id}/emoji/images/50.png"></a></div>'
thumbnail_img = thumbnail_img.format(realm_id=realm.id)
converted = bugdown_convert(msg)
self.assertIn(thumbnail_img, converted)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_preview(self) -> None:
with_preview = '<div class="message_inline_image"><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=full" src="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=thumbnail"></a></div>'
without_preview = '<p><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></p>'
content = 'http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg'
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, with_preview)
realm = msg.get_realm()
setattr(realm, 'inline_image_preview', False)
realm.save()
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, without_preview)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_quoted_blocks(self) -> None:
content = 'http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg'
expected = '<div class="message_inline_image"><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=full" src="/thumbnail?url=http%3A%2F%2Fcdn.wallpapersafari.com%2F13%2F6%2F16eVjx.jpeg&size=thumbnail"></a></div>'
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
content = '>http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg\n\nAwesome!'
expected = '<blockquote>\n<p><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></p>\n</blockquote>\n<p>Awesome!</p>'
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
content = '>* http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg\n\nAwesome!'
expected = '<blockquote>\n<ul>\n<li><a href="http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg">http://cdn.wallpapersafari.com/13/6/16eVjx.jpeg</a></li>\n</ul>\n</blockquote>\n<p>Awesome!</p>'
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_inline_image_preview_order(self) -> None:
realm = get_realm("zulip")
content = 'http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg\nhttp://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg\nhttp://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg'
expected = '<p><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg</a><br>\n<a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg</a><br>\n<a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg</a></p>\n<div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=thumbnail"></a></div><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_02.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_02.jpg&size=thumbnail"></a></div><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=thumbnail"></a></div>'
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
content = 'http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg\n\n>http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg\n\n* http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg\n* https://www.google.com/images/srpr/logo4w.png'
expected = '<div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_01.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_01.jpg&size=thumbnail"></a></div><blockquote>\n<p><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg">http://imaging.nikon.com/lineup/dslr/df/img/sample/img_02.jpg</a></p>\n</blockquote>\n<ul>\n<li><div class="message_inline_image"><a href="http://imaging.nikon.com/lineup/dslr/df/img/sample/img_03.jpg"><img data-src-fullsize="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=full" src="/thumbnail?url=http%3A%2F%2Fimaging.nikon.com%2Flineup%2Fdslr%2Fdf%2Fimg%2Fsample%2Fimg_03.jpg&size=thumbnail"></a></div></li>\n<li><div class="message_inline_image"><a href="https://www.google.com/images/srpr/logo4w.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=full" src="/thumbnail?url=https%3A%2F%2Fwww.google.com%2Fimages%2Fsrpr%2Flogo4w.png&size=thumbnail"></a></div></li>\n</ul>'
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
content = 'Test 1\n[21136101110_1dde1c1a7e_o.jpg](/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg) \n\nNext Image\n[IMG_20161116_023910.jpg](/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg) \n\nAnother Screenshot\n[Screenshot-from-2016-06-01-16-22-42.png](/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png)'
content = content.format(realm_id=realm.id)
expected = '<p>Test 1<br>\n<a href="/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg">21136101110_1dde1c1a7e_o.jpg</a> </p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/6d/F1PX6u16JA2P-nK45PyxHIYZ/21136101110_1dde1c1a7e_o.jpg" title="21136101110_1dde1c1a7e_o.jpg"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F6d%2FF1PX6u16JA2P-nK45PyxHIYZ%2F21136101110_1dde1c1a7e_o.jpg&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F6d%2FF1PX6u16JA2P-nK45PyxHIYZ%2F21136101110_1dde1c1a7e_o.jpg&size=thumbnail"></a></div><p>Next Image<br>\n<a href="/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg">IMG_20161116_023910.jpg</a> </p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/69/sh7L06e7uH7NaX6d5WFfVYQp/IMG_20161116_023910.jpg" title="IMG_20161116_023910.jpg"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F69%2Fsh7L06e7uH7NaX6d5WFfVYQp%2FIMG_20161116_023910.jpg&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F69%2Fsh7L06e7uH7NaX6d5WFfVYQp%2FIMG_20161116_023910.jpg&size=thumbnail"></a></div><p>Another Screenshot<br>\n<a href="/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png">Screenshot-from-2016-06-01-16-22-42.png</a></p>\n<div class="message_inline_image"><a href="/user_uploads/{realm_id}/70/_aZmIEWaN1iUaxwkDjkO7bpj/Screenshot-from-2016-06-01-16-22-42.png" title="Screenshot-from-2016-06-01-16-22-42.png"><img data-src-fullsize="/thumbnail?url=user_uploads%2F{realm_id}%2F70%2F_aZmIEWaN1iUaxwkDjkO7bpj%2FScreenshot-from-2016-06-01-16-22-42.png&size=full" src="/thumbnail?url=user_uploads%2F{realm_id}%2F70%2F_aZmIEWaN1iUaxwkDjkO7bpj%2FScreenshot-from-2016-06-01-16-22-42.png&size=thumbnail"></a></div>'
expected = expected.format(realm_id=realm.id)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
@override_settings(INLINE_IMAGE_PREVIEW=True)
def test_corrected_image_source(self) -> None:
# testing only wikipedia because linx.li urls can be expected to expire
content = 'https://en.wikipedia.org/wiki/File:Wright_of_Derby,_The_Orrery.jpg'
expected = '<div class="message_inline_image"><a href="https://en.wikipedia.org/wiki/Special:FilePath/File:Wright_of_Derby,_The_Orrery.jpg"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FSpecial%3AFilePath%2FFile%3AWright_of_Derby%2C_The_Orrery.jpg&size=full" src="/thumbnail?url=https%3A%2F%2Fen.wikipedia.org%2Fwiki%2FSpecial%3AFilePath%2FFile%3AWright_of_Derby%2C_The_Orrery.jpg&size=thumbnail"></a></div>'
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
@override_settings(INLINE_IMAGE_PREVIEW=False)
def test_image_preview_enabled(self) -> None:
ret = bugdown.image_preview_enabled()
self.assertEqual(ret, False)
settings.INLINE_IMAGE_PREVIEW = True
sender_user_profile = self.example_user('othello')
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm = message.get_realm()
ret = bugdown.image_preview_enabled()
self.assertEqual(ret, True)
ret = bugdown.image_preview_enabled(no_previews=True)
self.assertEqual(ret, False)
ret = bugdown.image_preview_enabled(message, realm)
self.assertEqual(ret, True)
ret = bugdown.image_preview_enabled(message)
self.assertEqual(ret, True)
ret = bugdown.image_preview_enabled(message, realm,
no_previews=True)
self.assertEqual(ret, False)
ret = bugdown.image_preview_enabled(message, no_previews=True)
self.assertEqual(ret, False)
@override_settings(INLINE_URL_EMBED_PREVIEW=False)
def test_url_embed_preview_enabled(self) -> None:
sender_user_profile = self.example_user('othello')
message = copy.deepcopy(Message(sender=sender_user_profile, sending_client=get_client("test")))
realm = message.get_realm()
realm.inline_url_embed_preview = True # off by default
realm.save(update_fields=['inline_url_embed_preview'])
ret = bugdown.url_embed_preview_enabled()
self.assertEqual(ret, False)
settings.INLINE_URL_EMBED_PREVIEW = True
ret = bugdown.url_embed_preview_enabled()
self.assertEqual(ret, True)
ret = bugdown.url_embed_preview_enabled(no_previews=True)
self.assertEqual(ret, False)
ret = bugdown.url_embed_preview_enabled(message, realm)
self.assertEqual(ret, True)
ret = bugdown.url_embed_preview_enabled(message)
self.assertEqual(ret, True)
ret = bugdown.url_embed_preview_enabled(message, no_previews=True)
self.assertEqual(ret, False)
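# A minimal sketch (not part of the upstream suite) of the precedence the
# assertions above exercise: the global setting gates everything,
# no_previews force-disables, and the realm flag applies once a realm is
# known. This is an assumed reading, not bugdown's actual implementation.
def _sketch_preview_enabled(self, global_setting: bool,
                            realm_flag: Optional[bool],
                            no_previews: bool=False) -> bool:
    if not global_setting or no_previews:
        return False
    # Without a realm in hand, previews default to allowed.
    return realm_flag if realm_flag is not None else True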
def test_inline_dropbox(self) -> None:
msg = 'Look at how hilarious our old office was: https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG'
image_info = {'image': 'https://photos-4.dropbox.com/t/2/AABIre1oReJgPYuc_53iv0IHq1vUzRaDg2rrCfTpiWMccQ/12/129/jpeg/1024x1024/2/_/0/4/IMG_0923.JPG/CIEBIAEgAiAHKAIoBw/ymdijjcg67hv2ta/AABz2uuED1ox3vpWWvMpBxu6a/IMG_0923.JPG', 'desc': 'Shared with Dropbox', 'title': 'IMG_0923.JPG'}
with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=image_info):
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Look at how hilarious our old office was: <a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG">https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG" title="IMG_0923.JPG"><img src="https://www.dropbox.com/s/ymdijjcg67hv2ta/IMG_0923.JPG?dl=1"></a></div>')
msg = 'Look at my hilarious drawing folder: https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl='
image_info = {'image': 'https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png', 'desc': 'Shared with Dropbox', 'title': 'Saves'}
with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=image_info):
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Look at my hilarious drawing folder: <a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=">https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=</a></p>\n<div class="message_inline_ref"><a href="https://www.dropbox.com/sh/cm39k9e04z7fhim/AAAII5NK-9daee3FcF41anEua?dl=" title="Saves"><img src="https://cf.dropboxstatic.com/static/images/icons128/folder_dropbox.png"></a><div><div class="message_inline_image_title">Saves</div><desc class="message_inline_image_desc"></desc></div></div>')
def test_inline_dropbox_preview(self) -> None:
# Test photo album previews
msg = 'https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5'
image_info = {'image': 'https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0', 'desc': 'Shared with Dropbox', 'title': '1 photo'}
with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=image_info):
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5">https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/sc/tditp9nitko60n5/03rEiZldy5" title="1 photo"><img src="https://photos-6.dropbox.com/t/2/AAAlawaeD61TyNewO5vVi-DGf2ZeuayfyHFdNTNzpGq-QA/12/271544745/jpeg/1024x1024/2/_/0/5/baby-piglet.jpg/CKnjvYEBIAIgBygCKAc/tditp9nitko60n5/AADX03VAIrQlTl28CtujDcMla/0"></a></div>')
def test_inline_dropbox_negative(self) -> None:
# Make sure we're not overzealous in our conversion:
msg = 'Look at the new dropbox logo: https://www.dropbox.com/static/images/home_logo.png'
with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=None):
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Look at the new dropbox logo: <a href="https://www.dropbox.com/static/images/home_logo.png">https://www.dropbox.com/static/images/home_logo.png</a></p>\n<div class="message_inline_image"><a href="https://www.dropbox.com/static/images/home_logo.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fwww.dropbox.com%2Fstatic%2Fimages%2Fhome_logo.png&size=full" src="/thumbnail?url=https%3A%2F%2Fwww.dropbox.com%2Fstatic%2Fimages%2Fhome_logo.png&size=thumbnail"></a></div>')
def test_inline_dropbox_bad(self) -> None:
# Don't fail on bad dropbox links
msg = "https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM"
with mock.patch('zerver.lib.bugdown.fetch_open_graph_image', return_value=None):
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><a href="https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM">https://zulip-test.dropbox.com/photos/cl/ROmr9K1XYtmpneM</a></p>')
def test_inline_github_preview(self) -> None:
# Test GitHub image previews
msg = 'Test: https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Test: <a href="https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png">https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png</a></p>\n<div class="message_inline_image"><a href="https://github.com/zulip/zulip/blob/master/static/images/logo/zulip-icon-128x128.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fraw.githubusercontent.com%2Fzulip%2Fzulip%2Fmaster%2Fstatic%2Fimages%2Flogo%2Fzulip-icon-128x128.png&size=full" src="/thumbnail?url=https%3A%2F%2Fraw.githubusercontent.com%2Fzulip%2Fzulip%2Fmaster%2Fstatic%2Fimages%2Flogo%2Fzulip-icon-128x128.png&size=thumbnail"></a></div>')
msg = 'Test: https://developer.github.com/assets/images/hero-circuit-bg.png'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>Test: <a href="https://developer.github.com/assets/images/hero-circuit-bg.png">https://developer.github.com/assets/images/hero-circuit-bg.png</a></p>\n<div class="message_inline_image"><a href="https://developer.github.com/assets/images/hero-circuit-bg.png"><img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fdeveloper.github.com%2Fassets%2Fimages%2Fhero-circuit-bg.png&size=full" src="/thumbnail?url=https%3A%2F%2Fdeveloper.github.com%2Fassets%2Fimages%2Fhero-circuit-bg.png&size=thumbnail"></a></div>')
def test_twitter_id_extraction(self) -> None:
self.assertEqual(bugdown.get_tweet_id('http://twitter.com/#!/VizzQuotes/status/409030735191097344'), '409030735191097344')
self.assertEqual(bugdown.get_tweet_id('http://twitter.com/VizzQuotes/status/409030735191097344'), '409030735191097344')
self.assertEqual(bugdown.get_tweet_id('http://twitter.com/VizzQuotes/statuses/409030735191097344'), '409030735191097344')
self.assertEqual(bugdown.get_tweet_id('https://twitter.com/wdaher/status/1017581858'), '1017581858')
self.assertEqual(bugdown.get_tweet_id('https://twitter.com/wdaher/status/1017581858/'), '1017581858')
self.assertEqual(bugdown.get_tweet_id('https://twitter.com/windyoona/status/410766290349879296/photo/1'), '410766290349879296')
self.assertEqual(bugdown.get_tweet_id('https://twitter.com/windyoona/status/410766290349879296/'), '410766290349879296')
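# A minimal sketch of a regex that satisfies the extraction cases above;
# the real bugdown.get_tweet_id() lives in zerver/lib/bugdown and likely
# also validates the hostname, which this illustrative helper skips.
def _sketch_get_tweet_id(self, url: str) -> Optional[str]:
    # Accept both /status/<id> and /statuses/<id>, tolerating trailing
    # path segments such as /photo/1.
    match = re.search(r'/status(?:es)?/(\d+)', url)
    return match.group(1) if match else None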
def test_inline_interesting_links(self) -> None:
def make_link(url: str) -> str:
return f'<a href="{url}">{url}</a>'
normal_tweet_html = ('<a href="https://twitter.com/Twitter"'
'>@Twitter</a> '
'meets @seepicturely at #tcdisrupt cc.'
'<a href="https://twitter.com/boscomonkey"'
'>@boscomonkey</a> '
'<a href="https://twitter.com/episod"'
'>@episod</a> '
'<a href="http://t.co/6J2EgYM"'
'>http://instagr.am/p/MuW67/</a>')
mention_in_link_tweet_html = """<a href="http://t.co/@foo">http://foo.com</a>"""
media_tweet_html = ('<a href="http://t.co/xo7pAhK6n3">'
'http://twitter.com/NEVNBoston/status/421654515616849920/photo/1</a>')
emoji_in_tweet_html = """Zulip is <span aria-label=\"100\" class="emoji emoji-1f4af" role=\"img\" title="100">:100:</span>% open-source!"""
def make_inline_twitter_preview(url: str, tweet_html: str, image_html: str='') -> str:
# As of right now, all previews are mocked to be the exact same tweet
return ('<div class="inline-preview-twitter">'
'<div class="twitter-tweet">'
f'<a href="{url}">'
'<img class="twitter-avatar"'
' src="https://external-content.zulipcdn.net/external_content/1f7cd2436976d410eab8189ebceda87ae0b34ead/687474703a2f2f7062732e7477696d672e63'
'6f6d2f70726f66696c655f696d616765732f313338303931323137332f53637265656e5f73686f745f323031312d30362d30335f61745f372e33352e33'
'365f504d5f6e6f726d616c2e706e67">'
'</a>'
f'<p>{tweet_html}</p>'
'<span>- Eoin McMillan (@imeoin)</span>'
f'{image_html}'
'</div>'
'</div>')
msg = 'http://www.twitter.com'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>'.format(make_link('http://www.twitter.com')))
msg = 'http://www.twitter.com/wdaher/'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>'.format(make_link('http://www.twitter.com/wdaher/')))
msg = 'http://www.twitter.com/wdaher/status/3'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>'.format(make_link('http://www.twitter.com/wdaher/status/3')))
# id too long
msg = 'http://www.twitter.com/wdaher/status/2879779692873154569'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>'.format(make_link('http://www.twitter.com/wdaher/status/2879779692873154569')))
# id too large (i.e. tweet doesn't exist)
msg = 'http://www.twitter.com/wdaher/status/999999999999999999'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>'.format(make_link('http://www.twitter.com/wdaher/status/999999999999999999')))
msg = 'http://www.twitter.com/wdaher/status/287977969287315456'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>\n{}'.format(
make_link('http://www.twitter.com/wdaher/status/287977969287315456'),
make_inline_twitter_preview('http://www.twitter.com/wdaher/status/287977969287315456', normal_tweet_html)))
msg = 'https://www.twitter.com/wdaher/status/287977969287315456'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>\n{}'.format(
make_link('https://www.twitter.com/wdaher/status/287977969287315456'),
make_inline_twitter_preview('https://www.twitter.com/wdaher/status/287977969287315456', normal_tweet_html)))
msg = 'http://twitter.com/wdaher/status/287977969287315456'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>\n{}'.format(
make_link('http://twitter.com/wdaher/status/287977969287315456'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315456', normal_tweet_html)))
# Repeated links will only be converted once
msg = ('http://twitter.com/wdaher/status/287977969287315456 '
'http://twitter.com/wdaher/status/287977969287315457 '
'http://twitter.com/wdaher/status/287977969287315457 '
'http://twitter.com/wdaher/status/287977969287315457')
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{} {} {} {}</p>\n{}{}'.format(
make_link('http://twitter.com/wdaher/status/287977969287315456'),
make_link('http://twitter.com/wdaher/status/287977969287315457'),
make_link('http://twitter.com/wdaher/status/287977969287315457'),
make_link('http://twitter.com/wdaher/status/287977969287315457'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315456', normal_tweet_html),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315457', normal_tweet_html)))
# A max of 3 will be converted
msg = ('http://twitter.com/wdaher/status/287977969287315456 '
'http://twitter.com/wdaher/status/287977969287315457 '
'https://twitter.com/wdaher/status/287977969287315456 '
'http://twitter.com/wdaher/status/287977969287315460')
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{} {} {} {}</p>\n{}{}{}'.format(
make_link('http://twitter.com/wdaher/status/287977969287315456'),
make_link('http://twitter.com/wdaher/status/287977969287315457'),
make_link('https://twitter.com/wdaher/status/287977969287315456'),
make_link('http://twitter.com/wdaher/status/287977969287315460'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315456', normal_tweet_html),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315457', normal_tweet_html),
make_inline_twitter_preview('https://twitter.com/wdaher/status/287977969287315456', normal_tweet_html)))
# Tweet has a mention in a URL, only the URL is linked
msg = 'http://twitter.com/wdaher/status/287977969287315458'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>\n{}'.format(
make_link('http://twitter.com/wdaher/status/287977969287315458'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315458', mention_in_link_tweet_html)))
# Tweet with an image
msg = 'http://twitter.com/wdaher/status/287977969287315459'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>\n{}'.format(
make_link('http://twitter.com/wdaher/status/287977969287315459'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315459',
media_tweet_html,
('<div class="twitter-image">'
'<a href="http://t.co/xo7pAhK6n3">'
'<img src="https://pbs.twimg.com/media/BdoEjD4IEAIq86Z.jpg:small">'
'</a>'
'</div>'))))
msg = 'http://twitter.com/wdaher/status/287977969287315460'
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p>{}</p>\n{}'.format(
make_link('http://twitter.com/wdaher/status/287977969287315460'),
make_inline_twitter_preview('http://twitter.com/wdaher/status/287977969287315460', emoji_in_tweet_html)))
def test_fetch_tweet_data_settings_validation(self) -> None:
with self.settings(TEST_SUITE=False, TWITTER_CONSUMER_KEY=None):
self.assertIs(None, bugdown.fetch_tweet_data('287977969287315459'))
def test_content_has_emoji(self) -> None:
self.assertFalse(bugdown.content_has_emoji_syntax('boring'))
self.assertFalse(bugdown.content_has_emoji_syntax('hello: world'))
self.assertFalse(bugdown.content_has_emoji_syntax(':foobar'))
self.assertFalse(bugdown.content_has_emoji_syntax('::: hello :::'))
self.assertTrue(bugdown.content_has_emoji_syntax('foo :whatever:'))
self.assertTrue(bugdown.content_has_emoji_syntax('\n:whatever:'))
self.assertTrue(bugdown.content_has_emoji_syntax(':smile: ::::::'))
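# A minimal sketch consistent with the cases above: emoji syntax means a
# colon, one or more characters that are neither whitespace nor colons,
# then a closing colon. The real content_has_emoji_syntax() pattern may
# differ; this helper is illustrative only.
def _sketch_has_emoji_syntax(self, content: str) -> bool:
    return re.search(r':[^\s:]+:', content) is not None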
def test_realm_emoji(self) -> None:
def emoji_img(name: str, file_name: str, realm_id: int) -> str:
return '<img alt="{}" class="emoji" src="{}" title="{}">'.format(
name, get_emoji_url(file_name, realm_id), name[1:-1].replace("_", " "))
realm = get_realm('zulip')
# Needs to mock an actual message because that's how bugdown obtains the realm
msg = Message(sender=self.example_user('hamlet'))
converted = bugdown.convert(":green_tick:", message_realm=realm, message=msg)
realm_emoji = RealmEmoji.objects.filter(realm=realm,
name='green_tick',
deactivated=False).get()
self.assertEqual(converted, '<p>{}</p>'.format(emoji_img(':green_tick:', realm_emoji.file_name, realm.id)))
# Deactivate realm emoji.
do_remove_realm_emoji(realm, 'green_tick')
converted = bugdown.convert(":green_tick:", message_realm=realm, message=msg)
self.assertEqual(converted, '<p>:green_tick:</p>')
def test_deactivated_realm_emoji(self) -> None:
# Deactivate realm emoji.
realm = get_realm('zulip')
do_remove_realm_emoji(realm, 'green_tick')
msg = Message(sender=self.example_user('hamlet'))
converted = bugdown.convert(":green_tick:", message_realm=realm, message=msg)
self.assertEqual(converted, '<p>:green_tick:</p>')
def test_unicode_emoji(self) -> None:
msg = '\u2615' # ☕
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><span aria-label=\"coffee\" class="emoji emoji-2615" role=\"img\" title="coffee">:coffee:</span></p>')
msg = '\u2615\u2615' # ☕☕
converted = bugdown_convert(msg)
self.assertEqual(converted, '<p><span aria-label=\"coffee\" class="emoji emoji-2615" role=\"img\" title="coffee">:coffee:</span><span aria-label=\"coffee\" class="emoji emoji-2615" role=\"img\" title="coffee">:coffee:</span></p>')
def test_no_translate_emoticons_if_off(self) -> None:
user_profile = self.example_user('othello')
do_set_user_display_setting(user_profile, 'translate_emoticons', False)
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = ':)'
expected = '<p>:)</p>'
converted = render_markdown(msg, content)
self.assertEqual(converted, expected)
def test_same_markup(self) -> None:
msg = '\u2615' # ☕
unicode_converted = bugdown_convert(msg)
msg = ':coffee:' # ☕
converted = bugdown_convert(msg)
self.assertEqual(converted, unicode_converted)
def test_links_in_topic_name(self) -> None:
realm = get_realm('zulip')
msg = Message(sender=self.example_user('othello'))
msg.set_topic_name("https://google.com/hello-world")
converted_topic = bugdown.topic_links(realm.id, msg.topic_name())
self.assertEqual(converted_topic, ['https://google.com/hello-world'])
msg.set_topic_name("http://google.com/hello-world")
converted_topic = bugdown.topic_links(realm.id, msg.topic_name())
self.assertEqual(converted_topic, ['http://google.com/hello-world'])
msg.set_topic_name("Without scheme google.com/hello-world")
converted_topic = bugdown.topic_links(realm.id, msg.topic_name())
self.assertEqual(converted_topic, ['https://google.com/hello-world'])
msg.set_topic_name("Without scheme random.words/hello-world")
converted_topic = bugdown.topic_links(realm.id, msg.topic_name())
self.assertEqual(converted_topic, [])
msg.set_topic_name("Try out http://ftp.debian.org, https://google.com/ and https://google.in/.")
converted_topic = bugdown.topic_links(realm.id, msg.topic_name())
self.assertEqual(converted_topic, ['http://ftp.debian.org', 'https://google.com/', 'https://google.in/'])
def test_realm_patterns(self) -> None:
realm = get_realm('zulip')
url_format_string = r"https://trac.example.com/ticket/%(id)s"
realm_filter = RealmFilter(realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=url_format_string)
realm_filter.save()
self.assertEqual(
str(realm_filter),
'<RealmFilter(zulip): #(?P<id>[0-9]{2,8})'
' https://trac.example.com/ticket/%(id)s>')
msg = Message(sender=self.example_user('othello'))
msg.set_topic_name("#444")
flush_per_request_caches()
content = "We should fix #224 and #115, but not issue#124 or #1124z or [trac #15](https://trac.example.com/ticket/16) today."
converted = bugdown.convert(content, message_realm=realm, message=msg)
converted_topic = bugdown.topic_links(realm.id, msg.topic_name())
self.assertEqual(converted, '<p>We should fix <a href="https://trac.example.com/ticket/224">#224</a> and <a href="https://trac.example.com/ticket/115">#115</a>, but not issue#124 or #1124z or <a href="https://trac.example.com/ticket/16">trac #15</a> today.</p>')
self.assertEqual(converted_topic, ['https://trac.example.com/ticket/444'])
msg.set_topic_name("#444 https://google.com")
converted_topic = bugdown.topic_links(realm.id, msg.topic_name())
self.assertEqual(converted_topic, ['https://trac.example.com/ticket/444', 'https://google.com'])
RealmFilter(realm=realm, pattern=r'#(?P<id>[a-zA-Z]+-[0-9]+)',
url_format_string=r'https://trac.example.com/ticket/%(id)s').save()
msg = Message(sender=self.example_user('hamlet'))
content = '#ZUL-123 was fixed and code was deployed to production, also #zul-321 was deployed to staging'
converted = bugdown.convert(content, message_realm=realm, message=msg)
self.assertEqual(converted, '<p><a href="https://trac.example.com/ticket/ZUL-123">#ZUL-123</a> was fixed and code was deployed to production, also <a href="https://trac.example.com/ticket/zul-321">#zul-321</a> was deployed to staging</p>')
def assert_conversion(content: str, convert: bool=True) -> None:
converted = bugdown.convert(content, message_realm=realm, message=msg)
converted_topic = bugdown.topic_links(realm.id, content)
if convert:
self.assertTrue('trac.example.com' in converted)
self.assertEqual(len(converted_topic), 1)
self.assertTrue('trac.example.com' in converted_topic[0])
else:
self.assertTrue('trac.example.com' not in converted)
self.assertEqual(len(converted_topic), 0)
assert_conversion('Hello #123 World')
assert_conversion('Hello #123World', False)
assert_conversion('Hello#123 World', False)
assert_conversion('Hello#123World', False)
# Ideally, these should be converted too, but bugdown doesn't yet
# handle word-boundary detection correctly in languages that don't
# use whitespace to delimit words.
assert_conversion('チケットは#123です', False)
assert_conversion('チケットは #123です', False)
assert_conversion('チケットは#123 です', False)
assert_conversion('チケットは #123 です')
assert_conversion('(#123)')
assert_conversion('#123>')
assert_conversion('"#123"')
assert_conversion('#123@')
assert_conversion(')#123(', False)
assert_conversion('##123', False)
# Test that nested realm patterns avoid double matching
RealmFilter(realm=realm, pattern=r'hello#(?P<id>[0-9]+)',
url_format_string=r'https://trac.example.com/hello/%(id)s').save()
converted_topic = bugdown.topic_links(realm.id, 'hello#123 #234')
self.assertEqual(converted_topic, ['https://trac.example.com/ticket/234', 'https://trac.example.com/hello/123'])
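# A minimal sketch of how a linkifier pattern and its URL format string
# combine, assuming the pattern's named groups feed the %-style template;
# the real implementation registers these with the markdown engine rather
# than post-processing rendered text like this.
def _sketch_expand_linkifier(self, pattern: str, url_format: str,
                             text: str) -> List[str]:
    return [url_format % match.groupdict()
            for match in re.finditer(pattern, text)]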
def test_maybe_update_markdown_engines(self) -> None:
realm = get_realm('zulip')
url_format_string = r"https://trac.example.com/ticket/%(id)s"
realm_filter = RealmFilter(realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=url_format_string)
realm_filter.save()
bugdown.realm_filter_data = {}
bugdown.maybe_update_markdown_engines(None, False)
all_filters = bugdown.realm_filter_data
zulip_filters = all_filters[realm.id]
self.assertEqual(len(zulip_filters), 1)
self.assertEqual(zulip_filters[0],
('#(?P<id>[0-9]{2,8})', 'https://trac.example.com/ticket/%(id)s', realm_filter.id))
def test_flush_realm_filter(self) -> None:
realm = get_realm('zulip')
def flush() -> None:
'''
flush_realm_filter is a post-save hook, so calling it
directly for testing is kind of awkward
'''
class Instance:
realm_id: Optional[int] = None
instance = Instance()
instance.realm_id = realm.id
flush_realm_filter(sender=None, instance=instance)
def save_new_realm_filter() -> None:
realm_filter = RealmFilter(realm=realm,
pattern=r"whatever",
url_format_string='whatever')
realm_filter.save()
# start fresh for our realm
flush()
self.assertFalse(realm_in_local_realm_filters_cache(realm.id))
# call this just for side effects of populating the cache
realm_filters_for_realm(realm.id)
self.assertTrue(realm_in_local_realm_filters_cache(realm.id))
# Saving a new RealmFilter should have the side effect of
# flushing the cache.
save_new_realm_filter()
self.assertFalse(realm_in_local_realm_filters_cache(realm.id))
# and flush it one more time, to make sure we don't get a KeyError
flush()
self.assertFalse(realm_in_local_realm_filters_cache(realm.id))
def test_realm_patterns_negative(self) -> None:
realm = get_realm('zulip')
RealmFilter(realm=realm, pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=r"https://trac.example.com/ticket/%(id)s").save()
boring_msg = Message(sender=self.example_user('othello'))
boring_msg.set_topic_name("no match here")
converted_boring_topic = bugdown.topic_links(realm.id, boring_msg.topic_name())
self.assertEqual(converted_boring_topic, [])
def test_is_status_message(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = '/me makes a list\n* one\n* two'
rendered_content = render_markdown(msg, content)
self.assertEqual(
rendered_content,
'<p>/me makes a list</p>\n<ul>\n<li>one</li>\n<li>two</li>\n</ul>',
)
self.assertTrue(Message.is_status_message(content, rendered_content))
content = '/me takes a walk'
rendered_content = render_markdown(msg, content)
self.assertEqual(
rendered_content,
'<p>/me takes a walk</p>',
)
self.assertTrue(Message.is_status_message(content, rendered_content))
content = '/me writes a second line\nline'
rendered_content = render_markdown(msg, content)
self.assertEqual(
rendered_content,
'<p>/me writes a second line<br>\nline</p>',
)
self.assertTrue(Message.is_status_message(content, rendered_content))
def test_alert_words(self) -> None:
user_profile = self.example_user('othello')
do_add_alert_words(user_profile, ["ALERTWORD", "scaryword"])
msg = Message(sender=user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(user_profile.realm)
def render(msg: Message, content: str) -> str:
return render_markdown(msg,
content,
realm_alert_words_automaton=realm_alert_words_automaton)
content = "We have an ALERTWORD day today!"
self.assertEqual(render(msg, content), "<p>We have an ALERTWORD day today!</p>")
self.assertEqual(msg.user_ids_with_alert_words, {user_profile.id})
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "We have a NOTHINGWORD day today!"
self.assertEqual(render(msg, content), "<p>We have a NOTHINGWORD day today!</p>")
self.assertEqual(msg.user_ids_with_alert_words, set())
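# A naive sketch of the bookkeeping these alert-word tests rely on,
# substituting a substring scan for the real Aho-Corasick automaton and
# ignoring its word-boundary handling; illustrative only.
def _sketch_users_with_alert_words(self, content: str,
                                   words_by_user: Dict[int, List[str]]) -> Set[int]:
    lowered = content.lower()
    return {user_id for user_id, words in words_by_user.items()
            if any(word.lower() in lowered for word in words)}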
def test_alert_words_returns_user_ids_with_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
'hamlet': ['how'], 'cordelia': ['this possible'],
'iago': ['hello'], 'prospero': ['hello'],
'othello': ['how are you'], 'aaron': ['hey'],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user('polonius')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> str:
return render_markdown(msg,
content,
realm_alert_words_automaton=realm_alert_words_automaton)
content = "hello how is this possible how are you doing today"
render(msg, content)
expected_user_ids: Set[int] = {
user_profiles['hamlet'].id, user_profiles['cordelia'].id, user_profiles['iago'].id,
user_profiles['prospero'].id, user_profiles['othello'].id,
}
# All users except aaron have their alert word appear in the message content
self.assertEqual(msg.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_user_ids_with_alert_words_1(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
'hamlet': ['provisioning', 'Prod deployment'],
'cordelia': ['test', 'Prod'],
'iago': ['prod'], 'prospero': ['deployment'],
'othello': ['last'],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user('polonius')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> str:
return render_markdown(msg,
content,
realm_alert_words_automaton=realm_alert_words_automaton)
content = """Hello, everyone. Prod deployment has been completed
And this is a new line
to test out how markdown convert this into something line ending splitted array
and this is a new line
last"""
render(msg, content)
expected_user_ids: Set[int] = {
user_profiles['hamlet'].id,
user_profiles['cordelia'].id,
user_profiles['iago'].id,
user_profiles['prospero'].id,
user_profiles['othello'].id,
}
# All users have their alert word appear in the message content
self.assertEqual(msg.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_user_ids_with_alert_words_in_french(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
'hamlet': ['réglementaire', 'une politique', 'une merveille'],
'cordelia': ['énormément', 'Prod'],
'iago': ['prod'], 'prospero': ['deployment'],
'othello': ['last'],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user('polonius')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> str:
return render_markdown(msg,
content,
realm_alert_words_automaton=realm_alert_words_automaton)
content = """This is to test out alert words work in languages with accented characters too
bonjour est (énormément) ce a quoi ressemble le français
et j'espère qu'il n'y n' réglementaire a pas de mots d'alerte dans ce texte français
"""
render(msg, content)
expected_user_ids: Set[int] = {user_profiles['hamlet'].id, user_profiles['cordelia'].id}
# Only hamlet and cordelia have their alert-words appear in the message content
self.assertEqual(msg.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_empty_user_ids_with_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
'hamlet': [], 'cordelia': [], 'iago': [], 'prospero': [],
'othello': [], 'aaron': [],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user('polonius')
msg = Message(sender=user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> str:
return render_markdown(msg,
content,
realm_alert_words_automaton=realm_alert_words_automaton)
content = """hello how is this possible how are you doing today
This is to test that the no user_ids who have alrert wourldword is participating
in sending of the message
"""
render(msg, content)
expected_user_ids: Set[int] = set()
# None of the users have their alert-words appear in the message content
self.assertEqual(msg.user_ids_with_alert_words, expected_user_ids)
def get_mock_alert_words(self, num_words: int, word_length: int) -> List[str]:
return ['x' * word_length] * num_words
def test_alert_words_with_empty_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
'hamlet': [],
'cordelia': [],
'iago': [],
'othello': [],
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user('polonius')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> str:
return render_markdown(msg,
content,
realm_alert_words_automaton=realm_alert_words_automaton)
content = """This is to test a empty alert words i.e. no user has any alert-words set"""
render(msg, content)
expected_user_ids: Set[int] = set()
self.assertEqual(msg.user_ids_with_alert_words, expected_user_ids)
def test_alert_words_returns_user_ids_with_alert_words_with_huge_alert_words(self) -> None:
alert_words_for_users: Dict[str, List[str]] = {
'hamlet': ['issue124'],
'cordelia': self.get_mock_alert_words(500, 10),
'iago': self.get_mock_alert_words(500, 10),
'othello': self.get_mock_alert_words(500, 10),
}
user_profiles: Dict[str, UserProfile] = {}
for (username, alert_words) in alert_words_for_users.items():
user_profile = self.example_user(username)
user_profiles.update({username: user_profile})
do_add_alert_words(user_profile, alert_words)
sender_user_profile = self.example_user('polonius')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm_alert_words_automaton = get_alert_word_automaton(sender_user_profile.realm)
def render(msg: Message, content: str) -> str:
return render_markdown(msg,
content,
realm_alert_words_automaton=realm_alert_words_automaton)
content = """The code above will print 10 random values of numbers between 1 and 100.
The second line, for x in range(10), determines how many values will be printed (when you use
range(x), the number that you use in place of x will be the amount of values that you'll have
printed. if you want 20 values, use range(20). use range(5) if you only want 5 values returned,
etc.). I was talking abou the issue124 on github. Then the third line: print random.randint(1,101) will automatically select a random integer
between 1 and 100 for you. The process is fairly simple
"""
render(msg, content)
expected_user_ids: Set[int] = {user_profiles['hamlet'].id}
# Only hamlet has alert-word 'issue124' present in the message content
self.assertEqual(msg.user_ids_with_alert_words, expected_user_ids)
def test_default_code_block_language(self) -> None:
realm = get_realm('zulip')
self.assertEqual(realm.default_code_block_language, None)
text = "```{}\nconsole.log('Hello World');\n```\n"
# Render without default language
msg_with_js = bugdown_convert(text.format('js'))
msg_with_python = bugdown_convert(text.format('python'))
msg_without_language = bugdown_convert(text.format(''))
msg_with_quote = bugdown_convert(text.format('quote'))
msg_with_math = bugdown_convert(text.format('math'))
# Render with default=javascript
do_set_realm_property(realm, 'default_code_block_language', 'javascript')
msg_without_language_default_js = bugdown_convert(text.format(''))
msg_with_python_default_js = bugdown_convert(text.format('python'))
# Render with default=python
do_set_realm_property(realm, 'default_code_block_language', 'python')
msg_without_language_default_py = bugdown_convert(text.format(''))
msg_with_none_default_py = bugdown_convert(text.format('none'))
# Render with default=quote
do_set_realm_property(realm, 'default_code_block_language', 'quote')
msg_without_language_default_quote = bugdown_convert(text.format(''))
# Render with default=math
do_set_realm_property(realm, 'default_code_block_language', 'math')
msg_without_language_default_math = bugdown_convert(text.format(''))
# Render without default language
do_set_realm_property(realm, 'default_code_block_language', None)
msg_without_language_final = bugdown_convert(text.format(''))
self.assertTrue(msg_with_js == msg_without_language_default_js)
self.assertTrue(msg_with_python == msg_with_python_default_js == msg_without_language_default_py)
self.assertTrue(msg_with_quote == msg_without_language_default_quote)
self.assertTrue(msg_with_math == msg_without_language_default_math)
self.assertTrue(msg_without_language == msg_with_none_default_py == msg_without_language_final)
# Test checking inside nested quotes
nested_text = "````quote\n\n{}\n\n{}````".format(text.format('js'), text.format(''))
do_set_realm_property(realm, 'default_code_block_language', 'javascript')
rendered = bugdown_convert(nested_text)
with_language, without_language = re.findall(r'<pre>(.*?)$', rendered, re.MULTILINE)
self.assertTrue(with_language == without_language)
do_set_realm_property(realm, 'default_code_block_language', None)
rendered = bugdown_convert(nested_text)
with_language, without_language = re.findall(r'<pre>(.*?)$', rendered, re.MULTILINE)
self.assertFalse(with_language == without_language)
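# A minimal sketch of the fallback rule the assertions above pin down: an
# explicit fence language always wins and a bare fence inherits the realm
# default. An explicit 'none' survives here and is treated downstream as
# "no highlighting", which is why msg_with_none_default_py matches the
# plain rendering. Assumed reading, not the actual fenced-code processor.
def _sketch_effective_language(self, fence_lang: str,
                               realm_default: Optional[str]) -> Optional[str]:
    return fence_lang or realm_default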
def test_mention_wildcard(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@**all** test"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention" data-user-id="*">'
'@all'
'</span> test</p>')
self.assertTrue(msg.mentions_wildcard)
def test_mention_everyone(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@**everyone** test"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention" data-user-id="*">'
'@everyone'
'</span> test</p>')
self.assertTrue(msg.mentions_wildcard)
def test_mention_stream(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@**stream** test"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention" data-user-id="*">'
'@stream'
'</span> test</p>')
self.assertTrue(msg.mentions_wildcard)
def test_mention_at_wildcard(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@all test"
self.assertEqual(render_markdown(msg, content),
'<p>@all test</p>')
self.assertFalse(msg.mentions_wildcard)
self.assertEqual(msg.mentions_user_ids, set())
def test_mention_at_everyone(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@everyone test"
self.assertEqual(render_markdown(msg, content),
'<p>@everyone test</p>')
self.assertFalse(msg.mentions_wildcard)
self.assertEqual(msg.mentions_user_ids, set())
def test_mention_word_starting_with_at_wildcard(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "test @alleycat.com test"
self.assertEqual(render_markdown(msg, content),
'<p>test @alleycat.com test</p>')
self.assertFalse(msg.mentions_wildcard)
self.assertEqual(msg.mentions_user_ids, set())
def test_mention_at_normal_user(self) -> None:
user_profile = self.example_user('othello')
msg = Message(sender=user_profile, sending_client=get_client("test"))
content = "@aaron test"
self.assertEqual(render_markdown(msg, content),
'<p>@aaron test</p>')
self.assertFalse(msg.mentions_wildcard)
self.assertEqual(msg.mentions_user_ids, set())
def test_mention_single(self) -> None:
sender_user_profile = self.example_user('othello')
user_profile = self.example_user('hamlet')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
content = "@**King Hamlet**"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention" '
f'data-user-id="{user_id}">'
'@King Hamlet</span></p>')
self.assertEqual(msg.mentions_user_ids, {user_profile.id})
def test_mention_silent(self) -> None:
sender_user_profile = self.example_user('othello')
user_profile = self.example_user('hamlet')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
content = "@_**King Hamlet**"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention silent" '
f'data-user-id="{user_id}">'
'King Hamlet</span></p>')
self.assertEqual(msg.mentions_user_ids, set())
def test_possible_mentions(self) -> None:
def assert_mentions(content: str, names: Set[str], has_wildcards: bool=False) -> None:
self.assertEqual(possible_mentions(content), (names, has_wildcards))
assert_mentions('', set())
assert_mentions('boring', set())
assert_mentions('@**all**', set(), True)
assert_mentions('smush@**steve**smush', set())
assert_mentions(
'Hello @**King Hamlet** and @**Cordelia Lear**\n@**Foo van Barson|1234** @**all**',
{'King Hamlet', 'Cordelia Lear', 'Foo van Barson|1234'}, True,
)
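# A minimal sketch matching the cases above: a mention must not be glued
# to preceding non-whitespace (hence the smush case yields nothing), and
# wildcards are reported separately from names. The real
# possible_mentions() may use a different pattern; illustrative only.
def _sketch_possible_mentions(self, content: str) -> Set[str]:
    names = set(re.findall(r'(?<!\S)@\*\*([^*]+)\*\*', content))
    # The real helper also returns a has_wildcards boolean for these.
    return names - {'all', 'everyone', 'stream'}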
def test_mention_multiple(self) -> None:
sender_user_profile = self.example_user('othello')
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "@**King Hamlet** and @**Cordelia Lear**, check this out"
self.assertEqual(render_markdown(msg, content),
'<p>'
'<span class="user-mention" '
f'data-user-id="{hamlet.id}">@King Hamlet</span> and '
'<span class="user-mention" '
f'data-user-id="{cordelia.id}">@Cordelia Lear</span>, '
'check this out</p>')
self.assertEqual(msg.mentions_user_ids, {hamlet.id, cordelia.id})
def test_mention_in_quotes(self) -> None:
othello = self.example_user('othello')
hamlet = self.example_user('hamlet')
cordelia = self.example_user('cordelia')
msg = Message(sender=othello, sending_client=get_client("test"))
content = "> @**King Hamlet** and @**Othello, the Moor of Venice**\n\n @**King Hamlet** and @**Cordelia Lear**"
self.assertEqual(render_markdown(msg, content),
'<blockquote>\n<p>'
f'<span class="user-mention silent" data-user-id="{hamlet.id}">King Hamlet</span>'
' and '
f'<span class="user-mention silent" data-user-id="{othello.id}">Othello, the Moor of Venice</span>'
'</p>\n</blockquote>\n'
'<p>'
f'<span class="user-mention" data-user-id="{hamlet.id}">@King Hamlet</span>'
' and '
f'<span class="user-mention" data-user-id="{cordelia.id}">@Cordelia Lear</span>'
'</p>')
self.assertEqual(msg.mentions_user_ids, {hamlet.id, cordelia.id})
# Both fenced quote and > quote should be identical for both silent and regular syntax.
expected = ('<blockquote>\n<p>'
f'<span class="user-mention silent" data-user-id="{hamlet.id}">King Hamlet</span>'
'</p>\n</blockquote>')
content = "```quote\n@**King Hamlet**\n```"
self.assertEqual(render_markdown(msg, content), expected)
self.assertEqual(msg.mentions_user_ids, set())
content = "> @**King Hamlet**"
self.assertEqual(render_markdown(msg, content), expected)
self.assertEqual(msg.mentions_user_ids, set())
content = "```quote\n@_**King Hamlet**\n```"
self.assertEqual(render_markdown(msg, content), expected)
self.assertEqual(msg.mentions_user_ids, set())
content = "> @_**King Hamlet**"
self.assertEqual(render_markdown(msg, content), expected)
self.assertEqual(msg.mentions_user_ids, set())
def test_mention_duplicate_full_name(self) -> None:
realm = get_realm('zulip')
def make_user(email: str, full_name: str) -> UserProfile:
return create_user(
email=email,
password='whatever',
realm=realm,
full_name=full_name,
short_name='whatever',
)
sender_user_profile = self.example_user('othello')
twin1 = make_user('twin1@example.com', 'Mark Twin')
twin2 = make_user('twin2@example.com', 'Mark Twin')
cordelia = self.example_user('cordelia')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = f"@**Mark Twin|{twin1.id}**, @**Mark Twin|{twin2.id}** and @**Cordelia Lear**, hi."
self.assertEqual(render_markdown(msg, content),
'<p>'
'<span class="user-mention" '
f'data-user-id="{twin1.id}">@Mark Twin</span>, '
'<span class="user-mention" '
f'data-user-id="{twin2.id}">@Mark Twin</span> and '
'<span class="user-mention" '
f'data-user-id="{cordelia.id}">@Cordelia Lear</span>, '
'hi.</p>')
self.assertEqual(msg.mentions_user_ids, {twin1.id, twin2.id, cordelia.id})
def test_mention_invalid(self) -> None:
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "Hey @**Nonexistent User**"
self.assertEqual(render_markdown(msg, content),
'<p>Hey @<strong>Nonexistent User</strong></p>')
self.assertEqual(msg.mentions_user_ids, set())
def test_user_mention_atomic_string(self) -> None:
sender_user_profile = self.example_user('othello')
realm = get_realm('zulip')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
# Create a linkifier.
url_format_string = r"https://trac.example.com/ticket/%(id)s"
realm_filter = RealmFilter(realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=url_format_string)
realm_filter.save()
self.assertEqual(
str(realm_filter),
'<RealmFilter(zulip): #(?P<id>[0-9]{2,8})'
' https://trac.example.com/ticket/%(id)s>')
# Create a user that potentially interferes with the pattern.
test_user = create_user(email='atomic@example.com',
password='whatever',
realm=realm,
full_name='Atomic #123',
short_name='whatever')
content = "@**Atomic #123**"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention" '
f'data-user-id="{test_user.id}">'
'@Atomic #123</span></p>')
self.assertEqual(msg.mentions_user_ids, {test_user.id})
content = "@_**Atomic #123**"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention silent" '
f'data-user-id="{test_user.id}">'
'Atomic #123</span></p>')
self.assertEqual(msg.mentions_user_ids, set())
def create_user_group_for_test(self, user_group_name: str) -> UserGroup:
othello = self.example_user('othello')
return create_user_group(user_group_name, [othello], get_realm('zulip'))
def test_user_group_mention_single(self) -> None:
sender_user_profile = self.example_user('othello')
user_profile = self.example_user('hamlet')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_id = user_profile.id
user_group = self.create_user_group_for_test('support')
content = "@**King Hamlet** @*support*"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention" '
f'data-user-id="{user_id}">'
'@King Hamlet</span> '
'<span class="user-group-mention" '
f'data-user-group-id="{user_group.id}">'
'@support</span></p>')
self.assertEqual(msg.mentions_user_ids, {user_profile.id})
self.assertEqual(msg.mentions_user_group_ids, {user_group.id})
def test_user_group_mention_atomic_string(self) -> None:
sender_user_profile = self.example_user('othello')
realm = get_realm('zulip')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_profile = self.example_user('hamlet')
# Create a linkifier.
url_format_string = r"https://trac.example.com/ticket/%(id)s"
realm_filter = RealmFilter(realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=url_format_string)
realm_filter.save()
self.assertEqual(
str(realm_filter),
'<RealmFilter(zulip): #(?P<id>[0-9]{2,8})'
' https://trac.example.com/ticket/%(id)s>')
# Create a user-group that potentially interferes with the pattern.
user_id = user_profile.id
user_group = self.create_user_group_for_test('support #123')
content = "@**King Hamlet** @*support #123*"
self.assertEqual(render_markdown(msg, content),
'<p><span class="user-mention" '
f'data-user-id="{user_id}">'
'@King Hamlet</span> '
'<span class="user-group-mention" '
f'data-user-group-id="{user_group.id}">'
'@support #123</span></p>')
self.assertEqual(msg.mentions_user_ids, {user_profile.id})
self.assertEqual(msg.mentions_user_group_ids, {user_group.id})
def test_possible_user_group_mentions(self) -> None:
def assert_mentions(content: str, names: Set[str]) -> None:
self.assertEqual(possible_user_group_mentions(content), names)
assert_mentions('', set())
assert_mentions('boring', set())
assert_mentions('@**all**', set())
assert_mentions('smush@*steve*smush', set())
assert_mentions(
'@*support* Hello @**King Hamlet** and @**Cordelia Lear**\n'
'@**Foo van Barson** @**all**', {'support'},
)
assert_mentions(
'Attention @*support*, @*frontend* and @*backend*\ngroups.',
{'support', 'frontend', 'backend'},
)
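# The group-mention analogue of the sketch above, again illustrative:
# single asterisks instead of double, so @**user** style mentions fall
# through because the character right after '@*' is another '*'.
def _sketch_possible_user_group_mentions(self, content: str) -> Set[str]:
    return set(re.findall(r'(?<!\S)@\*([^*]+)\*', content))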
def test_user_group_mention_multiple(self) -> None:
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
support = self.create_user_group_for_test('support')
backend = self.create_user_group_for_test('backend')
content = "@*support* and @*backend*, check this out"
self.assertEqual(render_markdown(msg, content),
'<p>'
'<span class="user-group-mention" '
f'data-user-group-id="{support.id}">'
'@support</span> '
'and '
'<span class="user-group-mention" '
f'data-user-group-id="{backend.id}">'
'@backend</span>, '
'check this out'
'</p>')
self.assertEqual(msg.mentions_user_group_ids, {support.id, backend.id})
def test_user_group_mention_edit(self) -> None:
sender_user_profile = self.example_user('hamlet')
user_profile = self.example_user('othello')
self.create_user_group_for_test('support')
self.login('hamlet')
msg_id = self.send_stream_message(sender_user_profile,
"Denmark",
topic_name="editing",
content='test')
def update_message_and_check_flag(content: str, mentioned: bool) -> None:
result = self.client_patch("/json/messages/" + str(msg_id), {
'message_id': msg_id, 'content': content,
})
self.assert_json_success(result)
um = UserMessage.objects.get(
user_profile_id=user_profile.id,
message_id=msg_id,
)
if mentioned:
self.assertIn('mentioned', um.flags_list())
else:
self.assertNotIn('mentioned', um.flags_list())
update_message_and_check_flag("@*support*", True)
update_message_and_check_flag("@*support-invalid* edited", False)
update_message_and_check_flag("@*support* edited", True)
update_message_and_check_flag("edited", False)
update_message_and_check_flag("@*support*", True)
def test_user_group_mention_invalid(self) -> None:
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "Hey @*Nonexistent group*"
self.assertEqual(render_markdown(msg, content),
'<p>Hey @<em>Nonexistent group</em></p>')
self.assertEqual(msg.mentions_user_group_ids, set())
def test_stream_single(self) -> None:
denmark = get_stream('Denmark', get_realm('zulip'))
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Denmark**"
self.assertEqual(
render_markdown(msg, content),
'<p><a class="stream" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark">#{d.name}</a></p>'.format(
d=denmark,
))
def test_stream_multiple(self) -> None:
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
realm = get_realm('zulip')
denmark = get_stream('Denmark', realm)
scotland = get_stream('Scotland', realm)
content = "Look to #**Denmark** and #**Scotland**, there something"
self.assertEqual(render_markdown(msg, content),
'<p>Look to '
'<a class="stream" '
'data-stream-id="{denmark.id}" '
'href="/#narrow/stream/{denmark.id}-Denmark">#{denmark.name}</a> and '
'<a class="stream" '
'data-stream-id="{scotland.id}" '
'href="/#narrow/stream/{scotland.id}-Scotland">#{scotland.name}</a>, '
'there something</p>'.format(denmark=denmark, scotland=scotland))
def test_stream_case_sensitivity(self) -> None:
realm = get_realm('zulip')
case_sens = Stream.objects.create(name='CaseSens', realm=realm)
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**CaseSens**"
self.assertEqual(
render_markdown(msg, content),
'<p><a class="stream" data-stream-id="{s.id}" href="/#narrow/stream/{s.id}-{s.name}">#{s.name}</a></p>'.format(
s=case_sens,
))
def test_stream_case_sensitivity_nonmatching(self) -> None:
"""#StreamName requires the stream be spelled with the correct case
currently. If we change that in the future, we'll need to change this
test."""
realm = get_realm('zulip')
Stream.objects.create(name='CaseSens', realm=realm)
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**casesens**"
self.assertEqual(
render_markdown(msg, content),
'<p>#<strong>casesens</strong></p>')
def test_topic_single(self) -> None:
denmark = get_stream('Denmark', get_realm('zulip'))
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Denmark>some topic**"
self.assertEqual(
render_markdown(msg, content),
'<p><a class="stream-topic" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark/topic/some.20topic">#{d.name} > some topic</a></p>'.format(
d=denmark,
))
def test_topic_atomic_string(self) -> None:
realm = get_realm('zulip')
# Create a linkifier.
sender_user_profile = self.example_user('othello')
url_format_string = r"https://trac.example.com/ticket/%(id)s"
realm_filter = RealmFilter(realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=url_format_string)
realm_filter.save()
self.assertEqual(
realm_filter.__str__(),
'<RealmFilter(zulip): #(?P<id>[0-9]{2,8})'
' https://trac.example.com/ticket/%(id)s>')
# Create a topic link that potentially interferes with the pattern.
denmark = get_stream('Denmark', realm)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Denmark>#1234**"
self.assertEqual(
render_markdown(msg, content),
'<p><a class="stream-topic" data-stream-id="{d.id}" href="/#narrow/stream/{d.id}-Denmark/topic/.231234">#{d.name} > #1234</a></p>'.format(
d=denmark,
))
def test_topic_multiple(self) -> None:
denmark = get_stream('Denmark', get_realm('zulip'))
scotland = get_stream('Scotland', get_realm('zulip'))
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "This has two links: #**Denmark>some topic** and #**Scotland>other topic**."
self.assertEqual(
render_markdown(msg, content),
'<p>This has two links: '
'<a class="stream-topic" data-stream-id="{denmark.id}" '
'href="/#narrow/stream/{denmark.id}-{denmark.name}/topic/some.20topic">'
'#{denmark.name} > some topic</a>'
' and '
'<a class="stream-topic" data-stream-id="{scotland.id}" '
'href="/#narrow/stream/{scotland.id}-{scotland.name}/topic/other.20topic">'
'#{scotland.name} > other topic</a>'
'.</p>'.format(denmark=denmark, scotland=scotland))
def test_possible_stream_names(self) -> None:
content = '''#**test here**
This mentions #**Denmark** too.
#**garçon** #**천국** @**Ignore Person**
'''
self.assertEqual(
bugdown.possible_linked_stream_names(content),
{'test here', 'Denmark', 'garçon', '천국'},
)
def test_stream_unicode(self) -> None:
realm = get_realm('zulip')
uni = Stream.objects.create(name='привет', realm=realm)
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**привет**"
quoted_name = '.D0.BF.D1.80.D0.B8.D0.B2.D0.B5.D1.82'
href = f'/#narrow/stream/{uni.id}-{quoted_name}'
self.assertEqual(
render_markdown(msg, content),
'<p><a class="stream" data-stream-id="{s.id}" href="{href}">#{s.name}</a></p>'.format(
s=uni,
href=href,
))
def test_stream_atomic_string(self) -> None:
realm = get_realm('zulip')
# Create a linkifier.
sender_user_profile = self.example_user('othello')
url_format_string = r"https://trac.example.com/ticket/%(id)s"
realm_filter = RealmFilter(realm=realm,
pattern=r"#(?P<id>[0-9]{2,8})",
url_format_string=url_format_string)
realm_filter.save()
self.assertEqual(
realm_filter.__str__(),
'<RealmFilter(zulip): #(?P<id>[0-9]{2,8})'
' https://trac.example.com/ticket/%(id)s>')
# Create a stream that potentially interferes with the pattern.
stream = Stream.objects.create(name='Stream #1234', realm=realm)
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "#**Stream #1234**"
href = f'/#narrow/stream/{stream.id}-Stream-.231234'
self.assertEqual(
render_markdown(msg, content),
'<p><a class="stream" data-stream-id="{s.id}" href="{href}">#{s.name}</a></p>'.format(
s=stream,
href=href,
))
def test_stream_invalid(self) -> None:
sender_user_profile = self.example_user('othello')
msg = Message(sender=sender_user_profile, sending_client=get_client("test"))
content = "There #**Nonexistentstream**"
self.assertEqual(render_markdown(msg, content),
'<p>There #<strong>Nonexistentstream</strong></p>')
self.assertEqual(msg.mentions_user_ids, set())
def test_image_preview_title(self) -> None:
msg = '[My favorite image](https://example.com/testimage.png)'
converted = bugdown_convert(msg)
self.assertEqual(
converted,
'<p>'
'<a href="https://example.com/testimage.png">My favorite image</a>'
'</p>\n'
'<div class="message_inline_image">'
'<a href="https://example.com/testimage.png" title="My favorite image">'
'<img data-src-fullsize="/thumbnail?url=https%3A%2F%2Fexample.com%2Ftestimage.png&size=full" src="/thumbnail?url=https%3A%2F%2Fexample.com%2Ftestimage.png&size=thumbnail">'
'</a>'
'</div>',
)
def test_mit_rendering(self) -> None:
"""Test the markdown configs for the MIT Zephyr mirroring system;
verifies almost all inline patterns are disabled, but
inline_interesting_links is still enabled"""
msg = "**test**"
realm = get_realm("zephyr")
client = get_client("zephyr_mirror")
message = Message(sending_client=client,
sender=self.mit_user("sipbtest"))
converted = bugdown.convert(msg, message_realm=realm, message=message)
self.assertEqual(
converted,
"<p>**test**</p>",
)
msg = "* test"
converted = bugdown.convert(msg, message_realm=realm, message=message)
self.assertEqual(
converted,
"<p>* test</p>",
)
msg = "https://lists.debian.org/debian-ctte/2014/02/msg00173.html"
converted = bugdown.convert(msg, message_realm=realm, message=message)
self.assertEqual(
converted,
'<p><a href="https://lists.debian.org/debian-ctte/2014/02/msg00173.html">https://lists.debian.org/debian-ctte/2014/02/msg00173.html</a></p>',
)
def test_url_to_a(self) -> None:
url = 'javascript://example.com/invalidURL'
converted = bugdown.url_to_a(db_data=None, url=url, text=url)
self.assertEqual(
converted,
'javascript://example.com/invalidURL',
)
def test_disabled_code_block_processor(self) -> None:
msg = "Hello,\n\n" + \
" I am writing this message to test something. I am writing this message to test something."
converted = bugdown_convert(msg)
expected_output = '<p>Hello,</p>\n' + \
'<div class="codehilite"><pre><span></span><code>I am writing this message to test something. I am writing this message to test something.\n' + \
'</code></pre></div>'
self.assertEqual(converted, expected_output)
realm = Realm.objects.create(string_id='code_block_processor_test')
bugdown.maybe_update_markdown_engines(realm.id, True)
converted = bugdown.convert(msg, message_realm=realm, email_gateway=True)
expected_output = '<p>Hello,</p>\n' + \
'<p>I am writing this message to test something. I am writing this message to test something.</p>'
self.assertEqual(converted, expected_output)
def test_normal_link(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user('othello')
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "http://example.com/#settings/"
self.assertEqual(
bugdown.convert(msg, message_realm=realm, message=message),
'<p><a href="http://example.com/#settings/">http://example.com/#settings/</a></p>',
)
def test_relative_link(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user('othello')
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "http://zulip.testserver/#narrow/stream/999-hello"
self.assertEqual(
bugdown.convert(msg, message_realm=realm, message=message),
'<p><a href="#narrow/stream/999-hello">http://zulip.testserver/#narrow/stream/999-hello</a></p>',
)
def test_relative_link_streams_page(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user('othello')
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "http://zulip.testserver/#streams/all"
self.assertEqual(
bugdown.convert(msg, message_realm=realm, message=message),
'<p><a href="#streams/all">http://zulip.testserver/#streams/all</a></p>',
)
def test_md_relative_link(self) -> None:
realm = get_realm("zulip")
sender_user_profile = self.example_user('othello')
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
msg = "[hello](http://zulip.testserver/#narrow/stream/999-hello)"
self.assertEqual(
bugdown.convert(msg, message_realm=realm, message=message),
'<p><a href="#narrow/stream/999-hello">hello</a></p>',
)
class BugdownApiTests(ZulipTestCase):
def test_render_message_api(self) -> None:
content = 'That is a **bold** statement'
result = self.api_post(
self.example_user("othello"),
'/api/v1/messages/render',
dict(content=content),
)
self.assert_json_success(result)
self.assertEqual(result.json()['rendered'],
'<p>That is a <strong>bold</strong> statement</p>')
def test_render_mention_stream_api(self) -> None:
"""Determines whether we're correctly passing the realm context"""
content = 'This mentions #**Denmark** and @**King Hamlet**.'
result = self.api_post(
self.example_user("othello"),
'/api/v1/messages/render',
dict(content=content),
)
self.assert_json_success(result)
user_id = self.example_user('hamlet').id
stream_id = get_stream('Denmark', get_realm('zulip')).id
self.assertEqual(result.json()['rendered'],
f'<p>This mentions <a class="stream" data-stream-id="{stream_id}" href="/#narrow/stream/{stream_id}-Denmark">#Denmark</a> and <span class="user-mention" data-user-id="{user_id}">@King Hamlet</span>.</p>')
class BugdownErrorTests(ZulipTestCase):
def test_bugdown_error_handling(self) -> None:
with self.simulated_markdown_failure():
with self.assertRaises(BugdownRenderingException):
bugdown_convert('')
def test_send_message_errors(self) -> None:
message = 'whatever'
with self.simulated_markdown_failure():
# We don't use assertRaisesRegex because it seems to not
# handle i18n properly here on some systems.
with self.assertRaises(JsonableError):
self.send_stream_message(self.example_user("othello"), "Denmark", message)
def test_ultra_long_rendering(self) -> None:
"""A rendered message with an ultra-long lenght (> 10 * MAX_MESSAGE_LENGTH)
throws an exception"""
msg = 'mock rendered message\n' * MAX_MESSAGE_LENGTH
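        # Patching bugdown.timeout below makes convert() return this oversized string, bypassing real rendering.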
with mock.patch('zerver.lib.bugdown.timeout', return_value=msg), \
mock.patch('zerver.lib.bugdown.bugdown_logger'):
with self.assertRaises(BugdownRenderingException):
bugdown_convert(msg)
def test_curl_code_block_validation(self) -> None:
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
processor.run_content_validators = True
# Simulate code formatting.
processor.format_code = lambda lang, code: lang + ':' + code # type: ignore[assignment] # mypy doesn't allow monkey-patching functions
processor.placeholder = lambda s: '**' + s.strip('\n') + '**' # type: ignore[assignment] # https://github.com/python/mypy/issues/708
markdown = [
'``` curl',
'curl {{ api_url }}/v1/register',
' -u BOT_EMAIL_ADDRESS:BOT_API_KEY',
' -d "queue_id=1375801870:2942"',
'```',
]
with self.assertRaises(BugdownRenderingException):
processor.run(markdown)
def test_curl_code_block_without_validation(self) -> None:
processor = bugdown.fenced_code.FencedBlockPreprocessor(None)
# Simulate code formatting.
processor.format_code = lambda lang, code: lang + ':' + code # type: ignore[assignment] # mypy doesn't allow monkey-patching functions
processor.placeholder = lambda s: '**' + s.strip('\n') + '**' # type: ignore[assignment] # https://github.com/python/mypy/issues/708
markdown = [
'``` curl',
'curl {{ api_url }}/v1/register',
' -u BOT_EMAIL_ADDRESS:BOT_API_KEY',
' -d "queue_id=1375801870:2942"',
'```',
]
expected = [
'',
'**curl:curl {{ api_url }}/v1/register',
' -u BOT_EMAIL_ADDRESS:BOT_API_KEY',
' -d "queue_id=1375801870:2942"**',
'',
'',
]
result = processor.run(markdown)
self.assertEqual(result, expected)
class BugdownAvatarTestCase(ZulipTestCase):
def test_possible_avatar_emails(self) -> None:
content = '''
hello !avatar(foo@example.com) my email is ignore@ignore.com
!gravatar(bar@yo.tv)
smushing!avatar(hamlet@example.org) is allowed
'''
self.assertEqual(
bugdown.possible_avatar_emails(content),
{'foo@example.com', 'bar@yo.tv', 'hamlet@example.org'},
)
def test_avatar_with_id(self) -> None:
sender_user_profile = self.example_user('othello')
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
user_profile = self.example_user('hamlet')
msg = f'!avatar({user_profile.email})'
converted = bugdown.convert(msg, message=message)
values = {'email': user_profile.email, 'id': user_profile.id}
self.assertEqual(
converted,
'<p><img alt="{email}" class="message_body_gravatar" src="/avatar/{id}?s=30" title="{email}"></p>'.format(**values))
def test_avatar_of_unregistered_user(self) -> None:
sender_user_profile = self.example_user('othello')
message = Message(sender=sender_user_profile, sending_client=get_client("test"))
email = 'fakeuser@example.com'
msg = f'!avatar({email})'
converted = bugdown.convert(msg, message=message)
self.assertEqual(
converted,
'<p><img alt="{0}" class="message_body_gravatar" src="/avatar/{0}?s=30" title="{0}"></p>'.format(email))
| 52.011204 | 1,845 | 0.635322 |
f711f53dbfad008c2626bad9630cebfb74095511 | 1,685 | py | Python | rat-sql-gap/seq2struct/commands/preprocess.py | JuruoMP/gap-exp | 2d7af8a1da2f0ff8f9d3a2c6e15cc6383c716c05 | [
"Apache-2.0"
] | null | null | null | rat-sql-gap/seq2struct/commands/preprocess.py | JuruoMP/gap-exp | 2d7af8a1da2f0ff8f9d3a2c6e15cc6383c716c05 | [
"Apache-2.0"
] | null | null | null | rat-sql-gap/seq2struct/commands/preprocess.py | JuruoMP/gap-exp | 2d7af8a1da2f0ff8f9d3a2c6e15cc6383c716c05 | [
"Apache-2.0"
] | null | null | null | import argparse
import json
import os
import _jsonnet
import tqdm
from seq2struct import datasets
from seq2struct import models
from seq2struct.utils import registry
from seq2struct.utils import vocab
class Preprocessor:
def __init__(self, config):
self.config = config
self.model_preproc = registry.instantiate(
registry.lookup('model', config['model']).Preproc,
config['model'])
def preprocess(self):
self.model_preproc.clear_items()
for section in self.config['data']:
# if section=="train":
# continue
data = registry.construct('dataset', self.config['data'][section])
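            # Validate every example; items that fail to parse are reported and skipped.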
for item in tqdm.tqdm(data, desc=section, dynamic_ncols=True):
            to_add, validation_info = self.model_preproc.validate_item(item, section)
            if to_add:
                self.model_preproc.add_item(item, section, validation_info)
            else:
                print("======== Error parsing: {}".format(" ".join(item.text)))
self.model_preproc.save()
def add_parser():
parser = argparse.ArgumentParser()
parser.add_argument('--config', required=True)
parser.add_argument('--config-args')
args = parser.parse_args()
return args
def main(args):
if args.config_args:
config = json.loads(_jsonnet.evaluate_file(args.config, tla_codes={'args': args.config_args}))
else:
config = json.loads(_jsonnet.evaluate_file(args.config))
preprocessor = Preprocessor(config)
preprocessor.preprocess()
if __name__ == '__main__':
args = add_parser()
main(args)
| 31.203704 | 102 | 0.633828 |
f7120a6ba7fb9a76d9c012b1b2b2b4711cfb483e | 1,765 | py | Python | google-cloud-sdk/lib/surface/dns/managed_zones/list.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2017-11-29T18:52:27.000Z | 2017-11-29T18:52:27.000Z | google-cloud-sdk/.install/.backup/lib/surface/dns/managed_zones/list.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | null | null | null | google-cloud-sdk/.install/.backup/lib/surface/dns/managed_zones/list.py | KaranToor/MA450 | c98b58aeb0994e011df960163541e9379ae7ea06 | [
"Apache-2.0"
] | 1 | 2020-07-25T12:09:01.000Z | 2020-07-25T12:09:01.000Z | # Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""gcloud dns managed-zones list command."""
from apitools.base.py import list_pager
from googlecloudsdk.calliope import base
from googlecloudsdk.core import apis
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
class List(base.ListCommand):
"""View the list of all your managed-zones.
This command displays the list of your managed-zones.
## EXAMPLES
To see the list of all managed-zones, run:
$ {command}
  To see the list of the first 10 managed-zones, run:
$ {command} --limit=10
"""
def Collection(self):
return 'dns.managedZones'
def GetUriFunc(self):
def _GetUri(resource):
return resources.REGISTRY.Create(
self.Collection(), managedZone=resource.name).SelfLink()
return _GetUri
def Run(self, args):
dns_client = apis.GetClientInstance('dns', 'v1')
dns_messages = apis.GetMessagesModule('dns', 'v1')
project_id = properties.VALUES.core.project.Get(required=True)
return list_pager.YieldFromList(
dns_client.managedZones,
dns_messages.DnsManagedZonesListRequest(project=project_id),
limit=args.limit, field='managedZones')
| 29.416667 | 74 | 0.735411 |
f7120db0ad7e024065400616a724ec4766012512 | 26,403 | py | Python | dataFetcher.py | Jmion/SwisscomMIP | d29b0de222be44f85a84bc7dc3f4521741fdeda1 | [
"MIT"
] | 1 | 2021-10-06T06:57:55.000Z | 2021-10-06T06:57:55.000Z | dataFetcher.py | Jmion/SwisscomMIP | d29b0de222be44f85a84bc7dc3f4521741fdeda1 | [
"MIT"
] | null | null | null | dataFetcher.py | Jmion/SwisscomMIP | d29b0de222be44f85a84bc7dc3f4521741fdeda1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# # Loading data
import pandas as pd
import plotly.express as px
from tqdm import tqdm
import functools
import numpy as np
from difflib import SequenceMatcher
from oauthlib.oauth2 import BackendApplicationClient
from requests_oauthlib import OAuth2Session
from datetime import datetime, timedelta
import pprint
import requests
import os
import getpass
import json
from queue import Queue
from threading import Thread
from time import time
import logging
import os
# Caching in case of multiple calls.
@functools.lru_cache(maxsize=128)
def get_tiles(municipalityId: int) -> pd.DataFrame:
"""Fetches tile information for a municipality id.
Args:
        municipalityId: id of the municipality as defined by the Federal Office of Statistics,
https://www.bfs.admin.ch/bfs/fr/home/bases-statistiques/repertoire-officiel-communes-suisse.assetdetail.11467406.html
Return:
A dataframe containing the following columns:
        [tileID, ll_lon, ll_lat, ur_lon, ur_lat]
tileID: corresponds to a unique ID as defined in the Swisscom FAQ page.
ll_lon: longitude coordinate of the lower left corner of the tile.
ll_lat: latitude coordinate of the lower left corner of the tile.
ur_lon: longitude coordinate of the upper right corner of the tile.
ur_lat: latitude coordinate of the upper right corner of the tile.
        If municipalityId is invalid, an error message is printed and an empty DataFrame is returned.
"""
api_request = (
BASE_URL
+ f'/grids/municipalities/{municipalityId}'
)
data = oauth.get(api_request, headers=headers).json()
if(data.get('status') == None):
tileID = [t['tileId'] for t in data['tiles']]
ll_lon = [t['ll']['x'] for t in data['tiles']]
ll_lat= [t['ll']['y'] for t in data['tiles']]
ur_lon = [t['ur']['x'] for t in data['tiles']]
ur_lat = [t['ur']['y'] for t in data['tiles']]
else:
print(f'get_tiles: failed with status code {data.get("status")}. {data.get("message")}')
return pd.DataFrame(data={'tileID': [], 'll_lat': [], 'll_lon': [], 'ur_lat': [], 'ur_lon': []})
return pd.DataFrame(data={'tileID': tileID, 'll_lat': ll_lat, 'll_lon': ll_lon, 'ur_lat': ur_lat, 'ur_lon': ur_lon})
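# Usage sketch (requires valid MIP credentials; 261 is assumed to be Zürich's BFS municipality number):
#     zurich_tiles = get_tiles(261)
#     visualize_coordinates(zurich_tiles, 'll_lat', 'll_lon')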
def get_municipalityID(name: str) -> np.array(int):
"""Converts a municipality name to ID
Args:
        name: name of the municipality.
Returns:
An array containing all the municipality ID's corresponding to the name.
If the name invalid will return an empty array.
"""
return commune.loc[commune.GDENAME == name].GDENR.to_numpy()
def visualize_coordinates(df: pd.DataFrame, latitude: str, longitude: str) -> None :
"""Visualizes coordinates in dataframe on map
Retrieves columns with name latitude and logitude and visualizes it on a map.
Args:
df: A dataframe containing the coordinates.
latitude: String key of the column in the dataframe containing the latitude.
longitude: String key of the column in the dataframe containing the longitude.
"""
fig = px.scatter_mapbox(df, lat=latitude, lon=longitude,
color_continuous_scale=px.colors.cyclical.IceFire, size_max=15, zoom=10,
mapbox_style="carto-positron")
fig.show()
def get_all_tiles_switzerland() -> pd.DataFrame:
"""Fetches the tile information for all the tiles in Switzerland.
Returns:
        A DataFrame containing the tile information for every tile in Switzerland.
        The format of the DataFrame is the same as the return value of get_tiles().
"""
tiles = get_tiles(commune.GDENR.unique()[0])
for c in tqdm(commune.GDENR.unique().tolist()):
tiles = tiles.append(get_tiles(c))
return tiles
def get_daily_demographics(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0) ):
"""Fetches daily demographics
Fetches the daily demographics, age distribution, of the tiles.
Args:
        tiles: Array of tile IDs that will be used to query demographic data.
day: date of the data to be fetched.
Returns:
        A dataframe indexed by tileID with the columns ageDistribution and maleProportion:
+----------+-----------------------+---------------------+
| | ageDistribution | maleProportion |
+----------+-----------------------+---------------------+
| 44554639 | NaN | 0.49828359484672546 |
+----------+-----------------------+---------------------+
| 44271906 | [0.21413850784301758, | 0.493218 |
| | 0.27691012620925903, | |
| | 0.37422287464141846, | |
| | 0.13472850620746613] | |
+----------+-----------------------+---------------------+
In the example above tile 44554639 does not have any age distribution data.
        The data is k-anonymized. Therefore, if some tiles are missing data, it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
dates = [(day + timedelta(hours=delta)) for delta in range(24)]
date2score = dict()
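    # Query the API in batches of MAX_NB_TILES_REQUEST tiles, the per-request limit.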
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/daily/{day.isoformat().split("T")[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
for t in data.get("tiles", []):
if date2score.get(t['tileId']) == None:
date2score[t['tileId']] = dict()
date2score[t['tileId']] = {"ageDistribution": t.get("ageDistribution"),"maleProportion": t.get("maleProportion")}
return pd.DataFrame.from_dict(date2score).transpose()
def get_hourly_demographics_dataframe(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):
"""Fetches hourly demographics of age categories for 24 hours
Fetches the hourly demographics, age distribution, of the tiles.
Age categories are the following 0 - 19, 20 - 39, 40 - 64, >64
Args:
        tiles: Array of tile IDs that will be used to query demographic data.
day: date of the data to be fetched.
Returns:
        DataFrame containing the demographics. The names
        of the columns are:
[age_cat, age_distribution, male_proportion]
+----------+---------------------+---------+------------------+-----------------+
| | | age_cat | age_distribution | male_proportion |
+----------+---------------------+---------+------------------+-----------------+
| tileID | time | | | |
+----------+---------------------+---------+------------------+-----------------+
| 44394309 | 2020-01-27T00:00:00 | NaN | NaN | 0.474876 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T01:00:00 | NaN | NaN | 0.483166 |
+----------+---------------------+---------+------------------+-----------------+
| | ... | | | |
+----------+---------------------+---------+------------------+-----------------+
| 44290729 | 2020-01-27T06:00:00 | 0.0 | 0.192352 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T06:00:00 | 1.0 | 0.269984 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T06:00:00 | 2.0 | 0.363481 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
| | 2020-01-27T06:00:00 | 3.0 | 0.174183 | 0.497038 |
+----------+---------------------+---------+------------------+-----------------+
        The data is k-anonymized. Therefore, if some tiles are not present in the output dataframe, it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
def get_hourly_demographics(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0) ):
"""Fetches hourly male proportion and age categories for 24 hours
Args:
            tiles: Array of tile IDs that will be used to query demographic data.
day: date of the data to be fetched.
Returns:
Returns a dictionary with as a key the tileID, and as a value an object that is as follows:
{tileID: {dateTime:{ "ageDistribution": [0-19, 20-39, 40-64, 64+], "maleProportion": value},
{dateTime2: ...}}}
26994514: {'2020-01-27T00:00:00': {'ageDistribution': [0.1925136297941208,
0.2758632302284241,
0.362215131521225,
0.16940800845623016],
'maleProportion': 0.4727686941623688},
'2020-01-27T01:00:00': {'ageDistribution': None,
'maleProportion': 0.4896690547466278},
'2020-01-27T02:00:00': {'ageDistribution': None,
'maleProportion': 0.48882684111595154},
            The data is k-anonymized. Therefore, if some values are None, it means that no data was available.
To find out more about demographics visit the Heatmap FAQ.
"""
dates = [(day + timedelta(hours=delta)) for delta in range(24)]
date2score = dict()
for dt in tqdm(dates, desc="get_hourly_demographics: hours", leave=True):
for tiles_subset in [tiles[i:i + 100] for i in range(0, len(tiles), 100)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/hourly/{dt.isoformat()}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
for t in data.get("tiles", []):
if date2score.get(t['tileId']) == None:
date2score[t['tileId']] = dict()
date2score.get(t['tileId'])[dt.isoformat()] = {"ageDistribution": t.get("ageDistribution"),"maleProportion": t.get("maleProportion")}
return date2score
data = get_hourly_demographics(tiles, day)
tile_id = []
time_data = []
age_distribution = []
age_cat = []
male_proportion = []
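    # Flatten the nested {tileID: {time: stats}} mapping into long-format columns.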
for i in data:
for time in data[i]:
if data[i][time].get("ageDistribution") != None:
for (idx,a) in enumerate(data[i][time].get("ageDistribution", [])):
age_cat.append(idx)
age_distribution.append(a)
tile_id.append(i)
time_data.append(time)
male_proportion.append(data[i][time].get("maleProportion"))
else:
tile_id.append(i)
time_data.append(time)
age_distribution.append(None)
male_proportion.append(data[i][time].get("maleProportion"))
age_cat.append(None)
return pd.DataFrame(data={'tileID': tile_id, "age_cat": age_cat, 'age_distribution':age_distribution, "male_proportion": male_proportion, 'time': time_data}).set_index(['tileID', 'time'])
def get_daily_density(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:
"""Fetches the daily density of tiles.
Fetches the daily density of the tiles and creates a dataframe of the fetched data.
Args:
        tiles: Array of tile IDs for which daily density data needs to be fetched.
day: Day to fetch the density data for.
Returns:
        DataFrame containing the tileID and the score. The names of the columns are:
        [score]
        The identifier of the row is based on the tileID
+----------+-------+
| | score |
+----------+-------+
| tileID | |
+----------+-------+
| 44394309 | 1351 |
+----------+-------+
| 44394315 | 1103 |
+----------+-------+
| 44460297 | 875 |
+----------+-------+
| 44488589 | 1387 |
+----------+-------+
| 44498028 | 678 |
+----------+-------+
        Tiles with k-anonymized dwell density scores. If a tile is not present, Swisscom is
unable to provide a value due to k-anonymization. To find out more on density
scores read the Heatmap FAQ.
"""
tileID = []
score = []
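    # Batch the requests; tiles suppressed by k-anonymization are simply absent from the response.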
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-density/daily/{day.isoformat().split("T")[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
if data.get("tiles") != None:
for t in data["tiles"]:
tileID.append(t['tileId'])
score.append(t["score"])
return pd.DataFrame(data={'tileID': tileID, 'score':score}).set_index("tileID")
def get_hourly_density_dataframe(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):
"""Fetches the hourly density of tiles for 24 hours.
Fetches the hourly density of the tiles and creates a dataframe of the fetched data.
Args:
        tiles: Array of tile IDs for which daily density data needs to be fetched.
day: Day to fetch the density data for.
Returns:
        DataFrame containing the tileID and the score. The names of the columns are:
        [score]
        The identifier of the row is based on the [tileID, time]
+----------+---------------------+-------+
| | | score |
+----------+---------------------+-------+
| tileID | time | |
+----------+---------------------+-------+
| 44394309 | 2020-01-27T00:00:00 | 52 |
| +---------------------+-------+
| | 2020-01-27T01:00:00 | 68 |
| +---------------------+-------+
| | 2020-01-27T02:00:00 | 69 |
| +---------------------+-------+
| | 2020-01-27T03:00:00 | 69 |
| +---------------------+-------+
| | 2020-01-27T04:00:00 | 69 |
+----------+---------------------+-------+
        Tiles with k-anonymized dwell density scores. If a tile is not present, Swisscom is
unable to provide a value due to k-anonymization. To find out more on density
scores read the Heatmap FAQ.
"""
def get_hourly_density(tiles, day=datetime(year=2020, month=1, day=27, hour=0, minute=0)):
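        # Returns {tileID: {isoTime: score}} covering the 24 hours of the given day.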
dates = [(day + timedelta(hours=delta)) for delta in range(24)]
date2score = dict()
print("getHourlyDensity")
for dt in tqdm(dates, desc="get_hourly_density: hours", leave=True):
for tiles_subset in [tiles[i:i + 100] for i in range(0, len(tiles), 100)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-density/hourly/{dt.isoformat()}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
for t in oauth.get(api_request, headers=headers).json().get("tiles",[]):
if date2score.get(t['tileId']) == None:
date2score[t['tileId']] = dict()
date2score.get(t['tileId'])[dt.isoformat()] = t['score']
return date2score
tiles_data = []
time_data = []
score = []
data = get_hourly_density(tiles, day)
for t in data:
for time in data[t]:
time_data.append(time)
tiles_data.append(t)
score.append(data[t][time])
return pd.DataFrame(data={'tileID': tiles_data, 'score':score, 'time': time_data}).set_index(['tileID', 'time'])
def fetch_data_city(city: str) -> None:
"""Fetches the data for a city if the data is not yet cashed on the computer.
"""
compression = ".xz"
folder = os.path.join(".","data")
def file_path(file_name: str) -> str:
return os.path.join(folder, file_name)
if not(os.path.exists(folder)):
os.mkdir(folder)
tiles_path = file_path(f'{city}Tiles.pkl{compression}')
hourly_dem_path = file_path(f'{city}HourlyDemographics.pkl{compression}')
hourly_density_path = file_path(f'{city}HourlyDensity.pkl{compression}')
daily_density_path = file_path(f'{city}DensityDaily.pkl{compression}')
daily_demographics_path = file_path(f'{city}DemographicsDaily.pkl{compression}')
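    # Only call the API for datasets that are not already cached on disk.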
if not(os.path.isfile(tiles_path)):
tiles = get_tiles(get_municipalityID(city)[0])
tiles.to_pickle(tiles_path)
else:
tiles = pd.read_pickle(tiles_path)
if not(os.path.isfile(hourly_dem_path)):
hourly_dem = get_hourly_demographics_dataframe(tiles['tileID'].to_numpy())
hourly_dem.to_pickle(hourly_dem_path)
if not(os.path.isfile(hourly_density_path)):
hourly_dens = get_hourly_density_dataframe(tiles['tileID'].to_numpy())
hourly_dens.to_pickle(hourly_density_path)
if not(os.path.isfile(daily_density_path)):
get_daily_density(tiles['tileID'].to_numpy()).to_pickle(daily_density_path)
if not(os.path.isfile(daily_demographics_path)):
get_daily_demographics(tiles['tileID'].to_numpy()).to_pickle(daily_demographics_path)
def clean_cities_list(cities: [str]) -> [str]:
"""Cleans the list of cities by removing all the cities that are not found in the
    official list of cities provided by the Federal Statistics Office.
Args:
        cities: List of cities to check and clean.
Return:
List containing a subset of the input list such that all elements are valid.
"""
invalid_cities = []
    # Validate that the city names are valid
for c in cities:
if len(commune.loc[commune.GDENAME == c].GDENR.to_numpy()) == 0:
city = []
sim_value = []
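            # Fuzzy-match against every official name to suggest likely intended spellings.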
for f in commune.GDENAME:
r = SequenceMatcher(None, c, f).ratio()
if r > 0.5:
city.append(f)
sim_value.append(r)
d = pd.DataFrame(data={"city": city, "value": sim_value})
potential_cities = d.sort_values("value", ascending=False).head(5).city.to_numpy()
print(f"City nammed: {c} cannot be found in official records. Did you mean: {potential_cities} ? {c} will be ignored.")
invalid_cities.append(c)
return [c for c in cities if not(c in invalid_cities)]
# Multithread fetch implementation
class DownloadWorker(Thread):
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
while True:
# Get the work from the queue and expand the tuple
city = self.queue.get()
if city == -1:
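                # Re-queue the -1 sentinel so the sibling workers also shut down.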
self.queue.put(-1)
break
try:
fetch_data_city(city)
finally:
self.queue.task_done()
def download_commune_excel() -> None:
'''
    Downloads the Excel spreadsheet from the Swiss Federal Statistical Office that maps town names to unique IDs
'''
print('Beginning commune file download with requests')
folder = os.path.join(".","data")
if not(os.path.exists(folder)):
os.mkdir(folder)
url = 'https://www.bfs.admin.ch/bfsstatic/dam/assets/11467406/master'
r = requests.get(url)
with open(os.path.join(".", "data", 'commune.xlsx'), 'wb') as f:
f.write(r.content)
print("End of commune file download")
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
BASE_URL = "https://api.swisscom.com/layer/heatmaps/demo"
TOKEN_URL = "https://consent.swisscom.com/o/oauth2/token"
MAX_NB_TILES_REQUEST = 100
headers = {"scs-version": "2"}
client_id = "" # customer key in the Swisscom digital market place
client_secret = "" # customer secret in the Swisscom digital market place
if client_id == "":
client_id = os.environ.get("CLIENT_ID", "")
if client_id == "":
client_id = input("Enter MIP Client ID: ")
os.environ["CLIENT_ID"] = client_id
if client_secret == "":
client_secret = os.environ.get("CLIENT_SECRET", "")
if client_secret == "":
client_secret = getpass.getpass('Enter MIP client secret:')
os.environ["CLIENT_SECRET"] = client_secret
# Fetch an access token
client = BackendApplicationClient(client_id=client_id)
oauth = OAuth2Session(client=client)
oauth.fetch_token(token_url=TOKEN_URL, client_id=client_id,
client_secret=client_secret)
def main():
ts = time()
if not(os.path.exists(os.path.join(".", "data", 'commune.xlsx'))):
download_commune_excel()
global commune
commune = pd.read_excel(os.path.join(".", "data", 'commune.xlsx'), sheet_name='GDE')
cities = ["Saas-Fee", "Arosa", "Bulle", "Laax","Belp" ,"Saanen","Adelboden", "Andermatt", "Davos", "Bulle", "Bern", "Genève", "Lausanne", "Zürich", "Neuchâtel", "Sion", "St. Gallen", "Appenzell", "Solothurn", "Zug", "Fribourg", "Luzern", "Ecublens (VD)", "Kloten", "Le Grand-Saconnex", "Nyon", "Zermatt", "Lugano"]
cities = clean_cities_list(cities)
queue = Queue()
for x in range(2):
worker = DownloadWorker(queue)
        worker.daemon = True
worker.start()
for c in cities:
logger.info('Queueing {}'.format(c))
queue.put(c)
queue.join()
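    # All cities processed; enqueue the -1 sentinel to stop the worker threads.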
queue.put(-1)
logger.info('Took %s', time() - ts)
list_of_cities_path = os.path.join(".", "data","CityList.json")
cityList=[]
if os.path.isfile(list_of_cities_path):
with open(list_of_cities_path, "r") as filehandle:
cityList = json.load(filehandle)
with open(list_of_cities_path, "w") as filehandle:
for city in cities:
if not(city in cityList):
cityList.append(city)
json.dump(cityList, filehandle)
if __name__ == "__main__":
main()
# Other functions not currently used
def get_daily_demographics_male(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:
"""Fetches Daily demographics.
Fetches the daily male proportion of the tiles and creates a dataframe of the fetched data.
Args:
tiles: Array of tile id's, what will be used to querry demographic data.
day: date of the data to be fetched.
Returns:
        DataFrame containing the tileID and the male proportion. The names of the columns are:
[tileID, maleProportion]
        The data is k-anonymized. Therefore, if some tiles are not present in the output dataframe, it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
tileID = []
maleProportion = []
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/daily/{day.isoformat().split("T")[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
if data.get("tiles") != None:
for t in data["tiles"]:
if t.get("maleProportion") != None:
tileID.append(t['tileId'])
maleProportion.append(t["maleProportion"])
return pd.DataFrame(data={'tileID': tileID, 'maleProportion':maleProportion})
def get_daily_demographics_age(tiles: np.array(int), day=datetime(year=2020, month=1, day=27)) -> pd.DataFrame:
"""Fetches daily demographics of age categories
Fetches the daily demographics, age distribution, of the tiles and creates a dataframe of the fetched data.
Args:
        tiles: Array of tile IDs that will be used to query demographic data.
day: date of the data to be fetched.
Returns:
        DataFrame containing the tileID and an array of values corresponding to the age distribution. The names
        of the columns are:
[tileID, ageDistribution]
        The data is k-anonymized. Therefore, if some tiles are not present in the output dataframe, it
means that the data is not available. To find out more about demographics visit the Heatmap FAQ.
"""
tileID = []
ageDistribution = []
for tiles_subset in [tiles[i:i + MAX_NB_TILES_REQUEST] for i in range(0, len(tiles), MAX_NB_TILES_REQUEST)]:
api_request = (
BASE_URL
+ f'/heatmaps/dwell-demographics/daily/{day.isoformat().split("T")[0]}'
+ "?tiles="
+ "&tiles=".join(map(str, tiles_subset))
)
data = oauth.get(api_request, headers=headers).json()
for t in data.get("tiles", []):
if t.get("ageDistribution") != None:
tileID.append(t['tileId'])
ageDistribution.append(t["ageDistribution"])
return pd.DataFrame(data={'tileID': tileID, 'ageDistribution':ageDistribution})
| 40.37156 | 318 | 0.553308 |
f712372c535bdd51773b69555e9bde147a05679f | 1,811 | py | Python | src/speaking_eye/activity_stat.py | alena-bartosh/speaking-eye | 86e692cf5d3182f77b28e5264da74657d1972303 | [
"MIT"
] | 6 | 2020-04-30T17:35:09.000Z | 2021-11-18T09:41:50.000Z | src/speaking_eye/activity_stat.py | alena-bartosh/speaking-eye | 86e692cf5d3182f77b28e5264da74657d1972303 | [
"MIT"
] | 6 | 2020-05-16T17:52:45.000Z | 2021-08-16T11:47:11.000Z | src/speaking_eye/activity_stat.py | alena-bartosh/speaking-eye | 86e692cf5d3182f77b28e5264da74657d1972303 | [
"MIT"
] | null | null | null | from datetime import timedelta
from .activity import Activity
from .activity_helper import ActivityHelper
class ActivityStat:
"""Store and update the amount of time spent in a certain activity"""
def __init__(self, work_time: timedelta = timedelta(),
off_time: timedelta = timedelta()) -> None:
"""
Can be used for the first reading ApplicationInfo
from detailed/distracting lists when no activity has started yet
"""
self.work_time = work_time
self.off_time = off_time
@staticmethod
def from_activity(activity: Activity) -> 'ActivityStat':
"""For creating ActivityStat when Activity has already started"""
ActivityHelper.raise_if_not_finished(activity)
# NOTE: for distracting activity work_time is distracting time
if activity.is_work_time:
work_time = ActivityHelper.get_activity_time(activity)
off_time = timedelta()
else:
work_time = timedelta()
off_time = ActivityHelper.get_activity_time(activity)
return ActivityStat(work_time, off_time)
def update(self, activity: Activity) -> None:
ActivityHelper.raise_if_not_finished(activity)
if activity.is_work_time:
self.work_time += ActivityHelper.get_activity_time(activity)
else:
self.off_time += ActivityHelper.get_activity_time(activity)
def __eq__(self, other: object) -> bool:
"""
Overrides the default implementation
to use the object values instead of identifiers for comparison
"""
if not isinstance(other, ActivityStat):
return False
if self.work_time != other.work_time:
return False
return self.off_time == other.off_time
| 32.339286 | 73 | 0.662065 |
f7123e1717463d041806ea6c1f2fb3540327f441 | 142 | py | Python | src/ex1/data_perturb/__init__.py | unica-isde/isde | a9603d8b8d1a347447cec483108132aa1e8457eb | [
"Apache-2.0"
] | 7 | 2021-01-20T09:11:53.000Z | 2022-03-15T12:19:06.000Z | src/ex1/data_perturb/__init__.py | unica-isde/isde | a9603d8b8d1a347447cec483108132aa1e8457eb | [
"Apache-2.0"
] | null | null | null | src/ex1/data_perturb/__init__.py | unica-isde/isde | a9603d8b8d1a347447cec483108132aa1e8457eb | [
"Apache-2.0"
] | 10 | 2020-11-01T09:47:02.000Z | 2021-11-02T12:59:50.000Z | from .data_perturb import DataPerturb
from .data_perturb_uniform import DataPerturbUniform
from .data_perturb_normal import DataPerturbNormal
| 35.5 | 52 | 0.894366 |
f712644582d0d626da065aaa558a9d554e67fef2 | 607 | py | Python | tree/0235-lowest-common-ancestor-of-a-binary-search-tree.py | ZHUANGHP/LeetCode-Solution-Python | af2b14abb7f50ee061bcd601c8666b32e448cbd8 | [
"Apache-2.0"
] | 1 | 2021-01-10T17:03:21.000Z | 2021-01-10T17:03:21.000Z | tree/Python/0235-lowest-common-ancestor-of-a-binary-search-tree.py | Eddiehugh/LeetCode-Solution-Well-Formed | bdc1e7153de737b84890153434bf8df5838d0be5 | [
"Apache-2.0"
] | null | null | null | tree/Python/0235-lowest-common-ancestor-of-a-binary-search-tree.py | Eddiehugh/LeetCode-Solution-Well-Formed | bdc1e7153de737b84890153434bf8df5838d0be5 | [
"Apache-2.0"
] | 1 | 2021-07-25T07:53:14.000Z | 2021-07-25T07:53:14.000Z | class TreeNode:
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution:
def lowestCommonAncestor(self, root: 'TreeNode', p: 'TreeNode', q: 'TreeNode') -> 'TreeNode':
# 介于二者之间
if p.val <= root.val <= q.val or q.val <= root.val <= p.val:
return root
if root.val < p.val and root.val < q.val:
            # root is smaller than both, so the LCA lies in the right subtree
return self.lowestCommonAncestor(root.right, p, q)
if root.val > p.val and root.val > q.val:
            # root is larger than both, so the LCA lies in the left subtree
return self.lowestCommonAncestor(root.left, p, q)
| 30.35 | 97 | 0.551895 |
f71272e8e92da38f391aee78dd99a6c81fa425f7 | 2,717 | py | Python | src/gedml/core/collectors/__init__.py | wangck20/GeDML | 1f76ac2094d7b88be7fd4eb6145e5586e547b9ca | [
"MIT"
] | null | null | null | src/gedml/core/collectors/__init__.py | wangck20/GeDML | 1f76ac2094d7b88be7fd4eb6145e5586e547b9ca | [
"MIT"
] | null | null | null | src/gedml/core/collectors/__init__.py | wangck20/GeDML | 1f76ac2094d7b88be7fd4eb6145e5586e547b9ca | [
"MIT"
] | null | null | null | """
Collectors have two main functions: synthesizing (or collecting) samples and compute metric matrix (which will be passed to selectors and losses).
All methods are listed below:
+-----------------------+-------------------------------------------------------------------------------+
| method | description |
+=======================+===============================================================================+
| BaseCollector | Base class. |
+-----------------------+-------------------------------------------------------------------------------+
| DefaultCollector | Do nothing. |
+-----------------------+-------------------------------------------------------------------------------+
| ProxyCollector | Maintain a set of proxies |
+-----------------------+-------------------------------------------------------------------------------+
| MoCoCollector | paper: **Momentum Contrast for Unsupervised Visual Representation Learning** |
+-----------------------+-------------------------------------------------------------------------------+
| SimSiamCollector | paper: **Exploring Simple Siamese Representation Learning** |
+-----------------------+-------------------------------------------------------------------------------+
| HDMLCollector | paper: **Hardness-Aware Deep Metric Learning** |
+-----------------------+-------------------------------------------------------------------------------+
| DAMLCollector | paper: **Deep Adversarial Metric Learning** |
+-----------------------+-------------------------------------------------------------------------------+
| DVMLCollector | paper: **Deep Variational Metric Learning** |
+-----------------------+-------------------------------------------------------------------------------+
Notes:
    ``embedders`` differ significantly from ``collectors``: ``embedders`` also take charge of generating the embeddings which will be used to compute metrics.
Todo:
``epoch-based collector``
"""
from .iteration_collectors import (
DefaultCollector,
ProxyCollector,
MoCoCollector,
SimSiamCollector,
HDMLCollector,
DAMLCollector,
DVMLCollector
)
from .epoch_collectors import (
GlobalProxyCollector,
_DefaultGlobalCollector
)
from .base_collector import BaseCollector | 57.808511 | 161 | 0.3364 |
f7127631ea7b180440afc8e03d98802793f87c99 | 3,194 | py | Python | alipay/aop/api/request/AnttechBlockchainQueryconditionQueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/AnttechBlockchainQueryconditionQueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/request/AnttechBlockchainQueryconditionQueryRequest.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.FileItem import FileItem
from alipay.aop.api.constant.ParamConstants import *
class AnttechBlockchainQueryconditionQueryRequest(object):
def __init__(self, biz_model=None):
self._biz_model = biz_model
self._version = "1.0"
self._terminal_type = None
self._terminal_info = None
self._prod_code = None
self._notify_url = None
self._return_url = None
self._udf_params = None
self._need_encrypt = False
@property
def biz_model(self):
return self._biz_model
@biz_model.setter
def biz_model(self, value):
self._biz_model = value
@property
def version(self):
return self._version
@version.setter
def version(self, value):
self._version = value
@property
def terminal_type(self):
return self._terminal_type
@terminal_type.setter
def terminal_type(self, value):
self._terminal_type = value
@property
def terminal_info(self):
return self._terminal_info
@terminal_info.setter
def terminal_info(self, value):
self._terminal_info = value
@property
def prod_code(self):
return self._prod_code
@prod_code.setter
def prod_code(self, value):
self._prod_code = value
@property
def notify_url(self):
return self._notify_url
@notify_url.setter
def notify_url(self, value):
self._notify_url = value
@property
def return_url(self):
return self._return_url
@return_url.setter
def return_url(self, value):
self._return_url = value
@property
def udf_params(self):
return self._udf_params
@udf_params.setter
def udf_params(self, value):
if not isinstance(value, dict):
return
self._udf_params = value
@property
def need_encrypt(self):
return self._need_encrypt
@need_encrypt.setter
def need_encrypt(self, value):
self._need_encrypt = value
def add_other_text_param(self, key, value):
if not self.udf_params:
self.udf_params = dict()
self.udf_params[key] = value
def get_params(self):
params = dict()
params[P_METHOD] = 'anttech.blockchain.querycondition.query'
params[P_VERSION] = self.version
if self.biz_model:
params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), ensure_ascii=False, sort_keys=True, separators=(',', ':'))
if self.terminal_type:
params['terminal_type'] = self.terminal_type
if self.terminal_info:
params['terminal_info'] = self.terminal_info
if self.prod_code:
params['prod_code'] = self.prod_code
if self.notify_url:
params['notify_url'] = self.notify_url
if self.return_url:
params['return_url'] = self.return_url
if self.udf_params:
params.update(self.udf_params)
return params
def get_multipart_params(self):
multipart_params = dict()
return multipart_params
| 24.953125 | 142 | 0.637445 |
f712987253d445bca87ff8f58246aad6d0a02786 | 800 | py | Python | Code/Collecting Statistics/plotStats.py | KunalSin9h/Playlist_Analysis | e8f7313f7c6dfcd3b3cabdfde89c8cdcc1f72f06 | [
"MIT"
] | 3 | 2021-09-23T12:08:40.000Z | 2021-09-25T08:38:29.000Z | Code/Collecting Statistics/plotStats.py | KunalSin9h/Playlist_Analysis | e8f7313f7c6dfcd3b3cabdfde89c8cdcc1f72f06 | [
"MIT"
] | 1 | 2021-09-18T06:13:27.000Z | 2021-09-22T04:55:24.000Z | Code/Collecting Statistics/plotStats.py | KunalSin9h/Playlist_Analysis | e8f7313f7c6dfcd3b3cabdfde89c8cdcc1f72f06 | [
"MIT"
] | null | null | null | """
plotStats() method to collect statistics for the track data.
Author: Kunal Singh
Email: pykunalsingh@gmail.com
"""
def plotStats(fileName):
# read in a playlist
with open(fileName, 'rb') as fp:
plist = plistlib.load(fp)
# get the tracks from the playlist
tracks = plist['Tracks']
# create lists of songs rating and track durations
ratings = []
durations = []
# iterate through the tracks
for trackId, track in tracks.items():
try:
ratings.append(track['Album Rating'])
durations.append(track['Total Time'])
except:
pass
    # ensure that valid data was collected
if ratings == [] or durations == []:
print("No valid Album Rating/Total Time data in %s."%fileName)
return
| 25.806452 | 70 | 0.61625 |
f712c802e34f008430d987035c36a72ff149c984 | 6,193 | py | Python | avanthive/tests/dbapi_test_case.py | amount/PyHive | be525f1ab500916bd1b04de6aed6e1505db4f3d8 | [
"Apache-2.0"
] | null | null | null | avanthive/tests/dbapi_test_case.py | amount/PyHive | be525f1ab500916bd1b04de6aed6e1505db4f3d8 | [
"Apache-2.0"
] | null | null | null | avanthive/tests/dbapi_test_case.py | amount/PyHive | be525f1ab500916bd1b04de6aed6e1505db4f3d8 | [
"Apache-2.0"
] | null | null | null | # encoding: utf-8
"""Shared DB-API test cases"""
from __future__ import absolute_import
from __future__ import unicode_literals
from builtins import object
from builtins import range
from future.utils import with_metaclass
from avanthive import exc
import abc
import contextlib
import functools
def with_cursor(fn):
"""Pass a cursor to the given function and handle cleanup.
The cursor is taken from ``self.connect()``.
"""
@functools.wraps(fn)
def wrapped_fn(self, *args, **kwargs):
with contextlib.closing(self.connect()) as connection:
with contextlib.closing(connection.cursor()) as cursor:
fn(self, cursor, *args, **kwargs)
return wrapped_fn
class DBAPITestCase(with_metaclass(abc.ABCMeta, object)):
@abc.abstractmethod
def connect(self):
raise NotImplementedError # pragma: no cover
@with_cursor
def test_fetchone(self, cursor):
cursor.execute('SELECT * FROM one_row')
self.assertEqual(cursor.rownumber, 0)
self.assertEqual(cursor.fetchone(), (1,))
self.assertEqual(cursor.rownumber, 1)
self.assertIsNone(cursor.fetchone())
@with_cursor
def test_fetchall(self, cursor):
cursor.execute('SELECT * FROM one_row')
self.assertEqual(cursor.fetchall(), [(1,)])
cursor.execute('SELECT a FROM many_rows ORDER BY a')
self.assertEqual(cursor.fetchall(), [(i,) for i in range(10000)])
@with_cursor
def test_null_param(self, cursor):
cursor.execute('SELECT %s FROM one_row', (None,))
self.assertEqual(cursor.fetchall(), [(None,)])
@with_cursor
def test_iterator(self, cursor):
cursor.execute('SELECT * FROM one_row')
self.assertEqual(list(cursor), [(1,)])
self.assertRaises(StopIteration, cursor.__next__)
@with_cursor
def test_description_initial(self, cursor):
self.assertIsNone(cursor.description)
@with_cursor
def test_description_failed(self, cursor):
try:
cursor.execute('blah_blah')
except exc.DatabaseError:
pass
self.assertIsNone(cursor.description)
@with_cursor
def test_bad_query(self, cursor):
def run():
cursor.execute('SELECT does_not_exist FROM this_really_does_not_exist')
cursor.fetchone()
self.assertRaises(exc.DatabaseError, run)
@with_cursor
def test_concurrent_execution(self, cursor):
cursor.execute('SELECT * FROM one_row')
cursor.execute('SELECT * FROM one_row')
self.assertEqual(cursor.fetchall(), [(1,)])
@with_cursor
def test_executemany(self, cursor):
for length in 1, 2:
cursor.executemany(
'SELECT %(x)d FROM one_row',
[{'x': i} for i in range(1, length + 1)]
)
self.assertEqual(cursor.fetchall(), [(length,)])
@with_cursor
def test_executemany_none(self, cursor):
cursor.executemany('should_never_get_used', [])
self.assertIsNone(cursor.description)
self.assertRaises(exc.ProgrammingError, cursor.fetchone)
@with_cursor
def test_fetchone_no_data(self, cursor):
self.assertRaises(exc.ProgrammingError, cursor.fetchone)
@with_cursor
def test_fetchmany(self, cursor):
cursor.execute('SELECT * FROM many_rows LIMIT 15')
self.assertEqual(cursor.fetchmany(0), [])
self.assertEqual(len(cursor.fetchmany(10)), 10)
self.assertEqual(len(cursor.fetchmany(10)), 5)
@with_cursor
def test_arraysize(self, cursor):
cursor.arraysize = 5
cursor.execute('SELECT * FROM many_rows LIMIT 20')
self.assertEqual(len(cursor.fetchmany()), 5)
@with_cursor
def test_polling_loop(self, cursor):
"""Try to trigger the polling logic in fetchone()"""
cursor._poll_interval = 0
cursor.execute('SELECT COUNT(*) FROM many_rows')
self.assertEqual(cursor.fetchone(), (10000,))
@with_cursor
def test_no_params(self, cursor):
cursor.execute("SELECT '%(x)s' FROM one_row")
self.assertEqual(cursor.fetchall(), [('%(x)s',)])
def test_escape(self):
"""Verify that funny characters can be escaped as strings and SELECTed back"""
bad_str = '''`~!@#$%^&*()_+-={}[]|\\;:'",./<>?\n\r\t '''
self.run_escape_case(bad_str)
@with_cursor
def run_escape_case(self, cursor, bad_str):
cursor.execute(
'SELECT %d, %s FROM one_row',
(1, bad_str)
)
self.assertEqual(cursor.fetchall(), [(1, bad_str,)])
cursor.execute(
'SELECT %(a)d, %(b)s FROM one_row',
{'a': 1, 'b': bad_str}
)
self.assertEqual(cursor.fetchall(), [(1, bad_str)])
@with_cursor
def test_invalid_params(self, cursor):
self.assertRaises(exc.ProgrammingError, lambda: cursor.execute('', 'hi'))
self.assertRaises(exc.ProgrammingError, lambda: cursor.execute('', [object]))
def test_open_close(self):
with contextlib.closing(self.connect()):
pass
with contextlib.closing(self.connect()) as connection:
with contextlib.closing(connection.cursor()):
pass
@with_cursor
def test_unicode(self, cursor):
unicode_str = "王兢"
cursor.execute(
'SELECT %s FROM one_row',
(unicode_str,)
)
self.assertEqual(cursor.fetchall(), [(unicode_str,)])
@with_cursor
def test_null(self, cursor):
cursor.execute('SELECT null FROM many_rows')
self.assertEqual(cursor.fetchall(), [(None,)] * 10000)
cursor.execute('SELECT IF(a % 11 = 0, null, a) FROM many_rows')
self.assertEqual(cursor.fetchall(), [(None if a % 11 == 0 else a,) for a in range(10000)])
@with_cursor
def test_sql_where_in(self, cursor):
cursor.execute('SELECT * FROM many_rows where a in %s', ([1, 2, 3],))
self.assertEqual(len(cursor.fetchall()), 3)
cursor.execute('SELECT * FROM many_rows where b in %s limit 10',
(['blah'],))
self.assertEqual(len(cursor.fetchall()), 10)
| 34.21547 | 98 | 0.62942 |
f712ce0b1470383ff19a2fba7bee20d030c85cf0 | 454 | py | Python | models/dist_phold/experiment.py | MISTCARRYYOU/PythonPDEVS | 53cad29832b3c489ab037bdc487affcbf1e3f408 | [
"Apache-2.0"
] | 1 | 2018-09-19T14:42:28.000Z | 2018-09-19T14:42:28.000Z | models/dist_phold/experiment.py | MISTCARRYYOU/PythonPDEVS | 53cad29832b3c489ab037bdc487affcbf1e3f408 | [
"Apache-2.0"
] | null | null | null | models/dist_phold/experiment.py | MISTCARRYYOU/PythonPDEVS | 53cad29832b3c489ab037bdc487affcbf1e3f408 | [
"Apache-2.0"
] | 1 | 2021-01-14T12:21:35.000Z | 2021-01-14T12:21:35.000Z | import model
import logging
import sys
sys.path.append('../../src/')
from simulator import Simulator
sys.setrecursionlimit(50000)
model = model.AutoDistPHOLD(int(sys.argv[1]), int(sys.argv[2]), int(sys.argv[3]))
sim = Simulator(model)
#sim.setVerbose(None)
sim.setTerminationTime(200)
sim.setMessageCopy('custom')
sim.setStateSaving("custom")
sim.setMemoization(True)
sim.setGVTInterval(5)
#sim.setGVTInterval(30)
#sim.setShowProgress()
sim.simulate()
| 22.7 | 81 | 0.768722 |
f712e0eb5555667a488c2bf52ce2443674b5782c | 1,719 | py | Python | Sec24_Design/q0284.py | OctoberChang/LeetCode-Solutions | bb7958194e7b196729611cbad19ee792ba41c429 | [
"MIT"
] | 2 | 2021-01-26T00:59:47.000Z | 2021-11-20T02:55:13.000Z | Sec24_Design/q0284.py | OctoberChang/LeetCode-Solutions | bb7958194e7b196729611cbad19ee792ba41c429 | [
"MIT"
] | null | null | null | Sec24_Design/q0284.py | OctoberChang/LeetCode-Solutions | bb7958194e7b196729611cbad19ee792ba41c429 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
# Below is the interface for Iterator, which is already defined for you.
#
# class Iterator:
# def __init__(self, nums):
# """
# Initializes an iterator object to the beginning of a list.
# :type nums: List[int]
# """
#
# def hasNext(self):
# """
# Returns true if the iteration has more elements.
# :rtype: bool
# """
#
# def next(self):
# """
# Returns the next element in the iteration.
# :rtype: int
# """
class PeekingIterator:
def __init__(self, iterator):
"""
Initialize your data structure here.
:type iterator: Iterator
"""
self.iterator = iterator
self.val_ = None
self.has_next_ = iterator.hasNext()
        self.has_peek_ = False
def peek(self):
"""
Returns the next element in the iteration without advancing the iterator.
:rtype: int
"""
        if not self.has_peek_:
            self.has_peek_ = True
self.val_ = self.iterator.next()
return self.val_
def next(self):
"""
:rtype: int
"""
self.val_ = self.peek()
        self.has_peek_ = False
self.has_next_ = self.iterator.hasNext()
return self.val_
def hasNext(self):
"""
:rtype: bool
"""
return self.has_next_
# Your PeekingIterator object will be instantiated and called as such:
# iter = PeekingIterator(Iterator(nums))
# while iter.hasNext():
# val = iter.peek() # Get the next element but not advance the iterator.
# iter.next() # Should return the same value as [val].
| 25.656716 | 81 | 0.556719 |
f712f6558ed50db7fff7120d2677a6ee59fe1aa4 | 15,039 | py | Python | defoe/fmp/document.py | kallewesterling/defoe | d72af2f748fd4363a4718c93bb0b0284b8cb1f3e | [
"MIT"
] | 2 | 2022-02-14T12:10:54.000Z | 2022-02-14T12:35:44.000Z | defoe/fmp/document.py | kallewesterling/defoe | d72af2f748fd4363a4718c93bb0b0284b8cb1f3e | [
"MIT"
] | 17 | 2022-02-09T21:46:14.000Z | 2022-02-25T14:55:09.000Z | defoe/fmp/document.py | kallewesterling/defoe | d72af2f748fd4363a4718c93bb0b0284b8cb1f3e | [
"MIT"
] | 1 | 2022-02-14T13:19:08.000Z | 2022-02-14T13:19:08.000Z | """
Object model representation of a document represented as a collection
of XML files in METS/MODS format.
"""
from defoe.fmp.page import Page
from lxml import etree
import re
class Document(object):
"""
Object model representation of a document represented as a
collection of XML files in METS/MODS format.
"""
def __init__(self, code, archive):
"""
Constructor
:param code: identifier for this document within an archive
:type code: str or unicode
:param archive: archive to which this document belongs
:type archive: defoe.alto.archive.Archive
"""
self.namespaces = {
"mods": "http://www.loc.gov/mods/v3",
"mets": "http://www.loc.gov/METS/",
"xsi": "http://www.w3.org/2001/XMLSchema-instance",
"premis": "info:lc/xmlns/premis-v2",
"dcterms": "http://purl.org/dc/terms/",
"fits": "http://hul.harvard.edu/ois/xml/ns/fits/fits_output",
"xlink": "http://www.w3.org/1999/xlink",
}
self.archive = archive
self.code = code
self.num_pages = 0
self.metadata = self.archive.open_document(self.code)
self.metadata_tree = etree.parse(self.metadata)
self.title = self.single_query("//mods:title/text()")
self.page_codes = sorted(
self.archive.document_codes[self.code], key=Document.sorter
)
self.num_pages = len(self.page_codes)
self.years = Document.parse_year(self.single_query("//mods:dateIssued/text()"))
self.publisher = self.single_query("//mods:publisher/text()")
self.place = self.single_query("//mods:placeTerm/text()")
# place may often have a year in.
self.years += Document.parse_year(self.place)
self.years = sorted(self.years)
self.documentId = self.single_query("//mods:identifier/text()")
if self.years:
self.year = self.years[0]
else:
self.year = None
self.date = self.single_query("//mods:dateIssued/text()")
self.document_type = "newspaper"
self.model = "fmp"
#### New ############
# [art0001, art0002, art0003]
self.articlesId = self.parse_structMap_Logical()
# {'#art0001':['#pa0001001', '#pa0001002', '#pa0001003', '#pa0001004', '#pa0001005', '#pa0001006', '#pa0001007'], '#art0002': ['#pa0001008', '#pa0001009' ..]}
# {'pa0001001': 'page1 area1', 'pa0001003': 'page1 area3'}
self.articlesParts, self.partsPage = self.parse_structLink()
# {'pa0001001': ['RECT', '1220,5,2893,221'], 'pa0001003': ['RECT', '2934,14,3709,211'], 'pa0004044': ['RECT', '5334,2088,5584,2121']}
self.partsCoord = self.parse_structMap_Physical()
self.num_articles = len(self.articlesId)
#######################
@staticmethod
def parse_year(text):
"""
Parse text to extract years of form 16xx to 19xx.
Any date of form NN following a year of form CCYY to CCYY
is used to derive a date CCNN.
As an exception to this rule, single years are parsed
from dates precisely matching the format YYYY-MM-DD.
For example:
* "1862, [1861]" returns [1861, 1862]
* "1847 [1846, 47]" returns [1846, 1847]
* "1873-80" returns [1873, 1880]
* "1870-09-01" returns [1870]
:param text: text to parse
:type text: str or unicode
:return: years
:rtype: set(int)
"""
try:
date_pattern = re.compile(
"(1[6-9]\d{2}(-|/)(0[1-9]|1[0-2])(-|/)(0[1-9]|[12]\d|3[01]))"
)
if date_pattern.match(text):
return [int(text[0:4])]
long_pattern = re.compile("(1[6-9]\d\d)")
short_pattern = re.compile("\d\d")
results = []
chunks = iter(long_pattern.split(text)[1:])
for year, rest in zip(chunks, chunks):
results.append(int(year))
century = year[0:2]
short_years = short_pattern.findall(rest)
for short_year in short_years:
results.append(int(century + short_year))
return sorted(set(results))
except TypeError:
return []
@staticmethod
def sorter(page_code):
"""
Given a page code of form [0-9]*(_[0-9]*), split this
into the sub-codes. For example, given 123_456, return
[123, 456]
:param page_code: page code
:type page_code: str or unicode
:return: list of page codes
:rtype: list(int)
"""
codes = list(map(int, page_code.split("_")))
return codes
def query(self, query):
"""
Run XPath query.
:param query: XPath query
:type query: str or unicode
:return: list of query results or None if none
:rtype: list(lxml.etree.<MODULE>) (depends on query)
"""
return self.metadata_tree.xpath(query, namespaces=self.namespaces)
def single_query(self, query):
"""
Run XPath query and return first result.
:param query: XPath query
:type query: str or unicode
:return: query result or None if none
:rtype: str or unicode
"""
result = self.query(query)
if not result:
return None
return str(result[0])
def page(self, code):
"""
Given a page code, return a new Page object.
:param code: page code
:type code: str or unicode
:return: Page object
:rtype: defoe.alto.page.Page
"""
return Page(self, code)
def get_document_info(self):
"""
Gets information from ZIP file about metadata file
corresponding to this document.
:return: information
:rtype: zipfile.ZipInfo
"""
return self.archive.get_document_info(self.code)
def get_page_info(self, page_code):
"""
Gets information from ZIP file about a page file within
this document.
:param page_code: file code
:type page_code: str or unicode
:return: information
:rtype: zipfile.ZipInfo
"""
return self.archive.get_page_info(self.code, page_code)
def __getitem__(self, index):
"""
Given a page index, return a new Page object.
:param index: page index
:type index: int
:return: Page object
:rtype: defoe.alto.page.Page
"""
return self.page(self.page_codes[index])
def __iter__(self):
"""
Iterate over page codes, returning new Page objects.
:return: Page object
:rtype: defoe.alto.page.Page
"""
for page_code in self.page_codes:
yield self.page(page_code)
def scan_strings(self):
"""
Iterate over strings in pages.
:return: page and string
:rtype: tuple(defoe.alto.page.Page, str or unicode)
"""
for page in self:
for string in page.strings:
yield page, string
def scan_tb(self):
"""
Iterate over textblocks in pages
:return: page and textblock
:rtype: tuple(defoe.alto.page.Page, str or unicode)
"""
for page in self:
for tb in page.tb:
yield page, tb
def scan_words(self):
"""
Iterate over words in pages.
:return: page and word
:rtype: tuple(defoe.alto.page.Page, str or unicode)
"""
for page in self:
for word in page.words:
yield page, word
def scan_wc(self):
"""
        Iterate over word qualities in pages.
:return: page and wc
:rtype: tuple(defoe.alto.page.Page, str or unicode)
"""
for page in self:
for wc in page.wc:
yield page, wc
@property
def articles(self):
"""
        Iterates over the pages and groups the textblocks into articles.
        :return: a dictionary keyed by article ID; each article consists of one or more textblocks.
        :rtype: dict
{'art0001': ['pa0001001': ['RECT', '1220,5,2893,221', 'page1 area1'], 'pa0001003': ['RECT', '2934,14,3709,211', page1 area3], ...]], ...}
"""
self.document_articles = {}
articlesInfo = self.articles_info()
for page in self:
for tb in page.tb:
for articleId in articlesInfo:
for partId in articlesInfo[articleId]:
if partId == tb.textblock_id:
if articleId not in self.document_articles:
self.document_articles[articleId] = []
tb.textblock_shape = articlesInfo[articleId][partId][0]
tb.textblock_coords = articlesInfo[articleId][partId][1]
tb.textblock_page_area = articlesInfo[articleId][partId][2]
self.document_articles[articleId].append(tb)
return self.document_articles
def scan_cc(self):
"""
        Iterate over character qualities in pages.
:return: page and cc
:rtype: tuple(defoe.alto.page.Page, str or unicode)
"""
for page in self:
for cc in page.cc:
yield page, cc
def scan_images(self):
"""
Iterate over images in pages.
:return: page and XML fragment with image
:rtype: tuple(defoe.alto.page.Page, lxml.etree._Element)
"""
for page in self:
for image in page.images:
yield page, image
def strings(self):
"""
Iterate over strings.
:return: string
:rtype: str or unicode
"""
for _, string in self.scan_strings():
yield string
def tb(self):
"""
        Iterate over textblocks.
        :return: textblock
:rtype: str or unicode
"""
for _, tb in self.scan_tb():
yield tb
def words(self):
"""
        Iterate over words.
:return: word
:rtype: str or unicode
"""
for _, word in self.scan_words():
yield word
def images(self):
"""
Iterate over images.
:return: XML fragment with image
:rtype: lxml.etree._Element
"""
for _, image in self.scan_images():
yield image
def wc(self):
"""
        Iterate over word qualities.
:return: wc
:rtype: str or unicode
"""
for _, wc in self.scan_wc():
yield wc
def cc(self):
"""
        Iterate over character qualities.
        :return: cc
:rtype: str or unicode
"""
for _, cc in self.scan_cc():
yield cc
def parse_structMap_Physical(self):
"""
Parse the structMap Physical information
        :return: dictionary with the ID of each part as key; for each part it stores the shape and coordinates.
:rtype: dictionary
{'pa0001001': ['RECT', '1220,5,2893,221'], 'pa0001003': ['RECT', '2934,14,3709,211'], 'pa0004044': ['RECT', '5334,2088,5584,2121']}
"""
partsCoord = dict()
elem = self.metadata_tree.find(
'mets:structMap[@TYPE="PHYSICAL"]', self.namespaces
)
for physic in elem:
parts = physic.findall('mets:div[@TYPE="page"]', self.namespaces)
for part in parts:
metadata_parts = part.findall("mets:div", self.namespaces)
for metadata in metadata_parts:
fptr = metadata.find("mets:fptr", self.namespaces)
for fp in fptr:
partsCoord[list(metadata.values())[0]] = [
list(fp.values())[1],
list(fp.values())[2],
]
return partsCoord
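    # Shape of the METS fragment walked above (inferred from the XPath queries;
    # the element nesting follows the METS standard, but attribute values and
    # the exact attribute order picked out by list(fp.values())[1] and [2] are
    # illustrative assumptions):
    #
    #   <mets:structMap TYPE="PHYSICAL">
    #     <mets:div>
    #       <mets:div TYPE="page">
    #         <mets:div ID="pa0001001">
    #           <mets:fptr>
    #             <mets:area ... SHAPE="RECT" COORDS="1220,5,2893,221"/>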
def parse_structMap_Logical(self):
"""
Parse the structMap Logical information
        :return: list of article IDs that make up each document/issue. Only article IDs are returned, no other element types.
:rtype: list
[art0001, art0002, art0003]
"""
articlesId = []
elem = self.metadata_tree.find(
'mets:structMap[@TYPE="LOGICAL"]', self.namespaces
)
for logic in elem:
articles = logic.findall('mets:div[@TYPE="ARTICLE"]', self.namespaces)
for article in articles:
articlesId.append(list(article.values())[0])
return articlesId
def parse_structLink(self):
"""
        Parse the structLink information
        :return: 1) A dictionary with article IDs as keys; each article ID maps to the list of part/textblock IDs that make up that article.
2) A dictionary with parts/textblocks ids as keys, and page and area as values.
:rtype: two dictionaries
{'#art0001':['#pa0001001', '#pa0001002', '#pa0001003', '#pa0001004', '#pa0001005', '#pa0001006', '#pa0001007'], '#art0002': ['#pa0001008', '#pa0001009' ..]}
{'pa0001001': 'page1 area1', 'pa0001003': 'page1 area3'}
"""
articlesParts = dict()
partsPage = dict()
elem = self.metadata_tree.findall("mets:structLink", self.namespaces)
for smlinkgrp in elem:
for linklocator in smlinkgrp:
linkl = linklocator.findall("mets:smLocatorLink", self.namespaces)
article_parts = []
for link in linkl:
idstring = list(link.values())[0]
partId = re.sub("[^A-Za-z0-9]+", "", idstring)
article_parts.append(partId)
partsPage[partId] = list(link.values())[1]
articlesParts[article_parts[0]] = article_parts[1:]
return articlesParts, partsPage
def articles_info(self):
"""
        :return: a dictionary with article IDs as keys. Each entry holds a dictionary of parts/textblocks with all the part information (shape, coords and page_area).
:rtype: dictionary
#{'art0001 {'pa0001001': ['RECT', '1220,5,2893,221', 'page1 area1'], 'pa0001003': ['RECT', '2934,14,3709,211', 'page1 area3'], ....}}
"""
articlesInfo = dict()
for a_id in self.articlesId:
articlesInfo[a_id] = dict()
for p_id in self.articlesParts[a_id]:
if p_id in self.partsCoord:
self.partsCoord[p_id].append(self.partsPage[p_id])
articlesInfo[a_id][p_id] = self.partsCoord[p_id]
return articlesInfo
| 33.948081 | 191 | 0.554558 |
f713088ab00e8c25a1a72f8168121f4e7a2c67f4 | 1,476 | py | Python | src/ice/persistence/config_file_backing_store.py | geraldhumphries/Ice | ec466e85407361bbbc87b9e484fe427c1769b4fe | [
"MIT"
] | null | null | null | src/ice/persistence/config_file_backing_store.py | geraldhumphries/Ice | ec466e85407361bbbc87b9e484fe427c1769b4fe | [
"MIT"
] | null | null | null | src/ice/persistence/config_file_backing_store.py | geraldhumphries/Ice | ec466e85407361bbbc87b9e484fe427c1769b4fe | [
"MIT"
] | null | null | null | # encoding: utf-8
"""
config_file_backing_store.py
Created by Scott on 2014-08-12.
Copyright (c) 2014 Scott Rice. All rights reserved.
"""
import ConfigParser
import backing_store
class ConfigFileBackingStore(backing_store.BackingStore):
def __init__(self, path):
super(ConfigFileBackingStore, self).__init__(path)
self.configParser = ConfigParser.RawConfigParser()
self.configParser.read(self.path)
def identifiers(self):
return self.configParser.sections()
def add_identifier(self, ident):
try:
self.configParser.add_section(ident)
except ConfigParser.DuplicateSectionError:
raise ValueError("The identifier `%s` already exists" % str(ident))
def remove_identifier(self, ident):
self.configParser.remove_section(ident)
def keys(self, ident):
try:
return self.configParser.options(ident)
except ConfigParser.NoSectionError:
raise ValueError("No identifier named `%s` exists" % str(ident))
def get(self, ident, key, default=None):
try:
val = self.configParser.get(ident, key.lower())
return val if val != "" else default
except ConfigParser.NoOptionError:
return default
def set(self, ident, key, value):
self.configParser.set(ident, key.lower(), value)
def save(self):
try:
with open(self.path, "w") as configFile:
self.configParser.write(configFile)
except IOError:
raise IOError("Cannot save data to `%s`. Permission Denied")
| 26.836364 | 73 | 0.70935 |
f7134da2b040dac9af18bee385823a8ba7bac14c | 708 | py | Python | doozerlib/constants.py | vfreex/doozer | 8ad0a1234120cdc30890afbbdda1bc40e4a4fc76 | [
"Apache-2.0"
] | 1 | 2020-09-21T06:48:40.000Z | 2020-09-21T06:48:40.000Z | doozerlib/constants.py | vfreex/doozer | 8ad0a1234120cdc30890afbbdda1bc40e4a4fc76 | [
"Apache-2.0"
] | null | null | null | doozerlib/constants.py | vfreex/doozer | 8ad0a1234120cdc30890afbbdda1bc40e4a4fc76 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, print_function, unicode_literals
# Environment variables to disable Git stdin prompts for username, password, etc
GIT_NO_PROMPTS = {
"GIT_SSH_COMMAND": "ssh -oBatchMode=yes",
"GIT_TERMINAL_PROMPT": "0",
}
BREWWEB_URL = "https://brewweb.engineering.redhat.com/brew"
# Environment variables that should be set for doozer interaction with db for storing and retrieving build records.
# DB ENV VARS
DB_HOST = "DOOZER_DB_HOST"
DB_PORT = "DOOZER_DB_PORT"
DB_USER = "DOOZER_DB_USER"
DB_PWD = "DOOZER_DB_PASSWORD"
DB_NAME = "DOOZER_DB_NAME"
# default db parameters
default_db_params = {
DB_NAME: "doozer_build",
DB_HOST: "localhost",
DB_PORT: "3306"
}
| 28.32 | 115 | 0.759887 |
f713854f3b5d2bc3bace7fa4697551d52b3eeb02 | 98 | py | Python | ludwig/__init__.py | phueb/Ludw | de426de1e396e700007869cda27dd5bc9b8f5d2d | [
"MIT"
] | null | null | null | ludwig/__init__.py | phueb/Ludw | de426de1e396e700007869cda27dd5bc9b8f5d2d | [
"MIT"
] | 1 | 2022-03-30T14:07:13.000Z | 2022-03-30T14:07:13.000Z | ludwig/__init__.py | phueb/Ludw | de426de1e396e700007869cda27dd5bc9b8f5d2d | [
"MIT"
] | 2 | 2020-06-15T13:06:53.000Z | 2021-02-12T00:33:29.000Z |
__version__ = '4.0.6'
def print_ludwig(s):
print(f'Ludwig-{__version__}: {s}', flush=True) | 14 | 51 | 0.642857 |
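# Example (illustrative):
#     print_ludwig('saving results')   # prints: Ludwig-4.0.6: saving results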
f713bf7049d636559b31faa12aae89196f72bea9 | 3,608 | py | Python | docs/samples/specification/azure_key_credential/generated/azure/key/credential/sample/_auto_rest_head_test_service.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | 35 | 2018-04-03T12:15:53.000Z | 2022-03-11T14:03:34.000Z | docs/samples/specification/azure_key_credential/generated/azure/key/credential/sample/_auto_rest_head_test_service.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | 652 | 2017-08-28T22:44:41.000Z | 2022-03-31T21:20:31.000Z | docs/samples/specification/azure_key_credential/generated/azure/key/credential/sample/_auto_rest_head_test_service.py | cfculhane/autorest.python | 8cbca95faee88d933a58bbbd17b76834faa8d387 | [
"MIT"
] | 29 | 2017-08-28T20:57:01.000Z | 2022-03-11T14:03:38.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from copy import deepcopy
from typing import TYPE_CHECKING
from azure.core import PipelineClient
from msrest import Deserializer, Serializer
from ._configuration import AutoRestHeadTestServiceConfiguration
from .operations import HttpSuccessOperations
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from typing import Any, Dict, Optional
from azure.core.credentials import AzureKeyCredential
from azure.core.rest import HttpRequest, HttpResponse
class AutoRestHeadTestService(object):
"""Test Infrastructure for AutoRest.
:ivar http_success: HttpSuccessOperations operations
:vartype http_success: azure.key.credential.sample.operations.HttpSuccessOperations
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.AzureKeyCredential
:param base_url: Service URL. Default value is 'http://localhost:3000'.
:type base_url: str
"""
def __init__(
self,
credential, # type: AzureKeyCredential
base_url="http://localhost:3000", # type: str
**kwargs # type: Any
):
# type: (...) -> None
self._config = AutoRestHeadTestServiceConfiguration(credential=credential, **kwargs)
self._client = PipelineClient(base_url=base_url, config=self._config, **kwargs)
client_models = {} # type: Dict[str, Any]
self._serialize = Serializer(client_models)
self._deserialize = Deserializer(client_models)
self._serialize.client_side_validation = False
self.http_success = HttpSuccessOperations(self._client, self._config, self._serialize, self._deserialize)
def _send_request(
self,
request, # type: HttpRequest
**kwargs # type: Any
):
# type: (...) -> HttpResponse
"""Runs the network request through the client's chained policies.
>>> from azure.core.rest import HttpRequest
>>> request = HttpRequest("GET", "https://www.example.org/")
<HttpRequest [GET], url: 'https://www.example.org/'>
>>> response = client._send_request(request)
<HttpResponse: 200 OK>
For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart
:param request: The network request you want to make. Required.
:type request: ~azure.core.rest.HttpRequest
:keyword bool stream: Whether the response payload will be streamed. Defaults to False.
:return: The response of your network call. Does not do error handling on your response.
:rtype: ~azure.core.rest.HttpResponse
"""
request_copy = deepcopy(request)
request_copy.url = self._client.format_url(request_copy.url)
return self._client.send_request(request_copy, **kwargs)
def close(self):
# type: () -> None
self._client.close()
def __enter__(self):
# type: () -> AutoRestHeadTestService
self._client.__enter__()
return self
def __exit__(self, *exc_details):
# type: (Any) -> None
self._client.__exit__(*exc_details)
| 39.217391 | 113 | 0.665188 |
f713c43c0962dea49c922bae1e450935719dbbf8 | 3,500 | py | Python | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/features/v2021_07_01/_configuration.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/features/v2021_07_01/_configuration.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | sdk/resources/azure-mgmt-resource/azure/mgmt/resource/features/v2021_07_01/_configuration.py | vincenttran-msft/azure-sdk-for-python | 348b56f9f03eeb3f7b502eed51daf494ffff874d | [
"MIT"
] | null | null | null | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, TYPE_CHECKING
from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMChallengeAuthenticationPolicy, ARMHttpLoggingPolicy
from ._version import VERSION
if TYPE_CHECKING:
# pylint: disable=unused-import,ungrouped-imports
from azure.core.credentials import TokenCredential
class FeatureClientConfiguration(Configuration): # pylint: disable=too-many-instance-attributes
"""Configuration for FeatureClient.
Note that all parameters used to create this instance are saved as instance
attributes.
:param credential: Credential needed for the client to connect to Azure.
:type credential: ~azure.core.credentials.TokenCredential
:param subscription_id: The Azure subscription ID.
:type subscription_id: str
:keyword api_version: Api Version. Default value is "2021-07-01". Note that overriding this
default value may result in unsupported behavior.
:paramtype api_version: str
"""
def __init__(
self,
credential: "TokenCredential",
subscription_id: str,
**kwargs: Any
) -> None:
super(FeatureClientConfiguration, self).__init__(**kwargs)
api_version = kwargs.pop('api_version', "2021-07-01") # type: str
if credential is None:
raise ValueError("Parameter 'credential' must not be None.")
if subscription_id is None:
raise ValueError("Parameter 'subscription_id' must not be None.")
self.credential = credential
self.subscription_id = subscription_id
self.api_version = api_version
self.credential_scopes = kwargs.pop('credential_scopes', ['https://management.azure.com/.default'])
kwargs.setdefault('sdk_moniker', 'mgmt-resource/{}'.format(VERSION))
self._configure(**kwargs)
def _configure(
self,
**kwargs # type: Any
):
# type: (...) -> None
self.user_agent_policy = kwargs.get('user_agent_policy') or policies.UserAgentPolicy(**kwargs)
self.headers_policy = kwargs.get('headers_policy') or policies.HeadersPolicy(**kwargs)
self.proxy_policy = kwargs.get('proxy_policy') or policies.ProxyPolicy(**kwargs)
self.logging_policy = kwargs.get('logging_policy') or policies.NetworkTraceLoggingPolicy(**kwargs)
self.http_logging_policy = kwargs.get('http_logging_policy') or ARMHttpLoggingPolicy(**kwargs)
self.retry_policy = kwargs.get('retry_policy') or policies.RetryPolicy(**kwargs)
self.custom_hook_policy = kwargs.get('custom_hook_policy') or policies.CustomHookPolicy(**kwargs)
self.redirect_policy = kwargs.get('redirect_policy') or policies.RedirectPolicy(**kwargs)
self.authentication_policy = kwargs.get('authentication_policy')
if self.credential and not self.authentication_policy:
self.authentication_policy = ARMChallengeAuthenticationPolicy(self.credential, *self.credential_scopes, **kwargs)
| 47.297297 | 125 | 0.693714 |
f713feeff5540cb5abd88be8581806ea4a417b09 | 14,614 | py | Python | oscar/lib/python2.7/site-packages/IPython/core/pylabtools.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/IPython/core/pylabtools.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | oscar/lib/python2.7/site-packages/IPython/core/pylabtools.py | sainjusajan/django-oscar | 466e8edc807be689b0a28c9e525c8323cc48b8e1 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
"""Pylab (matplotlib) support utilities."""
from __future__ import print_function
# Copyright (c) IPython Development Team.
# Distributed under the terms of the Modified BSD License.
from io import BytesIO
from IPython.core.display import _pngxy
from IPython.utils.decorators import flag_calls
from IPython.utils import py3compat
# If user specifies a GUI, that dictates the backend, otherwise we read the
# user's mpl default from the mpl rc structure
backends = {'tk': 'TkAgg',
'gtk': 'GTKAgg',
'gtk3': 'GTK3Agg',
'wx': 'WXAgg',
'qt': 'Qt4Agg', # qt3 not supported
'qt4': 'Qt4Agg',
'qt5': 'Qt5Agg',
'osx': 'MacOSX',
'nbagg': 'nbAgg',
'notebook': 'nbAgg',
'agg': 'agg',
'inline': 'module://ipykernel.pylab.backend_inline',
'ipympl': 'module://ipympl.backend_nbagg',
}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
# Our tests expect backend2gui to just return 'qt'
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['GTK3Cairo'] = 'gtk3'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
# And some backends that don't need GUI integration
del backend2gui['nbAgg']
del backend2gui['agg']
del backend2gui['module://ipykernel.pylab.backend_inline']
#-----------------------------------------------------------------------------
# Matplotlib utilities
#-----------------------------------------------------------------------------
def getfigs(*fig_nums):
"""Get a list of matplotlib figures by figure numbers.
If no arguments are given, all available figures are returned. If the
argument list contains references to invalid figures, a warning is printed
but the function continues pasting further figures.
Parameters
----------
figs : tuple
A tuple of ints giving the figure numbers of the figures to return.
"""
from matplotlib._pylab_helpers import Gcf
if not fig_nums:
fig_managers = Gcf.get_all_fig_managers()
return [fm.canvas.figure for fm in fig_managers]
else:
figs = []
for num in fig_nums:
f = Gcf.figs.get(num)
if f is None:
print('Warning: figure %s not available.' % num)
else:
figs.append(f.canvas.figure)
return figs
def figsize(sizex, sizey):
"""Set the default figure size to be [sizex, sizey].
This is just an easy to remember, convenience wrapper that sets::
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
"""
import matplotlib
matplotlib.rcParams['figure.figsize'] = [sizex, sizey]
def print_figure(fig, fmt='png', bbox_inches='tight', **kwargs):
"""Print a figure to an image, and return the resulting file data
Returned data will be bytes unless ``fmt='svg'``,
in which case it will be unicode.
Any keyword args are passed to fig.canvas.print_figure,
such as ``quality`` or ``bbox_inches``.
"""
from matplotlib import rcParams
# When there's an empty figure, we shouldn't return anything, otherwise we
# get big blank areas in the qt console.
if not fig.axes and not fig.lines:
return
dpi = fig.dpi
if fmt == 'retina':
dpi = dpi * 2
fmt = 'png'
# build keyword args
kw = dict(
format=fmt,
facecolor=fig.get_facecolor(),
edgecolor=fig.get_edgecolor(),
dpi=dpi,
bbox_inches=bbox_inches,
)
# **kwargs get higher priority
kw.update(kwargs)
bytes_io = BytesIO()
fig.canvas.print_figure(bytes_io, **kw)
data = bytes_io.getvalue()
if fmt == 'svg':
data = data.decode('utf-8')
return data
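# Illustrative usage (assumes an existing matplotlib figure ``fig``):
#
#     png_bytes = print_figure(fig, fmt='png')     # bytes
#     svg_text = print_figure(fig, fmt='svg')      # unicode
#     retina = print_figure(fig, fmt='retina')     # double-dpi PNG bytes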
def retina_figure(fig, **kwargs):
"""format a figure as a pixel-doubled (retina) PNG"""
pngdata = print_figure(fig, fmt='retina', **kwargs)
# Make sure that retina_figure acts just like print_figure and returns
# None when the figure is empty.
if pngdata is None:
return
w, h = _pngxy(pngdata)
metadata = dict(width=w//2, height=h//2)
return pngdata, metadata
# We need a little factory function here to create the closure where
# safe_execfile can live.
def mpl_runner(safe_execfile):
"""Factory to return a matplotlib-enabled runner for %run.
Parameters
----------
safe_execfile : function
This must be a function with the same interface as the
:meth:`safe_execfile` method of IPython.
Returns
-------
A function suitable for use as the ``runner`` argument of the %run magic
function.
"""
def mpl_execfile(fname,*where,**kw):
"""matplotlib-aware wrapper around safe_execfile.
Its interface is identical to that of the :func:`execfile` builtin.
This is ultimately a call to execfile(), but wrapped in safeties to
properly handle interactive rendering."""
import matplotlib
import matplotlib.pyplot as plt
#print '*** Matplotlib runner ***' # dbg
# turn off rendering until end of script
is_interactive = matplotlib.rcParams['interactive']
matplotlib.interactive(False)
safe_execfile(fname,*where,**kw)
matplotlib.interactive(is_interactive)
# make rendering call now, if the user tried to do it
if plt.draw_if_interactive.called:
plt.draw()
plt.draw_if_interactive.called = False
# re-draw everything that is stale
try:
da = plt.draw_all
except AttributeError:
pass
else:
da()
return mpl_execfile
def _reshow_nbagg_figure(fig):
"""reshow an nbagg figure"""
try:
reshow = fig.canvas.manager.reshow
except AttributeError:
raise NotImplementedError()
else:
reshow()
def select_figure_formats(shell, formats, **kwargs):
"""Select figure formats for the inline backend.
Parameters
    ----------
shell : InteractiveShell
The main IPython instance.
formats : str or set
One or a set of figure formats to enable: 'png', 'retina', 'jpeg', 'svg', 'pdf'.
**kwargs : any
Extra keyword arguments to be passed to fig.canvas.print_figure.
"""
import matplotlib
from matplotlib.figure import Figure
svg_formatter = shell.display_formatter.formatters['image/svg+xml']
png_formatter = shell.display_formatter.formatters['image/png']
jpg_formatter = shell.display_formatter.formatters['image/jpeg']
pdf_formatter = shell.display_formatter.formatters['application/pdf']
if isinstance(formats, py3compat.string_types):
formats = {formats}
# cast in case of list / tuple
formats = set(formats)
[ f.pop(Figure, None) for f in shell.display_formatter.formatters.values() ]
mplbackend = matplotlib.get_backend().lower()
if mplbackend == 'nbagg' or mplbackend == 'module://ipympl.backend_nbagg':
formatter = shell.display_formatter.ipython_display_formatter
formatter.for_type(Figure, _reshow_nbagg_figure)
supported = {'png', 'png2x', 'retina', 'jpg', 'jpeg', 'svg', 'pdf'}
bad = formats.difference(supported)
if bad:
bs = "%s" % ','.join([repr(f) for f in bad])
gs = "%s" % ','.join([repr(f) for f in supported])
raise ValueError("supported formats are: %s not %s" % (gs, bs))
if 'png' in formats:
png_formatter.for_type(Figure, lambda fig: print_figure(fig, 'png', **kwargs))
if 'retina' in formats or 'png2x' in formats:
png_formatter.for_type(Figure, lambda fig: retina_figure(fig, **kwargs))
if 'jpg' in formats or 'jpeg' in formats:
jpg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'jpg', **kwargs))
if 'svg' in formats:
svg_formatter.for_type(Figure, lambda fig: print_figure(fig, 'svg', **kwargs))
if 'pdf' in formats:
pdf_formatter.for_type(Figure, lambda fig: print_figure(fig, 'pdf', **kwargs))
#-----------------------------------------------------------------------------
# Code for initializing matplotlib and importing pylab
#-----------------------------------------------------------------------------
def find_gui_and_backend(gui=None, gui_select=None):
"""Given a gui string return the gui and mpl backend.
Parameters
----------
gui : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
gui_select : str
Can be one of ('tk','gtk','wx','qt','qt4','inline').
This is any gui already selected by the shell.
Returns
-------
A tuple of (gui, backend) where backend is one of ('TkAgg','GTKAgg',
'WXAgg','Qt4Agg','module://ipykernel.pylab.backend_inline').
"""
import matplotlib
if gui and gui != 'auto':
# select backend based on requested gui
backend = backends[gui]
else:
# We need to read the backend from the original data structure, *not*
# from mpl.rcParams, since a prior invocation of %matplotlib may have
# overwritten that.
# WARNING: this assumes matplotlib 1.1 or newer!!
backend = matplotlib.rcParamsOrig['backend']
# In this case, we need to find what the appropriate gui selection call
# should be for IPython, so we can activate inputhook accordingly
gui = backend2gui.get(backend, None)
# If we have already had a gui active, we need it and inline are the
# ones allowed.
if gui_select and gui != gui_select:
gui = gui_select
backend = backends[gui]
return gui, backend
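# Examples (illustrative), given the ``backends`` mapping above:
#     find_gui_and_backend('qt')  ->  ('qt', 'Qt4Agg')
#     find_gui_and_backend('tk')  ->  ('tk', 'TkAgg')
# With gui=None or 'auto', the backend is read from
# matplotlib.rcParamsOrig['backend'] and reverse-mapped through backend2gui.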
def activate_matplotlib(backend):
"""Activate the given backend and set interactive to True."""
import matplotlib
matplotlib.interactive(True)
# Matplotlib had a bug where even switch_backend could not force
# the rcParam to update. This needs to be set *before* the module
# magic of switch_backend().
matplotlib.rcParams['backend'] = backend
import matplotlib.pyplot
matplotlib.pyplot.switch_backend(backend)
# This must be imported last in the matplotlib series, after
# backend/interactivity choices have been made
import matplotlib.pyplot as plt
plt.show._needmain = False
# We need to detect at runtime whether show() is called by the user.
# For this, we wrap it into a decorator which adds a 'called' flag.
plt.draw_if_interactive = flag_calls(plt.draw_if_interactive)
def import_pylab(user_ns, import_all=True):
"""Populate the namespace with pylab-related values.
Imports matplotlib, pylab, numpy, and everything from pylab and numpy.
Also imports a few names from IPython (figsize, display, getfigs)
"""
# Import numpy as np/pyplot as plt are conventions we're trying to
# somewhat standardize on. Making them available to users by default
# will greatly help this.
s = ("import numpy\n"
"import matplotlib\n"
"from matplotlib import pylab, mlab, pyplot\n"
"np = numpy\n"
"plt = pyplot\n"
)
exec(s, user_ns)
if import_all:
s = ("from matplotlib.pylab import *\n"
"from numpy import *\n")
exec(s, user_ns)
# IPython symbols to add
user_ns['figsize'] = figsize
from IPython.core.display import display
# Add display and getfigs to the user's namespace
user_ns['display'] = display
user_ns['getfigs'] = getfigs
def configure_inline_support(shell, backend):
"""Configure an IPython shell object for matplotlib use.
Parameters
----------
shell : InteractiveShell instance
backend : matplotlib backend
"""
# If using our svg payload backend, register the post-execution
# function that will pick up the results for display. This can only be
# done with access to the real shell object.
# Note: if we can't load the inline backend, then there's no point
# continuing (such as in terminal-only shells in environments without
# zeromq available).
try:
from ipykernel.pylab.backend_inline import InlineBackend
except ImportError:
return
import matplotlib
cfg = InlineBackend.instance(parent=shell)
cfg.shell = shell
if cfg not in shell.configurables:
shell.configurables.append(cfg)
if backend == backends['inline']:
from ipykernel.pylab.backend_inline import flush_figures
shell.events.register('post_execute', flush_figures)
        # Save rcParams that will be overwritten
shell._saved_rcParams = dict()
for k in cfg.rc:
shell._saved_rcParams[k] = matplotlib.rcParams[k]
# load inline_rc
matplotlib.rcParams.update(cfg.rc)
new_backend_name = "inline"
else:
from ipykernel.pylab.backend_inline import flush_figures
try:
shell.events.unregister('post_execute', flush_figures)
except ValueError:
pass
if hasattr(shell, '_saved_rcParams'):
matplotlib.rcParams.update(shell._saved_rcParams)
del shell._saved_rcParams
new_backend_name = "other"
# only enable the formats once -> don't change the enabled formats (which the user may
# has changed) when getting another "%matplotlib inline" call.
# See https://github.com/ipython/ipykernel/issues/29
cur_backend = getattr(configure_inline_support, "current_backend", "unset")
if new_backend_name != cur_backend:
# Setup the default figure format
select_figure_formats(shell, cfg.figure_formats, **cfg.print_figure_kwargs)
configure_inline_support.current_backend = new_backend_name
| 35.643902 | 91 | 0.622212 |
f71401cefe5604f63cb273a6cb7d6715836fee70 | 2,146 | py | Python | fab_deploy/contrib/servers.py | samdolan/django-fab-deploy | 642e5cc319f811d4ee647d65388c85988ac887e2 | [
"Unlicense"
] | 1 | 2019-08-04T20:54:43.000Z | 2019-08-04T20:54:43.000Z | fab_deploy/contrib/servers.py | samdolan/django-fab-deploy | 642e5cc319f811d4ee647d65388c85988ac887e2 | [
"Unlicense"
] | null | null | null | fab_deploy/contrib/servers.py | samdolan/django-fab-deploy | 642e5cc319f811d4ee647d65388c85988ac887e2 | [
"Unlicense"
] | null | null | null | from .constants import ALL_ROLES, DB_ROLE, WEB_ROLE
from .database import setup_db
from .django import update_db, update_python_libs
from .nginx import stop_nginx, start_nginx
from .ssh import setup_ssh_key
from .supervisor import stop_supervisor, start_supervisor, update_supervisor
from .utils import get_ip
from .webserver import setup_web
from fabric.colors import green
from fabric.api import *
from .git import get_source
from .nginx import update_nginx
import cuisine
COMMON_PACKAGES = [
'subversion', 'mercurial', 'git-core', 'vim', 'python-dev', 'ufw',
'python-setuptools', 'htop', 'ntp', 'colordiff', 'python-software-properties',
'psmisc',
'libpq-dev', # postgres
]
@task
@roles(DB_ROLE)
@runs_once
def set_database_ip(interface='eth1'):
"""Set the ip of the database."""
env.db_ip = get_ip(interface)
@task
@roles(WEB_ROLE)
@runs_once
def set_web_server_ips(interface='eth1'):
"""Set the ips of the webservers."""
env.webserver_internal_ips = [get_ip(interface),]
@task
def set_port(port):
"""Set the port to use for ssh connections."""
env.port = port
@task
@roles(ALL_ROLES)
def setup_common():
"""Set common packages."""
print(green("Running setup_common.........."))
execute(setup_ssh_key)
cuisine.package_install(COMMON_PACKAGES, True)
sudo('yes | ufw enable')
sudo('ufw logging on')
sudo('ufw allow %(port)s' % env)
sudo('ufw limit ssh')
sudo('ufw default deny')
@task
@roles(WEB_ROLE)
def setup_run_dirs():
for d in (env.log_location, env.socket_location):
with settings(warn_only=True):
sudo('mkdir %s' % d)
sudo('chown -R %s: %s' % (env.deploy_user, d))
@task
def setup():
"""Setup the servers."""
execute(setup_db)
execute(setup_web)
execute(update)
@task
def update():
"""Update the servers w/the latest source code + migrations."""
execute(stop_supervisor)
execute(stop_nginx)
execute(get_source)
execute(update_python_libs)
execute(update_db)
execute(update_supervisor)
execute(update_nginx)
execute(start_supervisor)
execute(start_nginx)
| 23.582418 | 82 | 0.695247 |
f714129360782fdce2e19567a3deb3c965cf7a55 | 5,386 | py | Python | chapter2/intogen-arrays/src/biomart/ent_exp.py | chris-zen/phd-thesis | 1eefdff8e7ca1910304e27ae42551dc64496b101 | [
"Unlicense"
] | 1 | 2015-12-22T00:53:18.000Z | 2015-12-22T00:53:18.000Z | chapter2/intogen-arrays/src/biomart/ent_exp.py | chris-zen/phd-thesis | 1eefdff8e7ca1910304e27ae42551dc64496b101 | [
"Unlicense"
] | null | null | null | chapter2/intogen-arrays/src/biomart/ent_exp.py | chris-zen/phd-thesis | 1eefdff8e7ca1910304e27ae42551dc64496b101 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
"""
Import experiments into the database
* Configuration parameters:
- The ones required by intogen.data.entity.EntityManagerFactory
"""
from wok.task import Task
from wok.element import DataElementList
from intogen.data.entity import types
from intogen.data.entity.server import EntityServer
from intogen.biomart import biomart_db_connect, DEFAULT_INSERT_SIZE, DEFAULT_DB_ENGINE
from intogen.sql import BatchInsert
from pubmed import Pubmed
task = Task()
@task.main()
def main():
task.check_conf(["entities", "repositories", "biomart.db"])
conf = task.conf
insert_size = conf.get("biomart.insert_size", DEFAULT_INSERT_SIZE, dtype=int)
if "biomart.study_source" in conf:
study_source_map = conf["biomart.study_source"]
else:
study_source_map = conf.create_element()
log = task.logger()
exp_port = task.ports("experiment")
es = EntityServer(conf["entities"])
em = es.manager()
conn = biomart_db_connect(conf["biomart.db"], log)
db_engine = conf.get("biomart.db.engine", DEFAULT_DB_ENGINE)
cursor = conn.cursor()
cursor.execute("""
CREATE TABLE ent_experiment (
id int(11) NOT NULL,
exp_name varchar(64) NOT NULL,
study_id varchar(32) NOT NULL,
study_source varchar(32) DEFAULT NULL,
study_source_url varchar(512) DEFAULT NULL,
study_link varchar(512) DEFAULT NULL,
pub_pubmed varchar(32) DEFAULT NULL,
pub_title varchar(300) DEFAULT NULL,
pub_authors varchar(300) DEFAULT NULL,
pub_year varchar(16) DEFAULT NULL,
pub_journal varchar(200) DEFAULT NULL,
platf_id varchar(32) NOT NULL,
platf_title varchar(250) DEFAULT NULL,
platf_technology varchar(96) DEFAULT NULL,
PRIMARY KEY (id),
KEY exp_name (exp_name),
KEY pub_pubmed (pub_pubmed),
KEY pub_title (pub_title),
KEY pub_authors (pub_authors),
KEY pub_year (pub_year),
KEY pub_journal (pub_journal),
KEY platf_title (platf_title),
KEY platf_technology (platf_technology)
) ENGINE={} CHARACTER SET utf8 COLLATE utf8_general_ci""".format(db_engine))
ib = BatchInsert(cursor, "ent_experiment",
["id", "exp_name", "study_id", "study_source", "study_source_url", "study_link",
"pub_title", "pub_authors", "pub_year", "pub_pubmed", "pub_journal",
"platf_id", "platf_title", "platf_technology"], insert_size)
pubmed = Pubmed()
for i, exp in enumerate(exp_port, 1):
study_id = exp[0]
platform_id = exp[1]
study = em.find(study_id, types.SOURCE_STUDY)
if study is None:
log.error("{} not found: {}".format(types.SOURCE_STUDY, study_id))
continue
platf = em.find(platform_id, types.SOURCE_PLATFORM)
if platf is None:
log.error("{} not found: {}".format(types.SOURCE_PLATFORM, platform_id))
continue
log.info("Experiment for study {} and platform {} ...".format(study_id, platform_id))
pub = {}
for k in ["title", "short_authors", "date", "journal"]:
pub[k] = None
if "pubmed" in study:
pmid = study["pubmed"]
if isinstance(pmid, (DataElementList, list)):
pmid = pmid[0]
log.warn("Study {} with many pubmed_id's, only the first {} will be considered".format(study_id, pmid))
log.debug("Retrieving information for pubmed_id '{}' ...".format(pmid))
try:
pub = pubmed.find(pmid)
if len(pub) == 0:
log.error("No publication information found for pubmed_id '{}' in experiment ({}, {})".format(pmid, study_id, platform_id))
else:
pub = pub[0]
except Exception as ex:
log.error("Error retrieving pubmed information for experiment ({}, {}) with pubmed_id '{}'".format(study_id, platform_id, pmid))
log.exception(ex)
else:
pmid = None
log.warn("Study {} has no 'pubmed_id' annotation".format(study_id))
if "title" not in study:
log.error("Study {} doesn't have annotation for 'pubmed_id' nor 'title'".format(study_id))
elif "SO/contact_details[0]/contact_name" not in study \
and "SO/contact_details/contact_name" not in study:
log.error("Study {} doesn't have annotation for 'pubmed_id' nor 'SO.contact_details[0].contact_name'".format(study_id))
else:
try:
pub["title"] = study["title"]
if "SO/contact_details[0]/contact_name" in study:
pub["short_authors"] = study["SO/contact_details[0]/contact_name"]
else:
pub["short_authors"] = study["SO/contact_details/contact_name"]
if "SO/submission/pub_date" in study:
pub["date"] = study["SO/submission/pub_date"]
else:
pub["date"] = ""
except Exception as ex:
log.debug(study)
					log.exception(ex)
for k, v in pub.items():
if v is not None and isinstance(v, basestring):
pub[k] = v.replace("'", r"\'")
exp_name = "{}; {}".format(study_id, platform_id)
study_source = None
study_source_url = None
study_link = None
parts = study_id.split("-")
if len(parts) >= 2 and parts[0] in study_source_map:
ss = study_source_map[parts[0]]
study_source = ss.get("name")
study_source_url = ss.get("home_url")
try:
study_link = ss.get("link", "").format(parts[1])
except:
pass
ib.insert(i, exp_name, study_id, study_source, study_source_url, study_link,
pub["title"], pub["short_authors"], pub["date"], pmid, pub["journal"],
platform_id, platf["SO/platform_title"], "")
log.debug("{} experiments inserted".format(ib.count))
ib.close()
cursor.close()
conn.close()
em.close()
es.close()
task.start()
| 30.602273 | 132 | 0.690123 |
f7142aa0459addf88df1549845b063ae44233e96 | 5,200 | py | Python | azure-devops/azext_devops/vstsCompressed/models/models.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | null | null | null | azure-devops/azext_devops/vstsCompressed/models/models.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | 37 | 2020-04-27T07:45:19.000Z | 2021-04-05T07:27:15.000Z | azure-devops/azext_devops/vstsCompressed/models/models.py | vijayraavi/azure-devops-cli-extension | 88f1420c5815cb09bea15b050f4c553e0f326dad | [
"MIT"
] | null | null | null | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class ApiResourceLocation(Model):
"""ApiResourceLocation.
"""
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'area': {'key': 'area', 'type': 'str'},
'resource_name': {'key': 'resourceName', 'type': 'str'},
'route_template': {'key': 'routeTemplate', 'type': 'str'},
'resource_version': {'key': 'resourceVersion', 'type': 'int'},
'min_version': {'key': 'minVersion', 'type': 'float'},
'max_version': {'key': 'maxVersion', 'type': 'float'},
'released_version': {'key': 'releasedVersion', 'type': 'str'},
}
def __init__(self, id=None, area=None, resource_name=None,
route_template=None, resource_version=None,
min_version=None, max_version=None,
released_version=None):
super(ApiResourceLocation, self).__init__()
self.id = id
self.area = area
self.resource_name = resource_name
self.route_template = route_template
self.resource_version = resource_version
self.min_version = min_version
self.max_version = max_version
self.released_version = released_version
class ImproperException(Model):
"""ImproperException.
:param message:
:type message: str
"""
_attribute_map = {
'message': {'key': 'Message', 'type': 'str'}
}
def __init__(self, message=None):
super(ImproperException, self).__init__()
self.message = message
class SystemException(Model):
"""SystemException.
:param class_name:
:type class_name: str
:param inner_exception:
:type inner_exception: :class:`SystemException <vsts.models.SystemException>`
:param message:
:type message: str
"""
_attribute_map = {
'class_name': {'key': 'ClassName', 'type': 'str'},
'message': {'key': 'Message', 'type': 'str'},
'inner_exception': {'key': 'InnerException', 'type': 'SystemException'}
}
def __init__(self, class_name=None, message=None, inner_exception=None):
super(SystemException, self).__init__()
self.class_name = class_name
self.message = message
self.inner_exception = inner_exception
class VssJsonCollectionWrapperBase(Model):
"""VssJsonCollectionWrapperBase.
:param count:
:type count: int
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'}
}
def __init__(self, count=None):
super(VssJsonCollectionWrapperBase, self).__init__()
self.count = count
class WrappedException(Model):
"""WrappedException.
:param exception_id:
:type exception_id: str
:param inner_exception:
:type inner_exception: :class:`WrappedException <vsts.models.WrappedException>`
:param message:
:type message: str
:param type_name:
:type type_name: str
:param type_key:
:type type_key: str
:param error_code:
:type error_code: int
:param event_id:
:type event_id: int
:param custom_properties:
:type custom_properties: dict
"""
_attribute_map = {
'exception_id': {'key': '$id', 'type': 'str'},
'inner_exception': {'key': 'innerException', 'type': 'WrappedException'},
'message': {'key': 'message', 'type': 'str'},
'type_name': {'key': 'typeName', 'type': 'str'},
'type_key': {'key': 'typeKey', 'type': 'str'},
'error_code': {'key': 'errorCode', 'type': 'int'},
'event_id': {'key': 'eventId', 'type': 'int'},
'custom_properties': {'key': 'customProperties', 'type': '{object}'}
}
def __init__(self, exception_id=None, inner_exception=None, message=None,
type_name=None, type_key=None, error_code=None, event_id=None, custom_properties=None):
super(WrappedException, self).__init__()
self.exception_id = exception_id
self.inner_exception = inner_exception
self.message = message
self.type_name = type_name
self.type_key = type_key
self.error_code = error_code
self.event_id = event_id
self.custom_properties = custom_properties
class VssJsonCollectionWrapper(VssJsonCollectionWrapperBase):
"""VssJsonCollectionWrapper.
:param count:
:type count: int
:param value:
:type value: object
"""
_attribute_map = {
'count': {'key': 'count', 'type': 'int'},
'value': {'key': 'value', 'type': 'object'}
}
def __init__(self, count=None, value=None):
super(VssJsonCollectionWrapper, self).__init__(count=count)
self.value = value
| 32.911392 | 104 | 0.59 |
f71434c24f3b7959298b19af49f4893c651e600c | 2,465 | py | Python | credoscript/adaptors/variationadaptor.py | tlb-lab/credoscript | 32bdf08d84703dc2062dae4df1a95587d36c3cf7 | [
"MIT"
] | null | null | null | credoscript/adaptors/variationadaptor.py | tlb-lab/credoscript | 32bdf08d84703dc2062dae4df1a95587d36c3cf7 | [
"MIT"
] | null | null | null | credoscript/adaptors/variationadaptor.py | tlb-lab/credoscript | 32bdf08d84703dc2062dae4df1a95587d36c3cf7 | [
"MIT"
] | null | null | null | from sqlalchemy.sql.expression import and_
from credoscript.mixins.base import paginate
class VariationAdaptor(object):
"""
"""
def __init__(self, dynamic=False, paginate=False, per_page=100):
self.query = Variation.query
self.dynamic = dynamic
self.paginate = paginate
self.per_page = per_page
def fetch_by_variation_id(self, variation_id):
"""
"""
return self.query.get(variation_id)
def fetch_by_variation_name(self, variation_name):
"""
"""
return self.query.filter_by(variation_name=variation_name).first()
@paginate
def fetch_all_by_res_map_id(self, res_map_id, *expr, **kwargs):
"""
"""
query = self.query.join('Variation2PDB')
query = query.filter(Variation2PDB.res_map_id==res_map_id)
return query
@paginate
def fetch_all_by_chain_id(self, chain_id, *expr, **kwargs):
"""
"""
query = self.query.join('Variation2PDB')
query = query.join(Peptide, Peptide.res_map_id==Variation2PDB.res_map_id)
query = query.filter(and_(Peptide.chain_id==chain_id, *expr))
return query
@paginate
def fetch_all_ext_by_chain_id(self, chain_id, *expr, **kwargs):
"""
"""
query = self.query.join('Variation2UniProt','Variation2PDB','Peptide')
query = query.filter(and_(Peptide.chain_id==chain_id, *expr))
query = query.add_entity(Variation2UniProt)
query = query.add_entity(Peptide)
return query
@paginate
def fetch_all_by_phenotype_id(self, phenotype_id, *expr, **kwargs):
"""
"""
query = self.query.join('Annotations')
query = query.filter(and_(Annotation.phenotype_id==phenotype_id, *expr))
query = query.distinct()
return query
@paginate
def fetch_all_in_contact_with_ligand_id(self, ligand_id, *expr, **kwargs):
"""
Returns all variations that can be mapped onto binding sites defined by
the ligand having the input ligand identifier.
"""
query = self.query.join('Variation2BindingSites')
query = query.filter(and_(Variation2BindingSite.ligand_id==ligand_id,
*expr))
return query.distinct()
from ..models.variation import Variation, Annotation, Variation2UniProt, Variation2PDB, Variation2BindingSite
from ..models.peptide import Peptide
| 31.602564 | 109 | 0.643813 |
f7147793a2e6c2dd68fdd7d5efb9db0e5d179701 | 14,417 | py | Python | mi/dataset/dataset_parser.py | rmanoni/mi-dataset | c1012a0cd8f2ea075e008cdd1ab291ed54f44d43 | [
"BSD-2-Clause"
] | null | null | null | mi/dataset/dataset_parser.py | rmanoni/mi-dataset | c1012a0cd8f2ea075e008cdd1ab291ed54f44d43 | [
"BSD-2-Clause"
] | null | null | null | mi/dataset/dataset_parser.py | rmanoni/mi-dataset | c1012a0cd8f2ea075e008cdd1ab291ed54f44d43 | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
"""
@package mi.dataset.parser A collection of parsers that strip data blocks
out of files and feed them into the system.
@file mi/dataset/parser.py
@author Steve Foley
@brief Base classes for data set agent parsers
"""
__author__ = 'Steve Foley'
__license__ = 'Apache 2.0'
import time
import ntplib
from mi.core.log import get_logger
log = get_logger()
from mi.core.instrument.chunker import StringChunker
from mi.core.instrument.data_particle import DataParticleKey
from mi.core.exceptions import RecoverableSampleException, SampleEncodingException
from mi.core.exceptions import NotImplementedException, UnexpectedDataException
from mi.core.common import BaseEnum
class DataSetDriverConfigKeys(BaseEnum):
PARTICLE_MODULE = "particle_module"
PARTICLE_CLASS = "particle_class"
PARTICLE_CLASSES_DICT = "particle_classes_dict"
DIRECTORY = "directory"
STORAGE_DIRECTORY = "storage_directory"
PATTERN = "pattern"
FREQUENCY = "frequency"
FILE_MOD_WAIT_TIME = "file_mod_wait_time"
HARVESTER = "harvester"
PARSER = "parser"
MODULE = "module"
CLASS = "class"
URI = "uri"
CLASS_ARGS = "class_args"
class Parser(object):
""" abstract class to show API needed for plugin poller objects """
def __init__(self, config, stream_handle, state, sieve_fn,
state_callback, publish_callback, exception_callback=None):
"""
@param config The configuration parameters to feed into the parser
@param stream_handle An already open file-like filehandle
@param state The location in the file to start parsing from.
This reflects what has already been published.
@param sieve_fn A sieve function that might be added to a handler
        to appropriately filter out the data
@param state_callback The callback method from the agent driver
(ultimately the agent) to call back when a state needs to be
updated
@param publish_callback The callback from the agent driver (and
ultimately from the agent) where we send our sample particle to
be published into ION
@param exception_callback The callback from the agent driver (and
ultimately from the agent) where we send our error events to
be published into ION
"""
self._chunker = StringChunker(sieve_fn)
self._stream_handle = stream_handle
self._state = state
self._state_callback = state_callback
self._publish_callback = publish_callback
self._exception_callback = exception_callback
self._config = config
# Build class from module and class name, then set the state
if config.get(DataSetDriverConfigKeys.PARTICLE_CLASS) is not None:
if config.get(DataSetDriverConfigKeys.PARTICLE_MODULE):
self._particle_module = __import__(config.get(DataSetDriverConfigKeys.PARTICLE_MODULE),
fromlist=[config.get(DataSetDriverConfigKeys.PARTICLE_CLASS)])
# if there is more than one particle class for this parser, this cannot be used, need to hard code the
# particle class in the driver
try:
self._particle_class = getattr(self._particle_module,
config.get(DataSetDriverConfigKeys.PARTICLE_CLASS))
except TypeError:
self._particle_class = None
else:
log.warn("Particle class is specified in config, but no particle module is specified in config")
def get_records(self, max_count):
"""
Returns a list of particles (following the instrument driver structure).
"""
raise NotImplementedException("get_records() not overridden!")
def _publish_sample(self, samples):
"""
Publish the samples with the given publishing callback.
@param samples The list of data particle to publish up to the system
"""
if isinstance(samples, list):
self._publish_callback(samples)
else:
self._publish_callback([samples])
def _extract_sample(self, particle_class, regex, raw_data, timestamp):
"""
Extract sample from a response line if present and publish
parsed particle
@param particle_class The class to instantiate for this specific
data particle. Parameterizing this allows for simple, standard
behavior from this routine
@param regex The regular expression that matches a data sample if regex
is none then process every line
@param raw_data data to input into this particle.
@retval return a raw particle if a sample was found, else None
"""
particle = None
try:
if regex is None or regex.match(raw_data):
particle = particle_class(raw_data, internal_timestamp=timestamp,
preferred_timestamp=DataParticleKey.INTERNAL_TIMESTAMP)
                # need to actually parse the particle fields to find out if there are errors
particle.generate()
encoding_errors = particle.get_encoding_errors()
if encoding_errors:
log.warn("Failed to encode: %s", encoding_errors)
raise SampleEncodingException("Failed to encode: %s" % encoding_errors)
except (RecoverableSampleException, SampleEncodingException) as e:
log.error("Sample exception detected: %s raw data: %s", e, raw_data)
if self._exception_callback:
self._exception_callback(e)
else:
raise e
return particle
class BufferLoadingParser(Parser):
"""
This class loads data values into a record buffer, then offers up
    records from this buffer as they are requested. Parsers don't have
    to operate this way, but it can keep memory in check and smooth out
    stream inputs if they don't all come at once.
"""
def __init__(self, config, stream_handle, state, sieve_fn,
state_callback, publish_callback, exception_callback=None):
"""
@param config The configuration parameters to feed into the parser
@param stream_handle An already open file-like filehandle
@param state The location in the file to start parsing from.
This reflects what has already been published.
@param sieve_fn A sieve function that might be added to a handler
to appropriate filter out the data
@param state_callback The callback method from the agent driver
(ultimately the agent) to call back when a state needs to be
updated
@param publish_callback The callback from the agent driver (and
ultimately from the agent) where we send our sample particle to
be published into ION
@param exception_callback The callback from the agent driver (and
ultimately from the agent) where we send our error events to
be published into ION
"""
self._record_buffer = []
self._timestamp = 0.0
self.file_complete = False
super(BufferLoadingParser, self).__init__(config, stream_handle, state,
sieve_fn, state_callback,
publish_callback,
exception_callback)
def get_records(self, num_records):
"""
Go ahead and execute the data parsing loop up to a point. This involves
        getting data from the file, stuffing it into the chunker, then parsing
it and publishing.
@param num_records The number of records to gather
@retval Return the list of particles requested, [] if none available
"""
if num_records <= 0:
return []
try:
while len(self._record_buffer) < num_records:
self._load_particle_buffer()
except EOFError:
self._process_end_of_file()
return self._yank_particles(num_records)
def _process_end_of_file(self):
"""
Confirm that the chunker does not have any extra bytes left at the end of the file
"""
(nd_timestamp, non_data) = self._chunker.get_next_non_data()
(timestamp, chunk) = self._chunker.get_next_data()
if non_data and len(non_data) > 0:
log.warn("Have extra unexplained non-data bytes at the end of the file:%s", non_data)
raise UnexpectedDataException("Have extra unexplained non-data bytes at the end of the file:%s" % non_data)
elif chunk and len(chunk) > 0:
log.warn("Have extra unexplained data chunk bytes at the end of the file:%s", chunk)
raise UnexpectedDataException("Have extra unexplained data chunk bytes at the end of the file:%s" % chunk)
def _yank_particles(self, num_records):
"""
Get particles out of the buffer and publish them. Update the state
of what has been published, too.
@param num_records The number of particles to remove from the buffer
@retval A list with num_records elements from the buffer. If num_records
cannot be collected (perhaps due to an EOF), the list will have the
elements it was able to collect.
"""
if len(self._record_buffer) < num_records:
num_to_fetch = len(self._record_buffer)
else:
num_to_fetch = num_records
log.trace("Yanking %s records of %s requested",
num_to_fetch,
num_records)
return_list = []
records_to_return = self._record_buffer[:num_to_fetch]
self._record_buffer = self._record_buffer[num_to_fetch:]
if len(records_to_return) > 0:
self._state = records_to_return[-1][1] # state side of tuple of last entry
# strip the state info off of them now that we have what we need
for item in records_to_return:
log.debug("Record to return: %s", item)
return_list.append(item[0])
self._publish_sample(return_list)
log.trace("Sending parser state [%s] to driver", self._state)
file_ingested = False
if self.file_complete and len(self._record_buffer) == 0:
# file has been read completely and all records pulled out of the record buffer
file_ingested = True
self._state_callback(self._state, file_ingested) # push new state to driver
return return_list
def _load_particle_buffer(self):
"""
Load up the internal record buffer with some particles based on a
gather from the get_block method.
"""
while self.get_block():
result = self.parse_chunks()
self._record_buffer.extend(result)
def get_block(self, size=1024):
"""
Get a block of characters for processing
@param size The size of the block to try to read
        @retval The length of data retrieved
@throws EOFError when the end of the file is reached
"""
# read in some more data
data = self._stream_handle.read(size)
if data:
self._chunker.add_chunk(data, ntplib.system_to_ntp_time(time.time()))
return len(data)
else: # EOF
self.file_complete = True
raise EOFError
def parse_chunks(self):
"""
Parse out any pending data chunks in the chunker. If
it is a valid data piece, build a particle, update the position and
timestamp. Go until the chunker has no more valid data.
@retval a list of tuples with sample particles encountered in this
        parsing, plus the state (i.e. "(sample, state)"). An empty list if
        nothing was parsed.
"""
raise NotImplementedException("Must write parse_chunks()!")
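# Illustrative sketch of the parse_chunks() contract described above (not part
# of the original module): each parsed particle is paired with the state to
# store once it has been published. The state layout used here is hypothetical.
class ExampleChunkParser(BufferLoadingParser):
    def parse_chunks(self):
        result = []
        (timestamp, chunk) = self._chunker.get_next_data()
        while chunk is not None:
            # regex=None means every chunk is handed to the particle class
            particle = self._extract_sample(self._particle_class, None,
                                            chunk, timestamp)
            if particle is not None:
                result.append((particle,
                               {'position': self._stream_handle.tell()}))
            (timestamp, chunk) = self._chunker.get_next_data()
        return result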
class SimpleParser(Parser):
def __init__(self, config, stream_handle, exception_callback):
"""
Initialize the simple parser, which does not use state or the chunker
and sieve functions.
@param config: The parser configuration dictionary
@param stream_handle: The stream handle of the file to parse
@param exception_callback: The callback to use when an exception occurs
"""
# the record buffer which will store all parsed particles
self._record_buffer = []
# a flag indicating if the file has been parsed or not
self._file_parsed = False
super(SimpleParser, self).__init__(config,
stream_handle,
None, # state not used
None, # sieve_fn not used
None, # state_callback not used
None, # publish_callback not used
exception_callback)
def parse_file(self):
"""
        This method must be overridden. This method should open and read the file and parse the data within, and at
the end of this method self._record_buffer will be filled with all the particles in the file.
"""
raise NotImplementedException("parse_file() not overridden!")
def get_records(self, number_requested=1):
"""
Initiate parsing the file if it has not been done already, and pop particles off the record buffer to
return as many as requested if they are available in the buffer.
@param number_requested the number of records requested to be returned
@return an array of particles, with a length of the number requested or less
"""
particles_to_return = []
if number_requested > 0:
if self._file_parsed is False:
self.parse_file()
self._file_parsed = True
while len(particles_to_return) < number_requested and len(self._record_buffer) > 0:
particles_to_return.append(self._record_buffer.pop(0))
return particles_to_return
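# Minimal SimpleParser subclass sketch (not part of the original module):
# parse_file() must fill self._record_buffer, as the base class docstring
# requires. The one-record-per-line format and the wall-clock timestamping are
# hypothetical; the particle class is taken from the parser config.
class ExampleLineParser(SimpleParser):
    def parse_file(self):
        for line in self._stream_handle:
            line = line.strip()
            if not line:
                continue
            particle = self._extract_sample(
                self._particle_class, None, line,
                ntplib.system_to_ntp_time(time.time()))
            if particle is not None:
                self._record_buffer.append(particle)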
| 43.820669 | 119 | 0.634667 |
f714d5a168b4a464f6eba8acff23787cdd077327 | 4,848 | py | Python | datasets/W300.py | HapKoM/pyhowfar | b12c248f696dc9bc2b50455b63a2b6ca7a440ba7 | ["BSD-3-Clause"] | null | null | null | datasets/W300.py | HapKoM/pyhowfar | b12c248f696dc9bc2b50455b63a2b6ca7a440ba7 | ["BSD-3-Clause"] | null | null | null | datasets/W300.py | HapKoM/pyhowfar | b12c248f696dc9bc2b50455b63a2b6ca7a440ba7 | ["BSD-3-Clause"] | null | null | null |
from __future__ import print_function
import os
import numpy as np
import random
import math
from skimage import io
import torch
import torch.utils.data as data
import torchfile
# from utils.utils import *
from utils.imutils import *
from utils.transforms import *
class W300(data.Dataset):
def __init__(self, args, split):
self.nParts = 68
self.pointType = args.pointType
# self.anno = anno
self.img_folder = args.data
self.split = split
self.is_train = True if self.split == 'train' else False
self.anno = self._getDataFaces(self.is_train)
self.total = len(self.anno)
self.scale_factor = args.scale_factor
self.rot_factor = args.rot_factor
        self.mean, self.std = self._compute_mean()
def _getDataFaces(self, is_train):
base_dir = self.img_folder
dirs = os.listdir(base_dir)
lines = []
vallines = []
if is_train:
fid = open(os.path.join(base_dir, 'train.txt'), 'r')
for line in fid.readlines():
lines.append(line.strip())
fid.close()
else:
fid = open(os.path.join(base_dir, 'test.txt'), 'r')
for line in fid.readlines():
vallines.append(line.strip())
fid.close()
if is_train:
print('=> loaded train set, {} images were found'.format(len(lines)))
return lines
else:
print('=> loaded validation set, {} images were found'.format(len(vallines)))
return vallines
def __len__(self):
return self.total
def __getitem__(self, index):
inp, out, pts, c, s = self.generateSampleFace(index)
self.pts, self.c, self.s = pts, c, s
if self.is_train:
return inp, out
else:
meta = {'index': index, 'center': c, 'scale': s, 'pts': pts,}
return inp, out, meta
def generateSampleFace(self, idx):
sf = self.scale_factor
rf = self.rot_factor
main_pts = torchfile.load(
os.path.join(self.img_folder, 'landmarks', self.anno[idx].split('_')[0],
self.anno[idx][:-4] + '.t7'))
pts = main_pts[0] if self.pointType == '2D' else main_pts[1]
c = torch.Tensor((450 / 2, 450 / 2 + 50))
s = 1.8
img = load_image(
os.path.join(self.img_folder, self.anno[idx].split('_')[0], self.anno[idx][:-8] +
'.jpg'))
r = 0
if self.is_train:
s = s * torch.randn(1).mul_(sf).add_(1).clamp(1 - sf, 1 + sf)[0]
r = torch.randn(1).mul_(rf).clamp(-2 * rf, 2 * rf)[0] if random.random() <= 0.6 else 0
if random.random() <= 0.5:
img = torch.from_numpy(fliplr(img.numpy())).float()
pts = shufflelr(pts, width=img.size(2), dataset='w300lp')
c[0] = img.size(2) - c[0]
img[0, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
img[1, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
img[2, :, :].mul_(random.uniform(0.7, 1.3)).clamp_(0, 1)
inp = crop(img, c, s, [256, 256], rot=r)
inp = color_normalize(inp, self.mean, self.std)
tpts = pts.clone()
out = torch.zeros(self.nParts, 64, 64)
for i in range(self.nParts):
if tpts[i, 0] > 0:
tpts[i, 0:2] = to_torch(transform(tpts[i, 0:2] + 1, c, s, [64, 64], rot=r))
out[i] = draw_labelmap(out[i], tpts[i] - 1, sigma=1)
return inp, out, pts, c, s
    def _compute_mean(self):
meanstd_file = './data/300W_LP/mean.pth.tar'
if os.path.isfile(meanstd_file):
ms = torch.load(meanstd_file)
else:
print("\tcomputing mean and std for the first time, it may takes a while, drink a cup of coffe...")
mean = torch.zeros(3)
std = torch.zeros(3)
if self.is_train:
for i in range(self.total):
a = self.anno[i]
img_path = os.path.join(self.img_folder, self.anno[i].split('_')[0],
self.anno[i][:-8] + '.jpg')
img = load_image(img_path)
mean += img.view(img.size(0), -1).mean(1)
std += img.view(img.size(0), -1).std(1)
mean /= self.total
std /= self.total
ms = {
'mean': mean,
'std': std,
}
torch.save(ms, meanstd_file)
if self.is_train:
print('\tMean: %.4f, %.4f, %.4f' % (ms['mean'][0], ms['mean'][1], ms['mean'][2]))
print('\tStd: %.4f, %.4f, %.4f' % (ms['std'][0], ms['std'][1], ms['std'][2]))
return ms['mean'], ms['std']
| 35.130435 | 111 | 0.514233 |
f714e1119b8f7e34f516de3746a674c249a5f780 | 969 | py | Python | experiments/2014-01-28-extrap-SE.py | jaesikchoi/gpss-research | 2a64958a018f1668f7b8eedf33c4076a63af7868 | [
"MIT"
] | 151 | 2015-01-09T19:25:05.000Z | 2022-01-05T02:05:52.000Z | experiments/2014-01-28-extrap-SE.py | jaesikchoi/gpss-research | 2a64958a018f1668f7b8eedf33c4076a63af7868 | [
"MIT"
] | 1 | 2016-08-04T13:12:51.000Z | 2016-08-04T13:12:51.000Z | experiments/2014-01-28-extrap-SE.py | jaesikchoi/gpss-research | 2a64958a018f1668f7b8eedf33c4076a63af7868 | [
"MIT"
] | 59 | 2015-02-04T19:13:58.000Z | 2021-07-28T23:36:09.000Z | Experiment(description='SE extrapolation experiment',
data_dir='../data/tsdlr_9010/',
max_depth=1,
random_order=False,
k=1,
debug=False,
local_computation=False,
n_rand=9,
sd=2,
jitter_sd=0.1,
max_jobs=1000,
verbose=False,
make_predictions=True,
skip_complete=True,
results_dir='../results/2014-01-28-extrap-SE/',
iters=250,
base_kernels='SE',
random_seed=1,
subset=True,
subset_size=250,
full_iters=10,
bundle_size=5,
additive_form=True,
mean='ff.MeanZero()', # Starting mean
kernel='ff.NoiseKernel()', # Starting kernel
lik='ff.LikGauss(sf=-np.Inf)', # Starting likelihood
score='bic',
search_operators=[('A', ('+', 'A', 'B'), {'A': 'kernel', 'B': 'base'})])
| 33.413793 | 83 | 0.495356 |
f71503d83257c56d9a08f215294410fe3f0189c1 | 4,679 | py | Python | venv/Lib/site-packages/pyrogram/parser/markdown.py | D1ne2021/jjhhhjj | a090da30983b3ef276dfe4cef2ded4526f36002a | ["MIT"] | 2 | 2021-12-13T07:09:55.000Z | 2022-01-12T12:15:20.000Z | venv/Lib/site-packages/pyrogram/parser/markdown.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | ["MIT"] | null | null | null | venv/Lib/site-packages/pyrogram/parser/markdown.py | hoangkiet1906/Botcie_ver1 | c133b915edde06dac690a7dc6ca160f6792fc4c8 | ["MIT"] | null | null | null |
# Pyrogram - Telegram MTProto API Client Library for Python
# Copyright (C) 2017-2021 Dan <https://github.com/delivrance>
#
# This file is part of Pyrogram.
#
# Pyrogram is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Pyrogram is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Pyrogram. If not, see <http://www.gnu.org/licenses/>.
import html
import re
from typing import Optional
import pyrogram
from . import utils
from .html import HTML
BOLD_DELIM = "**"
ITALIC_DELIM = "__"
UNDERLINE_DELIM = "--"
STRIKE_DELIM = "~~"
CODE_DELIM = "`"
PRE_DELIM = "```"
MARKDOWN_RE = re.compile(r"({d})|\[(.+?)\]\((.+?)\)".format(
d="|".join(
["".join(i) for i in [
[rf"\{j}" for j in i]
for i in [
PRE_DELIM,
CODE_DELIM,
STRIKE_DELIM,
UNDERLINE_DELIM,
ITALIC_DELIM,
BOLD_DELIM
]
]]
)))
OPENING_TAG = "<{}>"
CLOSING_TAG = "</{}>"
URL_MARKUP = '<a href="{}">{}</a>'
FIXED_WIDTH_DELIMS = [CODE_DELIM, PRE_DELIM]
class Markdown:
def __init__(self, client: Optional["pyrogram.Client"]):
self.html = HTML(client)
async def parse(self, text: str, strict: bool = False):
if strict:
text = html.escape(text)
delims = set()
is_fixed_width = False
for i, match in enumerate(re.finditer(MARKDOWN_RE, text)):
start, _ = match.span()
delim, text_url, url = match.groups()
full = match.group(0)
if delim in FIXED_WIDTH_DELIMS:
is_fixed_width = not is_fixed_width
if is_fixed_width and delim not in FIXED_WIDTH_DELIMS:
continue
if text_url:
text = utils.replace_once(text, full, URL_MARKUP.format(url, text_url), start)
continue
if delim == BOLD_DELIM:
tag = "b"
elif delim == ITALIC_DELIM:
tag = "i"
elif delim == UNDERLINE_DELIM:
tag = "u"
elif delim == STRIKE_DELIM:
tag = "s"
elif delim == CODE_DELIM:
tag = "code"
elif delim == PRE_DELIM:
tag = "pre"
else:
continue
if delim not in delims:
delims.add(delim)
tag = OPENING_TAG.format(tag)
else:
delims.remove(delim)
tag = CLOSING_TAG.format(tag)
text = utils.replace_once(text, delim, tag, start)
return await self.html.parse(text)
@staticmethod
def unparse(text: str, entities: list):
text = utils.add_surrogates(text)
entities_offsets = []
for entity in entities:
entity_type = entity.type
start = entity.offset
end = start + entity.length
if entity_type == "bold":
start_tag = end_tag = BOLD_DELIM
elif entity_type == "italic":
start_tag = end_tag = ITALIC_DELIM
elif entity_type == "underline":
start_tag = end_tag = UNDERLINE_DELIM
elif entity_type == "strikethrough":
start_tag = end_tag = STRIKE_DELIM
elif entity_type == "code":
start_tag = end_tag = CODE_DELIM
elif entity_type in ("pre", "blockquote"):
start_tag = end_tag = PRE_DELIM
elif entity_type == "text_link":
url = entity.url
start_tag = "["
end_tag = f"]({url})"
elif entity_type == "text_mention":
user = entity.user
start_tag = "["
end_tag = f"](tg://user?id={user.id})"
else:
continue
entities_offsets.append((start_tag, start,))
entities_offsets.append((end_tag, end,))
# sorting by offset (desc)
entities_offsets.sort(key=lambda x: -x[1])
for entity, offset in entities_offsets:
text = text[:offset] + entity + text[offset:]
return utils.remove_surrogates(text)
| 30.986755 | 94 | 0.551186 |
f7151ade5974d9fd42771cf7639194622d837538 | 5,015 | py | Python | src/phoebe_shelves_clt/manage.py | anthony-agbay/owl_shelves_clt | da09e1579f8d134a585b50de2f8da38c889c23b9 | [
"MIT"
] | 1 | 2021-05-04T03:06:13.000Z | 2021-05-04T03:06:13.000Z | src/phoebe_shelves_clt/manage.py | anthony-agbay/phoebe-shelves-clt | da09e1579f8d134a585b50de2f8da38c889c23b9 | [
"MIT"
] | null | null | null | src/phoebe_shelves_clt/manage.py | anthony-agbay/phoebe-shelves-clt | da09e1579f8d134a585b50de2f8da38c889c23b9 | [
"MIT"
] | null | null | null | """ Launching point and supporting functions for database management tools.
This module serves as the launching point for the database management tools.
Backend-specific implementations are located within their specific modules and
common functions and methods are included in this file.
"""
import numpy as np
from typing import Tuple, Dict
from phoebe_shelves_clt.csv_backend import manage_csv
from phoebe_shelves_clt.sql_backend import manage_sql
from phoebe_shelves_clt.utils import data_model
from phoebe_shelves_clt.utils import sql_api
def prompt_for_rating(prompt: str):
"""Prompt user for an integer rating (max 5).
Args:
prompt: Prompt that user sees on the command line
    Returns:
        rating (int | float): Integer rating or np.nan if empty string is passed
"""
rating = input(prompt)
while rating not in {"", "1", "2", "3", "4", "5"}:
rating = input("Choose an integer between 1 and 5 or leave blank: ")
# Format rating
rating = int(rating) if rating != "" else np.nan
return(rating)
def prompt_for_title(backend: str, *args) -> Tuple[str, Dict[str, int]]:
""" Prompt for a title from the books table and return the title and ID
Prompts the user to provide a title and returns the title and ID of any
books that match the title *exactly*.
Args:
backend: Backend to use
Positional Args:
(CSVDataModel): Current instance of the CSV backend database
(psycopg2.connection): Connection to the PostgreSQL database
Returns:
A tuple containing the following:
title: Title of the book provided by the user
title_results: Dictionary mapping possible titles to their ID's
"""
title = input("Please enter the book title: ")
if backend == "csv":
title_results = args[0].get_books_dict(title)
else:
query = f"SELECT title, id FROM books WHERE title ILIKE '{title}'"
title_results = dict(sql_api.execute_query(args[0], query,
"to_list")) # type: ignore
return(title, title_results)
def prompt_for_author(backend: str, *args) -> Tuple[str, Dict]:
""" Prompt for an author from the authors table and return the name and ID
Prompts the user to provide an author's last name and returns the names
and ID's of possible matches based on the last name.
Args:
backend: Backend to use
Positional Args:
(CSVDataModel): Current instance of the CSV backend database
(psycopg2.connection): Connection to the PostgreSQL database
Returns:
A tuple containing the following:
last_name: Last name provided by the user
author_results: Dictionary mapping possible authors to their ID's
"""
last_name = input("Please enter the author's last name: ")
if backend == "csv":
author_results = args[0].get_authors_dict(last_name)
else:
author_query = (sql_api.read_query('author_filter').format(last_name))
author_results = dict(sql_api.execute_query(args[0], author_query,
"to_list")) # type: ignore
return(last_name, author_results)
def prompt_for_genre(backend: str, *args) -> Tuple[str, Dict]:
""" Prompt for an genre from the genres table and return the name and ID
Prompts user to enter a genre name. It then retrieves the potential
matching options for further processing.
Args:
backend: Backend to use
Positional Args:
(CSVDataModel): Current instance of the CSV backend database
(psycopg2.connection): Connection to the PostgreSQL database
Returns:
A tuple containing the following:
genre_name: Genre name provided by the user
genreresults: Dictionary mapping possible genres to their ID's
"""
genre_name = input("Please enter the genre name: ")
if backend == "csv":
genre_results = args[0].get_genres_dict(genre_name)
else:
genre_query = f"SELECT name, id from genres where name ilike '{genre_name}'"
genre_results = dict(sql_api.execute_query(args[0], genre_query,
"to_list")) # type: ignore
return(genre_name, genre_results)
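# Sketch of resolving the (name, results) tuples returned by the prompt
# helpers above to a single ID (not part of the original module); the
# disambiguation flow shown is illustrative, not how the backends handle it.
def _resolve_single_match(name: str, results: Dict):
    if len(results) == 1:
        return next(iter(results.values()))
    print(f"'{name}' matched {len(results)} entries; please refine the search.")
    return None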
def manage_module(backend: str, db_select: str, mode: str, **kwargs):
""" Launch management workflows for either backend
    Launch the management workflows for either the CSV or SQL backends
Args:
backend: Backend to use
db_select: Database to manage
mode: Management mode
Keyword Args:
data_directory (string): Path to CSV backend data directory
sql_configs (Dict): SQL server configurations
"""
if backend == "csv":
model = data_model.CSVDataModel(kwargs["data_directory"])
manage_csv.main(db_select, mode, model)
else:
        manage_sql.main(db_select, mode, kwargs["sql_configs"])
| 36.079137 | 84 | 0.664008 |
f7152949331934bec0c7d5505f3422644b6d6f4e | 114,228 | gyp | Python | grpc.gyp | stungkit/grpc | 063c36cb46733c13d2ce8116b6af482c9bd832d6 | ["Apache-2.0"] | null | null | null | grpc.gyp | stungkit/grpc | 063c36cb46733c13d2ce8116b6af482c9bd832d6 | ["Apache-2.0"] | null | null | null | grpc.gyp | stungkit/grpc | 063c36cb46733c13d2ce8116b6af482c9bd832d6 | ["Apache-2.0"] | null | null | null |
# GRPC GYP build file
# This file has been automatically generated from a template file.
# Please look at the templates directory instead.
# This file can be regenerated from the template by running
# tools/buildgen/generate_projects.sh
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
{
'variables': {
# The openssl and zlib dependencies must be passed in as variables
# defined in an included gypi file, usually common.gypi.
'openssl_gyp_target%': 'Please Define openssl_gyp_target variable',
'zlib_gyp_target%': 'Please Define zlib_gyp_target variable',
'grpc_gcov%': 'false',
'grpc_alpine%': 'false',
},
'target_defaults': {
'configurations': {
'Debug': {
'cflags': [
'-O0',
],
'defines': [
'_DEBUG',
'DEBUG',
],
},
'Release': {
'cflags': [
'-O2',
'-Wframe-larger-than=16384',
],
'defines': [
'NDEBUG',
],
},
},
'cflags': [
'-g',
'-Wall',
'-Wextra',
'-DOSATOMIC_USE_INLINED=1',
'-Ithird_party/abseil-cpp',
'-Ithird_party/re2',
'-Ithird_party/upb',
'-Isrc/core/ext/upb-generated',
'-Isrc/core/ext/upbdefs-generated',
'-Ithird_party/xxhash',
],
'ldflags': [
'-g',
],
'cflags_c': [
'-Werror',
'-std=c99',
],
'cflags_cc': [
'-Werror',
'-std=c++11',
],
'include_dirs': [
'.',
'../..',
'include',
],
'defines': [
'GRPC_ARES=0',
],
'dependencies': [
'<(openssl_gyp_target)',
'<(zlib_gyp_target)',
],
'conditions': [
['grpc_gcov=="true"', {
'cflags': [
'-O0',
'-fprofile-arcs',
'-ftest-coverage',
'-Wno-return-type',
],
'defines': [
'_DEBUG',
'DEBUG',
'GPR_GCOV',
],
'ldflags': [
'-fprofile-arcs',
'-ftest-coverage',
'-rdynamic',
'-lstdc++',
],
}],
['grpc_alpine=="true"', {
'defines': [
'GPR_MUSL_LIBC_COMPAT'
]
}],
['OS == "win"', {
'defines': [
'_WIN32_WINNT=0x0600',
'WIN32_LEAN_AND_MEAN',
'_HAS_EXCEPTIONS=0',
'UNICODE',
'_UNICODE',
'NOMINMAX',
],
'msvs_settings': {
'VCCLCompilerTool': {
'RuntimeLibrary': 1, # static debug
}
},
"libraries": [
"ws2_32"
]
}],
['OS == "mac"', {
'xcode_settings': {
'OTHER_CFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-DOSATOMIC_USE_INLINED=1',
'-Ithird_party/abseil-cpp',
'-Ithird_party/re2',
'-Ithird_party/upb',
'-Isrc/core/ext/upb-generated',
'-Isrc/core/ext/upbdefs-generated',
'-Ithird_party/xxhash',
],
'OTHER_CPLUSPLUSFLAGS': [
'-g',
'-Wall',
'-Wextra',
'-DOSATOMIC_USE_INLINED=1',
'-Ithird_party/abseil-cpp',
'-Ithird_party/re2',
'-Ithird_party/upb',
'-Isrc/core/ext/upb-generated',
'-Isrc/core/ext/upbdefs-generated',
'-Ithird_party/xxhash',
'-stdlib=libc++',
'-std=c++11',
'-Wno-error=deprecated-declarations',
],
},
}]
]
},
'targets': [
{
'target_name': 'address_sorting',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/address_sorting/address_sorting.c',
'third_party/address_sorting/address_sorting_posix.c',
'third_party/address_sorting/address_sorting_windows.c',
],
},
{
'target_name': 'end2end_tests',
'type': 'static_library',
'dependencies': [
'grpc_test_util',
],
'sources': [
'src/core/lib/security/authorization/grpc_authorization_policy_provider.cc',
'src/core/lib/security/authorization/rbac_translator.cc',
'test/core/compression/args_utils.cc',
'test/core/end2end/cq_verifier.cc',
'test/core/end2end/data/client_certs.cc',
'test/core/end2end/data/server1_cert.cc',
'test/core/end2end/data/server1_key.cc',
'test/core/end2end/data/test_root_cert.cc',
'test/core/end2end/end2end_test_utils.cc',
'test/core/end2end/end2end_tests.cc',
'test/core/end2end/fixtures/http_proxy_fixture.cc',
'test/core/end2end/fixtures/local_util.cc',
'test/core/end2end/fixtures/proxy.cc',
'test/core/end2end/tests/authority_not_supported.cc',
'test/core/end2end/tests/bad_hostname.cc',
'test/core/end2end/tests/bad_ping.cc',
'test/core/end2end/tests/binary_metadata.cc',
'test/core/end2end/tests/call_creds.cc',
'test/core/end2end/tests/call_host_override.cc',
'test/core/end2end/tests/cancel_after_accept.cc',
'test/core/end2end/tests/cancel_after_client_done.cc',
'test/core/end2end/tests/cancel_after_invoke.cc',
'test/core/end2end/tests/cancel_after_round_trip.cc',
'test/core/end2end/tests/cancel_before_invoke.cc',
'test/core/end2end/tests/cancel_in_a_vacuum.cc',
'test/core/end2end/tests/cancel_with_status.cc',
'test/core/end2end/tests/channelz.cc',
'test/core/end2end/tests/client_streaming.cc',
'test/core/end2end/tests/compressed_payload.cc',
'test/core/end2end/tests/connectivity.cc',
'test/core/end2end/tests/default_host.cc',
'test/core/end2end/tests/disappearing_server.cc',
'test/core/end2end/tests/empty_batch.cc',
'test/core/end2end/tests/filter_causes_close.cc',
'test/core/end2end/tests/filter_context.cc',
'test/core/end2end/tests/filter_init_fails.cc',
'test/core/end2end/tests/filter_latency.cc',
'test/core/end2end/tests/filter_status_code.cc',
'test/core/end2end/tests/filtered_metadata.cc',
'test/core/end2end/tests/graceful_server_shutdown.cc',
'test/core/end2end/tests/grpc_authz.cc',
'test/core/end2end/tests/high_initial_seqno.cc',
'test/core/end2end/tests/hpack_size.cc',
'test/core/end2end/tests/invoke_large_request.cc',
'test/core/end2end/tests/keepalive_timeout.cc',
'test/core/end2end/tests/large_metadata.cc',
'test/core/end2end/tests/max_concurrent_streams.cc',
'test/core/end2end/tests/max_connection_age.cc',
'test/core/end2end/tests/max_connection_idle.cc',
'test/core/end2end/tests/max_message_length.cc',
'test/core/end2end/tests/negative_deadline.cc',
'test/core/end2end/tests/no_error_on_hotpath.cc',
'test/core/end2end/tests/no_logging.cc',
'test/core/end2end/tests/no_op.cc',
'test/core/end2end/tests/payload.cc',
'test/core/end2end/tests/ping.cc',
'test/core/end2end/tests/ping_pong_streaming.cc',
'test/core/end2end/tests/proxy_auth.cc',
'test/core/end2end/tests/registered_call.cc',
'test/core/end2end/tests/request_with_flags.cc',
'test/core/end2end/tests/request_with_payload.cc',
'test/core/end2end/tests/resource_quota_server.cc',
'test/core/end2end/tests/retry.cc',
'test/core/end2end/tests/retry_cancel_after_first_attempt_starts.cc',
'test/core/end2end/tests/retry_cancel_during_delay.cc',
'test/core/end2end/tests/retry_cancel_with_multiple_send_batches.cc',
'test/core/end2end/tests/retry_cancellation.cc',
'test/core/end2end/tests/retry_disabled.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_delay.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_initial_batch.cc',
'test/core/end2end/tests/retry_exceeds_buffer_size_in_subsequent_batch.cc',
'test/core/end2end/tests/retry_lb_drop.cc',
'test/core/end2end/tests/retry_lb_fail.cc',
'test/core/end2end/tests/retry_non_retriable_status.cc',
'test/core/end2end/tests/retry_non_retriable_status_before_recv_trailing_metadata_started.cc',
'test/core/end2end/tests/retry_per_attempt_recv_timeout.cc',
'test/core/end2end/tests/retry_per_attempt_recv_timeout_on_last_attempt.cc',
'test/core/end2end/tests/retry_recv_initial_metadata.cc',
'test/core/end2end/tests/retry_recv_message.cc',
'test/core/end2end/tests/retry_recv_message_replay.cc',
'test/core/end2end/tests/retry_recv_trailing_metadata_error.cc',
'test/core/end2end/tests/retry_send_initial_metadata_refs.cc',
'test/core/end2end/tests/retry_send_op_fails.cc',
'test/core/end2end/tests/retry_send_recv_batch.cc',
'test/core/end2end/tests/retry_server_pushback_delay.cc',
'test/core/end2end/tests/retry_server_pushback_disabled.cc',
'test/core/end2end/tests/retry_streaming.cc',
'test/core/end2end/tests/retry_streaming_after_commit.cc',
'test/core/end2end/tests/retry_streaming_succeeds_before_replay_finished.cc',
'test/core/end2end/tests/retry_throttled.cc',
'test/core/end2end/tests/retry_too_many_attempts.cc',
'test/core/end2end/tests/retry_transparent_goaway.cc',
'test/core/end2end/tests/retry_transparent_max_concurrent_streams.cc',
'test/core/end2end/tests/retry_transparent_not_sent_on_wire.cc',
'test/core/end2end/tests/retry_unref_before_finish.cc',
'test/core/end2end/tests/retry_unref_before_recv.cc',
'test/core/end2end/tests/server_finishes_request.cc',
'test/core/end2end/tests/server_streaming.cc',
'test/core/end2end/tests/shutdown_finishes_calls.cc',
'test/core/end2end/tests/shutdown_finishes_tags.cc',
'test/core/end2end/tests/simple_delayed_request.cc',
'test/core/end2end/tests/simple_metadata.cc',
'test/core/end2end/tests/simple_request.cc',
'test/core/end2end/tests/streaming_error_response.cc',
'test/core/end2end/tests/trailing_metadata.cc',
'test/core/end2end/tests/write_buffering.cc',
'test/core/end2end/tests/write_buffering_at_end.cc',
'test/core/util/test_lb_policies.cc',
],
},
{
'target_name': 'gpr',
'type': 'static_library',
'dependencies': [
'absl/base:base',
'absl/base:core_headers',
'absl/memory:memory',
'absl/random:random',
'absl/status:status',
'absl/strings:cord',
'absl/strings:str_format',
'absl/strings:strings',
'absl/synchronization:synchronization',
'absl/time:time',
'absl/types:optional',
'upb',
],
'sources': [
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/lib/gpr/alloc.cc',
'src/core/lib/gpr/atm.cc',
'src/core/lib/gpr/cpu_iphone.cc',
'src/core/lib/gpr/cpu_linux.cc',
'src/core/lib/gpr/cpu_posix.cc',
'src/core/lib/gpr/cpu_windows.cc',
'src/core/lib/gpr/env_linux.cc',
'src/core/lib/gpr/env_posix.cc',
'src/core/lib/gpr/env_windows.cc',
'src/core/lib/gpr/log.cc',
'src/core/lib/gpr/log_android.cc',
'src/core/lib/gpr/log_linux.cc',
'src/core/lib/gpr/log_posix.cc',
'src/core/lib/gpr/log_windows.cc',
'src/core/lib/gpr/murmur_hash.cc',
'src/core/lib/gpr/string.cc',
'src/core/lib/gpr/string_posix.cc',
'src/core/lib/gpr/string_util_windows.cc',
'src/core/lib/gpr/string_windows.cc',
'src/core/lib/gpr/sync.cc',
'src/core/lib/gpr/sync_abseil.cc',
'src/core/lib/gpr/sync_posix.cc',
'src/core/lib/gpr/sync_windows.cc',
'src/core/lib/gpr/time.cc',
'src/core/lib/gpr/time_posix.cc',
'src/core/lib/gpr/time_precise.cc',
'src/core/lib/gpr/time_windows.cc',
'src/core/lib/gpr/tmpfile_msys.cc',
'src/core/lib/gpr/tmpfile_posix.cc',
'src/core/lib/gpr/tmpfile_windows.cc',
'src/core/lib/gpr/wrap_memcpy.cc',
'src/core/lib/gprpp/examine_stack.cc',
'src/core/lib/gprpp/fork.cc',
'src/core/lib/gprpp/global_config_env.cc',
'src/core/lib/gprpp/host_port.cc',
'src/core/lib/gprpp/mpscq.cc',
'src/core/lib/gprpp/stat_posix.cc',
'src/core/lib/gprpp/stat_windows.cc',
'src/core/lib/gprpp/status_helper.cc',
'src/core/lib/gprpp/thd_posix.cc',
'src/core/lib/gprpp/thd_windows.cc',
'src/core/lib/gprpp/time_util.cc',
'src/core/lib/profiling/basic_timers.cc',
'src/core/lib/profiling/stap_timers.cc',
],
},
{
'target_name': 'grpc',
'type': 'static_library',
'dependencies': [
'absl/container:flat_hash_map',
'absl/container:inlined_vector',
'absl/functional:bind_front',
'absl/hash:hash',
'absl/meta:type_traits',
'absl/status:statusor',
'absl/types:span',
'absl/types:variant',
'absl/utility:utility',
'gpr',
'address_sorting',
],
'sources': [
'src/core/ext/filters/census/grpc_context.cc',
'src/core/ext/filters/channel_idle/channel_idle_filter.cc',
'src/core/ext/filters/channel_idle/idle_filter_state.cc',
'src/core/ext/filters/client_channel/backend_metric.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/config_selector.cc',
'src/core/ext/filters/client_channel/dynamic_filters.cc',
'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
'src/core/ext/filters/client_channel/health/health_check_client.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc',
'src/core/ext/filters/client_channel/lb_policy/rls/rls.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/cds.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_impl.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_manager.cc',
'src/core/ext/filters/client_channel/lb_policy/xds/xds_cluster_resolver.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/resolver/google_c2p/google_c2p_resolver.cc',
'src/core/ext/filters/client_channel/resolver/polling_resolver.cc',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/client_channel/resolver/xds/xds_resolver.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/retry_filter.cc',
'src/core/ext/filters/client_channel/retry_service_config.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
'src/core/ext/filters/client_channel/subchannel_stream_client.cc',
'src/core/ext/filters/deadline/deadline_filter.cc',
'src/core/ext/filters/fault_injection/fault_injection_filter.cc',
'src/core/ext/filters/fault_injection/service_config_parser.cc',
'src/core/ext/filters/http/client/http_client_filter.cc',
'src/core/ext/filters/http/client_authority_filter.cc',
'src/core/ext/filters/http/http_filters_plugin.cc',
'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
'src/core/ext/filters/http/message_compress/message_decompress_filter.cc',
'src/core/ext/filters/http/server/http_server_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/filters/rbac/rbac_filter.cc',
'src/core/ext/filters/rbac/rbac_service_config_parser.cc',
'src/core/ext/filters/server_config_selector/server_config_selector.cc',
'src/core/ext/filters/server_config_selector/server_config_selector_filter.cc',
'src/core/ext/transport/chttp2/alpn/alpn.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
'src/core/ext/transport/chttp2/transport/context_list.cc',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
'src/core/ext/transport/chttp2/transport/frame_data.cc',
'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
'src/core/ext/transport/chttp2/transport/frame_ping.cc',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
'src/core/ext/transport/chttp2/transport/frame_settings.cc',
'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser_table.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/parsing.cc',
'src/core/ext/transport/chttp2/transport/stream_lists.cc',
'src/core/ext/transport/chttp2/transport/stream_map.cc',
'src/core/ext/transport/chttp2/transport/varint.cc',
'src/core/ext/transport/chttp2/transport/writing.cc',
'src/core/ext/transport/inproc/inproc_plugin.cc',
'src/core/ext/transport/inproc/inproc_transport.cc',
'src/core/ext/upb-generated/envoy/admin/v3/certs.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/clusters.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/config_dump.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/init_dump.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/listeners.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/memory.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/metrics.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/mutex_stats.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/server_info.upb.c',
'src/core/ext/upb-generated/envoy/admin/v3/tap.upb.c',
'src/core/ext/upb-generated/envoy/annotations/deprecation.upb.c',
'src/core/ext/upb-generated/envoy/annotations/resource.upb.c',
'src/core/ext/upb-generated/envoy/config/accesslog/v3/accesslog.upb.c',
'src/core/ext/upb-generated/envoy/config/bootstrap/v3/bootstrap.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/circuit_breaker.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/filter.upb.c',
'src/core/ext/upb-generated/envoy/config/cluster/v3/outlier_detection.upb.c',
'src/core/ext/upb-generated/envoy/config/common/matcher/v3/matcher.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/address.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/backoff.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/base.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/config_source.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/event_service_config.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/extension.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/grpc_method_list.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/grpc_service.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/health_check.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/http_uri.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/proxy_protocol.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/resolver.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/socket_option.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/substitution_format_string.upb.c',
'src/core/ext/upb-generated/envoy/config/core/v3/udp_socket_config.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/endpoint_components.upb.c',
'src/core/ext/upb-generated/envoy/config/endpoint/v3/load_report.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/api_listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/listener_components.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/quic_config.upb.c',
'src/core/ext/upb-generated/envoy/config/listener/v3/udp_listener_config.upb.c',
'src/core/ext/upb-generated/envoy/config/metrics/v3/metrics_service.upb.c',
'src/core/ext/upb-generated/envoy/config/metrics/v3/stats.upb.c',
'src/core/ext/upb-generated/envoy/config/overload/v3/overload.upb.c',
'src/core/ext/upb-generated/envoy/config/rbac/v3/rbac.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/route_components.upb.c',
'src/core/ext/upb-generated/envoy/config/route/v3/scoped_route.upb.c',
'src/core/ext/upb-generated/envoy/config/tap/v3/common.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/datadog.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/dynamic_ot.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/http_tracer.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/lightstep.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/opencensus.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/service.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/skywalking.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/trace.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/xray.upb.c',
'src/core/ext/upb-generated/envoy/config/trace/v3/zipkin.upb.c',
'src/core/ext/upb-generated/envoy/extensions/clusters/aggregate/v3/cluster.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/common/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/fault/v3/fault.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/rbac/v3/rbac.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/http/router/v3/router.upb.c',
'src/core/ext/upb-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/cert.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/common.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/secret.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls.upb.c',
'src/core/ext/upb-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/ads.upb.c',
'src/core/ext/upb-generated/envoy/service/discovery/v3/discovery.upb.c',
'src/core/ext/upb-generated/envoy/service/load_stats/v3/lrs.upb.c',
'src/core/ext/upb-generated/envoy/service/status/v3/csds.upb.c',
'src/core/ext/upb-generated/envoy/type/http/v3/cookie.upb.c',
'src/core/ext/upb-generated/envoy/type/http/v3/path_transformation.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/http_inputs.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/node.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/number.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/path.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/regex.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/string.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/struct.upb.c',
'src/core/ext/upb-generated/envoy/type/matcher/v3/value.upb.c',
'src/core/ext/upb-generated/envoy/type/metadata/v3/metadata.upb.c',
'src/core/ext/upb-generated/envoy/type/tracing/v3/custom_tag.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/hash_policy.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/http.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/http_status.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/percent.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/range.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/ratelimit_unit.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/semantic_version.upb.c',
'src/core/ext/upb-generated/envoy/type/v3/token_bucket.upb.c',
'src/core/ext/upb-generated/google/api/annotations.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/checked.upb.c',
'src/core/ext/upb-generated/google/api/expr/v1alpha1/syntax.upb.c',
'src/core/ext/upb-generated/google/api/http.upb.c',
'src/core/ext/upb-generated/google/api/httpbody.upb.c',
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/ext/upb-generated/opencensus/proto/trace/v1/trace_config.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/altscontext.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/handshaker.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/gcp/transport_security_common.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls_config.upb.c',
'src/core/ext/upb-generated/udpa/annotations/migrate.upb.c',
'src/core/ext/upb-generated/udpa/annotations/security.upb.c',
'src/core/ext/upb-generated/udpa/annotations/sensitive.upb.c',
'src/core/ext/upb-generated/udpa/annotations/status.upb.c',
'src/core/ext/upb-generated/udpa/annotations/versioning.upb.c',
'src/core/ext/upb-generated/validate/validate.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/migrate.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/security.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/sensitive.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/status.upb.c',
'src/core/ext/upb-generated/xds/annotations/v3/versioning.upb.c',
'src/core/ext/upb-generated/xds/core/v3/authority.upb.c',
'src/core/ext/upb-generated/xds/core/v3/collection_entry.upb.c',
'src/core/ext/upb-generated/xds/core/v3/context_params.upb.c',
'src/core/ext/upb-generated/xds/core/v3/extension.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_locator.upb.c',
'src/core/ext/upb-generated/xds/core/v3/resource_name.upb.c',
'src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c',
'src/core/ext/upb-generated/xds/service/orca/v3/orca.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/matcher.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/regex.upb.c',
'src/core/ext/upb-generated/xds/type/matcher/v3/string.upb.c',
'src/core/ext/upb-generated/xds/type/v3/typed_struct.upb.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/certs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/clusters.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/config_dump.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/init_dump.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/listeners.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/memory.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/metrics.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/mutex_stats.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/server_info.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/admin/v3/tap.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/annotations/deprecation.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/annotations/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/accesslog/v3/accesslog.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/bootstrap/v3/bootstrap.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/circuit_breaker.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/filter.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/cluster/v3/outlier_detection.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/common/matcher/v3/matcher.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/address.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/backoff.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/base.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/config_source.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/event_service_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/extension.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_method_list.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/grpc_service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/health_check.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/http_uri.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/proxy_protocol.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/resolver.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/socket_option.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/substitution_format_string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/core/v3/udp_socket_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/endpoint_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/endpoint/v3/load_report.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/api_listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/listener_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/quic_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/listener/v3/udp_listener_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/metrics/v3/metrics_service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/metrics/v3/stats.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/overload/v3/overload.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/rbac/v3/rbac.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/route_components.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/route/v3/scoped_route.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/tap/v3/common.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/datadog.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/dynamic_ot.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/http_tracer.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/lightstep.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/opencensus.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/service.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/skywalking.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/trace.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/xray.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/config/trace/v3/zipkin.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/clusters/aggregate/v3/cluster.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/common/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/fault/v3/fault.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/rbac/v3/rbac.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/http/router/v3/router.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/filters/network/http_connection_manager/v3/http_connection_manager.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/cert.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/common.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/secret.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/extensions/transport_sockets/tls/v3/tls_spiffe_validator_config.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/ads.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/discovery/v3/discovery.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/load_stats/v3/lrs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/service/status/v3/csds.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/http/v3/cookie.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/http/v3/path_transformation.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/http_inputs.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/node.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/number.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/path.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/regex.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/string.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/struct.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/matcher/v3/value.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/metadata/v3/metadata.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/tracing/v3/custom_tag.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/hash_policy.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/http.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/http_status.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/percent.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/range.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/ratelimit_unit.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/semantic_version.upbdefs.c',
'src/core/ext/upbdefs-generated/envoy/type/v3/token_bucket.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/annotations.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/checked.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/expr/v1alpha1/syntax.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/http.upbdefs.c',
'src/core/ext/upbdefs-generated/google/api/httpbody.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/any.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/duration.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/empty.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/struct.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/timestamp.upbdefs.c',
'src/core/ext/upbdefs-generated/google/protobuf/wrappers.upbdefs.c',
'src/core/ext/upbdefs-generated/google/rpc/status.upbdefs.c',
'src/core/ext/upbdefs-generated/opencensus/proto/trace/v1/trace_config.upbdefs.c',
'src/core/ext/upbdefs-generated/src/proto/grpc/lookup/v1/rls_config.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/migrate.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/security.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/sensitive.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/status.upbdefs.c',
'src/core/ext/upbdefs-generated/udpa/annotations/versioning.upbdefs.c',
'src/core/ext/upbdefs-generated/validate/validate.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/migrate.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/security.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/sensitive.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/status.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/annotations/v3/versioning.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/authority.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/collection_entry.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/context_params.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/extension.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_locator.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/core/v3/resource_name.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/matcher.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/regex.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/matcher/v3/string.upbdefs.c',
'src/core/ext/upbdefs-generated/xds/type/v3/typed_struct.upbdefs.c',
'src/core/ext/xds/certificate_provider_registry.cc',
'src/core/ext/xds/certificate_provider_store.cc',
'src/core/ext/xds/file_watcher_certificate_provider_factory.cc',
'src/core/ext/xds/xds_api.cc',
'src/core/ext/xds/xds_bootstrap.cc',
'src/core/ext/xds/xds_certificate_provider.cc',
'src/core/ext/xds/xds_channel_stack_modifier.cc',
'src/core/ext/xds/xds_client.cc',
'src/core/ext/xds/xds_client_stats.cc',
'src/core/ext/xds/xds_cluster.cc',
'src/core/ext/xds/xds_cluster_specifier_plugin.cc',
'src/core/ext/xds/xds_common_types.cc',
'src/core/ext/xds/xds_endpoint.cc',
'src/core/ext/xds/xds_http_fault_filter.cc',
'src/core/ext/xds/xds_http_filters.cc',
'src/core/ext/xds/xds_http_rbac_filter.cc',
'src/core/ext/xds/xds_listener.cc',
'src/core/ext/xds/xds_resource_type.cc',
'src/core/ext/xds/xds_route_config.cc',
'src/core/ext/xds/xds_routing.cc',
'src/core/ext/xds/xds_server_config_fetcher.cc',
'src/core/lib/address_utils/parse_address.cc',
'src/core/lib/address_utils/sockaddr_utils.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_args_preconditioning.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
'src/core/lib/channel/channel_stack_builder_impl.cc',
'src/core/lib/channel/channel_trace.cc',
'src/core/lib/channel/channelz.cc',
'src/core/lib/channel/channelz_registry.cc',
'src/core/lib/channel/connected_channel.cc',
'src/core/lib/channel/promise_based_filter.cc',
'src/core/lib/channel/status_util.cc',
'src/core/lib/compression/compression.cc',
'src/core/lib/compression/compression_internal.cc',
'src/core/lib/compression/message_compress.cc',
'src/core/lib/config/core_configuration.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',
'src/core/lib/event_engine/channel_args_endpoint_config.cc',
'src/core/lib/event_engine/default_event_engine_factory.cc',
'src/core/lib/event_engine/event_engine.cc',
'src/core/lib/event_engine/memory_allocator.cc',
'src/core/lib/event_engine/resolved_address.cc',
'src/core/lib/event_engine/slice.cc',
'src/core/lib/event_engine/slice_buffer.cc',
'src/core/lib/event_engine/sockaddr.cc',
'src/core/lib/gprpp/time.cc',
'src/core/lib/http/format_request.cc',
'src/core/lib/http/httpcli.cc',
'src/core/lib/http/httpcli_security_connector.cc',
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/dualstack_socket_posix.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_event_engine.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_apple.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
'src/core/lib/iomgr/ev_posix.cc',
'src/core/lib/iomgr/ev_windows.cc',
'src/core/lib/iomgr/event_engine/closure.cc',
'src/core/lib/iomgr/event_engine/endpoint.cc',
'src/core/lib/iomgr/event_engine/iomgr.cc',
'src/core/lib/iomgr/event_engine/pollset.cc',
'src/core/lib/iomgr/event_engine/resolved_address_internal.cc',
'src/core/lib/iomgr/event_engine/resolver.cc',
'src/core/lib/iomgr/event_engine/tcp.cc',
'src/core/lib/iomgr/event_engine/timer.cc',
'src/core/lib/iomgr/exec_ctx.cc',
'src/core/lib/iomgr/executor.cc',
'src/core/lib/iomgr/executor/mpmcqueue.cc',
'src/core/lib/iomgr/executor/threadpool.cc',
'src/core/lib/iomgr/fork_posix.cc',
'src/core/lib/iomgr/fork_windows.cc',
'src/core/lib/iomgr/gethostname_fallback.cc',
'src/core/lib/iomgr/gethostname_host_name_max.cc',
'src/core/lib/iomgr/gethostname_sysconf.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
'src/core/lib/iomgr/internal_errqueue.cc',
'src/core/lib/iomgr/iocp_windows.cc',
'src/core/lib/iomgr/iomgr.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/load_file.cc',
'src/core/lib/iomgr/lockfree_event.cc',
'src/core/lib/iomgr/polling_entity.cc',
'src/core/lib/iomgr/pollset.cc',
'src/core/lib/iomgr/pollset_set.cc',
'src/core/lib/iomgr/pollset_set_windows.cc',
'src/core/lib/iomgr/pollset_windows.cc',
'src/core/lib/iomgr/resolve_address.cc',
'src/core/lib/iomgr/resolve_address_posix.cc',
'src/core/lib/iomgr/resolve_address_windows.cc',
'src/core/lib/iomgr/sockaddr_utils_posix.cc',
'src/core/lib/iomgr/socket_factory_posix.cc',
'src/core/lib/iomgr/socket_mutator.cc',
'src/core/lib/iomgr/socket_utils_common_posix.cc',
'src/core/lib/iomgr/socket_utils_linux.cc',
'src/core/lib/iomgr/socket_utils_posix.cc',
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
'src/core/lib/iomgr/tcp_posix.cc',
'src/core/lib/iomgr/tcp_server.cc',
'src/core/lib/iomgr/tcp_server_posix.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
'src/core/lib/iomgr/tcp_server_windows.cc',
'src/core/lib/iomgr/tcp_windows.cc',
'src/core/lib/iomgr/time_averaged_stats.cc',
'src/core/lib/iomgr/timer.cc',
'src/core/lib/iomgr/timer_generic.cc',
'src/core/lib/iomgr/timer_heap.cc',
'src/core/lib/iomgr/timer_manager.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
'src/core/lib/iomgr/wakeup_fd_posix.cc',
'src/core/lib/iomgr/work_serializer.cc',
'src/core/lib/json/json_reader.cc',
'src/core/lib/json/json_util.cc',
'src/core/lib/json/json_writer.cc',
'src/core/lib/matchers/matchers.cc',
'src/core/lib/promise/activity.cc',
'src/core/lib/promise/sleep.cc',
'src/core/lib/resolver/resolver.cc',
'src/core/lib/resolver/resolver_registry.cc',
'src/core/lib/resolver/server_address.cc',
'src/core/lib/resource_quota/api.cc',
'src/core/lib/resource_quota/arena.cc',
'src/core/lib/resource_quota/memory_quota.cc',
'src/core/lib/resource_quota/resource_quota.cc',
'src/core/lib/resource_quota/thread_quota.cc',
'src/core/lib/resource_quota/trace.cc',
'src/core/lib/security/authorization/authorization_policy_provider_vtable.cc',
'src/core/lib/security/authorization/evaluate_args.cc',
'src/core/lib/security/authorization/grpc_authorization_engine.cc',
'src/core/lib/security/authorization/grpc_server_authz_filter.cc',
'src/core/lib/security/authorization/matchers.cc',
'src/core/lib/security/authorization/rbac_policy.cc',
'src/core/lib/security/context/security_context.cc',
'src/core/lib/security/credentials/alts/alts_credentials.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_linux.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_no_op.cc',
'src/core/lib/security/credentials/alts/check_gcp_environment_windows.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_client_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_options.cc',
'src/core/lib/security/credentials/alts/grpc_alts_credentials_server_options.cc',
'src/core/lib/security/credentials/call_creds_util.cc',
'src/core/lib/security/credentials/channel_creds_registry_init.cc',
'src/core/lib/security/credentials/composite/composite_credentials.cc',
'src/core/lib/security/credentials/credentials.cc',
'src/core/lib/security/credentials/external/aws_external_account_credentials.cc',
'src/core/lib/security/credentials/external/aws_request_signer.cc',
'src/core/lib/security/credentials/external/external_account_credentials.cc',
'src/core/lib/security/credentials/external/file_external_account_credentials.cc',
'src/core/lib/security/credentials/external/url_external_account_credentials.cc',
'src/core/lib/security/credentials/fake/fake_credentials.cc',
'src/core/lib/security/credentials/google_default/credentials_generic.cc',
'src/core/lib/security/credentials/google_default/google_default_credentials.cc',
'src/core/lib/security/credentials/iam/iam_credentials.cc',
'src/core/lib/security/credentials/insecure/insecure_credentials.cc',
'src/core/lib/security/credentials/jwt/json_token.cc',
'src/core/lib/security/credentials/jwt/jwt_credentials.cc',
'src/core/lib/security/credentials/jwt/jwt_verifier.cc',
'src/core/lib/security/credentials/local/local_credentials.cc',
'src/core/lib/security/credentials/oauth2/oauth2_credentials.cc',
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/ssl/ssl_credentials.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_distributor.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_provider.cc',
'src/core/lib/security/credentials/tls/grpc_tls_certificate_verifier.cc',
'src/core/lib/security/credentials/tls/grpc_tls_credentials_options.cc',
'src/core/lib/security/credentials/tls/tls_credentials.cc',
'src/core/lib/security/credentials/tls/tls_utils.cc',
'src/core/lib/security/credentials/xds/xds_credentials.cc',
'src/core/lib/security/security_connector/alts/alts_security_connector.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/insecure/insecure_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
'src/core/lib/security/security_connector/load_system_roots_linux.cc',
'src/core/lib/security/security_connector/local/local_security_connector.cc',
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/security_connector/ssl/ssl_security_connector.cc',
'src/core/lib/security/security_connector/ssl_utils.cc',
'src/core/lib/security/security_connector/ssl_utils_config.cc',
'src/core/lib/security/security_connector/tls/tls_security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
'src/core/lib/security/transport/server_auth_filter.cc',
'src/core/lib/security/transport/tsi_error.cc',
'src/core/lib/security/util/json_util.cc',
'src/core/lib/service_config/service_config_impl.cc',
'src/core/lib/service_config/service_config_parser.cc',
'src/core/lib/slice/b64.cc',
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_api.cc',
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_buffer_api.cc',
'src/core/lib/slice/slice_refcount.cc',
'src/core/lib/slice/slice_split.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/builtins.cc',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel_init.cc',
'src/core/lib/surface/channel_ping.cc',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/byte_stream.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/handshaker.cc',
'src/core/lib/transport/handshaker_registry.cc',
'src/core/lib/transport/http_connect_handshaker.cc',
'src/core/lib/transport/metadata_batch.cc',
'src/core/lib/transport/parsed_metadata.cc',
'src/core/lib/transport/pid_controller.cc',
'src/core/lib/transport/status_conversion.cc',
'src/core/lib/transport/tcp_connect_handshaker.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/core/plugin_registry/grpc_plugin_registry_extra.cc',
'src/core/tsi/alts/crypt/aes_gcm.cc',
'src/core/tsi/alts/crypt/gsec.cc',
'src/core/tsi/alts/frame_protector/alts_counter.cc',
'src/core/tsi/alts/frame_protector/alts_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_frame_protector.cc',
'src/core/tsi/alts/frame_protector/alts_record_protocol_crypter_common.cc',
'src/core/tsi/alts/frame_protector/alts_seal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/alts_unseal_privacy_integrity_crypter.cc',
'src/core/tsi/alts/frame_protector/frame_handler.cc',
'src/core/tsi/alts/handshaker/alts_handshaker_client.cc',
'src/core/tsi/alts/handshaker/alts_shared_resource.cc',
'src/core/tsi/alts/handshaker/alts_tsi_handshaker.cc',
'src/core/tsi/alts/handshaker/alts_tsi_utils.cc',
'src/core/tsi/alts/handshaker/transport_security_common_api.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_integrity_only_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_privacy_integrity_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_grpc_record_protocol_common.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_iovec_record_protocol.cc',
'src/core/tsi/alts/zero_copy_frame_protector/alts_zero_copy_grpc_protector.cc',
'src/core/tsi/fake_transport_security.cc',
'src/core/tsi/local_transport_security.cc',
'src/core/tsi/ssl/key_logging/ssl_key_logging.cc',
'src/core/tsi/ssl/session_cache/ssl_session_boringssl.cc',
'src/core/tsi/ssl/session_cache/ssl_session_cache.cc',
'src/core/tsi/ssl/session_cache/ssl_session_openssl.cc',
'src/core/tsi/ssl_transport_security.cc',
'src/core/tsi/transport_security.cc',
'src/core/tsi/transport_security_grpc.cc',
],
},
{
'target_name': 'grpc_test_util',
'type': 'static_library',
'dependencies': [
'absl/debugging:failure_signal_handler',
'absl/debugging:stacktrace',
'absl/debugging:symbolize',
'grpc',
],
'sources': [
'test/core/event_engine/test_init.cc',
'test/core/util/build.cc',
'test/core/util/cmdline.cc',
'test/core/util/fuzzer_util.cc',
'test/core/util/grpc_profiler.cc',
'test/core/util/histogram.cc',
'test/core/util/mock_endpoint.cc',
'test/core/util/parse_hexstring.cc',
'test/core/util/passthru_endpoint.cc',
'test/core/util/port.cc',
'test/core/util/port_isolated_runtime_environment.cc',
'test/core/util/port_server_client.cc',
'test/core/util/reconnect_server.cc',
'test/core/util/resolve_localhost_ip46.cc',
'test/core/util/slice_splitter.cc',
'test/core/util/stack_tracer.cc',
'test/core/util/subprocess_posix.cc',
'test/core/util/subprocess_windows.cc',
'test/core/util/test_config.cc',
'test/core/util/test_tcp_server.cc',
'test/core/util/tls_utils.cc',
'test/core/util/tracer_util.cc',
],
},
{
'target_name': 'grpc_test_util_unsecure',
'type': 'static_library',
'dependencies': [
'absl/debugging:failure_signal_handler',
'absl/debugging:stacktrace',
'absl/debugging:symbolize',
'grpc_unsecure',
],
'sources': [
'test/core/event_engine/test_init.cc',
'test/core/util/build.cc',
'test/core/util/cmdline.cc',
'test/core/util/fuzzer_util.cc',
'test/core/util/grpc_profiler.cc',
'test/core/util/histogram.cc',
'test/core/util/mock_endpoint.cc',
'test/core/util/parse_hexstring.cc',
'test/core/util/passthru_endpoint.cc',
'test/core/util/port.cc',
'test/core/util/port_isolated_runtime_environment.cc',
'test/core/util/port_server_client.cc',
'test/core/util/reconnect_server.cc',
'test/core/util/resolve_localhost_ip46.cc',
'test/core/util/slice_splitter.cc',
'test/core/util/stack_tracer.cc',
'test/core/util/subprocess_posix.cc',
'test/core/util/subprocess_windows.cc',
'test/core/util/test_config.cc',
'test/core/util/test_tcp_server.cc',
'test/core/util/tracer_util.cc',
],
},
{
'target_name': 'grpc_unsecure',
'type': 'static_library',
'dependencies': [
'absl/container:flat_hash_map',
'absl/container:inlined_vector',
'absl/functional:bind_front',
'absl/hash:hash',
'absl/meta:type_traits',
'absl/status:statusor',
'absl/types:span',
'absl/types:variant',
'absl/utility:utility',
'gpr',
'address_sorting',
],
'sources': [
'src/core/ext/filters/census/grpc_context.cc',
'src/core/ext/filters/channel_idle/channel_idle_filter.cc',
'src/core/ext/filters/channel_idle/idle_filter_state.cc',
'src/core/ext/filters/client_channel/backend_metric.cc',
'src/core/ext/filters/client_channel/backup_poller.cc',
'src/core/ext/filters/client_channel/channel_connectivity.cc',
'src/core/ext/filters/client_channel/client_channel.cc',
'src/core/ext/filters/client_channel/client_channel_channelz.cc',
'src/core/ext/filters/client_channel/client_channel_factory.cc',
'src/core/ext/filters/client_channel/client_channel_plugin.cc',
'src/core/ext/filters/client_channel/config_selector.cc',
'src/core/ext/filters/client_channel/dynamic_filters.cc',
'src/core/ext/filters/client_channel/global_subchannel_pool.cc',
'src/core/ext/filters/client_channel/health/health_check_client.cc',
'src/core/ext/filters/client_channel/http_proxy.cc',
'src/core/ext/filters/client_channel/lb_policy.cc',
'src/core/ext/filters/client_channel/lb_policy/address_filtering.cc',
'src/core/ext/filters/client_channel/lb_policy/child_policy_handler.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/client_load_reporting_filter.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_balancer_addresses.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/grpclb_client_stats.cc',
'src/core/ext/filters/client_channel/lb_policy/grpclb/load_balancer_api.cc',
'src/core/ext/filters/client_channel/lb_policy/oob_backend_metric.cc',
'src/core/ext/filters/client_channel/lb_policy/pick_first/pick_first.cc',
'src/core/ext/filters/client_channel/lb_policy/priority/priority.cc',
'src/core/ext/filters/client_channel/lb_policy/ring_hash/ring_hash.cc',
'src/core/ext/filters/client_channel/lb_policy/rls/rls.cc',
'src/core/ext/filters/client_channel/lb_policy/round_robin/round_robin.cc',
'src/core/ext/filters/client_channel/lb_policy/weighted_target/weighted_target.cc',
'src/core/ext/filters/client_channel/lb_policy_registry.cc',
'src/core/ext/filters/client_channel/local_subchannel_pool.cc',
'src/core/ext/filters/client_channel/proxy_mapper_registry.cc',
'src/core/ext/filters/client_channel/resolver/binder/binder_resolver.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/dns_resolver_ares.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_ev_driver_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_event_engine.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_posix.cc',
'src/core/ext/filters/client_channel/resolver/dns/c_ares/grpc_ares_wrapper_windows.cc',
'src/core/ext/filters/client_channel/resolver/dns/dns_resolver_selection.cc',
'src/core/ext/filters/client_channel/resolver/dns/native/dns_resolver.cc',
'src/core/ext/filters/client_channel/resolver/fake/fake_resolver.cc',
'src/core/ext/filters/client_channel/resolver/polling_resolver.cc',
'src/core/ext/filters/client_channel/resolver/sockaddr/sockaddr_resolver.cc',
'src/core/ext/filters/client_channel/resolver_result_parsing.cc',
'src/core/ext/filters/client_channel/retry_filter.cc',
'src/core/ext/filters/client_channel/retry_service_config.cc',
'src/core/ext/filters/client_channel/retry_throttle.cc',
'src/core/ext/filters/client_channel/service_config_channel_arg_filter.cc',
'src/core/ext/filters/client_channel/subchannel.cc',
'src/core/ext/filters/client_channel/subchannel_pool_interface.cc',
'src/core/ext/filters/client_channel/subchannel_stream_client.cc',
'src/core/ext/filters/deadline/deadline_filter.cc',
'src/core/ext/filters/fault_injection/fault_injection_filter.cc',
'src/core/ext/filters/fault_injection/service_config_parser.cc',
'src/core/ext/filters/http/client/http_client_filter.cc',
'src/core/ext/filters/http/client_authority_filter.cc',
'src/core/ext/filters/http/http_filters_plugin.cc',
'src/core/ext/filters/http/message_compress/message_compress_filter.cc',
'src/core/ext/filters/http/message_compress/message_decompress_filter.cc',
'src/core/ext/filters/http/server/http_server_filter.cc',
'src/core/ext/filters/message_size/message_size_filter.cc',
'src/core/ext/transport/chttp2/alpn/alpn.cc',
'src/core/ext/transport/chttp2/client/chttp2_connector.cc',
'src/core/ext/transport/chttp2/server/chttp2_server.cc',
'src/core/ext/transport/chttp2/transport/bin_decoder.cc',
'src/core/ext/transport/chttp2/transport/bin_encoder.cc',
'src/core/ext/transport/chttp2/transport/chttp2_transport.cc',
'src/core/ext/transport/chttp2/transport/context_list.cc',
'src/core/ext/transport/chttp2/transport/flow_control.cc',
'src/core/ext/transport/chttp2/transport/frame_data.cc',
'src/core/ext/transport/chttp2/transport/frame_goaway.cc',
'src/core/ext/transport/chttp2/transport/frame_ping.cc',
'src/core/ext/transport/chttp2/transport/frame_rst_stream.cc',
'src/core/ext/transport/chttp2/transport/frame_settings.cc',
'src/core/ext/transport/chttp2/transport/frame_window_update.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder.cc',
'src/core/ext/transport/chttp2/transport/hpack_encoder_table.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser.cc',
'src/core/ext/transport/chttp2/transport/hpack_parser_table.cc',
'src/core/ext/transport/chttp2/transport/http2_settings.cc',
'src/core/ext/transport/chttp2/transport/huffsyms.cc',
'src/core/ext/transport/chttp2/transport/parsing.cc',
'src/core/ext/transport/chttp2/transport/stream_lists.cc',
'src/core/ext/transport/chttp2/transport/stream_map.cc',
'src/core/ext/transport/chttp2/transport/varint.cc',
'src/core/ext/transport/chttp2/transport/writing.cc',
'src/core/ext/transport/inproc/inproc_plugin.cc',
'src/core/ext/transport/inproc/inproc_transport.cc',
'src/core/ext/upb-generated/google/api/annotations.upb.c',
'src/core/ext/upb-generated/google/api/http.upb.c',
'src/core/ext/upb-generated/google/protobuf/any.upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upb-generated/google/protobuf/duration.upb.c',
'src/core/ext/upb-generated/google/protobuf/empty.upb.c',
'src/core/ext/upb-generated/google/protobuf/struct.upb.c',
'src/core/ext/upb-generated/google/protobuf/timestamp.upb.c',
'src/core/ext/upb-generated/google/protobuf/wrappers.upb.c',
'src/core/ext/upb-generated/google/rpc/status.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/health/v1/health.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lb/v1/load_balancer.upb.c',
'src/core/ext/upb-generated/src/proto/grpc/lookup/v1/rls.upb.c',
'src/core/ext/upb-generated/validate/validate.upb.c',
'src/core/ext/upb-generated/xds/data/orca/v3/orca_load_report.upb.c',
'src/core/ext/upb-generated/xds/service/orca/v3/orca.upb.c',
'src/core/lib/address_utils/parse_address.cc',
'src/core/lib/address_utils/sockaddr_utils.cc',
'src/core/lib/backoff/backoff.cc',
'src/core/lib/channel/channel_args.cc',
'src/core/lib/channel/channel_args_preconditioning.cc',
'src/core/lib/channel/channel_stack.cc',
'src/core/lib/channel/channel_stack_builder.cc',
'src/core/lib/channel/channel_stack_builder_impl.cc',
'src/core/lib/channel/channel_trace.cc',
'src/core/lib/channel/channelz.cc',
'src/core/lib/channel/channelz_registry.cc',
'src/core/lib/channel/connected_channel.cc',
'src/core/lib/channel/promise_based_filter.cc',
'src/core/lib/channel/status_util.cc',
'src/core/lib/compression/compression.cc',
'src/core/lib/compression/compression_internal.cc',
'src/core/lib/compression/message_compress.cc',
'src/core/lib/config/core_configuration.cc',
'src/core/lib/debug/stats.cc',
'src/core/lib/debug/stats_data.cc',
'src/core/lib/debug/trace.cc',
'src/core/lib/event_engine/channel_args_endpoint_config.cc',
'src/core/lib/event_engine/default_event_engine_factory.cc',
'src/core/lib/event_engine/event_engine.cc',
'src/core/lib/event_engine/memory_allocator.cc',
'src/core/lib/event_engine/resolved_address.cc',
'src/core/lib/event_engine/slice.cc',
'src/core/lib/event_engine/slice_buffer.cc',
'src/core/lib/event_engine/sockaddr.cc',
'src/core/lib/gprpp/time.cc',
'src/core/lib/http/format_request.cc',
'src/core/lib/http/httpcli.cc',
'src/core/lib/http/parser.cc',
'src/core/lib/iomgr/buffer_list.cc',
'src/core/lib/iomgr/call_combiner.cc',
'src/core/lib/iomgr/cfstream_handle.cc',
'src/core/lib/iomgr/combiner.cc',
'src/core/lib/iomgr/dualstack_socket_posix.cc',
'src/core/lib/iomgr/endpoint.cc',
'src/core/lib/iomgr/endpoint_cfstream.cc',
'src/core/lib/iomgr/endpoint_pair_event_engine.cc',
'src/core/lib/iomgr/endpoint_pair_posix.cc',
'src/core/lib/iomgr/endpoint_pair_windows.cc',
'src/core/lib/iomgr/error.cc',
'src/core/lib/iomgr/error_cfstream.cc',
'src/core/lib/iomgr/ev_apple.cc',
'src/core/lib/iomgr/ev_epoll1_linux.cc',
'src/core/lib/iomgr/ev_poll_posix.cc',
'src/core/lib/iomgr/ev_posix.cc',
'src/core/lib/iomgr/ev_windows.cc',
'src/core/lib/iomgr/event_engine/closure.cc',
'src/core/lib/iomgr/event_engine/endpoint.cc',
'src/core/lib/iomgr/event_engine/iomgr.cc',
'src/core/lib/iomgr/event_engine/pollset.cc',
'src/core/lib/iomgr/event_engine/resolved_address_internal.cc',
'src/core/lib/iomgr/event_engine/resolver.cc',
'src/core/lib/iomgr/event_engine/tcp.cc',
'src/core/lib/iomgr/event_engine/timer.cc',
'src/core/lib/iomgr/exec_ctx.cc',
'src/core/lib/iomgr/executor.cc',
'src/core/lib/iomgr/executor/mpmcqueue.cc',
'src/core/lib/iomgr/executor/threadpool.cc',
'src/core/lib/iomgr/fork_posix.cc',
'src/core/lib/iomgr/fork_windows.cc',
'src/core/lib/iomgr/gethostname_fallback.cc',
'src/core/lib/iomgr/gethostname_host_name_max.cc',
'src/core/lib/iomgr/gethostname_sysconf.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_posix.cc',
'src/core/lib/iomgr/grpc_if_nametoindex_unsupported.cc',
'src/core/lib/iomgr/internal_errqueue.cc',
'src/core/lib/iomgr/iocp_windows.cc',
'src/core/lib/iomgr/iomgr.cc',
'src/core/lib/iomgr/iomgr_internal.cc',
'src/core/lib/iomgr/iomgr_posix.cc',
'src/core/lib/iomgr/iomgr_posix_cfstream.cc',
'src/core/lib/iomgr/iomgr_windows.cc',
'src/core/lib/iomgr/load_file.cc',
'src/core/lib/iomgr/lockfree_event.cc',
'src/core/lib/iomgr/polling_entity.cc',
'src/core/lib/iomgr/pollset.cc',
'src/core/lib/iomgr/pollset_set.cc',
'src/core/lib/iomgr/pollset_set_windows.cc',
'src/core/lib/iomgr/pollset_windows.cc',
'src/core/lib/iomgr/resolve_address.cc',
'src/core/lib/iomgr/resolve_address_posix.cc',
'src/core/lib/iomgr/resolve_address_windows.cc',
'src/core/lib/iomgr/sockaddr_utils_posix.cc',
'src/core/lib/iomgr/socket_factory_posix.cc',
'src/core/lib/iomgr/socket_mutator.cc',
'src/core/lib/iomgr/socket_utils_common_posix.cc',
'src/core/lib/iomgr/socket_utils_linux.cc',
'src/core/lib/iomgr/socket_utils_posix.cc',
'src/core/lib/iomgr/socket_utils_windows.cc',
'src/core/lib/iomgr/socket_windows.cc',
'src/core/lib/iomgr/tcp_client.cc',
'src/core/lib/iomgr/tcp_client_cfstream.cc',
'src/core/lib/iomgr/tcp_client_posix.cc',
'src/core/lib/iomgr/tcp_client_windows.cc',
'src/core/lib/iomgr/tcp_posix.cc',
'src/core/lib/iomgr/tcp_server.cc',
'src/core/lib/iomgr/tcp_server_posix.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_common.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_ifaddrs.cc',
'src/core/lib/iomgr/tcp_server_utils_posix_noifaddrs.cc',
'src/core/lib/iomgr/tcp_server_windows.cc',
'src/core/lib/iomgr/tcp_windows.cc',
'src/core/lib/iomgr/time_averaged_stats.cc',
'src/core/lib/iomgr/timer.cc',
'src/core/lib/iomgr/timer_generic.cc',
'src/core/lib/iomgr/timer_heap.cc',
'src/core/lib/iomgr/timer_manager.cc',
'src/core/lib/iomgr/unix_sockets_posix.cc',
'src/core/lib/iomgr/unix_sockets_posix_noop.cc',
'src/core/lib/iomgr/wakeup_fd_eventfd.cc',
'src/core/lib/iomgr/wakeup_fd_nospecial.cc',
'src/core/lib/iomgr/wakeup_fd_pipe.cc',
'src/core/lib/iomgr/wakeup_fd_posix.cc',
'src/core/lib/iomgr/work_serializer.cc',
'src/core/lib/json/json_reader.cc',
'src/core/lib/json/json_util.cc',
'src/core/lib/json/json_writer.cc',
'src/core/lib/promise/activity.cc',
'src/core/lib/promise/sleep.cc',
'src/core/lib/resolver/resolver.cc',
'src/core/lib/resolver/resolver_registry.cc',
'src/core/lib/resolver/server_address.cc',
'src/core/lib/resource_quota/api.cc',
'src/core/lib/resource_quota/arena.cc',
'src/core/lib/resource_quota/memory_quota.cc',
'src/core/lib/resource_quota/resource_quota.cc',
'src/core/lib/resource_quota/thread_quota.cc',
'src/core/lib/resource_quota/trace.cc',
'src/core/lib/security/authorization/authorization_policy_provider_vtable.cc',
'src/core/lib/security/authorization/evaluate_args.cc',
'src/core/lib/security/authorization/grpc_server_authz_filter.cc',
'src/core/lib/security/context/security_context.cc',
'src/core/lib/security/credentials/call_creds_util.cc',
'src/core/lib/security/credentials/composite/composite_credentials.cc',
'src/core/lib/security/credentials/credentials.cc',
'src/core/lib/security/credentials/fake/fake_credentials.cc',
'src/core/lib/security/credentials/insecure/insecure_credentials.cc',
'src/core/lib/security/credentials/plugin/plugin_credentials.cc',
'src/core/lib/security/credentials/tls/tls_utils.cc',
'src/core/lib/security/security_connector/fake/fake_security_connector.cc',
'src/core/lib/security/security_connector/insecure/insecure_security_connector.cc',
'src/core/lib/security/security_connector/load_system_roots_fallback.cc',
'src/core/lib/security/security_connector/load_system_roots_linux.cc',
'src/core/lib/security/security_connector/security_connector.cc',
'src/core/lib/security/transport/client_auth_filter.cc',
'src/core/lib/security/transport/secure_endpoint.cc',
'src/core/lib/security/transport/security_handshaker.cc',
'src/core/lib/security/transport/server_auth_filter.cc',
'src/core/lib/security/transport/tsi_error.cc',
'src/core/lib/security/util/json_util.cc',
'src/core/lib/service_config/service_config_impl.cc',
'src/core/lib/service_config/service_config_parser.cc',
'src/core/lib/slice/b64.cc',
'src/core/lib/slice/percent_encoding.cc',
'src/core/lib/slice/slice.cc',
'src/core/lib/slice/slice_api.cc',
'src/core/lib/slice/slice_buffer.cc',
'src/core/lib/slice/slice_buffer_api.cc',
'src/core/lib/slice/slice_refcount.cc',
'src/core/lib/slice/slice_split.cc',
'src/core/lib/slice/slice_string_helpers.cc',
'src/core/lib/surface/api_trace.cc',
'src/core/lib/surface/builtins.cc',
'src/core/lib/surface/byte_buffer.cc',
'src/core/lib/surface/byte_buffer_reader.cc',
'src/core/lib/surface/call.cc',
'src/core/lib/surface/call_details.cc',
'src/core/lib/surface/call_log_batch.cc',
'src/core/lib/surface/channel.cc',
'src/core/lib/surface/channel_init.cc',
'src/core/lib/surface/channel_ping.cc',
'src/core/lib/surface/channel_stack_type.cc',
'src/core/lib/surface/completion_queue.cc',
'src/core/lib/surface/completion_queue_factory.cc',
'src/core/lib/surface/event_string.cc',
'src/core/lib/surface/init.cc',
'src/core/lib/surface/lame_client.cc',
'src/core/lib/surface/metadata_array.cc',
'src/core/lib/surface/server.cc',
'src/core/lib/surface/validate_metadata.cc',
'src/core/lib/surface/version.cc',
'src/core/lib/transport/bdp_estimator.cc',
'src/core/lib/transport/byte_stream.cc',
'src/core/lib/transport/connectivity_state.cc',
'src/core/lib/transport/error_utils.cc',
'src/core/lib/transport/handshaker.cc',
'src/core/lib/transport/handshaker_registry.cc',
'src/core/lib/transport/http_connect_handshaker.cc',
'src/core/lib/transport/metadata_batch.cc',
'src/core/lib/transport/parsed_metadata.cc',
'src/core/lib/transport/pid_controller.cc',
'src/core/lib/transport/status_conversion.cc',
'src/core/lib/transport/tcp_connect_handshaker.cc',
'src/core/lib/transport/timeout_encoding.cc',
'src/core/lib/transport/transport.cc',
'src/core/lib/transport/transport_op_string.cc',
'src/core/lib/uri/uri_parser.cc',
'src/core/plugin_registry/grpc_plugin_registry.cc',
'src/core/plugin_registry/grpc_plugin_registry_noextra.cc',
'src/core/tsi/fake_transport_security.cc',
'src/core/tsi/local_transport_security.cc',
'src/core/tsi/transport_security.cc',
'src/core/tsi/transport_security_grpc.cc',
],
},
{
'target_name': 'benchmark_helpers',
'type': 'static_library',
'dependencies': [
'benchmark',
'grpc++_unsecure',
'grpc_test_util_unsecure',
'grpc++_test_config',
],
'sources': [
'src/proto/grpc/testing/echo.proto',
'src/proto/grpc/testing/echo_messages.proto',
'src/proto/grpc/testing/simple_messages.proto',
'src/proto/grpc/testing/xds/v3/orca_load_report.proto',
'test/cpp/microbenchmarks/helpers.cc',
],
},
{
'target_name': 'grpc++',
'type': 'static_library',
'dependencies': [
'grpc',
],
'sources': [
'src/core/ext/transport/binder/client/binder_connector.cc',
'src/core/ext/transport/binder/client/channel_create.cc',
'src/core/ext/transport/binder/client/channel_create_impl.cc',
'src/core/ext/transport/binder/client/connection_id_generator.cc',
'src/core/ext/transport/binder/client/endpoint_binder_pool.cc',
'src/core/ext/transport/binder/client/jni_utils.cc',
'src/core/ext/transport/binder/client/security_policy_setting.cc',
'src/core/ext/transport/binder/security_policy/binder_security_policy.cc',
'src/core/ext/transport/binder/server/binder_server.cc',
'src/core/ext/transport/binder/server/binder_server_credentials.cc',
'src/core/ext/transport/binder/transport/binder_transport.cc',
'src/core/ext/transport/binder/utils/ndk_binder.cc',
'src/core/ext/transport/binder/utils/transport_stream_receiver_impl.cc',
'src/core/ext/transport/binder/wire_format/binder_android.cc',
'src/core/ext/transport/binder/wire_format/binder_constants.cc',
'src/core/ext/transport/binder/wire_format/transaction.cc',
'src/core/ext/transport/binder/wire_format/wire_reader_impl.cc',
'src/core/ext/transport/binder/wire_format/wire_writer.cc',
'src/cpp/client/channel_cc.cc',
'src/cpp/client/client_callback.cc',
'src/cpp/client/client_context.cc',
'src/cpp/client/client_interceptor.cc',
'src/cpp/client/create_channel.cc',
'src/cpp/client/create_channel_internal.cc',
'src/cpp/client/create_channel_posix.cc',
'src/cpp/client/credentials_cc.cc',
'src/cpp/client/insecure_credentials.cc',
'src/cpp/client/secure_credentials.cc',
'src/cpp/client/xds_credentials.cc',
'src/cpp/codegen/codegen_init.cc',
'src/cpp/common/alarm.cc',
'src/cpp/common/auth_property_iterator.cc',
'src/cpp/common/channel_arguments.cc',
'src/cpp/common/channel_filter.cc',
'src/cpp/common/completion_queue_cc.cc',
'src/cpp/common/core_codegen.cc',
'src/cpp/common/resource_quota_cc.cc',
'src/cpp/common/rpc_method.cc',
'src/cpp/common/secure_auth_context.cc',
'src/cpp/common/secure_channel_arguments.cc',
'src/cpp/common/secure_create_auth_context.cc',
'src/cpp/common/tls_certificate_provider.cc',
'src/cpp/common/tls_certificate_verifier.cc',
'src/cpp/common/tls_credentials_options.cc',
'src/cpp/common/validate_service_config.cc',
'src/cpp/common/version_cc.cc',
'src/cpp/server/async_generic_service.cc',
'src/cpp/server/channel_argument_option.cc',
'src/cpp/server/create_default_thread_pool.cc',
'src/cpp/server/dynamic_thread_pool.cc',
'src/cpp/server/external_connection_acceptor_impl.cc',
'src/cpp/server/health/default_health_check_service.cc',
'src/cpp/server/health/health_check_service.cc',
'src/cpp/server/health/health_check_service_server_builder_option.cc',
'src/cpp/server/insecure_server_credentials.cc',
'src/cpp/server/secure_server_credentials.cc',
'src/cpp/server/server_builder.cc',
'src/cpp/server/server_callback.cc',
'src/cpp/server/server_cc.cc',
'src/cpp/server/server_context.cc',
'src/cpp/server/server_credentials.cc',
'src/cpp/server/server_posix.cc',
'src/cpp/server/xds_server_credentials.cc',
'src/cpp/thread_manager/thread_manager.cc',
'src/cpp/util/byte_buffer_cc.cc',
'src/cpp/util/status.cc',
'src/cpp/util/string_ref.cc',
'src/cpp/util/time_cc.cc',
],
},
{
'target_name': 'grpc++_alts',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/common/alts_context.cc',
'src/cpp/common/alts_util.cc',
],
},
{
'target_name': 'grpc++_error_details',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/util/error_details.cc',
],
},
{
'target_name': 'grpc++_reflection',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/proto/grpc/reflection/v1alpha/reflection.proto',
'src/cpp/ext/proto_server_reflection.cc',
'src/cpp/ext/proto_server_reflection_plugin.cc',
],
},
{
'target_name': 'grpc++_test',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/cpp/client/channel_test_peer.cc',
],
},
{
'target_name': 'grpc++_test_config',
'type': 'static_library',
'dependencies': [
'absl/flags:parse',
'gpr',
],
'sources': [
'test/cpp/util/test_config_cc.cc',
],
},
{
'target_name': 'grpc++_test_util',
'type': 'static_library',
'dependencies': [
'absl/flags:flag',
'grpc++',
'grpc_test_util',
],
'sources': [
'test/core/end2end/data/client_certs.cc',
'test/core/end2end/data/server1_cert.cc',
'test/core/end2end/data/server1_key.cc',
'test/core/end2end/data/test_root_cert.cc',
'test/cpp/util/byte_buffer_proto_helper.cc',
'test/cpp/util/create_test_channel.cc',
'test/cpp/util/string_ref_helper.cc',
'test/cpp/util/subprocess.cc',
'test/cpp/util/test_credentials_provider.cc',
],
},
{
'target_name': 'grpc++_unsecure',
'type': 'static_library',
'dependencies': [
'grpc_unsecure',
],
'sources': [
'src/cpp/client/channel_cc.cc',
'src/cpp/client/client_callback.cc',
'src/cpp/client/client_context.cc',
'src/cpp/client/client_interceptor.cc',
'src/cpp/client/create_channel.cc',
'src/cpp/client/create_channel_internal.cc',
'src/cpp/client/create_channel_posix.cc',
'src/cpp/client/credentials_cc.cc',
'src/cpp/client/insecure_credentials.cc',
'src/cpp/codegen/codegen_init.cc',
'src/cpp/common/alarm.cc',
'src/cpp/common/channel_arguments.cc',
'src/cpp/common/channel_filter.cc',
'src/cpp/common/completion_queue_cc.cc',
'src/cpp/common/core_codegen.cc',
'src/cpp/common/insecure_create_auth_context.cc',
'src/cpp/common/resource_quota_cc.cc',
'src/cpp/common/rpc_method.cc',
'src/cpp/common/validate_service_config.cc',
'src/cpp/common/version_cc.cc',
'src/cpp/server/async_generic_service.cc',
'src/cpp/server/channel_argument_option.cc',
'src/cpp/server/create_default_thread_pool.cc',
'src/cpp/server/dynamic_thread_pool.cc',
'src/cpp/server/external_connection_acceptor_impl.cc',
'src/cpp/server/health/default_health_check_service.cc',
'src/cpp/server/health/health_check_service.cc',
'src/cpp/server/health/health_check_service_server_builder_option.cc',
'src/cpp/server/insecure_server_credentials.cc',
'src/cpp/server/server_builder.cc',
'src/cpp/server/server_callback.cc',
'src/cpp/server/server_cc.cc',
'src/cpp/server/server_context.cc',
'src/cpp/server/server_credentials.cc',
'src/cpp/server/server_posix.cc',
'src/cpp/thread_manager/thread_manager.cc',
'src/cpp/util/byte_buffer_cc.cc',
'src/cpp/util/status.cc',
'src/cpp/util/string_ref.cc',
'src/cpp/util/time_cc.cc',
],
},
{
'target_name': 'grpc_plugin_support',
'type': 'static_library',
'dependencies': [
],
'sources': [
'src/compiler/cpp_generator.cc',
'src/compiler/csharp_generator.cc',
'src/compiler/node_generator.cc',
'src/compiler/objective_c_generator.cc',
'src/compiler/php_generator.cc',
'src/compiler/python_generator.cc',
'src/compiler/ruby_generator.cc',
],
},
{
'target_name': 'grpcpp_channelz',
'type': 'static_library',
'dependencies': [
'grpc++',
],
'sources': [
'src/proto/grpc/channelz/channelz.proto',
'src/cpp/server/channelz/channelz_service.cc',
'src/cpp/server/channelz/channelz_service_plugin.cc',
],
},
{
'target_name': 'boringssl',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/boringssl-with-bazel/err_data.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bitstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_bool.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_d2i_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_dup.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_enum.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_gentm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_i2d_fp.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_mbstr.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_object.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_octet.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_print.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strex.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_strnid.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_time.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_type.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utctm.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/a_utf8.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_lib.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn1_par.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/asn_pack.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_int.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/f_string.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_dec.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_enc.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_fre.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_new.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_typ.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/tasn_utl.c',
'third_party/boringssl-with-bazel/src/crypto/asn1/time_support.c',
'third_party/boringssl-with-bazel/src/crypto/base64/base64.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio.c',
'third_party/boringssl-with-bazel/src/crypto/bio/bio_mem.c',
'third_party/boringssl-with-bazel/src/crypto/bio/connect.c',
'third_party/boringssl-with-bazel/src/crypto/bio/fd.c',
'third_party/boringssl-with-bazel/src/crypto/bio/file.c',
'third_party/boringssl-with-bazel/src/crypto/bio/hexdump.c',
'third_party/boringssl-with-bazel/src/crypto/bio/pair.c',
'third_party/boringssl-with-bazel/src/crypto/bio/printf.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket.c',
'third_party/boringssl-with-bazel/src/crypto/bio/socket_helper.c',
'third_party/boringssl-with-bazel/src/crypto/blake2/blake2.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/bn_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/bn_extra/convert.c',
'third_party/boringssl-with-bazel/src/crypto/buf/buf.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/asn1_compat.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/ber.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbb.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/cbs.c',
'third_party/boringssl-with-bazel/src/crypto/bytestring/unicode.c',
'third_party/boringssl-with-bazel/src/crypto/chacha/chacha.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/cipher_extra.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/derive_key.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesccm.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesctrhmac.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_aesgcmsiv.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_chacha20poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_null.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc2.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_rc4.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/e_tls.c',
'third_party/boringssl-with-bazel/src/crypto/cipher_extra/tls_cbc.c',
'third_party/boringssl-with-bazel/src/crypto/cmac/cmac.c',
'third_party/boringssl-with-bazel/src/crypto/conf/conf.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-aarch64-win.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm-linux.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-arm.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-intel.c',
'third_party/boringssl-with-bazel/src/crypto/cpu-ppc64le.c',
'third_party/boringssl-with-bazel/src/crypto/crypto.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/curve25519.c',
'third_party/boringssl-with-bazel/src/crypto/curve25519/spake25519.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/dh_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/dh_extra/params.c',
'third_party/boringssl-with-bazel/src/crypto/digest_extra/digest_extra.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa.c',
'third_party/boringssl-with-bazel/src/crypto/dsa/dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/ec_derive.c',
'third_party/boringssl-with-bazel/src/crypto/ec_extra/hash_to_curve.c',
'third_party/boringssl-with-bazel/src/crypto/ecdh_extra/ecdh_extra.c',
'third_party/boringssl-with-bazel/src/crypto/ecdsa_extra/ecdsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/engine/engine.c',
'third_party/boringssl-with-bazel/src/crypto/err/err.c',
'third_party/boringssl-with-bazel/src/crypto/evp/digestsign.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/evp_ctx.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_dsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ec_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_ed25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519.c',
'third_party/boringssl-with-bazel/src/crypto/evp/p_x25519_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/evp/pbkdf.c',
'third_party/boringssl-with-bazel/src/crypto/evp/print.c',
'third_party/boringssl-with-bazel/src/crypto/evp/scrypt.c',
'third_party/boringssl-with-bazel/src/crypto/evp/sign.c',
'third_party/boringssl-with-bazel/src/crypto/ex_data.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/bcm.c',
'third_party/boringssl-with-bazel/src/crypto/fipsmodule/fips_shared_support.c',
'third_party/boringssl-with-bazel/src/crypto/hkdf/hkdf.c',
'third_party/boringssl-with-bazel/src/crypto/hpke/hpke.c',
'third_party/boringssl-with-bazel/src/crypto/hrss/hrss.c',
'third_party/boringssl-with-bazel/src/crypto/lhash/lhash.c',
'third_party/boringssl-with-bazel/src/crypto/mem.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj.c',
'third_party/boringssl-with-bazel/src/crypto/obj/obj_xref.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_all.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_info.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_lib.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_oth.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pk8.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pem/pem_xaux.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs7/pkcs7_x509.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/p5_pbev2.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8.c',
'third_party/boringssl-with-bazel/src/crypto/pkcs8/pkcs8_x509.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_arm.c',
'third_party/boringssl-with-bazel/src/crypto/poly1305/poly1305_vec.c',
'third_party/boringssl-with-bazel/src/crypto/pool/pool.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/deterministic.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/forkunsafe.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/fuchsia.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/passive.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/rand_extra.c',
'third_party/boringssl-with-bazel/src/crypto/rand_extra/windows.c',
'third_party/boringssl-with-bazel/src/crypto/rc4/rc4.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_c11.c',
'third_party/boringssl-with-bazel/src/crypto/refcount_lock.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_asn1.c',
'third_party/boringssl-with-bazel/src/crypto/rsa_extra/rsa_print.c',
'third_party/boringssl-with-bazel/src/crypto/siphash/siphash.c',
'third_party/boringssl-with-bazel/src/crypto/stack/stack.c',
'third_party/boringssl-with-bazel/src/crypto/thread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_none.c',
'third_party/boringssl-with-bazel/src/crypto/thread_pthread.c',
'third_party/boringssl-with-bazel/src/crypto/thread_win.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/pmbtoken.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/trust_token.c',
'third_party/boringssl-with-bazel/src/crypto/trust_token/voprf.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_digest.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_sign.c',
'third_party/boringssl-with-bazel/src/crypto/x509/a_verify.c',
'third_party/boringssl-with-bazel/src/crypto/x509/algorithm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/asn1_gen.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_dir.c',
'third_party/boringssl-with-bazel/src/crypto/x509/by_file.c',
'third_party/boringssl-with-bazel/src/crypto/x509/i2d_pr.c',
'third_party/boringssl-with-bazel/src/crypto/x509/name_print.c',
'third_party/boringssl-with-bazel/src/crypto/x509/rsa_pss.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/t_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_att.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_cmp.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_d2.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_def.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_ext.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_lu.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_obj.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_set.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_trs.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_txt.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_v3.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vfy.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509_vpm.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509cset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509rset.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x509spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_algor.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_all.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_attrib.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_crl.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_exten.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_name.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_pubkey.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_req.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_sig.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_spki.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_val.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509.c',
'third_party/boringssl-with-bazel/src/crypto/x509/x_x509a.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_cache.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_data.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_map.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_node.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/pcy_tree.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_akeya.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_alt.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_bitst.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_conf.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_cpols.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_crld.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_enum.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_extku.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_genn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ia5.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_info.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_int.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_lib.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ncons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_ocsp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pci.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcia.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pcons.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_pmaps.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_prn.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_purp.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_skey.c',
'third_party/boringssl-with-bazel/src/crypto/x509v3/v3_utl.c',
'third_party/boringssl-with-bazel/src/ssl/bio_ssl.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_both.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/d1_srtp.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/dtls_record.cc',
'third_party/boringssl-with-bazel/src/ssl/encrypted_client_hello.cc',
'third_party/boringssl-with-bazel/src/ssl/extensions.cc',
'third_party/boringssl-with-bazel/src/ssl/handoff.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_client.cc',
'third_party/boringssl-with-bazel/src/ssl/handshake_server.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_both.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/s3_pkt.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_aead_ctx.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_asn1.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_buffer.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cert.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_cipher.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_file.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_key_share.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_lib.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_privkey.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_session.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_stat.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_transcript.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_versions.cc',
'third_party/boringssl-with-bazel/src/ssl/ssl_x509.cc',
'third_party/boringssl-with-bazel/src/ssl/t1_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_both.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_client.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_enc.cc',
'third_party/boringssl-with-bazel/src/ssl/tls13_server.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_method.cc',
'third_party/boringssl-with-bazel/src/ssl/tls_record.cc',
],
},
{
'target_name': 'boringssl_test_util',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/boringssl-with-bazel/src/crypto/test/file_test.cc',
'third_party/boringssl-with-bazel/src/crypto/test/malloc.cc',
'third_party/boringssl-with-bazel/src/crypto/test/test_util.cc',
'third_party/boringssl-with-bazel/src/crypto/test/wycheproof_util.cc',
],
},
{
'target_name': 'benchmark',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/benchmark/src/benchmark.cc',
'third_party/benchmark/src/benchmark_api_internal.cc',
'third_party/benchmark/src/benchmark_main.cc',
'third_party/benchmark/src/benchmark_name.cc',
'third_party/benchmark/src/benchmark_register.cc',
'third_party/benchmark/src/benchmark_runner.cc',
'third_party/benchmark/src/colorprint.cc',
'third_party/benchmark/src/commandlineflags.cc',
'third_party/benchmark/src/complexity.cc',
'third_party/benchmark/src/console_reporter.cc',
'third_party/benchmark/src/counter.cc',
'third_party/benchmark/src/csv_reporter.cc',
'third_party/benchmark/src/json_reporter.cc',
'third_party/benchmark/src/perf_counters.cc',
'third_party/benchmark/src/reporter.cc',
'third_party/benchmark/src/sleep.cc',
'third_party/benchmark/src/statistics.cc',
'third_party/benchmark/src/string_util.cc',
'third_party/benchmark/src/sysinfo.cc',
'third_party/benchmark/src/timers.cc',
],
},
{
'target_name': 're2',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/re2/re2/bitstate.cc',
'third_party/re2/re2/compile.cc',
'third_party/re2/re2/dfa.cc',
'third_party/re2/re2/filtered_re2.cc',
'third_party/re2/re2/mimics_pcre.cc',
'third_party/re2/re2/nfa.cc',
'third_party/re2/re2/onepass.cc',
'third_party/re2/re2/parse.cc',
'third_party/re2/re2/perl_groups.cc',
'third_party/re2/re2/prefilter.cc',
'third_party/re2/re2/prefilter_tree.cc',
'third_party/re2/re2/prog.cc',
'third_party/re2/re2/re2.cc',
'third_party/re2/re2/regexp.cc',
'third_party/re2/re2/set.cc',
'third_party/re2/re2/simplify.cc',
'third_party/re2/re2/stringpiece.cc',
'third_party/re2/re2/tostring.cc',
'third_party/re2/re2/unicode_casefold.cc',
'third_party/re2/re2/unicode_groups.cc',
'third_party/re2/util/pcre.cc',
'third_party/re2/util/rune.cc',
'third_party/re2/util/strutil.cc',
],
},
{
'target_name': 'upb',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/upb/third_party/utf8_range/naive.c',
'third_party/upb/third_party/utf8_range/range2-neon.c',
'third_party/upb/third_party/utf8_range/range2-sse.c',
'third_party/upb/upb/decode_fast.c',
'third_party/upb/upb/decode.c',
'third_party/upb/upb/def.c',
'third_party/upb/upb/encode.c',
'third_party/upb/upb/json_encode.c',
'third_party/upb/upb/msg.c',
'third_party/upb/upb/reflection.c',
'third_party/upb/upb/table.c',
'third_party/upb/upb/text_encode.c',
'third_party/upb/upb/upb.c',
'src/core/ext/upb-generated/google/protobuf/descriptor.upb.c',
'src/core/ext/upbdefs-generated/google/protobuf/descriptor.upbdefs.c',
],
},
{
'target_name': 'z',
'type': 'static_library',
'dependencies': [
],
'sources': [
'third_party/zlib/adler32.c',
'third_party/zlib/compress.c',
'third_party/zlib/crc32.c',
'third_party/zlib/deflate.c',
'third_party/zlib/gzclose.c',
'third_party/zlib/gzlib.c',
'third_party/zlib/gzread.c',
'third_party/zlib/gzwrite.c',
'third_party/zlib/infback.c',
'third_party/zlib/inffast.c',
'third_party/zlib/inflate.c',
'third_party/zlib/inftrees.c',
'third_party/zlib/trees.c',
'third_party/zlib/uncompr.c',
'third_party/zlib/zutil.c',
],
},
]
}
# osiris/vault/__init__.py (from skadyan/aws-glue-python-kickstart, Apache-2.0)
import abc
from abc import abstractmethod
from typing import Optional, Union

from osiris.base.generalutils import instantiate


class SecretVault(abc.ABC):
    """Abstract lookup interface for secret material (passwords, tokens, keys)."""

    @abstractmethod
    def get_secret(self, key: str, attr: Optional[str] = None, **kwargs) -> Optional[Union[dict, str]]:
        """Return the secret stored under ``key``, or the single ``attr`` field of it."""


class NoopSecretVault(SecretVault):
    """Fallback vault used when no backend is configured; resolves nothing."""

    def get_secret(self, key: str, attr: Optional[str] = None, **kwargs) -> Optional[Union[dict, str]]:
        return None


def new_secret_vault(env) -> Optional[SecretVault]:
    """Instantiate the vault implementation named in the environment config.

    Returns None when ``sys.vault.enabled`` is off.
    """
    instance = None
    if env.flag("sys.vault.enabled"):
        impl = env.get_property("sys.vault.impl")
        impl_kwargs = env.get_section("sys.vault.impl_kwargs")
        instance = instantiate(impl, impl_kwargs)
    return instance
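

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): one plausible vault
# backend and the wiring that new_secret_vault() expects. The class name, the
# AWS backing, and the config keys referenced in the comments are assumptions
# made for this example; only boto3's get_secret_value call is a real API.
# ---------------------------------------------------------------------------
class AwsSecretsManagerVault(SecretVault):
    """Hypothetical backend that resolves secrets from AWS Secrets Manager."""

    def __init__(self, region_name: str = "us-east-1"):
        import boto3  # assumed available on AWS Glue workers

        self._client = boto3.client("secretsmanager", region_name=region_name)

    def get_secret(self, key: str, attr: Optional[str] = None, **kwargs) -> Optional[Union[dict, str]]:
        import json

        # Secrets Manager stores JSON payloads; return the whole mapping or one field.
        payload = json.loads(self._client.get_secret_value(SecretId=key)["SecretString"])
        return payload[attr] if attr is not None else payload


# Assuming sys.vault.enabled = true and sys.vault.impl points at the class
# above, a job would resolve credentials via:
#
#     vault = new_secret_vault(env)
#     password = vault.get_secret("db/creds", attr="password")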