from pathlib import Path
import re
import numpy as np
import pandas as pd
from scipy import signal
from abr.datatype import ABRWaveform, ABRSeries
def load(filename, filter_settings=None, frequencies=None):
filename = Path(filename)
with filename.open(encoding='ISO-8859-1') as f:
line = f.readline()
if not line.startswith(':RUN-'):
raise IOError('Unsupported file format')
    p_level = re.compile(r':LEVELS:([0-9;]+)')
    p_fs = re.compile(r'SAMPLE \(.sec\): ([0-9]+)')
    p_freq = re.compile(r'FREQ: ([0-9\.]+)')
abr_window = 8500 # usec
try:
with filename.open(encoding='ISO-8859-1') as f:
header, data = f.read().split('DATA')
# Extract data from header
levelstring = p_level.search(header).group(1).strip(';').split(';')
levels = np.array(levelstring).astype(np.float32)
sampling_period = float(p_fs.search(header).group(1))
frequency = float(p_freq.search(header).group(1))
# Convert text representation of data to Numpy array
fs = 1e6/sampling_period
cutoff = int(abr_window / sampling_period)
data = np.array(data.split()).astype(np.float32)
data.shape = -1, len(levels)
data = data.T[:, :cutoff]
t = np.arange(data.shape[-1]) / fs * 1e3
t = pd.Index(t, name='time')
if filter_settings is not None:
Wn = filter_settings['highpass'], filter_settings['lowpass']
N = filter_settings['order']
b, a = signal.iirfilter(N, Wn, fs=fs)
data = signal.filtfilt(b, a, data, axis=-1)
waveforms = []
for s, level in zip(data, levels):
            # Check for an ABR I-O bug that sometimes saves zeroed waveforms
if not (s == 0).all():
w = pd.Series(s, index=t)
waveform = ABRWaveform(fs, w, level)
waveforms.append(waveform)
series = ABRSeries(waveforms, frequency)
series.filename = filename
return [series]
    except (AttributeError, ValueError) as e:
        msg = 'Could not parse %s. Most likely not a valid ABR file.' % filename
        raise IOError(msg) from e
| ABR | /ABR-0.0.10.tar.gz/ABR-0.0.10/abr/parsers/EPL.py | EPL.py |
import pandas as pd
import numpy as np
from abr.datatype import ABRWaveform, ABRSeries
################################################################################
# Utility functions
################################################################################
def _parse_line(line):
'''
Parse list of comma-separated values from line
Parameters
----------
line : string
Line containing the values that need to be parsed
Returns
-------
tokens : list
List of values found in line. If values are numeric, they will be
converted to floats. Otherwise they will be returned as strings.
'''
tokens = line.strip().split(',')[1:]
try:
return [float(t) for t in tokens if t]
except ValueError:
return [t for t in tokens if t]
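# Illustrative `_parse_line` behavior (hypothetical inputs): a line such as
# 'Intensity:,90,80,70' yields [90.0, 80.0, 70.0], while a non-numeric line
# such as 'Channel:,Left,Right' yields ['Left', 'Right'].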
def load_metadata(filename):
'''
Load the metadata stored in the ABR file
    Parameters
    ----------
filename : string
Filename to load
Returns
-------
info : pandas.DataFrame
Dataframe containing information on each waveform
'''
info = {}
with open(filename, 'r') as fh:
for i, line in enumerate(fh):
if i == 20:
break
name = line.split(',', 1)[0].strip(':').lower()
info[name] = _parse_line(line)
info = pd.DataFrame(info)
# Number the trials. We will use this number later to look up which column
    # contains the ABR waveform for the corresponding parameters.
info['waveform'] = np.arange(len(info))
info.set_index('waveform', inplace=True)
# Convert the intensity to the actual level in dB SPL
info['level'] = np.round(info.intensity/10)*10
# Store the scaling factor for the waveform so we can recover this when
# loading. By default the scaling factor is 674. For 110 dB SPL, the
    # scaling factor is 337. The statistician uses 6.74 and 3.37, but he
    # includes a division by 100 elsewhere in his code to compensate.
info['waveform_sf'] = 6.74e2
# The rows where level is 110 dB SPL have a different scaling factor.
info.loc[info.level == 110, 'waveform_sf'] = 3.37e2
# Start time of stimulus in usec (since sampling period is reported in usec,
# we should try to be consistent with all time units).
info['stimulus_start'] = 12.5e3
return info
def load_waveforms(filename, info):
'''
Load the waveforms stored in the ABR file
Only the waveforms specified in info will be loaded. For example, if you
have filtered the info DataFrame to only contain waveforms from channel 1,
only those waveforms will be loaded.
    Parameters
    ----------
filename : string
Filename to load
info : pandas.DataFrame
Waveform metadata (see `load_metadata`)
Returns
-------
    waveforms : pandas.DataFrame
        Dataframe containing the waveforms
'''
# Read the waveform table into a dataframe
    df = pd.read_csv(filename, skiprows=20)
# Keep only the columns containing the signal of interest. There are six
# columns for each trial. We only want the column containing the raw
# average (i.e., not converted to uV).
df = df[[c for c in df.columns if c.startswith('Average:')]]
# Renumber them so we can look them up by number. The numbers should
# correspond to the trial number we generated in `load_metadata`.
df.columns = np.arange(len(df.columns))
# Loop through the entries in the info DataFrame. This dataframe contains
# metadata needed for processing the waveform (e.g., it tells us which
# waveforms to keep, the scaling factor to use, etc.).
signals = []
for w_index, w_info in info.iterrows():
# Compute time of each point. Currently in usec because smp. period is
# in usec.
t = np.arange(len(df), dtype=np.float32)*w_info['smp. period']
# Subtract stimulus start so that t=0 is when stimulus begins. Convert
# to msec.
t = (t-w_info['stimulus_start'])*1e-3
time = pd.Index(t, name='time')
# Divide by the scaling factor and convert from nV to uV
s = df[w_index]/w_info['waveform_sf']*1e-3
s.index = time
signals.append(s)
# Merge together the waveforms into a single DataFrame
waveforms = pd.concat(signals, keys=info.index, names=['waveform'])
waveforms = waveforms.unstack(level='waveform')
return waveforms
################################################################################
# API
################################################################################
# Minimum wave 1 latencies
latencies = {
1000: 3.1,
3000: 2.1,
4000: 2.3,
6000: 1.8,
}
def load(fname, filter=None, abr_window=8.5e-3):
with open(fname) as fh:
line = fh.readline()
if not line.startswith('Identifier:'):
raise IOError('Unsupported file format')
info = load_metadata(fname)
info = info[info.channel == 1]
fs = 1/(info.iloc[0]['smp. period']*1e-6)
series = []
for frequency, f_info in info.groupby('stim. freq.'):
signal = load_waveforms(fname, f_info)
signal = signal[signal.index >= 0]
waveforms = []
min_latency = latencies.get(frequency)
for i, row in f_info.iterrows():
s = signal[i].values[np.newaxis]
waveform = ABRWaveform(fs, s, row['level'], min_latency=min_latency,
filter=filter)
waveforms.append(waveform)
s = ABRSeries(waveforms, frequency/1e3)
s.filename = fname
series.append(s)
return series
| ABR | /ABR-0.0.10.tar.gz/ABR-0.0.10/abr/parsers/NCRAR.py | NCRAR.py |
from functools import cached_property, lru_cache
import json
from pathlib import Path
import numpy as np
import pandas as pd
from scipy import signal
from abr.datatype import ABRWaveform, ABRSeries
from .dataset import DataCollection, Dataset
def get_filename(pathname, suffix='ABR average waveforms.csv'):
if pathname.is_file():
if pathname.name.endswith(suffix):
return pathname
else:
raise IOError('Invalid ABR file')
filename = pathname / suffix
if filename.exists():
return filename
filename = pathname / f'{pathname.stem} {suffix}'
if filename.exists():
return filename
raise IOError(f'Could not find average waveforms file for {pathname}')
@lru_cache(maxsize=64)
def read_file(filename):
with filename.open() as fh:
# This supports a variable-length header where we may not have included
# some levels (e.g., epoch_n and epoch_reject_ratio).
header = {}
while True:
line = fh.readline()
if line.startswith('time'):
break
name, *keys = line.split(',')
header[name] = np.array(keys).astype('f')
data = pd.read_csv(fh, index_col=0, header=None)
header = pd.MultiIndex.from_arrays(list(header.values()),
names=list(header.keys()))
data.index.name = 'time'
data.index *= 1e3
data.columns = header
return data.T
class PSIDataCollection(DataCollection):
def __init__(self, filename):
filename = Path(filename)
self.filename = get_filename(filename)
    @cached_property
    def fs(self):
        try:
            settings_file = get_filename(self.filename.parent,
                                         'ABR processing settings.json')
            fs = json.loads(settings_file.read_text())['actual_fs']
        except IOError:
            # Fall back to estimating the sampling rate from the time axis.
            fs = np.mean(np.diff(self.data.columns.values) ** -1)
        return fs
@cached_property
def data(self):
data = read_file(self.filename)
keep = ['frequency', 'level']
drop = [c for c in data.index.names if c not in keep]
return data.reset_index(drop, drop=True)
@cached_property
def frequencies(self):
return self.data.index.unique('frequency').values
@property
def name(self):
return self.filename.parent.stem
def iter_frequencies(self):
for frequency in self.frequencies:
yield PSIDataset(self, frequency)
class PSIDataset(Dataset):
def __init__(self, parent, frequency):
self.parent = parent
self.frequency = frequency
@property
def filename(self):
return self.parent.filename
@property
def fs(self):
return self.parent.fs
def get_series(self, filter_settings=None):
data = self.parent.data.loc[self.frequency]
if filter_settings is not None:
Wn = filter_settings['highpass'], filter_settings['lowpass']
N = filter_settings['order']
b, a = signal.iirfilter(N, Wn, fs=self.fs)
data_filt = signal.filtfilt(b, a, data.values, axis=-1)
data = pd.DataFrame(data_filt, columns=data.columns, index=data.index)
waveforms = []
for level, w in data.iterrows():
level = float(level)
waveforms.append(ABRWaveform(self.fs, w, level))
series = ABRSeries(waveforms, self.frequency)
series.filename = self.parent.filename
series.id = self.parent.filename.parent.name
return series
def iter_all(path):
path = Path(path)
if path.stem.endswith('abr_io'):
yield from PSIDataCollection(path).iter_frequencies()
else:
for subpath in path.glob('**/*abr_io'):
yield from PSIDataCollection(subpath).iter_frequencies()
| ABR | /ABR-0.0.10.tar.gz/ABR-0.0.10/abr/parsers/PSI.py | PSI.py |
'''
This module defines the import/export routines for interacting with the data
store. If you wish to customize this, simply define the load() function.
load(run_location, invert, filter) -- When the program needs a run loaded, it
will pass the run_location provided by list(). Invert is a boolean flag
indicating whether waveform polarity should be flipped. Filter is a dictionary
containing the following keys:
1. ftype: any of None, butterworth, bessel, etc.
2. fh: highpass cutoff (integer in Hz)
3. fl: lowpass cutoff (integer in Hz)
All objects of the epl.datatype.Waveform class will accept the filter
dictionary and perform the appropriate filtering. It is recommended you use the
filtering provided by the Waveform class as the parameters of the filter will
also be recorded. This function must return an object of the
epl.datatype.ABRSeries class. See this class for appropriate documentation.
The save function must return a message. If there is an error in saving, raise
the appropriate exception.
'''
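# A minimal sketch of the filter dictionary described in the docstring above
# (key names come from the docstring; the values are hypothetical):
#     filter = {'ftype': 'butterworth', 'fh': 300, 'fl': 3000}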
import importlib
import re
from pathlib import Path
import pandas as pd
import abr
from ..datatype import Point
P_ANALYZER = re.compile(r'.*kHz(?:-(\w+))?-analyzed.txt')
def get_analyzer(filename):
return P_ANALYZER.match(filename.name).group(1)
def waveform_string(waveform):
data = [f'{waveform.level:.2f}']
data.append(f'{waveform.mean(0, 1)}')
data.append(f'{waveform.std(0, 1)}')
for _, point in sorted(waveform.points.items()):
data.append(f'{point.latency:.8f}')
data.append(f'{point.amplitude:.8f}')
return '\t'.join(data)
def filter_string(waveform):
if getattr(waveform, '_zpk', None) is None:
return 'No filtering'
t = 'Pass %d -- z: %r, p: %r, k: %r'
filt = [t % (i, z, p, k) for i, (z, p, k) in enumerate(waveform._zpk)]
return '\n' + '\n'.join(filt)
def load_analysis(fname):
    th_match = re.compile(r'Threshold \(dB SPL\): ([\w.-]+)')
    freq_match = re.compile(r'Frequency \(kHz\): ([\d.]+)')
with open(fname) as fh:
text = fh.readline()
th = th_match.search(text).group(1)
th = None if th == 'None' else float(th)
text = fh.readline()
freq = float(freq_match.search(text).group(1))
for line in fh:
if line.startswith('NOTE'):
break
    data = pd.read_csv(fh, sep='\t', index_col='Level')
return (freq, th, data)
def parse_peaks(peaks, threshold):
# Convert the peaks dataframe to a format that can be used by _set_points.
    p_pattern = re.compile(r'P(\d) Latency')
    n_pattern = re.compile(r'N(\d) Latency')
p_latencies = {}
n_latencies = {}
for c in peaks:
match = p_pattern.match(c)
if match:
wave = int(match.group(1))
p_latencies[wave] = pd.DataFrame({'x': peaks[c]})
match = n_pattern.match(c)
if match:
wave = int(match.group(1))
n_latencies[wave] = pd.DataFrame({'x': peaks[c]})
p_latencies = pd.concat(p_latencies.values(), keys=p_latencies.keys(),
names=['wave'])
p_latencies = {g: df.reset_index('Level', drop=True) \
for g, df in p_latencies.groupby('Level')}
n_latencies = pd.concat(n_latencies.values(), keys=n_latencies.keys(),
names=['wave'])
n_latencies = {g: df.reset_index('Level', drop=True) \
for g, df in n_latencies.groupby('Level')}
for level, df in p_latencies.items():
if level < threshold:
df[:] = -df[:]
for level, df in n_latencies.items():
if level < threshold:
df[:] = -df[:]
return p_latencies, n_latencies
class Parser(object):
filename_template = '{filename}-{frequency}kHz-{user}analyzed.txt'
def __init__(self, file_format, filter_settings, user=None):
'''
Parameters
----------
file_format : string
File format that will be loaded.
filter_settings : {None, dict}
If None, no filtering is applied. If dict, must contain ftype,
lowpass, highpass and order as keys.
user : {None, string}
Person analyzing the data.
'''
self._file_format = file_format
self._filter_settings = filter_settings
self._user = user
self._module_name = f'abr.parsers.{file_format}'
self._module = importlib.import_module(self._module_name)
def load(self, fs):
return fs.get_series(self._filter_settings)
def load_analysis(self, series, filename):
freq, th, peaks = load_analysis(filename)
series.load_analysis(th, peaks)
def find_analyzed_files(self, filename, frequency):
frequency = round(frequency * 1e-3, 8)
glob_pattern = self.filename_template.format(
filename=filename.with_suffix(''),
frequency=frequency,
user='*')
path = Path(glob_pattern)
return list(path.parent.glob(path.name))
def get_save_filename(self, filename, frequency):
        # Round the frequency to 8 decimal places to minimize floating-point
        # errors.
user_name = self._user + '-' if self._user else ''
frequency = round(frequency * 1e-3, 8)
save_filename = self.filename_template.format(
filename=filename.with_suffix(''),
frequency=frequency,
user=user_name)
return Path(save_filename)
def save(self, model):
# Assume that all waveforms were filtered identically
filter_history = filter_string(model.waveforms[-1])
# Generate list of columns
columns = ['Level', '1msec Avg', '1msec StDev']
point_keys = sorted(model.waveforms[0].points)
for point_number, point_type in point_keys:
point_type_code = 'P' if point_type == Point.PEAK else 'N'
for measure in ('Latency', 'Amplitude'):
columns.append(f'{point_type_code}{point_number} {measure}')
columns = '\t'.join(columns)
spreadsheet = '\n'.join(waveform_string(w) \
for w in reversed(model.waveforms))
content = CONTENT.format(threshold=model.threshold,
frequency=model.freq*1e-3,
filter_history=filter_history,
columns=columns,
spreadsheet=spreadsheet,
version=abr.__version__)
filename = self.get_save_filename(model.filename, model.freq)
with open(filename, 'w') as fh:
            fh.write(content)
def iter_all(self, path):
yield from self._module.iter_all(path)
def find_processed(self, path):
for ds in self.iter_all(path):
if self.get_save_filename(ds.filename, ds.frequency).exists():
yield ds
def find_unprocessed(self, path):
for ds in self.iter_all(path):
if not self.get_save_filename(ds.filename, ds.frequency).exists():
yield ds
    def find_analyses(self, dirname, frequencies=None):
        analyzed = {}
        for ds in self.iter_all(dirname):
            if frequencies is not None and ds.frequency not in frequencies:
                continue
            analyzed[ds.filename, ds.frequency] = \
                self.find_analyzed_files(ds.filename, ds.frequency)
        return analyzed
def load_analyses(self, dirname, frequencies=None):
analyzed = self.find_analyses(dirname, frequencies)
keys = []
thresholds = []
for (raw_file, frequency), analyzed_files in analyzed.items():
for analyzed_file in analyzed_files:
user = get_analyzer(analyzed_file)
keys.append((raw_file, frequency, analyzed_file, user))
_, threshold, _ = load_analysis(analyzed_file)
thresholds.append(threshold)
cols = ['raw_file', 'frequency', 'analyzed_file', 'user']
index = pd.MultiIndex.from_tuples(keys, names=cols)
return pd.Series(thresholds, index=index)
CONTENT = '''
Threshold (dB SPL): {threshold:.2f}
Frequency (kHz): {frequency:.2f}
Filter history (zpk format): {filter_history}
file_format_version: 0.0.2
code_version: {version}
NOTE: Negative latencies indicate no peak. NaN for amplitudes indicate peak was unscorable.
{columns}
{spreadsheet}
'''.strip()
PARSER_MAP = {
'PSI': 'psiexperiment',
'NCRAR': 'IHS text export',
'EPL': 'EPL CFTS',
}
| ABR | /ABR-0.0.10.tar.gz/ABR-0.0.10/abr/parsers/__init__.py | __init__.py |
# Main Absolute Investimentos package
Main package with basic functions for internal use at Absolute Investimentos
## Installing the package on Windows
- Replace the files on the network share, in the `P:/sistemas/Python` folder
- Update the version in setup.py to the publication date
- If necessary, add new dependencies to requirements.txt
- Send an email replying to "Documentação Pacote ABS (python)" announcing that
a new version is available
## Installing the package on Linux
To install the package, it is recommended to have conda configured on the machine.
You also need to install the compatible [Microsoft ODBC](https://bit.ly/3Bsn0Pz) drivers.
Once both are installed, open a terminal, navigate to the root folder of this
project, `PackageABS`, and type:
```bash
> sudo apt-get install unixodbc-dev
> conda install pyodbc
> cd Pacotes/
> pip install -r requirements.txt
> pip install .
> python -c "import ABS; print(ABS.__name__)"
```
| ABS-95i943594 | /ABS_95i943594-2023.5.4.tar.gz/ABS_95i943594-2023.5.4/README.md | README.md |
import pytest
from absql import Runner
@pytest.fixture()
def runner():
return Runner(greeting="Hello")
def test_set_context(runner):
assert runner.extra_context["greeting"] == "Hello"
runner.set_context(greeting="Hey")
assert runner.extra_context["greeting"] == "Hey"
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/tests/test_set.py | test_set.py |
import pytest
from absql import Runner
@pytest.fixture()
def planet():
def planet(planet_name):
return planet_name
return planet
def test_render_text_extra_partial(planet):
got = Runner.render_text(
"Hello {{planet()}}",
planet=planet,
planet_name="Earth",
partial_kwargs=["planet_name"],
)
want = "Hello Earth"
assert got == want
def test_default_render_text_extra_partial_fails(planet):
with pytest.raises(TypeError):
Runner.render_text("Hello {{planet()}}", planet=planet, planet_name="Earth")
def test_render_file_extra_partial(planet):
got = Runner.render_file(
"tests/files/partial_planet.sql",
planet=planet,
planet_name="Earth",
partial_kwargs=["planet_name"],
)
want = "Hello Earth, Goodbye Earth"
assert got == want
def test_render_runner_extra_partial(planet):
r = Runner(planet=planet, planet_name="Earth", partial_kwargs=["planet_name"])
text_got = r.render("Hello {{planet()}}")
text_want = "Hello Earth"
assert text_got == text_want
file_got = r.render("tests/files/partial_planet.sql")
file_want = "Hello Earth, Goodbye Earth"
assert file_got == file_want
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/tests/test_partial_kwargs.py | test_partial_kwargs.py |
from absql.text import clean_spacing, create_replacements
def test_clean_spaces():
text = "{{ hello }} world"
got = clean_spacing(text)
want = "{{ hello }} world"
assert got == want
def test_clean_spaces_2():
want = "{{ hello }} world"
text = "{{hello }} world"
got = clean_spacing(text)
assert got == want
text = "{{ hello}} world"
got = clean_spacing(text)
assert got == want
def test_clean_tabs():
text = "{{ hello }} world"
got = clean_spacing(text)
want = "{{ hello }} world"
assert got == want
def test_replacements():
got = create_replacements(foo="bar")
want = {"{{foo}}": "bar", "{{ foo }}": "bar"}
assert got == want
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/tests/test_text.py | test_text.py |
import pytest
import mock
import os
from absql import Runner
@pytest.fixture(autouse=True)
def mock_settings_env_vars():
with mock.patch.dict(os.environ, {"name": "Bob"}):
yield
@pytest.fixture()
def runner():
def double_it(x):
return x + x
return Runner(extra_constructors={"double_it": double_it}, greeting="Hello")
@pytest.fixture()
def contextless_runner():
return Runner()
@pytest.fixture
def simple_sql_path():
return "tests/files/simple.sql"
@pytest.fixture
def no_frontmatter_path():
return "tests/files/no_frontmatter.sql"
def test_render_simple_sql(runner, simple_sql_path):
sql = runner.render(simple_sql_path)
assert sql == "SELECT * FROM my_table"
def test_render_no_frontmatter(runner, no_frontmatter_path):
sql = runner.render(no_frontmatter_path)
assert sql == "SELECT * FROM Hello"
def test_render_text_only(runner):
got = runner.render("{{greeting}}, {{env_var('name')}}!")
want = "Hello, Bob!"
assert got == want
def test_render_extra_context(runner):
got = runner.render("{{new_greeting}}, {{env_var('name')}}!", new_greeting="Hey")
want = "Hey, Bob!"
assert got == want
def test_contextless_runner(contextless_runner):
got = contextless_runner.render("{{no_greeting}}, Bill!")
want = "{{ no_greeting }}, Bill!"
assert got == want
def test_replace_only_changes(runner):
original = runner.render("{{env_switch(foo='address')}} and {{greeting}}")
assert original == "value_unspecified and Hello"
replaced_only = runner.render(
"{{env_switch(foo='address')}} and {{greeting}}", replace_only=True
)
assert replaced_only == "{{ env_switch(foo='address') }} and Hello"
original_2 = runner.render("{{env_switch(foo='address')}} and {{greeting}}")
assert original_2 == "value_unspecified and Hello"
def test_runner_renders_yaml(runner):
got = runner.render("tests/files/constructor.yml")
want = "SELECT * FROM tabletable"
assert got == want
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/tests/test_render.py | test_render.py |
import pytest
import datetime
from absql import Runner
from copy import copy
@pytest.fixture(scope="function")
def r_original():
# dt because I am scared to copy modules
return Runner(f="foo", dt=datetime)
@pytest.fixture(scope="function")
def r_copy(r_original):
return copy(r_original)
def test_copy_set_context(r_original, r_copy):
r_copy.set_context(f="bar")
assert r_original.render("{{f}}") == "foo"
assert r_copy.render("{{f}}") == "bar"
def test_copy_set_attr(r_original, r_copy):
r_original.partial_kwargs = ["a"]
r_copy.partial_kwargs = ["b"]
assert r_original.partial_kwargs == ["a"]
assert r_copy.partial_kwargs == ["b"]
r_copy.extra_context = {"f": "buzz"}
assert r_original.render("{{f}}") == "foo"
assert r_copy.render("{{f}}") == "buzz"
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/tests/test_copy.py | test_copy.py |
from absql import Runner
def test_loader_always_new():
runner_a = Runner(extra_constructors={"f": lambda: "a"})
runner_a_want = "SELECT * FROM a"
render_a_got_1 = runner_a.render("tests/files/loader_test.sql")
assert render_a_got_1 == runner_a_want
runner_b = Runner(extra_constructors={"f": lambda: "b"})
runner_b_want = "SELECT * FROM b"
runner_b_got = runner_b.render("tests/files/loader_test.sql")
assert runner_b_got == runner_b_want
render_a_got_2 = runner_a.render("tests/files/loader_test.sql")
assert render_a_got_2 == runner_a_want
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/tests/test_loader.py | test_loader.py |
import pytest
from absql.files import parse
@pytest.fixture
def simple_sql_path():
return "tests/files/simple.sql"
@pytest.fixture
def simple_yml_path():
return "tests/files/simple.yml"
def test_simple_sql(simple_sql_path):
res = parse(simple_sql_path)
assert "sql" in res.keys()
assert "my_table_placeholder" in res.keys()
assert res["my_table_placeholder"] == "my_table"
assert res["sql"] == "SELECT * FROM {{my_table_placeholder}}"
def test_simple_yml(simple_yml_path):
res = parse(simple_yml_path)
assert "sql" in res.keys()
assert "my_table_placeholder" in res.keys()
assert res["my_table_placeholder"] == "my_table"
assert res["sql"] == "SELECT * FROM {{my_table_placeholder}}"
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/tests/test_parse_file.py | test_parse_file.py |
import pytest
import mock
import os
from absql import Runner as r
from absql.functions.time import previous_date
@pytest.fixture(autouse=True)
def mock_settings_env_vars():
with mock.patch.dict(
os.environ,
{"name": "Bob", "ENV": "dev", "my_env_var": "foo", "my_timezone": "EST"},
):
yield
@pytest.fixture
def extra_sql_path():
return "tests/files/extra.sql"
@pytest.fixture
def simple_sql_path():
return "tests/files/simple.sql"
@pytest.fixture
def simple_yml_path():
return "tests/files/simple.yml"
@pytest.fixture
def constructor_sql_path():
return "tests/files/constructor.sql"
@pytest.fixture
def constructor_plus_function_sql_path():
return "tests/files/constructors_plus_functions.sql"
@pytest.fixture
def jinja_frontmatter_path():
return "tests/files/jinja_frontmatter.sql"
@pytest.fixture
def nested_constructors_path():
return "tests/files/nested_constructors.sql"
def test_nested_constructors(nested_constructors_path):
sql = r.render_file(nested_constructors_path)
assert sql == "SELECT * FROM foo WHERE bar AND {previous}".format(
previous=previous_date("EST")
)
def test_render_simple_sql(simple_sql_path):
sql = r.render_file(simple_sql_path)
assert sql == "SELECT * FROM my_table"
def test_render_simple_yml(simple_yml_path):
sql = r.render_file(simple_yml_path)
assert sql == "SELECT * FROM my_table"
def test_render_additional_sql(extra_sql_path):
sql = r.render_file(extra_sql_path, extra="my_extra_context")
assert sql == "SELECT * FROM my_table WHERE my_extra_context"
def test_render_constructor_sql(constructor_sql_path):
def add(*nums):
x = 0
for n in nums:
x += n
return x
def first(**kwargs):
for i in range(len(kwargs)):
if i == 0:
return kwargs[list(kwargs.keys())[0]]
def get_table():
return "my_constructor_table"
runner = r(extra_constructors=[get_table, add, first])
sql = runner.render(constructor_sql_path)
assert sql == "SELECT * FROM my_constructor_table WHERE '6' and 'this'"
def test_render_jinja_frontmatter(jinja_frontmatter_path):
def provide_table():
return "my_func_table"
sql = r.render_file(jinja_frontmatter_path, get_table=provide_table)
assert sql == "SELECT * FROM my_func_table WHERE name = 'Bob'"
def test_render_jinja_frontmatter_instantiated(jinja_frontmatter_path):
def provide_table():
return "my_func_table"
runner = r(get_table=provide_table)
sql = runner.render(jinja_frontmatter_path)
assert sql == "SELECT * FROM my_func_table WHERE name = 'Bob'"
def test_var_dict(extra_sql_path):
want = "SELECT * FROM my_table WHERE something different!"
got = r.render_file(extra_sql_path, file_context_from="my_var_dict")
assert got == want
runner = r(file_context_from="my_var_dict")
got = runner.render(extra_sql_path)
assert got == want
def test_constructor_plus_function(constructor_plus_function_sql_path):
got = r.render_file(constructor_plus_function_sql_path)
want = "SELECT * FROM somewhere_else"
assert got == want
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/tests/test_render_file.py | test_render_file.py |
from absql.utils import (
nested_apply,
get_function_arg_names,
partialize_function,
load_body,
)
def test_simple_apply():
d = {"a": "a", "b": "b", "c": 5}
got = nested_apply(d, lambda x: x + "zzz")
want = {"a": "azzz", "b": "bzzz", "c": 5}
assert got == want
def test_nested_list():
d = {"a": "a", "b": "b", "c": [5, "d"]}
got = nested_apply(d, lambda x: x + "zzz")
want = {"a": "azzz", "b": "bzzz", "c": [5, "dzzz"]}
assert got == want
def test_nested_dict():
d = {"a": "a", "b": "b", "c": {5: "d"}}
got = nested_apply(d, lambda x: x + "zzz")
want = {"a": "azzz", "b": "bzzz", "c": {5: "dzzz"}}
assert got == want
def test_get_function_arg_names():
def func(a, b):
return a + b
got = get_function_arg_names(func)
want = ["a", "b"]
assert got == want
def test_partialize_function():
def simple_func(a, engine):
return a + engine
simple_func = partialize_function(simple_func, engine=7)
got = simple_func(3)
want = 10
assert got == want
def test_load_body_frontmatter():
assert (
load_body("tests/files/simple.sql")
== "\nSELECT * FROM {{my_table_placeholder}}\n"
)
def test_load_body_no_frontmatter():
assert (
load_body("tests/files/no_frontmatter.sql")
== "{% set cols='*' %}\n\nSELECT {{cols}} FROM {{greeting}}\n"
)
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/tests/test_utils.py | test_utils.py |
from absql import Runner as r
def test_render_text():
template = "SELECT * FROM {{my_table_placeholder}}"
result = r.render_text(template, my_table_placeholder="my_table")
assert result == "SELECT * FROM my_table"
def test_leave_unknown_vars_alone():
template = "SELECT * FROM {{my_table_placeholder}} WHERE date = '{{my_date}}'"
result = r.render_text(template, my_table_placeholder="my_table")
assert result == "SELECT * FROM my_table WHERE date = '{{ my_date }}'"
# Would be nice to just use jinja for this
# https://stackoverflow.com/questions/71374498/ignore-unknown-functions-in-jinja2
def test_leave_unknown_functions_alone():
template = "SELECT * FROM {{my_table_placeholder}} WHERE date = '{{ get_date() }}'"
result = r.render_text(template, my_table_placeholder="my_table", replace_only=True)
assert result == "SELECT * FROM my_table WHERE date = '{{ get_date() }}'"
def test_nested_replace():
context = {"params": {"foo": "bar", "biz": "baz"}}
template = "{{ params.foo }} and {{ params.biz }}"
got = r.render_text(template, replace_only=True, **context)
want = "bar and baz"
assert got == want
def test_pretty_encode():
got = r.render_text(
"""SELECT * FROM "my_schema"."my_table" WHERE my_col = 'my_value'""",
pretty_encode=True,
)
want = """\x1b[1m\x1b[96mSELECT\x1b[0m * \x1b[1m\x1b[96mFROM\x1b[0m \x1b[95m"my_schema"\x1b[39m.\x1b[95m"my_table"\x1b[39m \x1b[1m\x1b[96mWHERE\x1b[0m my_col = \x1b[95m\'my_value\'\x1b[39m""" # noqa
assert got == want
def test_filters():
def alter_text(x, upper=True):
if upper:
return x.upper()
return x.lower()
you = "yOu"
upper_want = "hey YOU"
upper_got = r.render_text(
"hey {{person | case_it}}", person=you, case_it=alter_text
)
assert upper_got == upper_want
lower_want = "hey you"
lower_got = r.render_text(
"hey {{person | case_it(upper=False)}}", person=you, case_it=alter_text
)
assert lower_want == lower_got
partial_want = "hey you"
partial_got = r.render_text(
"hey {{person | case_it}}",
person=you,
case_it=alter_text,
upper=False,
partial_kwargs=["upper"],
)
assert partial_want == partial_got
pipeline_want = "hey YOU"
pipeline_got = r.render_text(
"hey {{person | case_it(upper=False) | case_it}}",
person=you,
case_it=alter_text,
)
assert pipeline_want == pipeline_got
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/tests/test_render_text.py | test_render_text.py |
from absql.files import accepted_file_types
from absql.files.loader import generate_loader
from absql.render import render_text, render_context, render_file
class Runner:
def __init__(
self,
extra_constructors=None,
replace_only=False,
file_context_from=None,
partial_kwargs=None,
**extra_context,
):
self.extra_context = dict(extra_context)
self.loader = generate_loader(extra_constructors or [])
self.replace_only = replace_only
self.file_context_from = file_context_from
self.partial_kwargs = partial_kwargs or ["engine"]
@staticmethod
def render_text(
text, replace_only=False, pretty_encode=False, partial_kwargs=None, **vars
):
return render_text(
text=text,
replace_only=replace_only,
pretty_encode=pretty_encode,
partial_kwargs=partial_kwargs,
**vars,
)
@staticmethod
def render_context(extra_context=None, file_contents=None, partial_kwargs=None):
return render_context(
extra_context=extra_context,
file_contents=file_contents,
partial_kwargs=partial_kwargs,
)
@staticmethod
def render_file(
file_path,
loader=None,
replace_only=False,
extra_constructors=None,
file_context_from=None,
pretty_encode=False,
partial_kwargs=None,
**extra_context,
):
return render_file(
file_path=file_path,
loader=loader,
replace_only=replace_only,
extra_constructors=extra_constructors,
file_context_from=file_context_from,
pretty_encode=pretty_encode,
partial_kwargs=partial_kwargs,
**extra_context,
)
def render(self, text, pretty_encode=False, replace_only=None, **extra_context):
"""
        Given text or a file path, render SQL with a combination of
the vars in the file and any extras passed to extra_context during
the instantiation of the runner.
"""
current_context = self.extra_context.copy()
current_context.update(extra_context)
if text.endswith(accepted_file_types):
rendered = render_file(
file_path=text,
loader=self.loader,
                replace_only=self.replace_only if replace_only is None else replace_only,
file_context_from=self.file_context_from,
pretty_encode=pretty_encode,
partial_kwargs=self.partial_kwargs,
**current_context,
)
else:
rendered = render_text(
text=text,
                replace_only=self.replace_only if replace_only is None else replace_only,
pretty_encode=pretty_encode,
partial_kwargs=self.partial_kwargs,
**render_context(current_context, partial_kwargs=self.partial_kwargs),
)
return rendered
def set_context(self, **context):
self.extra_context = self.extra_context.copy()
self.extra_context.update(context)
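# A minimal usage sketch (mirroring the package's test suite; `greeting` is an
# illustrative context variable):
#     runner = Runner(greeting="Hello")
#     runner.render("{{ greeting }}, world!")  # -> "Hello, world!"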
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/absql/__init__.py | __init__.py |
import yaml
from absql.functions import default_constructors
def scalar_to_value(scalar, constructor_dict):
"""
Converts a YAML ScalarNode to its underlying Python value
"""
type = scalar.tag.split(":")[-1]
val = scalar.value
if isinstance(scalar, yaml.MappingNode):
val = node_converter(scalar, constructor_dict)
func = constructor_dict.get(type)
return func(**val)
if isinstance(scalar, yaml.SequenceNode):
val = node_converter(scalar, constructor_dict)
func = constructor_dict.get(type)
return func(*val)
if type.startswith("!"):
func = constructor_dict.get(type)
return func(val)
# Handle null type - https://yaml.org/type/null.html
if type == "null":
return None
return eval('{type}("""{val}""")'.format(type=type, val=val))
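# For example, a scalar tagged 'tag:yaml.org,2002:int' with value '5' falls
# through to the eval() branch above and resolves to int("""5""") == 5.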
def node_converter(x, constructor_dict):
"""
Converts YAML nodes of varying types into Python values,
lists, and dictionaries
"""
if isinstance(x, yaml.ScalarNode):
# "I am an atomic value"
return yaml.load(x.value, yaml.SafeLoader)
if isinstance(x, yaml.SequenceNode):
# "I am a list"
return [scalar_to_value(v, constructor_dict) for v in x.value]
if isinstance(x, yaml.MappingNode):
# "I am a dict"
return {
scalar_to_value(v[0], constructor_dict): scalar_to_value(
v[1], constructor_dict
)
for v in x.value
}
def wrap_yaml(func, constructor_dict):
"""Turn a function into one that can be run on a YAML input"""
def ret(loader, x):
value = node_converter(x, constructor_dict)
if value is not None:
if isinstance(value, list):
return func(*value)
if isinstance(value, dict):
return func(**value)
return func(value)
else:
return func()
return ret
def generate_loader(extra_constructors=None):
"""Generates a SafeLoader with both default and custom constructors"""
class Loader(yaml.SafeLoader):
# ensures a new Loader is returned
# every time the function is called
pass
extra_constructors = extra_constructors or []
unchecked_constructors = default_constructors.copy()
if isinstance(extra_constructors, list) and len(extra_constructors) > 0:
extra_constructors = {
("!" + func.__name__): func for func in extra_constructors
}
if len(extra_constructors) > 0:
unchecked_constructors.update(extra_constructors)
# Ensure all tags start with "!"
checked_constructors = {}
for tag, func in unchecked_constructors.items():
if not tag.startswith("!"):
tag = "!" + tag
checked_constructors[tag] = func
for tag, func in checked_constructors.items():
Loader.add_constructor(tag, wrap_yaml(func, checked_constructors))
return Loader
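# A minimal sketch of registering a custom constructor (`my_table` is a
# hypothetical function):
#     def my_table():
#         return "my_schema.my_table"
#     loader = generate_loader([my_table])
#     yaml.load("table: !my_table", Loader=loader)
#     # -> {'table': 'my_schema.my_table'}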
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/absql/files/loader.py | loader.py |
import re
import yaml
from absql.files.loader import generate_loader
FM_BOUNDARY = re.compile(r"^-{3,}\s*$", re.MULTILINE)
def frontmatter_load(file_path, loader=None):
"""
Loads YAML frontmatter. Expects a YAML block at the top of the file
    that starts and ends with "---". Used instead of frontmatter.load so
    that custom constructors (via PyYAML) can be applied uniformly across
all file types.
"""
if loader is None:
loader = generate_loader()
with open(file_path, "r") as file:
text = "".join(file.readlines())
if text.startswith("---"):
_, metadata, content = FM_BOUNDARY.split(text, 2)
metadata = yaml.load(metadata, Loader=loader)
content = content.strip("\n")
elif text.startswith("{"):
tmp_header = "/*ABSQLQSBA*/ "
text = tmp_header + text
metadata = {}
content = yaml.load(text, Loader=loader)
content = content.replace(tmp_header, "")
else:
metadata = {}
content = yaml.load(text, Loader=loader)
return {"metadata": metadata, "content": content}
def parse_generic(file_path, loader=None):
if loader is None:
loader = generate_loader()
raw_content = frontmatter_load(file_path, loader=loader)
file_content = raw_content["metadata"] or raw_content["content"]
return file_content
def parse_sql(file_path, loader=None):
if loader is None:
loader = generate_loader()
raw_content = frontmatter_load(file_path, loader=loader)
file_content = raw_content["metadata"]
file_content["sql"] = raw_content["content"]
return file_content
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/absql/files/parsers.py | parsers.py |
import os
from absql.utils import get_function_arg_names
from absql.files.loader import generate_loader
from absql.files.parsers import parse_generic, parse_sql
default_parsers = {
".yml": parse_generic,
".yaml": parse_generic,
".sql": parse_sql,
}
accepted_file_types = tuple(default_parsers.keys())
def parse(file_path, parse_dict=default_parsers, loader=None):
"""
Load a file.
"""
if loader is None:
loader = generate_loader()
path, extension = os.path.splitext(file_path)
parser = parse_dict[extension]
if "loader" in get_function_arg_names(parser):
file_parsed = parser(file_path, loader=loader)
else:
file_parsed = parser(file_path)
return file_parsed
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/absql/files/__init__.py | __init__.py |
from pendulum import now
from datetime import timedelta
def previous_date(tz="utc", days=1):
return (now(tz=tz) - timedelta(days=days)).to_date_string()
def previous_hour(tz="utc", hours=1, days=0):
return (
(now(tz=tz) - timedelta(hours=hours, days=days))
.replace(minute=0, second=0)
.to_datetime_string()
)
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/absql/functions/time.py | time.py |
from absql.functions.env import env_var
from sqlalchemy import create_engine, inspect, text
from sqlalchemy.engine.base import Engine
def table_exists(table_location, engine_env="AB__URI", engine=None):
table_parts = split_parts(table_location)
engine = (
handle_engine(env_var(engine_env)) if engine is None else handle_engine(engine)
)
if hasattr(engine, "has_table"):
return engine.has_table(
table_name=table_parts["target"], schema=table_parts["namespace"]
)
    else:
        # Newer SQLAlchemy versions expose this through an Inspector.
        return inspect(engine).has_table(
            table_name=table_parts["target"], schema=table_parts["namespace"]
        )
def query_db(query, engine_env="AB__URI", engine=None):
engine = (
handle_engine(env_var(engine_env)) if engine is None else handle_engine(engine)
)
with engine.connect() as connection:
return connection.execute(text(query)).fetchall()
def handle_engine(engine):
if isinstance(engine, Engine):
return engine
else:
return create_engine(engine)
def get_max_value(field_location, engine_env="AB__URI", engine=None):
field_parts = split_parts(field_location)
query = "SELECT MAX({field}) AS value FROM {table}".format(
field=field_parts["target"], table=field_parts["namespace"]
)
try:
return query_db(query, engine_env, engine)[0].value
except Exception:
return None
def get_min_value(field_location, engine_env="AB__URI", engine=None):
field_parts = split_parts(field_location)
query = "SELECT MIN({field}) AS value FROM {table}".format(
field=field_parts["target"], table=field_parts["namespace"]
)
try:
return query_db(query, engine_env, engine)[0].value
except Exception:
return None
def split_parts(location):
parts = {}
location_parts = location.split(".")
target = location_parts[-1]
namespace = (
None
if len(location_parts) == 1
else ".".join(location_parts[: len(location_parts) - 1])
)
parts["target"] = target
parts["namespace"] = namespace
return parts
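# For example, split_parts('my_schema.my_table') returns
# {'target': 'my_table', 'namespace': 'my_schema'}, and split_parts('my_table')
# returns {'target': 'my_table', 'namespace': None}.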
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/absql/functions/db.py | db.py |
from .env import env_var, env_switch
from .db import table_exists, query_db, get_max_value, get_min_value
from .time import previous_date, previous_hour
from datetime import datetime, timedelta
default_functions = {
"datetime": datetime,
"env_switch": env_switch,
"env_var": env_var,
"get_max_value": get_max_value,
"get_min_value": get_min_value,
"previous_date": previous_date,
"previous_hour": previous_hour,
"table_exists": table_exists,
"timedelta": timedelta,
"query_db": query_db,
}
default_constructors = {
"!" + k: v
for k, v in default_functions.items()
if k
in (
"datetime",
"env_switch",
"env_var",
"previous_date",
"previous_hour",
"timedelta",
)
}
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/absql/functions/__init__.py | __init__.py |
import os
def env_var(var, default=None):
return os.environ.get(var, default)
def env_switch(default=None, **kwargs):
value_unspecified = "value_unspecified"
if default is None:
default = value_unspecified
# What is the environment variable that
# tells us what environment we are in?
env_env_var = env_var("AB__ENV", "ENV")
# What environment are we in?
env = env_var(env_env_var, value_unspecified)
# What is the value that corresponds to the environment?
value = kwargs.get(env, default)
return value
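# For example, with the environment variable ENV set to 'dev' (and AB__ENV
# unset), env_switch(dev='dev_db', prod='prod_db') returns 'dev_db'.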
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/absql/functions/env.py | env.py |
from inspect import cleandoc
from jinja2 import Environment, DebugUndefined
from absql.text import (
clean_spacing,
create_replacements,
flatten_inputs,
pretty_encode_sql,
)
from absql.files import parse
from absql.files.loader import generate_loader
from absql.functions import default_functions
from absql.utils import nested_apply, partialize_function
def render_text(
text, replace_only=False, pretty_encode=False, partial_kwargs=None, **vars
):
"""
Given some text, render the template with the vars.
If a templated variable is unknown, leave it alone.
"""
if replace_only:
text = clean_spacing(text)
flat_vars = flatten_inputs(**vars)
replacements = create_replacements(**flat_vars)
for k, v in replacements.items():
text = text.replace(k, str(v))
text = cleandoc(text)
else:
env = Environment(undefined=DebugUndefined)
for k, v in vars.items():
if v.__class__.__name__ == "function":
vars[k] = partialize_function(v, partial_kwargs=partial_kwargs, **vars)
env.filters[k] = vars[k]
template = env.from_string(text)
text = cleandoc(template.render(**vars))
if pretty_encode:
return pretty_encode_sql(text)
else:
return text
def render_context(extra_context=None, file_contents=None, partial_kwargs=None):
"""
Render context dictionaries passed through a function call or
file frontmatter (file_contents), with file_contents taking
    precedence over all other provided context.
"""
rendered_context = default_functions.copy()
if extra_context:
rendered_context.update(**extra_context)
if file_contents:
rendered_context.update(**file_contents)
rendered_context = nested_apply(
rendered_context,
lambda x: render_text(x, partial_kwargs=partial_kwargs, **rendered_context),
)
return rendered_context
def render_file(
file_path,
loader=None,
replace_only=False,
extra_constructors=None,
file_context_from=None,
pretty_encode=False,
partial_kwargs=None,
**extra_context,
):
"""
Given a file path, render SQL with a combination of
the vars in the file and any extras passed to extra_context.
"""
if loader is None:
loader = generate_loader(extra_constructors or [])
file_contents = parse(file_path, loader=loader)
sql = file_contents["sql"]
file_contents.pop("sql")
if file_context_from:
file_contents.update(file_contents.get(file_context_from, {}))
file_contents.pop(file_context_from, {})
rendered_context = render_context(extra_context, file_contents, partial_kwargs)
rendered = render_text(
text=sql,
replace_only=replace_only,
pretty_encode=pretty_encode,
partial_kwargs=partial_kwargs,
**rendered_context,
)
return rendered
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/absql/render/__init__.py | __init__.py |
from absql.files.parsers import FM_BOUNDARY
from inspect import signature
from functools import partial
def nested_apply(x, f):
if x.__class__.__name__ in ["dict", "list", "str"]:
if isinstance(x, str):
x = f(x)
elif isinstance(x, list):
x = [nested_apply(i, f) for i in x]
elif isinstance(x, dict):
for k in x:
x[k] = nested_apply(x[k], f)
else:
return x
return x
def get_function_arg_names(func):
return list(signature(func).parameters.keys())
def partialize_function(func, partial_kwargs=None, **kwargs):
partial_kwargs = partial_kwargs or ["engine"]
function_args = get_function_arg_names(func)
kwargs_to_partialize = {
k: v for k, v in kwargs.items() if k in function_args and k in partial_kwargs
}
if kwargs_to_partialize:
return partial(func, **kwargs_to_partialize)
else:
return func
def load_body(file_path):
with open(file_path, "r") as file:
text = "".join(file.readlines())
if text.startswith("---"):
_, metadata, content = FM_BOUNDARY.split(text, 2)
return content
else:
return text
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/absql/utils/__init__.py | __init__.py |
import re
from flatdict import FlatDict
from sql_metadata import Parser
from colorama import Fore
def clean_spacing(text):
    text = re.sub(r"\{\{", "{{ ", text)
    text = re.sub(r"\}\}", " }}", text)
    text = re.sub(r"\{\{\s+", "{{ ", text)
    text = re.sub(r"\s+\}\}", " }}", text)
return text
def create_replacements(**kwargs):
replacements = {}
for k, v in kwargs.items():
replacement = {"{{{{{k}}}}}".format(k=k): v, "{{{{ {k} }}}}".format(k=k): v}
replacements.update(replacement)
return replacements
def flatten_inputs(**kwargs):
flattened = FlatDict(kwargs, delimiter=".")
return flattened
def pretty_encode_sql(
query, keyword_color=Fore.LIGHTCYAN_EX, quote_color=Fore.LIGHTMAGENTA_EX
):
p = Parser(query)
keywords = list(set([t.value for t in p.tokens if t.is_keyword]))
bold_start = "\033[1m"
bold_end = "\033[0m"
replacements = {k: bold_start + keyword_color + k + bold_end for k in keywords}
for keyword, formatted_keyword in replacements.items():
query = re.sub(r"\b{k}\b".format(k=keyword), formatted_keyword, query)
quotes = [
        # single quotes
"'{t}'".format(t=text)
for text in list(set(re.findall("'([^']*)'", query)))
] + [
# double quotes
'"{t}"'.format(t=text)
for text in list(set(re.findall('"([^"]*)"', query)))
]
replacements = {q: quote_color + q + Fore.RESET for q in quotes}
for quote, formatted_quote in replacements.items():
query = re.sub(quote, formatted_quote, query)
return query
| ABSQL | /ABSQL-0.4.2-py3-none-any.whl/absql/text/__init__.py | __init__.py |
from typing import Optional
from sdk.context_event_logger import ContextEventLogger
class ContextConfig:
refresh_interval: int = 50
publish_delay: int = 50 # seconds
event_logger: Optional[ContextEventLogger] = None
    cassignments: Optional[dict] = None
    overrides: Optional[dict] = None
    attributes: Optional[dict] = None
    units: Optional[dict] = None
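# A minimal configuration sketch (unit and attribute values are hypothetical;
# Context reads these fields in its constructor):
#     config = ContextConfig()
#     config.units = {"session_id": "abc123"}
#     config.attributes = {"user_agent": "Chrome"}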
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/context_config.py | context_config.py |
import base64
import collections
import hashlib
import threading
from concurrent.futures import Future
from typing import Optional
from sdk.audience_matcher import AudienceMatcher
from sdk.context_config import ContextConfig
from sdk.context_data_provider import ContextDataProvider
from sdk.context_event_handler import ContextEventHandler
from sdk.context_event_logger import ContextEventLogger, EventType
from sdk.internal.lock.atomic_bool import AtomicBool
from sdk.internal.lock.atomic_int import AtomicInt
from sdk.internal.lock.concurrency import Concurrency
from sdk.internal.lock.read_write_lock import ReadWriteLock
from sdk.internal.variant_assigner import VariantAssigner
from sdk.json.attribute import Attribute
from sdk.json.context_data import ContextData
from sdk.json.experiment import Experiment
from sdk.json.exposure import Exposure
from sdk.json.goal_achievement import GoalAchievement
from sdk.json.publish_event import PublishEvent
from sdk.json.unit import Unit
from sdk.time.clock import Clock
from sdk.variable_parser import VariableParser
class Assignment:
def __init__(self):
self.id: Optional[int] = 0
self.iteration: Optional[int] = 0
self.full_on_variant: Optional[int] = 0
self.name: Optional[str] = None
self.unit_type: Optional[str] = None
self.traffic_split: list[int] = []
self.variant: Optional[int] = 0
self.assigned: Optional[bool] = False
self.overridden: Optional[bool] = False
self.eligible: Optional[bool] = False
self.full_on: Optional[bool] = False
self.custom: Optional[bool] = False
self.audience_mismatch: Optional[bool] = False
self.variables: dict = {}
self.exposed = AtomicBool()
class ExperimentVariables:
data: Optional[Experiment]
variables: Optional[list[dict]]
def experiment_matches(experiment: Experiment, assignment: Assignment):
return experiment.id == assignment.id and \
experiment.unitType == assignment.unit_type and \
experiment.iteration == assignment.iteration and \
experiment.fullOnVariant == assignment.full_on_variant and \
collections.Counter(experiment.trafficSplit) == \
collections.Counter(assignment.traffic_split)
class Context:
def __init__(self,
clock: Clock, config: ContextConfig,
data_future: Future, data_provider: ContextDataProvider,
event_handler: ContextEventHandler,
event_logger: ContextEventLogger,
variable_parser: VariableParser,
audience_matcher: AudienceMatcher):
self.clock = clock
self.publish_delay = config.publish_delay
self.refresh_interval = config.refresh_interval
self.event_handler = event_handler
self.event_logger = event_logger
self.data_provider = data_provider
self.variable_parser = variable_parser
self.audience_matcher = audience_matcher
self.units = {}
self.index = {}
self.index_variables = {}
self.assignment_cache = {}
self.cassignments = {}
self.overrides = {}
self.exposures = []
self.achievements = []
self.data: Optional[ContextData] = None
self.failed = False
self.closed = AtomicBool()
self.closing = AtomicBool()
self.refreshing = AtomicBool()
self.pending_count = AtomicInt()
self.context_lock = ReadWriteLock()
self.data_lock = ReadWriteLock()
self.timeout_lock = ReadWriteLock()
self.event_lock = ReadWriteLock()
self.refresh_future: Optional[Future] = None
self.closing_future: Optional[Future] = None
self.refresh_timer: Optional[threading.Timer] = None
self.timeout: Optional[threading.Timer] = None
if config.units is not None:
self.set_units(config.units)
self.assigners = dict.fromkeys((range(len(self.units))))
self.hashed_units = dict.fromkeys((range(len(self.units))))
self.attributes: list[Attribute] = []
if config.attributes is not None:
self.set_attributes(config.attributes)
if config.overrides is not None:
self.overrides = dict(config.overrides)
else:
self.overrides = {}
        if config.cassignments is not None:
            self.cassignments = dict(config.cassignments)
else:
self.cassignments = {}
if data_future.done():
def when_finished(data: Future):
if data.done() and data.cancelled() is False and \
data.exception() is None:
self.set_data(data.result())
self.log_event(EventType.READY, data.result())
elif data.cancelled() is False and \
data.exception() is not None:
self.set_data_failed(data.exception())
self.log_error(data.exception())
data_future.add_done_callback(when_finished)
else:
self.ready_future = Future()
def when_finished(data: Future):
if data.done() and data.cancelled() is False and \
data.exception() is None:
self.set_data(data.result())
self.ready_future.set_result(None)
self.ready_future = None
self.log_event(EventType.READY, data.result())
if self.get_pending_count() > 0:
self.set_timeout()
elif data.cancelled() is False and \
data.exception() is not None:
self.set_data_failed(data.exception())
self.ready_future.set_result(None)
self.ready_future = None
self.log_error(data.exception())
data_future.add_done_callback(when_finished)
def set_units(self, units: dict):
for key, value in units.items():
self.set_unit(key, value)
def set_unit(self, unit_type: str, uid: str):
self.check_not_closed()
try:
self.context_lock.acquire_write()
if unit_type in self.units.keys() and self.units[unit_type] != uid:
raise ValueError("Unit already set.")
trimmed = uid.strip()
if len(trimmed) == 0:
raise ValueError("Unit UID must not be blank.")
self.units[unit_type] = trimmed
finally:
self.context_lock.release_write()
def set_attributes(self, attributes: dict):
for key, value in attributes.items():
self.set_attribute(key, value)
def set_attribute(self, name: str, value: object):
self.check_not_closed()
attribute = Attribute()
attribute.name = name
attribute.value = value
attribute.setAt = self.clock.millis()
Concurrency.add_rw(self.context_lock, self.attributes, attribute)
def check_not_closed(self):
if self.closed.value:
raise RuntimeError('ABSmartly Context is closed')
elif self.closing.value:
raise RuntimeError('ABSmartly Context is closing')
def set_data(self, data: ContextData):
index = {}
index_variables = {}
for experiment in data.experiments:
experiment_variables = ExperimentVariables()
experiment_variables.data = experiment
experiment_variables.variables = []
for variant in experiment.variants:
if variant.config is not None and len(variant.config) > 0:
variables = self.variable_parser.parse(
self,
experiment.name,
variant.name,
variant.config)
for key, value in variables.items():
index_variables[key] = experiment_variables
experiment_variables.variables.append(variables)
else:
experiment_variables.variables.append({})
index[experiment.name] = experiment_variables
try:
self.data_lock.acquire_write()
self.index = index
self.index_variables = index_variables
self.data = data
self.set_refresh_timer()
finally:
self.data_lock.release_write()
def set_refresh_timer(self):
if self.refresh_interval > 0 and self.refresh_timer is None and not self.is_closing() and not self.is_closed():
def ref():
self.refresh_async()
self.refresh_timer = threading.Timer(
self.refresh_interval,
ref)
self.refresh_timer.start()
def set_timeout(self):
if self.is_ready():
if self.timeout is None:
try:
self.timeout_lock.acquire_write()
def flush():
self.flush()
self.timeout = threading.Timer(self.publish_delay, flush)
self.timeout.start()
finally:
self.timeout_lock.release_write()
def is_ready(self):
return self.data is not None
def is_failed(self):
return self.failed
def is_closed(self):
return self.closed.value
def is_closing(self):
return not self.closed.value and self.closing.value
def refresh_async(self):
self.check_not_closed()
if self.refreshing.compare_and_set(False, True):
self.refresh_future = Future()
def when_ready(data):
if data.done() and data.cancelled() is False and \
data.exception() is None:
self.set_data(data.result())
self.refreshing.set(False)
self.refresh_future.set_result(None)
self.log_event(EventType.REFRESH, data.result())
elif data.cancelled() is False and \
data.exception() is not None:
self.refreshing.set(False)
self.refresh_future.set_exception(data.exception())
self.log_error(data.exception())
self.data_provider\
.get_context_data()\
.add_done_callback(when_ready)
if self.refresh_future is not None:
return self.refresh_future
else:
result = Future()
result.set_result(None)
return result
def set_data_failed(self, exception):
try:
self.data_lock.acquire_write()
self.index = {}
self.index_variables = {}
self.data = ContextData()
self.failed = True
finally:
self.data_lock.release_write()
def log_error(self, exception):
if self.event_logger is not None:
self.event_logger.handle_event(EventType.ERROR, exception)
def log_event(self, event: EventType, data: object):
if self.event_logger is not None:
self.event_logger.handle_event(event, data)
def get_pending_count(self):
return self.pending_count.get()
def flush(self):
self.clear_timeout()
if self.failed is False:
if self.pending_count.get() > 0:
exposures = None
achievements = None
event_count = 0
try:
self.event_lock.acquire_write()
event_count = self.pending_count.get()
if event_count > 0:
if len(self.exposures) > 0:
exposures = list(self.exposures)
self.exposures.clear()
if len(self.achievements) > 0:
achievements = list(self.achievements)
self.achievements.clear()
self.pending_count.set(0)
finally:
self.event_lock.release_write()
if event_count > 0:
event = PublishEvent()
event.hashed = True
event.publishedAt = self.clock.millis()
event.units = []
for key, value in self.units.items():
unit = Unit()
unit.type = key
                        unit.uid = self.get_unit_hash(key, value).decode('ascii')
event.units.append(unit)
if len(self.attributes) > 0:
event.attributes = list(self.attributes)
else:
event.attributes = None
event.exposures = exposures
event.goals = achievements
result = Future()
def run(data):
if data.done() and \
data.cancelled() is False and \
data.exception() is None:
self.log_event(EventType.PUBLISH, event)
result.set_result(None)
elif data.cancelled() is False and \
data.exception() is not None:
self.log_error(data.exception())
result.set_exception(data.exception())
self.event_handler\
.publish(self, event)\
.add_done_callback(run)
return result
else:
try:
self.event_lock.acquire_write()
self.exposures.clear()
self.achievements.clear()
self.pending_count.set(0)
finally:
self.event_lock.release_write()
result = Future()
result.set_result(None)
return result
def close(self):
self.close_async().result()
def refresh(self):
self.refresh_async().result()
def publish(self):
self.publish_async().result()
def publish_async(self):
self.check_not_closed()
return self.flush()
def track(self, goal_name: str, properties: dict):
self.check_not_closed()
achievement = GoalAchievement()
achievement.achievedAt = self.clock.millis()
achievement.name = goal_name
if properties is None:
achievement.properties = None
else:
achievement.properties = dict(properties)
try:
self.event_lock.acquire_write()
self.pending_count.increment_and_get()
self.achievements.append(achievement)
finally:
self.event_lock.release_write()
self.log_event(EventType.GOAL, achievement)
self.set_timeout()
def wait_until_ready(self):
if self.data is None:
if self.ready_future is not None and not self.ready_future.done():
self.ready_future.result()
return self
def wait_until_ready_async(self):
if self.data is not None:
result = Future()
result.set_result(self)
return result
else:
def apply(fut: Future):
return self
self.ready_future.add_done_callback(apply)
return self.ready_future
def clear_timeout(self):
if self.timeout is not None:
try:
self.timeout_lock.acquire_write()
if self.timeout is not None:
self.timeout.cancel()
self.timeout = None
finally:
self.timeout_lock.release_write()
def clear_refresh_timer(self):
if self.refresh_timer is not None:
self.refresh_timer.cancel()
self.refresh_timer = None
def get_variable_value(self, key: str, default_value: object):
self.check_ready(True)
assignment = self.get_variable_assignment(key)
if assignment is not None:
if assignment.variables is not None:
if not assignment.exposed.value:
self.queue_exposure(assignment)
if key in assignment.variables:
return assignment.variables[key]
return default_value
def peek_variable_value(self, key: str, default_value: object):
self.check_ready(True)
assignment = self.get_variable_assignment(key)
if assignment is not None:
if assignment.variables is not None:
if key in assignment.variables:
return assignment.variables[key]
return default_value
def peek_treatment(self, experiment_name: str):
self.check_ready(True)
return self.get_assignment(experiment_name).variant
def get_unit_hash(self, unit_type: str, unit_uid: str):
def computer(key: str):
dig = hashlib.md5(unit_uid.encode('utf-8')).digest()
unithash = base64.urlsafe_b64encode(dig).rstrip(b'=')
return unithash
return Concurrency.compute_if_absent_rw(
self.context_lock,
self.hashed_units,
unit_type,
computer)
def get_treatment(self, experiment_name: str):
self.check_ready(True)
assignment = self.get_assignment(experiment_name)
if not assignment.exposed.value:
self.queue_exposure(assignment)
return assignment.variant
def get_variable_keys(self):
self.check_ready(True)
variable_keys = {}
try:
self.data_lock.acquire_read()
for key, value in self.index_variables.items():
expr_var: ExperimentVariables = value
variable_keys[key] = expr_var.data.name
finally:
            self.data_lock.release_read()
return variable_keys
def get_assignment(self, experiment_name: str):
try:
self.context_lock.acquire_read()
if experiment_name in self.assignment_cache:
assignment: Assignment = self.assignment_cache[experiment_name]
experiment: ExperimentVariables = \
self.get_experiment(experiment_name)
if experiment_name in self.overrides:
override = self.overrides[experiment_name]
if assignment.overridden and \
assignment.variant == override:
return assignment
elif experiment is None:
if assignment.assigned is False:
return assignment
elif experiment_name not in self.cassignments or \
self.cassignments[experiment_name] == \
assignment.variant:
if experiment_matches(experiment.data, assignment):
return assignment
finally:
self.context_lock.release_read()
try:
self.context_lock.acquire_write()
experiment: ExperimentVariables = \
self.get_experiment(experiment_name)
assignment = Assignment()
assignment.name = experiment_name
assignment.eligible = True
if experiment_name in self.overrides:
if experiment is not None:
assignment.id = experiment.data.id
assignment.unit_type = experiment.data.unitType
assignment.overridden = True
assignment.variant = self.overrides[experiment_name]
else:
if experiment is not None:
unit_type = experiment.data.unitType
if experiment.data.audience is not None and \
len(experiment.data.audience) > 0:
attrs = {}
for attr in self.attributes:
attrs[attr.name] = attr.value
match = self.audience_matcher.evaluate(
experiment.data.audience,
attrs)
if match is not None:
assignment.audience_mismatch = not match.result
if experiment.data.audienceStrict and \
assignment.audience_mismatch:
assignment.variant = 0
elif experiment.data.fullOnVariant == 0:
if experiment.data.unitType in self.units:
uid = self.units[experiment.data.unitType]
unit_hash = self.get_unit_hash(unit_type, uid)
assigner: VariantAssigner = \
self.get_variant_assigner(unit_type,
unit_hash)
eligible = \
assigner.assign(
experiment.data.trafficSplit,
experiment.data.trafficSeedHi,
experiment.data.trafficSeedLo) == 1
if eligible:
if experiment_name in self.cassignments:
custom = self.cassignments[experiment_name]
assignment.variant = custom
assignment.custom = True
else:
assignment.variant = \
assigner.assign(experiment.data.split,
experiment.data.seedHi,
experiment.data.seedLo)
else:
assignment.eligible = False
assignment.variant = 0
assignment.assigned = True
else:
assignment.assigned = True
assignment.variant = experiment.data.fullOnVariant
assignment.full_on = True
assignment.unit_type = unit_type
assignment.id = experiment.data.id
assignment.iteration = experiment.data.iteration
assignment.traffic_split = experiment.data.trafficSplit
assignment.full_on_variant = experiment.data.fullOnVariant
if experiment is not None and \
(assignment.variant < len(experiment.data.variants)):
assignment.variables = experiment.variables[assignment.variant]
self.assignment_cache[experiment_name] = assignment
return assignment
finally:
self.context_lock.release_write()
def check_ready(self, expect_not_closed: bool):
if not self.is_ready():
raise RuntimeError('ABSmartly Context is not yet ready')
elif expect_not_closed:
self.check_not_closed()
def get_experiment(self, experiment_name: str):
try:
self.data_lock.acquire_read()
return self.index.get(experiment_name, None)
finally:
self.data_lock.release_read()
def get_experiments(self):
self.check_ready(True)
try:
self.data_lock.acquire_read()
experiment_names = []
for experiment in self.data.experiments:
experiment_names.append(experiment.name)
return experiment_names
finally:
self.data_lock.release_read()
def get_data(self):
self.check_ready(True)
try:
self.data_lock.acquire_read()
return self.data
finally:
self.data_lock.release_read()
def set_override(self, experiment_name: str, variant: int):
self.check_not_closed()
return Concurrency.put_rw(self.context_lock,
self.overrides,
experiment_name, variant)
def get_override(self, experiment_name: str):
return Concurrency.get_rw(self.context_lock,
self.overrides,
experiment_name)
def set_overrides(self, overrides: dict):
for key, value in overrides.items():
self.set_override(key, value)
def set_custom_assignment(self, experiment_name: str, variant: int):
self.check_not_closed()
Concurrency.put_rw(self.context_lock,
self.cassignments,
experiment_name, variant)
def get_custom_assignment(self, experiment_name: str):
return Concurrency.get_rw(self.context_lock,
self.cassignments,
experiment_name)
def set_custom_assignments(self, custom_assignments: dict):
for key, value in custom_assignments.items():
self.set_custom_assignment(key, value)
def get_variant_assigner(self, unit_type: str, unit_hash: bytes):
def apply(key: str):
return VariantAssigner(bytearray(unit_hash))
return Concurrency.compute_if_absent_rw(self.context_lock,
self.assigners,
unit_type, apply)
def get_variable_experiment(self, key: str):
return Concurrency.get_rw(self.data_lock, self.index_variables, key)
def get_variable_assignment(self, key: str):
experiment: ExperimentVariables = self.get_variable_experiment(key)
if experiment is not None:
return self.get_assignment(experiment.data.name)
return None
def close_async(self):
if not self.closed.value:
if self.closing.compare_and_set(False, True):
self.clear_refresh_timer()
if self.pending_count.get() > 0:
self.closing_future = Future()
def accept(res: Future):
if res.done() and res.cancelled() is False \
and res.exception() is None:
self.closed.set(True)
self.closing.set(False)
self.closing_future.set_result(None)
self.log_event(EventType.CLOSE, None)
elif res.cancelled() is False \
and res.exception() is not None:
self.closed.set(True)
self.closing.set(False)
                            self.closing_future.set_exception(res.exception())
self.flush().add_done_callback(accept)
return self.closing_future
else:
self.closed.set(True)
self.closing.set(False)
self.log_event(EventType.CLOSE, None)
if self.closing_future is not None:
return self.closing_future
result = Future()
result.set_result(None)
return result
def queue_exposure(self, assignment: Assignment):
if assignment.exposed.compare_and_set(False, True):
exposure = Exposure()
exposure.id = assignment.id
exposure.name = assignment.name
exposure.unit = assignment.unit_type
exposure.variant = assignment.variant
exposure.exposedAt = self.clock.millis()
exposure.assigned = assignment.assigned
exposure.eligible = assignment.eligible
exposure.overridden = assignment.overridden
exposure.fullOn = assignment.full_on
exposure.custom = assignment.custom
exposure.audienceMismatch = assignment.audience_mismatch
try:
self.event_lock.acquire_write()
self.pending_count.increment_and_get()
self.exposures.append(exposure)
finally:
self.event_lock.release_write()
self.log_event(EventType.EXPOSURE, exposure)
self.set_timeout()
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/context.py | context.py |
from abc import abstractmethod
from typing import Optional
class AudienceDeserializer:
@abstractmethod
def deserialize(self,
bytes_: bytes,
offset: int,
length: int) -> Optional[dict]:
raise NotImplementedError
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/audience_deserializer.py | audience_deserializer.py |
import jsons
from sdk.context_event_serializer import ContextEventSerializer
from sdk.json.publish_event import PublishEvent
class DefaultContextEventSerializer(ContextEventSerializer):
def serialize(self, publish_event: PublishEvent) -> bytearray:
str_result = jsons.dumps(publish_event, strip_nulls=True)
return bytearray(str_result, encoding='utf-8')
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/default_context_event_serializer.py | default_context_event_serializer.py |
class DefaultHTTPClientConfig:
def __init__(self):
self.connection_timeout = 3 # seconds
self.connection_request_timeout = 1
self.retry_interval = 0.3
self.max_retries = 5
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/default_http_client_config.py | default_http_client_config.py |
from typing import Optional
import jsons
from jsons import DeserializationError
from sdk.context import Context
from sdk.variable_parser import VariableParser
class DefaultVariableParser(VariableParser):
def parse(self,
context: Context,
experiment_name: str,
variant_name: str,
config: str) -> Optional[dict]:
try:
return jsons.loads(config, dict)
except DeserializationError:
return None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/default_variable_parser.py | default_variable_parser.py |
from concurrent.futures import Future
from typing import Optional
from sdk.client import Client
from sdk.context_data_provider import ContextDataProvider
from sdk.json.context_data import ContextData
class DefaultContextDataProvider(ContextDataProvider):
def __init__(self, client: Client):
self.client = client
def get_context_data(self) -> Future[Optional[ContextData]]:
return self.client.get_context_data()
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/default_context_data_provider.py | default_context_data_provider.py |
import os
from concurrent.futures import ThreadPoolExecutor
from sdk.default_context_data_deserializer import \
DefaultContextDataDeserializer
from sdk.default_context_event_serializer import \
DefaultContextEventSerializer
class ClientConfig:
def __init__(self, prefix=""):
self.executor = ThreadPoolExecutor()
self.endpoint = os.environ.get(prefix + "endpoint")
self.environment = os.environ.get(prefix + "environment")
self.application = os.environ.get(prefix + "application")
self.api_key = os.environ.get(prefix + "api_key")
self.serializer = DefaultContextEventSerializer()
self.deserializer = DefaultContextDataDeserializer()
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/client_config.py | client_config.py |
from abc import abstractmethod
from enum import Enum
class EventType(Enum):
ERROR = "error"
READY = "ready"
REFRESH = "refresh"
PUBLISH = "publish"
EXPOSURE = "exposure"
GOAL = "goal"
CLOSE = "close"
class ContextEventLogger:
@abstractmethod
def handle_event(self, event_type: EventType, data: object):
raise NotImplementedError
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/context_event_logger.py | context_event_logger.py |
from typing import Optional
from sdk.audience_deserializer import AudienceDeserializer
from sdk.client import Client
from sdk.context_data_provider import ContextDataProvider
from sdk.context_event_handler import ContextEventHandler
from sdk.context_event_logger import ContextEventLogger
from sdk.variable_parser import VariableParser
class ABSmartlyConfig:
context_data_provider: Optional[ContextDataProvider] = None
context_event_handler: Optional[ContextEventHandler] = None
context_event_logger: Optional[ContextEventLogger] = None
audience_deserializer: Optional[AudienceDeserializer] = None
client: Optional[Client] = None
variable_parser: Optional[VariableParser] = None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/absmartly_config.py | absmartly_config.py |
from sdk.client_config import ClientConfig
from sdk.http_client import HTTPClient
from sdk.json.publish_event import PublishEvent
class Client:
def __init__(self, config: ClientConfig, http_client: HTTPClient):
self.serializer = config.serializer
self.deserializer = config.deserializer
self.executor = config.executor
endpoint = config.endpoint
api_key = config.api_key
application = config.application
environment = config.environment
self.url = endpoint + "/context"
self.http_client = http_client
self.headers = {"X-API-Key": api_key,
"X-Application": application,
"X-Environment": environment,
"X-Application-Version": '0',
"X-Agent": "absmartly-python-sdk"}
self.query = {"application": application,
"environment": environment}
def get_context_data(self):
return self.executor.submit(self.send_get, self.url, self.query, {})
def send_get(self, url: str, query: dict, headers: dict):
response = self.http_client.get(url, query, headers)
if response.status_code // 100 == 2:
content = response.content
return self.deserializer.deserialize(content, 0, len(content))
return response.raise_for_status()
def publish(self, event: PublishEvent):
return self.executor.submit(
self.send_put,
self.url,
{},
self.headers,
event)
def send_put(self,
url: str,
query: dict,
headers: dict,
event: PublishEvent):
content = self.serializer.serialize(event)
response = self.http_client.put(url, query, headers, content)
if response.status_code // 100 == 2:
content = response.content
return self.deserializer.deserialize(content, 0, len(content))
return response.raise_for_status()
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/client.py | client.py |
from abc import abstractmethod
from sdk.json.context_data import ContextData
class ContextDataDeserializer:
@abstractmethod
def deserialize(self,
bytes_: bytearray,
offset: int,
length: int) -> ContextData:
raise NotImplementedError
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/context_data_deserializer.py | context_data_deserializer.py |
from typing import Optional
import jsons
from jsons import DeserializationError
from sdk.context_data_deserializer import ContextDataDeserializer
from sdk.json.context_data import ContextData
class DefaultContextDataDeserializer(ContextDataDeserializer):
def deserialize(self,
bytes_: bytes,
offset: int,
length: int) -> Optional[ContextData]:
try:
return jsons.loadb(bytes_, ContextData)
except DeserializationError:
return None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/default_context_data_deserializer.py | default_context_data_deserializer.py |
import requests as req
from requests.adapters import HTTPAdapter, Response
from urllib3 import Retry
from sdk.default_http_client_config import DefaultHTTPClientConfig
from sdk.http_client import HTTPClient
class DefaultHTTPClient(HTTPClient):
def __init__(self, config: DefaultHTTPClientConfig):
self.http_client = req.Session()
retry = Retry(total=config.max_retries, read=config.max_retries,
connect=config.max_retries,
backoff_factor=config.retry_interval,
status_forcelist=(502, 503),
                      # include PUT, since events are published via PUT
                      allowed_methods=frozenset(['GET', 'PUT', 'POST']))
self.http_client.mount(
'http://', HTTPAdapter(max_retries=retry,
pool_maxsize=200,
pool_connections=20))
self.http_client.mount(
'https://', HTTPAdapter(max_retries=retry,
pool_maxsize=200,
pool_connections=20))
self.timeout = config.connection_timeout
self.request_timeout = config.connection_request_timeout
def get(self,
url: str,
query: dict,
headers: dict) -> Response:
return self.http_client.get(url,
params=query,
headers=headers,
timeout=(self.timeout,
self.request_timeout))
def put(self,
url: str,
query: dict,
headers: dict,
body: bytearray) -> Response:
headers.update({'Content-type': 'application/json'})
return self.http_client.put(url,
data=bytes(body),
params=query,
headers=headers,
timeout=(self.timeout,
self.request_timeout))
def post(self,
url: str,
query: dict,
headers: dict,
body: bytearray) -> Response:
headers.update({'Content-type': 'application/json'})
        return self.http_client.post(url,
data=bytes(body),
params=query,
headers=headers,
timeout=(self.timeout,
self.request_timeout))
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/default_http_client.py | default_http_client.py |
from abc import abstractmethod
from concurrent.futures import Future
from typing import Optional
from sdk.json.context_data import ContextData
from sdk.json.publish_event import PublishEvent
class ContextEventHandler:
@abstractmethod
def publish(self, context, event: PublishEvent) -> \
Future[Optional[ContextData]]:
raise NotImplementedError
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/context_event_handler.py | context_event_handler.py |
from typing import Optional
import jsons
from jsons import DeserializationError
from sdk.audience_deserializer import AudienceDeserializer
class DefaultAudienceDeserializer(AudienceDeserializer):
def deserialize(self,
bytes_: bytes,
offset: int,
length: int) -> Optional[dict]:
try:
return jsons.loadb(bytes_, dict)
except DeserializationError:
return None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/default_audience_deserializer.py | default_audience_deserializer.py |
from concurrent.futures import Future
from typing import Optional
from sdk.client import Client
from sdk.context import Context
from sdk.context_event_handler import ContextEventHandler
from sdk.json.context_data import ContextData
from sdk.json.publish_event import PublishEvent
class DefaultContextEventHandler(ContextEventHandler):
def __init__(self, client: Client):
self.client = client
def publish(self,
context: Context,
event: PublishEvent) -> Future[Optional[ContextData]]:
return self.client.publish(event)
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/default_context_event_handler.py | default_context_event_handler.py |
from abc import abstractmethod
from sdk.json.publish_event import PublishEvent
class ContextEventSerializer:
@abstractmethod
def serialize(self, publish_event: PublishEvent) -> bytearray:
raise NotImplementedError
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/context_event_serializer.py | context_event_serializer.py |
from abc import abstractmethod
from concurrent.futures import Future
from typing import Optional
from sdk.json.context_data import ContextData
class ContextDataProvider:
@abstractmethod
def get_context_data(self) -> Future[Optional[ContextData]]:
raise NotImplementedError
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/context_data_provider.py | context_data_provider.py |
from abc import abstractmethod
from requests.adapters import Response
class HTTPClient:
@abstractmethod
def get(self,
url: str,
query: dict,
headers: dict) -> Response:
raise NotImplementedError
@abstractmethod
def put(self,
url: str,
query: dict,
headers: dict,
body: bytearray) -> Response:
raise NotImplementedError
@abstractmethod
def post(self,
url: str,
query: dict,
headers: dict,
body: bytearray) -> Response:
raise NotImplementedError
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/http_client.py | http_client.py |
from concurrent.futures import Future
from typing import Optional
from sdk.absmartly_config import ABSmartlyConfig
from sdk.audience_matcher import AudienceMatcher
from sdk.context import Context
from sdk.context_config import ContextConfig
from sdk.default_audience_deserializer import DefaultAudienceDeserializer
from sdk.default_context_data_provider import DefaultContextDataProvider
from sdk.default_context_event_handler import DefaultContextEventHandler
from sdk.default_variable_parser import DefaultVariableParser
from sdk.json.context_data import ContextData
from sdk.time.system_clock_utc import SystemClockUTC
class ABSmartly:
def __init__(self, config: ABSmartlyConfig):
self.context_data_provider = config.context_data_provider
self.context_event_handler = config.context_event_handler
self.context_event_logger = config.context_event_logger
self.variable_parser = config.variable_parser
self.audience_deserializer = config.audience_deserializer
if self.context_data_provider is None or \
self.context_event_handler is None:
self.client = config.client
if self.context_data_provider is None:
self.context_data_provider = \
DefaultContextDataProvider(self.client)
if self.context_event_handler is None:
self.context_event_handler = \
DefaultContextEventHandler(self.client)
if self.variable_parser is None:
self.variable_parser = DefaultVariableParser()
if self.audience_deserializer is None:
self.audience_deserializer = DefaultAudienceDeserializer()
def get_context_data(self) -> Future[Optional[ContextData]]:
return self.context_data_provider.get_context_data()
def create_context(self, config: ContextConfig) -> Context:
return Context(SystemClockUTC(),
config,
self.context_data_provider.get_context_data(),
self.context_data_provider,
self.context_event_handler,
self.context_event_logger,
self.variable_parser,
AudienceMatcher(self.audience_deserializer))
def create_context_with(self,
config: ContextConfig,
data: ContextData) -> Context:
future_data = Future()
future_data.set_result(data)
return Context(SystemClockUTC(), config,
future_data,
self.context_data_provider,
self.context_event_handler,
self.context_event_logger,
self.variable_parser,
AudienceMatcher(self.audience_deserializer))
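

# --- Usage sketch (illustrative only) ---------------------------------------
# A minimal wiring example showing how the pieces fit together: ClientConfig
# holds endpoint/credentials (normally read from environment variables), the
# Client wraps the HTTP transport, and ABSmartlyConfig lets the SDK fill in
# default providers and handlers around that client. The endpoint and
# credentials below are placeholders, not real values.
if __name__ == '__main__':
    from sdk.client import Client
    from sdk.client_config import ClientConfig
    from sdk.default_http_client import DefaultHTTPClient
    from sdk.default_http_client_config import DefaultHTTPClientConfig

    client_config = ClientConfig()
    client_config.endpoint = 'https://your-company.absmartly.io/v1'  # placeholder
    client_config.api_key = 'YOUR-API-KEY'                           # placeholder
    client_config.application = 'website'                            # placeholder
    client_config.environment = 'prod'                               # placeholder

    sdk_config = ABSmartlyConfig()
    sdk_config.client = Client(client_config,
                               DefaultHTTPClient(DefaultHTTPClientConfig()))
    sdk = ABSmartly(sdk_config)
    context_data = sdk.get_context_data().result()  # blocks on the Future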
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/absmartly.py | absmartly.py |
from typing import Optional
from sdk.audience_deserializer import AudienceDeserializer
from sdk.jsonexpr.json_expr import JsonExpr
class Result:
def __init__(self, result: bool):
self.result = result
class AudienceMatcher:
def __init__(self, deserializer: AudienceDeserializer):
self.deserializer = deserializer
self.json_expr = JsonExpr()
def evaluate(self, audience: str, attributes: dict) -> Optional[Result]:
bytes_arr = bytes(audience, encoding="utf-8")
audience_map = self.deserializer.deserialize(bytes_arr,
0,
len(bytes_arr))
if audience_map is not None:
if "filter" in audience_map:
fl = audience_map["filter"]
if type(fl) is dict or type(fl) is list:
expr = self.json_expr.evaluate_boolean_expr(fl,
attributes)
return Result(expr)
return None
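

# --- Usage sketch (illustrative only) ---------------------------------------
# Evaluates an audience filter, written in the JSON expression language,
# against a unit's attributes. The operators ("gte", "var", "value", ...) are
# the ones registered in sdk.jsonexpr.json_expr.
if __name__ == '__main__':
    from sdk.default_audience_deserializer import DefaultAudienceDeserializer

    matcher = AudienceMatcher(DefaultAudienceDeserializer())
    audience = '{"filter": [{"gte": [{"var": "age"}, {"value": 21}]}]}'
    result = matcher.evaluate(audience, {"age": 30})
    print(result.result if result is not None else None)  # True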
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/audience_matcher.py | audience_matcher.py |
from abc import abstractmethod
from typing import Optional
class VariableParser:
@abstractmethod
def parse(self,
context,
experiment_name: str,
variant_name: str,
variable_value: str) -> Optional[dict]:
raise NotImplementedError
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/variable_parser.py | variable_parser.py |
def put_uint32(buf: bytearray, offset: int, x: int):
buf[offset] = x & 0xff
buf[offset + 1] = (x >> 8) & 0xff
buf[offset + 2] = (x >> 16) & 0xff
buf[offset + 3] = (x >> 24) & 0xff
def get_uint32(buf: bytearray, offset: int):
return (buf[offset] & 0xff) \
| ((buf[offset + 1] & 0xff) << 8) \
| ((buf[offset + 2] & 0xff) << 16) \
| ((buf[offset + 3] & 0xff) << 24)
def get_uint24(buf: bytearray, offset: int):
return (buf[offset] & 0xff) \
| ((buf[offset + 1] & 0xff) << 8) \
| ((buf[offset + 2] & 0xff) << 16)
def get_uint16(buf: bytearray, offset: int):
return (buf[offset] & 0xff) \
| ((buf[offset + 1] & 0xff) << 8)
def get_uint8(buf: bytearray, offset: int):
return buf[offset] & 0xff
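

# --- Round-trip sketch (illustrative only) -----------------------------------
# These helpers pack and unpack little-endian unsigned integers; the variant
# assigner uses them to lay out seeds and the unit hash in a 12-byte buffer
# before hashing.
if __name__ == '__main__':
    buf = bytearray(4)
    put_uint32(buf, 0, 0x12345678)
    assert buf == bytearray([0x78, 0x56, 0x34, 0x12])  # little-endian layout
    assert get_uint32(buf, 0) == 0x12345678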
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/internal/buffers.py | buffers.py |
import sys as _sys
if _sys.version_info > (3, 0):
def xrange(a, b, c):
return range(a, b, c)
def xencode(x):
if isinstance(x, bytes) or isinstance(x, bytearray):
return x
else:
return x.encode()
else:
def xencode(x):
return x
del _sys
def digest(key, seed):
key = bytearray(xencode(key))
length = len(key)
nblocks = int(length / 4)
h1 = seed
c1 = 0xcc9e2d51
c2 = 0x1b873593
for block_start in xrange(0, nblocks * 4, 4):
k1 = key[block_start + 3] << 24 | \
key[block_start + 2] << 16 | \
key[block_start + 1] << 8 | \
key[block_start + 0]
k1 = (c1 * k1) & 0xFFFFFFFF
k1 = rotate_right(k1, 15)
k1 = (c2 * k1) & 0xFFFFFFFF
h1 ^= k1
h1 = rotate_right(h1, 13)
h1 = (h1 * 5 + 0xe6546b64) & 0xFFFFFFFF
tail_index = nblocks * 4
k1 = 0
tail_size = length & 3
if tail_size >= 3:
k1 ^= key[tail_index + 2] << 16
if tail_size >= 2:
k1 ^= key[tail_index + 1] << 8
if tail_size >= 1:
k1 ^= key[tail_index + 0]
if tail_size > 0:
k1 = (k1 * c1) & 0xFFFFFFFF
k1 = rotate_right(k1, 15)
k1 = (k1 * c2) & 0xFFFFFFFF
h1 ^= k1
unsigned_val = fmix(h1 ^ length)
if unsigned_val & 0x80000000 == 0:
return unsigned_val
else:
return -((unsigned_val ^ 0xFFFFFFFF) + 1)
def fmix(h: int):
h ^= h >> 16
h = (h * 0x85ebca6b) & 0xFFFFFFFF
h ^= h >> 13
h = (h * 0xc2b2ae35) & 0xFFFFFFFF
h ^= h >> 16
return h
def rotate_right(n, d):
    # 32-bit rotation: mask the combined result, not just the right shift
    return ((n << d) | (n >> (32 - d))) & 0xFFFFFFFF
def to_signed32(n):
n = n & 0xffffffff
return (n ^ 0x80000000) - 0x80000000
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/internal/murmur32.py | murmur32.py |
import threading
from sdk.internal import murmur32, buffers
class VariantAssigner:
def __init__(self, unithash: bytearray):
self.unitHash_ = murmur32.digest(unithash, 0)
self.threadBuffer = threading.local()
self.threadBuffer.value = bytearray(12)
def assign(self, split: list, seed_hi: int, seed_lo: int):
prob = self.probability(seed_hi, seed_lo)
return self.choose_variant(split, prob)
@staticmethod
def choose_variant(split: list, prob: float):
cum_sum = 0.0
for index, item in enumerate(split):
cum_sum += item
if prob < cum_sum:
return index
return len(split) - 1
    def probability(self, seed_hi: int, seed_lo: int):
        # threading.local attributes set in __init__ are only visible to the
        # creating thread, so allocate the scratch buffer lazily per thread
        buff = getattr(self.threadBuffer, 'value', None)
        if buff is None:
            buff = bytearray(12)
            self.threadBuffer.value = buff
buffers.put_uint32(buff, 0, seed_lo)
buffers.put_uint32(buff, 4, seed_hi)
buffers.put_uint32(buff, 8, self.unitHash_)
hashing = murmur32.digest(buff, 0)
return (hashing & 0xffffffff) * VariantAssigner.__normalizer
__normalizer = 1.0 / 0xffffffff
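

# --- Usage sketch (illustrative only) -----------------------------------------
# probability() hashes (seed_lo, seed_hi, unit_hash) into [0, 1) and
# choose_variant() maps that value onto the cumulative split, so a 50/50 split
# yields variant 0 when the probability falls below 0.5. The unit hash bytes
# below are arbitrary placeholders.
if __name__ == '__main__':
    assigner = VariantAssigner(bytearray(b'some-unit-hash'))
    variant = assigner.assign([0.5, 0.5], seed_hi=0x12345678, seed_lo=0x9abcdef0)
    print(variant)  # 0 or 1, deterministic for a given unit hash and seeds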
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/internal/variant_assigner.py | variant_assigner.py |
import threading
class ReadWriteLock:
def __init__(self):
self.w_lock = threading.RLock()
self.num_r_lock = threading.RLock()
self.num_r = 0
def acquire_read(self):
self.num_r_lock.acquire()
self.num_r += 1
if self.num_r == 1:
self.w_lock.acquire()
self.num_r_lock.release()
def release_read(self):
assert self.num_r > 0
self.num_r_lock.acquire()
self.num_r -= 1
if self.num_r == 0:
self.w_lock.release()
self.num_r_lock.release()
def acquire_write(self):
self.w_lock.acquire()
def release_write(self):
self.w_lock.release()
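

# --- Usage sketch (illustrative only) -----------------------------------------
# A classic readers-writer pattern: many readers can hold the lock at once; the
# first reader in blocks writers, and the last reader out lets them through.
if __name__ == '__main__':
    lock = ReadWriteLock()
    lock.acquire_read()
    try:
        pass  # read shared state here
    finally:
        lock.release_read()
    lock.acquire_write()
    try:
        pass  # mutate shared state here
    finally:
        lock.release_write()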
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/internal/lock/read_write_lock.py | read_write_lock.py |
from sdk.internal.lock.read_write_lock import ReadWriteLock
class Concurrency:
@staticmethod
def add_rw(lock: ReadWriteLock, lst: list, value: object):
try:
lock.acquire_write()
lst.append(value)
finally:
lock.release_write()
@staticmethod
def compute_if_absent_rw(lock: ReadWriteLock,
mp: dict,
key: object, computer):
try:
lock.acquire_read()
if key in mp:
return mp[key]
finally:
lock.release_read()
try:
lock.acquire_write()
if key in mp:
return mp[key]
new_value = computer(key)
mp[key] = new_value
return new_value
finally:
lock.release_write()
@staticmethod
def get_rw(lock: ReadWriteLock, mp: dict, key: object):
try:
lock.acquire_write()
if key not in mp:
return None
else:
return mp[key]
finally:
lock.release_write()
@staticmethod
def put_rw(lock: ReadWriteLock, mp: dict, key: object, value: object):
try:
lock.acquire_write()
previous = None
if key in mp:
previous = mp[key]
mp[key] = value
return previous
finally:
lock.release_write()
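

# --- Usage sketch (illustrative only) -----------------------------------------
# compute_if_absent_rw takes the cheap read path first and only upgrades to the
# write lock (re-checking under it) when the key is missing; the context uses
# this to cache unit hashes and variant assigners.
if __name__ == '__main__':
    lock = ReadWriteLock()
    cache = {}
    value = Concurrency.compute_if_absent_rw(lock, cache, 'key',
                                             lambda k: k.upper())
    print(value)  # 'KEY'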
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/internal/lock/concurrency.py | concurrency.py |
import threading
class AtomicInt(object):
def __init__(self):
self.value = 0
self._lock = threading.Lock()
def set(self, value: int):
with self._lock:
self.value = value
def increment(self):
with self._lock:
self.value += 1
def increment_and_get(self):
with self._lock:
self.value += 1
return self.value
def get(self):
with self._lock:
return self.value
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/internal/lock/atomic_int.py | atomic_int.py |
import threading
class AtomicBool(object):
def __init__(self):
self.value = False
self._lock = threading.Lock()
def set(self, value: bool):
with self._lock:
self.value = value
def compare_and_set(self, expected_value: bool, new_value: bool):
with self._lock:
result = expected_value == self.value
if result:
self.value = new_value
return result
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/internal/lock/atomic_bool.py | atomic_bool.py |
import decimal
from decimal import Decimal
from sdk.jsonexpr.evaluator import Evaluator
def compare_to(this, that):
if this > that:
return 1
elif this < that:
return -1
elif this == that:
return 0
class ExprEvaluator(Evaluator):
def __init__(self, operators: dict, vars: dict):
self.vars = vars
self.operators = operators
def evaluate(self, expr: object):
if type(expr) is list:
return self.operators["and"].evaluate(self, expr)
elif type(expr) is dict:
for key, value in expr.items():
if key not in self.operators:
return None
                op = self.operators[key]
                if op is not None:
                    return op.evaluate(self, value)
                break
return None
def boolean_convert(self, x: object):
if type(x) is bool:
return x
elif type(x) is str:
return x != "False" and x != "0" and x != ""
elif type(x) is int or type(x) is float or type(x) is complex:
return x != 0
return x is not None
def number_convert(self, x: object):
if type(x) is int or type(x) is float or type(x) is complex:
return x
elif type(x) is bool:
return 1.0 if x is True else 0.0
elif type(x) is str:
try:
return Decimal(x)
except decimal.InvalidOperation:
return None
return None
def string_convert(self, x: object):
if type(x) is str:
return x
elif type(x) is bool:
return str(x)
elif type(x) is int or type(x) is float or type(x) is complex:
return str(x)
return None
def extract_var(self, path: str):
frags = path.split("/")
target = self.vars if self.vars is not None else {}
for frag in frags:
value = None
if type(target) is list:
                try:
                    value = target[int(frag)]
                except (IndexError, ValueError):
                    value = None  # out-of-range index or non-numeric fragment
elif type(target) is dict:
if frag not in target:
return None
value = target[frag]
if value is not None:
target = value
continue
return None
return target
def compare(self, lhs: object, rhs: object):
if lhs is None:
return 0 if rhs is None else None
elif rhs is None:
return None
if type(lhs) is int or type(lhs) is float or type(lhs) is complex:
rvalue = self.number_convert(rhs)
if rvalue is not None:
return compare_to(lhs, rvalue)
elif type(lhs) is str:
rvalue = self.string_convert(rhs)
if rvalue is not None:
return compare_to(lhs, rvalue)
elif type(lhs) is bool:
rvalue = self.boolean_convert(rhs)
if rvalue is not None:
return compare_to(lhs, rvalue)
elif type(lhs) == type(rhs) and lhs == rhs:
return 0
return None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/expr_evaluator.py | expr_evaluator.py |
from abc import abstractmethod
class Evaluator:
@abstractmethod
def evaluate(self, expr: object):
raise NotImplementedError
@abstractmethod
def boolean_convert(self, x: object):
raise NotImplementedError
@abstractmethod
def number_convert(self, x: object):
raise NotImplementedError
@abstractmethod
def string_convert(self, x: object):
raise NotImplementedError
@abstractmethod
def extract_var(self, path: str):
raise NotImplementedError
# returns
# -1 -> lesser, 0 -> equals, 1 -> greater, null -> undefined comparison
@abstractmethod
def compare(self, lhs: object, rhs: object):
raise NotImplementedError
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/evaluator.py | evaluator.py |
from sdk.jsonexpr.expr_evaluator import ExprEvaluator
from sdk.jsonexpr.operators.and_combinator import AndCombinator
from sdk.jsonexpr.operators.equals_operator import EqualsOperator
from sdk.jsonexpr.operators.greater_than_operator import GreaterThanOperator
from sdk.jsonexpr.operators.greater_than_or_equal_operator \
import GreaterThanOrEqualOperator
from sdk.jsonexpr.operators.in_operator import InOperator
from sdk.jsonexpr.operators.less_than_operator import LessThanOperator
from sdk.jsonexpr.operators.less_than_or_equal_operator \
import LessThanOrEqualOperator
from sdk.jsonexpr.operators.match_operator import MatchOperator
from sdk.jsonexpr.operators.not_operator import NotOperator
from sdk.jsonexpr.operators.null_operator import NullOperator
from sdk.jsonexpr.operators.or_combinator import OrCombinator
from sdk.jsonexpr.operators.value_operator import ValueOperator
from sdk.jsonexpr.operators.var_operator import VarOperator
class JsonExpr:
operators = {
"and": AndCombinator(),
"or": OrCombinator(),
"value": ValueOperator(),
"var": VarOperator(),
"null": NullOperator(),
"not": NotOperator(),
"in": InOperator(),
"match": MatchOperator(),
"eq": EqualsOperator(),
"gt": GreaterThanOperator(),
"gte": GreaterThanOrEqualOperator(),
"lt": LessThanOperator(),
"lte": LessThanOrEqualOperator()
}
def evaluate_boolean_expr(self, expr: object, var: dict):
evaluator = ExprEvaluator(self.operators, var)
return evaluator.boolean_convert(evaluator.evaluate(expr))
def evaluate_expr(self, expr: object, var: dict):
evaluator = ExprEvaluator(self.operators, var)
return evaluator.evaluate(expr)
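

# --- Usage sketch (illustrative only) -----------------------------------------
# Expressions are plain dicts and lists: a top-level list is an implicit "and",
# "var" pulls a value out of the supplied variables by path, and "value" is a
# literal. Audience filters are written in exactly this form.
if __name__ == '__main__':
    expr = [{"gte": [{"var": "age"}, {"value": 18}]},
            {"in": [{"var": "country"}, {"value": "de"}]}]
    print(JsonExpr().evaluate_boolean_expr(expr, {"age": 21, "country": "de"}))
    # True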
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/json_expr.py | json_expr.py |
from abc import abstractmethod
from sdk.jsonexpr.evaluator import Evaluator
class Operator:
@abstractmethod
def evaluate(self, evaluator: Evaluator, args: object):
raise NotImplementedError
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operator.py | operator.py |
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operators.boolean_combinator import BooleanCombinator
class OrCombinator(BooleanCombinator):
def combine(self, evaluator: Evaluator, args: list):
for item in args:
if evaluator.boolean_convert(evaluator.evaluate(item)):
return True
return len(args) == 0
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/or_combinator.py | or_combinator.py |
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operators.unary_operator import UnaryOperator
class NullOperator(UnaryOperator):
def unary(self, evaluator: Evaluator, arg: object):
return arg is None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/null_operator.py | null_operator.py |
import re
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operators.binary_operator import BinaryOperator
class MatchOperator(BinaryOperator):
def binary(self, evaluator: Evaluator, lhs: object, rhs: object):
text = evaluator.string_convert(lhs)
if text is not None:
pattern = evaluator.string_convert(rhs)
if pattern is not None:
compiled = re.compile(pattern)
return bool(compiled.match(text))
return None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/match_operator.py | match_operator.py |
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operators.unary_operator import UnaryOperator
class NotOperator(UnaryOperator):
def unary(self, evaluator: Evaluator, arg: object):
        return not evaluator.boolean_convert(arg)
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/not_operator.py | not_operator.py |
from abc import abstractmethod
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operator import Operator
class BinaryOperator(Operator):
def evaluate(self, evaluator: Evaluator, args: object):
if type(args) is list:
lhs = evaluator.evaluate(args[0]) if len(args) > 0 else None
if lhs is not None:
rhs = evaluator.evaluate(args[1]) if len(args) > 1 else None
if rhs is not None:
return self.binary(evaluator, lhs, rhs)
return None
@abstractmethod
def binary(self, evaluator: Evaluator, lhs: object, rhs: object):
raise NotImplementedError
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/binary_operator.py | binary_operator.py |
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operators.binary_operator import BinaryOperator
class LessThanOrEqualOperator(BinaryOperator):
def binary(self, evaluator: Evaluator, lhs: object, rhs: object):
result = evaluator.compare(lhs, rhs)
return result <= 0 if result is not None else None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/less_than_or_equal_operator.py | less_than_or_equal_operator.py |
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operators.binary_operator import BinaryOperator
class InOperator(BinaryOperator):
def binary(self, evaluator: Evaluator, haystack: object, needle: object):
if type(haystack) is list:
for item in haystack:
if evaluator.compare(item, needle) == 0:
return True
return False
elif type(haystack) is str or type(haystack) is dict:
needle_str = evaluator.string_convert(needle)
return needle_str is not None and needle_str in haystack
else:
return None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/in_operator.py | in_operator.py |
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operators.binary_operator import BinaryOperator
class EqualsOperator(BinaryOperator):
def binary(self, evaluator: Evaluator, lhs: object, rhs: object):
result = evaluator.compare(lhs, rhs)
if result is not None:
return result == 0
else:
return None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/equals_operator.py | equals_operator.py |
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operators.binary_operator import BinaryOperator
class GreaterThanOrEqualOperator(BinaryOperator):
def binary(self, evaluator: Evaluator, lhs: object, rhs: object):
result = evaluator.compare(lhs, rhs)
return result >= 0 if result is not None else None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/greater_than_or_equal_operator.py | greater_than_or_equal_operator.py |
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operator import Operator
class VarOperator(Operator):
def evaluate(self, evaluator: Evaluator, path: object):
if type(path) is dict:
if "path" not in path:
path = None
else:
path = path["path"]
return evaluator.extract_var(path) if type(path) is str else None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/var_operator.py | var_operator.py |
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operators.binary_operator import BinaryOperator
class LessThanOperator(BinaryOperator):
def binary(self, evaluator: Evaluator, lhs: object, rhs: object):
result = evaluator.compare(lhs, rhs)
return result < 0 if result is not None else None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/less_than_operator.py | less_than_operator.py |
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operators.binary_operator import BinaryOperator
class GreaterThanOperator(BinaryOperator):
def binary(self, evaluator: Evaluator, lhs: object, rhs: object):
result = evaluator.compare(lhs, rhs)
return result > 0 if result is not None else None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/greater_than_operator.py | greater_than_operator.py |
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operator import Operator
class ValueOperator(Operator):
def evaluate(self, evaluator: Evaluator, value: object):
return value
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/value_operator.py | value_operator.py |
from abc import abstractmethod
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operator import Operator
class BooleanCombinator(Operator):
def evaluate(self, evaluator: Evaluator, args: object):
if type(args) is list:
return self.combine(evaluator, args)
else:
return None
@abstractmethod
def combine(self, evaluator: Evaluator, args: list):
raise NotImplementedError
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/boolean_combinator.py | boolean_combinator.py |
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operators.boolean_combinator import BooleanCombinator
class AndCombinator(BooleanCombinator):
def combine(self, evaluator: Evaluator, exprs: list):
for item in exprs:
if not evaluator.boolean_convert(evaluator.evaluate(item)):
return False
return True
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/and_combinator.py | and_combinator.py |
from abc import abstractmethod
from sdk.jsonexpr.evaluator import Evaluator
from sdk.jsonexpr.operator import Operator
class UnaryOperator(Operator):
def evaluate(self, evaluator: Evaluator, args: object):
arg = evaluator.evaluate(args)
return self.unary(evaluator, arg)
@abstractmethod
def unary(self, evaluator: Evaluator, arg: object):
raise NotImplementedError
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/jsonexpr/operators/unary_operator.py | unary_operator.py |
import time
from sdk.time.clock import Clock
class SystemClockUTC(Clock):
    def millis(self):
        # time.time() already returns seconds since the UTC epoch;
        # time.mktime(time.gmtime()) would skew the result by the local UTC offset
        return round(time.time() * 1000)
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/time/system_clock_utc.py | system_clock_utc.py |
from abc import abstractmethod
class Clock:
@abstractmethod
def millis(self):
raise NotImplementedError
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/time/clock.py | clock.py |
from sdk.time.clock import Clock
class FixedClock(Clock):
def __init__(self, millis: int):
self.value = millis
def millis(self):
return self.value
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/time/fixed_clock.py | fixed_clock.py |
import typing
from sdk.json.experiement_application import ExperimentApplication
from sdk.json.experiment_variant import ExperimentVariant
class Experiment:
id: typing.Optional[int] = 0
name: typing.Optional[str] = None
unitType: typing.Optional[str] = None
iteration: typing.Optional[int] = 0
seedHi: typing.Optional[int] = 0
seedLo: typing.Optional[int] = 0
split: typing.Optional[list[float]] = None
trafficSeedHi: typing.Optional[int] = 0
trafficSeedLo: typing.Optional[int] = 0
trafficSplit: typing.Optional[list[float]] = None
fullOnVariant: typing.Optional[int] = 0
applications: typing.Optional[list[ExperimentApplication]] = None
variants: typing.Optional[list[ExperimentVariant]] = None
audienceStrict: typing.Optional[bool] = False
audience: typing.Optional[str] = None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/json/experiment.py | experiment.py |
import typing
class ExperimentVariant:
name: typing.Optional[str] = None
config: typing.Optional[str] = None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/json/experiment_variant.py | experiment_variant.py |
import typing
from sdk.json.experiment import Experiment
class ContextData:
experiments: typing.Optional[list[Experiment]] = None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/json/context_data.py | context_data.py |
import typing
class GoalAchievement:
name: typing.Optional[str] = None
achievedAt: typing.Optional[int] = 0
    properties: typing.Optional[dict] = None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/json/goal_achievement.py | goal_achievement.py |
import typing
from sdk.json.attribute import Attribute
from sdk.json.exposure import Exposure
from sdk.json.goal_achievement import GoalAchievement
from sdk.json.unit import Unit
class PublishEvent:
hashed: typing.Optional[bool] = False
units: typing.Optional[list[Unit]] = None
publishedAt: typing.Optional[int] = 0
exposures: typing.Optional[list[Exposure]] = None
goals: typing.Optional[list[GoalAchievement]] = None
attributes: typing.Optional[list[Attribute]] = None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/json/publish_event.py | publish_event.py |
import typing
class Unit:
type: typing.Optional[str] = None
uid: typing.Optional[str] = None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/json/unit.py | unit.py |
import typing
class Attribute:
name: typing.Optional[str] = None
    value: typing.Optional[object] = None
setAt: typing.Optional[int] = 0
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/json/attribute.py | attribute.py |
import typing
class ExperimentApplication:
name: typing.Optional[str] = None
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/json/experiement_application.py | experiement_application.py |
import typing
class Exposure:
id: typing.Optional[int] = 0
name: typing.Optional[str] = None
unit: typing.Optional[str] = None
variant: typing.Optional[int] = 0
exposedAt: typing.Optional[int] = 0
assigned: typing.Optional[bool] = False
eligible: typing.Optional[bool] = False
overridden: typing.Optional[bool] = False
fullOn: typing.Optional[bool] = False
custom: typing.Optional[bool] = False
audienceMismatch: typing.Optional[bool] = False
| ABSmartly | /ABSmartly-0.1.4-py3-none-any.whl/sdk/json/exposure.py | exposure.py |
"""This module contains ABC class
Author: Jalil Nourisa
"""
import time
import numpy as np
import os
from pprogress import ProgressBar
import json
from diversipy import lhd_matrix
from diversipy import transform_spread_out
import plotly.graph_objects as go
import plotly.offline
class clock:
start_t = 0
end_t = 0
@staticmethod
def start():
clock.start_t = time.time()
@staticmethod
def end():
clock.end_t = time.time()
print('Elapsed time: ',clock.end_t - clock.start_t)
def box_plot(scalled_posteriors,path_to_save):
fig = go.Figure()
ii = 0
for key,value in scalled_posteriors.items():
fig.add_trace(go.Box(
y=value,
name=key,
boxpoints='all',
jitter=0,
marker_size=5,
whiskerwidth=0.2,
line_width=2)
)
ii += 1
fig.update_layout(yaxis=dict(
# autorange=True,
# showgrid=False,
dtick=0.2,
zeroline = False,range= [-0.1,1.1]
),
margin=dict(
l=40,
r=30,
b=80,
t=100
),
showlegend=False,
paper_bgcolor='rgb(243, 243, 243)',
plot_bgcolor='rgb(243, 243, 243)',
)
fig.write_html(path_to_save+'/box_plot.html')
class ABC:
""" Contains essential function for ABC
Attributes:
comm : MPI communication object
rank (int): ID of each processor
        free_params (dict): Content of free parameters including their tags and bounds
free_params_bounds (narray): Bounds for each free parameter
free_params_keys (array): Names of free parameters
        param_sets (list): The list of parameter sets created during sampling
settings (dict): Settings of the analysis
"""
def __init__(self,free_params,settings):
"""Generates ABM object. Receives free paramatere lists and settings.
Args:
            free_params (dict): Content of free parameters including their tags and bounds
settings (dict): Settings of the analysis
"""
self.settings = settings
if self.settings["MPI_flag"]:
from mpi4py import MPI
self.comm = MPI.COMM_WORLD
self.rank = self.comm.Get_rank()
else:
self.rank = 0
        if self.rank == 0:
            if self.settings["MPI_flag"]:
                print("Number of CPUs assigned: ", self.comm.Get_size())
            print("Sample number: ", settings['sample_n'])
self.free_params = free_params
self.free_params_keys = list(free_params.keys())
self.free_params_bounds = list(free_params.values())
print("The list of free parameters: ",self.free_params_keys)
try:
os.makedirs(self.settings["output_path"])
except OSError:
print("Creation of the directory %s failed" % self.settings["output_path"])
else:
print("Successfully created the directory %s " % self.settings["output_path"])
def sample(self):
"""Conducts
- Uniform sampling from n-dimensional space of parameters within the bounds given as ABC.free_params.
- Creates parameter sets and outputs them
"""
if self.rank == 0:
# python version > 3.6
non_scalled_samples = transform_spread_out(lhd_matrix(self.settings["sample_n"], len(self.free_params))).transpose()
scaled_samples = []
ii = 0
for bounds in self.free_params_bounds:
low = bounds[0]
high = bounds[1]
pre_samples_param = non_scalled_samples[ii]
samples_param = list(map(lambda x:x*(high-low)+low ,pre_samples_param))
scaled_samples.append(samples_param)
ii+=1
priors = {key:value for key,value in zip(self.free_params_keys,scaled_samples)}
samples = np.array(scaled_samples).transpose()
np.savetxt(self.settings["output_path"]+'/samples.txt', samples, fmt='%f')
##### create parameter sets
param_sets = []
for sample in samples:
param_set = {}
for i in range(len(sample)):
sample_p = sample[i]
key = self.free_params_keys[i]
param_set.update({key:sample_p})
param_sets.append(param_set)
with open(self.settings["output_path"]+'/param_sets.json','w') as file:
file.write(json.dumps({"param_sets":param_sets}))
self.param_sets = param_sets
def run(self):
"""Runs the user given model for the parameter sets.
"""
if self.rank == 0:
# reload
with open(self.settings["output_path"]+'/param_sets.json') as file:
self.param_sets = json.load(file)["param_sets"]
CPU_n = self.comm.Get_size()
shares = np.ones(CPU_n,dtype=int)*int(len(self.param_sets)/CPU_n)
plus = len(self.param_sets)%CPU_n
for i in range(plus):
shares[i]+=1
            portions = []
            start = 0
            for i in range(CPU_n):
                end = start + shares[i]
                portions.append([start, end])
                start = end
paramsets = self.param_sets
else:
portions = None
paramsets = None
portion = self.comm.scatter(portions,root = 0)
paramsets = self.comm.bcast(paramsets,root = 0)
def run_model(start,end):
pb = ProgressBar(end-start)
distances = []
for i in range(start,end):
distance = self.settings["run_func"](paramsets[i],self.settings["args"])
distances.append(distance)
pb.update()
pb.done()
return distances
distances_perCore = run_model(portion[0],portion[1])
distances_stacks = self.comm.gather(distances_perCore,root = 0)
if self.rank == 0:
distances = np.array([])
for stack in distances_stacks:
distances = np.concatenate([distances,stack],axis = 0)
np.savetxt(self.settings["output_path"]+'/distances.txt',np.array(distances),fmt='%s')
def postprocessing(self):
"""Conducts post processing tasks. Currently it extracts top fits and posteriors and also plots scaled posteriors.
"""
if self.rank == 0:
# reload
distances = []
with open(self.settings["output_path"]+'/distances.txt') as file:
                for line in file:
                    try:
                        value = float(line.strip())
                    except ValueError:
                        value = None
                    distances.append(value)
samples = np.loadtxt(self.settings["output_path"]+'/samples.txt', dtype=float)
# top fitnesses
top_n = self.settings["top_n"]
fitness_values = np.array([])
for item in distances:
                if item is None:
fitness = 0
else:
fitness = 1 - item
fitness_values = np.append(fitness_values,fitness)
top_ind = np.argpartition(fitness_values, -top_n)[-top_n:]
top_fitess_values = fitness_values[top_ind]
np.savetxt(self.settings["output_path"]+'/top_fitness.txt',top_fitess_values,fmt='%f')
# extract posteriors
top_fit_samples = samples[top_ind].transpose()
try:
posteriors = {key:list(value) for key,value in zip(self.free_params_keys,top_fit_samples)}
except TypeError:
posteriors = {self.free_params_keys[0]:list(top_fit_samples)}
with open(self.settings["output_path"]+'/posterior.json', 'w') as file:
file.write(json.dumps({'posteriors': posteriors}))
# box plot
scalled_posteriors = {}
for key,values in posteriors.items():
min_v = self.free_params[key][0]
max_v = self.free_params[key][1]
scalled = list(map(lambda x: (x-min_v)/(max_v-min_v),values))
scalled_posteriors.update({key:scalled})
box_plot(scalled_posteriors,self.settings["output_path"])
settings = 0
comm = 0
rank = 0
param_sets = 0
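

# --- Usage sketch (illustrative only) -----------------------------------------
# A minimal driver, assuming a user-supplied model function that maps one
# parameter set (plus optional args) to a distance in [0, 1]. run() scatters
# work across MPI ranks, so launch under MPI, e.g. `mpirun -n 4 python driver.py`.
if __name__ == '__main__':
    def my_model(params, args):  # hypothetical model
        return abs(params["rate"] - 0.3)

    settings = {"MPI_flag": True,
                "sample_n": 100,
                "top_n": 10,
                "output_path": "outputs",
                "run_func": my_model,
                "args": None}
    abc = ABC(free_params={"rate": [0.0, 1.0]}, settings=settings)
    abc.sample()
    abc.run()
    abc.postprocessing()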
| ABayesianC | /ABayesianC-1.0.7-py3-none-any.whl/ABC/tools.py | tools.py |
__version__ = "1.0.7" | ABayesianC | /ABayesianC-1.0.7-py3-none-any.whl/ABC/__init__.py | __init__.py |
# ABlooper
Antibodies are a key component of the immune system and have been extensively used as biotherapeutics. Accurate knowledge of their structure is central to understanding their antigen binding function. The key area for antigen binding and the main area of structural variation in antibodies is concentrated in the six complementarity determining regions (CDRs), with the most important for binding and most variable being the CDR-H3 loop. The sequence and structural variability of CDR-H3 make it particularly challenging to model. Recently deep learning methods have offered a step change in our ability to predict protein structures. In this work we present ABlooper, an end-to-end equivariant deep-learning based CDR loop structure prediction tool. ABlooper rapidly predicts the structure of CDR loops with high accuracy and provides a confidence estimate for each of its predictions. On the models of the Rosetta Antibody Benchmark, ABlooper makes predictions with an average CDR-H3 RMSD of 2.49Å, which drops to 2.05Å when considering only its 76% most confident predictions.
## Install
To install via PyPi:
```bash
$ pip install ABlooper
```
To download and install the latest version from github:
```bash
$ git clone https://github.com/brennanaba/ABlooper.git
$ pip install ABlooper/
```
This package requires PyTorch. If you do not already have PyTorch installed, you can do so following these <a href="https://pytorch.org/get-started/locally/">instructions</a>.
Either OpenMM or PyRosetta is required for the optional refinement and side-chain prediction steps.
OpenMM and pdbfixer can be installed via conda using:
```bash
$ conda install -c conda-forge openmm pdbfixer
```
If you want to use PyRosetta for refinement and do not have it installed, it can be obtained from <a href="https://www.pyrosetta.org/">here</a>.
## Usage
To use ABlooper, you will need an IMGT-numbered antibody model. If you do not already have one, you can generate a model using <a href="http://opig.stats.ox.ac.uk/webapps/newsabdab/sabpred/abodybuilder/">ABodyBuilder</a>.
To remodel the CDRs of an existing antibody model using the command line:
```bash
$ ABlooper my_antibody_model.pdb --output ABlooper_model.pdb --heavy_chain H --light_chain L
```
To remodel the CDRs of an existing model using the python API:
```python
from ABlooper import CDR_Predictor
input_path = "my_antibody_model.pdb"
output_path = "ABlooper_model.pdb"
pred = CDR_Predictor(input_path, chains = ("H", "L"))
pred.write_predictions_in_pdb_format(output_path)
```
Two pretrained models are available. The default predicts CDRs defined by the Chothia numbering scheme (this is the model described in the paper). To get predictions for CDRs defined by the IMGT numbering scheme, use:
```python
pred = CDR_Predictor(input_path, chains = ("H", "L"), model = 'imgt')
pred.write_predictions_in_pdb_format(output_path)
```
I would recommend using the command line if you just want a quick antibody model. If speed is a priority, it is probably best to use the trained PyTorch model directly. The Python class will work best if you want to incorporate CDR prediction into a pipeline or access other details, such as the confidence score or the RMSD to the original model, both of which can be obtained as follows:
```python
rmsd_from_input = pred.calculate_BB_rmsd_wrt_input()
confidence_score = pred.decoy_diversity
```
I have been made aware that ABlooper will occasionally generate abnormal geometries. To fix this, and to generate side chains, you can run the following (this only works if you have PyRosetta or OpenMM installed):
```bash
$ ABlooper my_antibody_model.pdb --output ABlooper_model.pdb --model chothia --side_chains
```
By default, this will use OpenMM if it is installed.
## Citing this work
The code and data in this package are based on the following paper: <a href="https://academic.oup.com/bioinformatics/article/38/7/1877/6517780">ABlooper</a>. If you use it, please cite:
```tex
@article{10.1093/bioinformatics/btac016,
author = {Abanades, Brennan and Georges, Guy and Bujotzek, Alexander and Deane, Charlotte M},
title = {ABlooper: fast accurate antibody CDR loop structure prediction with accuracy estimation},
journal = {Bioinformatics},
volume = {38},
number = {7},
pages = {1877-1880},
year = {2022},
month = {01},
issn = {1367-4803},
doi = {10.1093/bioinformatics/btac016},
url = {https://doi.org/10.1093/bioinformatics/btac016},
}
```
| ABlooper | /ABlooper-1.1.2.tar.gz/ABlooper-1.1.2/README.md | README.md |
from setuptools import setup, find_packages
with open("README.md", "r", encoding="utf-8") as fh:
long_description = fh.read()
setup(
name='ABlooper',
version='1.1.2',
description='Set of functions to predict CDR structure',
license='BSD 3-clause license',
maintainer='Brennan Abanades',
long_description=long_description,
long_description_content_type='text/markdown',
maintainer_email='brennan.abanadeskenyon@stx.ox.ac.uk',
include_package_data=True,
packages=find_packages(include=('ABlooper', 'ABlooper.*')),
entry_points={'console_scripts': ['ABlooper=ABlooper.command_line:main']},
install_requires=[
'numpy',
'einops>=0.3',
'torch>=1.6',
],
)
| ABlooper | /ABlooper-1.1.2.tar.gz/ABlooper-1.1.2/setup.py | setup.py |
# What is this?
A Python [Flask](http://flask.pocoo.org/)-based library for building [HipChat Connect add-ons](https://www.hipchat.com/docs/apiv2/addons). This is an early, alpha-quality release,
but can be used to build real add-ons today. Future versions may include backward-incompatible changes.
# Getting started
For a simple alternative to the following setup instructions, you may consider using the [Vagrant starter project](https://bitbucket.org/atlassianlabs/ac-flask-hipchat-vagrant) to get up and running quickly.
## Dependencies
In addition to Python 2.7 or later, `ac-flask-hipchat` expects Redis to be available for temporary persistence of
authentication tokens, and MongoDB for a permanent data store.
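
Connection details are read from the environment by this library's `db.py` helpers: `MONGOHQ_URL` for MongoDB (falling back to a local `test` database) and `REDISTOGO_URL` for Redis (falling back to `redis://localhost:6379`). For example (the hostnames here are illustrative):

```
$ export MONGOHQ_URL=mongodb://db.example.com/greeter
$ export REDISTOGO_URL=redis://cache.example.com:6379
```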
## A first add-on
Writing basic HipChat add-ons with `ac-flask-hipchat` requires very little code to get up and running. Here's an
example of a simple yet complete add-on, in two files:
### web.py
```
from ac_flask.hipchat import Addon, room_client, sender
from flask import Flask
addon = Addon(app=Flask(__name__),
key="ac-flask-hipchat-greeter",
name="HipChat Greeter Example Add-on",
allow_room=True,
scopes=['send_notification'])
@addon.webhook(event="room_enter")
def room_entered():
room_client.send_notification('hi: %s' % sender.name)
return '', 204
if __name__ == '__main__':
addon.run()
```
### requirements.txt
```
AC-Flask-HipChat
```
## Running the server
To run this example yourself, add these files to a new directory and run the following commands there:
```
$ pip install -r requirements.txt
$ python web.py
```
If the server started as expected, you'll see something like the following emitted:
```
--------------------------------------
Public descriptor base URL: http://localhost:5000
--------------------------------------
INFO:werkzeug: * Running on http://127.0.0.1:5000/
INFO:werkzeug: * Restarting with reloader
```
To double check that the server is running correctly, try requesting its add-on descriptor:
```
$ curl http://localhost:5000/
```
A successful request will return a HipChat descriptor for the add-on.
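
The exact contents depend on your add-on's configuration, but an abbreviated, illustrative descriptor for the greeter example above might look roughly like this (all values are examples only):

```
{
  "key": "ac-flask-hipchat-greeter",
  "name": "HipChat Greeter Example Add-on",
  "links": {
    "self": "http://localhost:5000/"
  },
  "capabilities": {
    "hipchatApiConsumer": {
      "scopes": ["send_notification"]
    },
    "installable": {
      "allowRoom": true,
      "callbackUrl": "http://localhost:5000/addon/installable"
    },
    "webhook": [
      {"event": "room_enter", "url": "..."}
    ]
  }
}
```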
## Preparing the add-on for installation
Now that you have a server running, you'll want to install your add-on into HipChat to try it out. The next step differs depending on whether
you're developing against hipchat.com or a private HipChat instance hosted behind your corporate firewall.
### Developing with HipChat.com
The easiest way to test with hipchat.com while developing on your local machine is to use [ngrok](https://ngrok.com).
Download and install it now if you need to -- it's an amazing tool that will change the way you develop and share web applications.
Start the ngrok tunnel in another terminal window. If you're using the [Vagrant starter project](https://bitbucket.org/atlassianlabs/ac-flask-hipchat-vagrant), ngrok should already be running, and its URL will have been printed to the screen when the VM started. For the purposes of this tutorial, we'll assume your domain is `https://asdf123.ngrok.com`.
While ngrok forwards both HTTP and HTTPS, to protect you and your HipChat group members you should always use HTTPS when running your add-on on the public internet.
### Developing with a private server
To install your add-on on a private HipChat server, the add-on server and the HipChat server need to be able to connect
to each other via HTTP or HTTPS on your local network. Simply determine an HTTP URL that your HipChat server can use to
connect to your locally running add-on, and use that as the "local base url" value needed by the Installation step.
If all goes well, you won't have to change anything from the defaults, as `ac-flask-hipchat` will simply attempt to
use the OS's hostname to build the local base url, which may already be good enough for your private network.
## Installation
### Configuring the add-on's local base url
Now, we need to tell the add-on server where it's running so that it can successfully be installed. By default,
it'll assume your local computer name, but for installation into HipChat, especially if using ngrok,
you'll likely want to set it explicitly.
You can do that by setting the `AC_BASE_URL` environment variable when you start the server:
```
$ AC_BASE_URL=https://asdf123.ngrok.com python web.py
```
When properly configured, you'll see the server report the new local base url when it starts up:
```
--------------------------------------
Public descriptor base URL: https://asdf123.ngrok.com
--------------------------------------
INFO:werkzeug: * Running on http://127.0.0.1:5000/
INFO:werkzeug: * Restarting with reloader
```
__Note__: by signing up for an ngrok account, you can specify a generally stable, custom subdomain for even easier
iterative development. See [ngrok](http://ngrok.com) for more information.
### Manually installing the add-on using HipChat's admin UI
To install your add-on into HipChat, you have to register your add-on's descriptor.
HipChat add-ons can operate inside a room or within the entire account. When developing, you should probably register
your add-on inside a room you've created just for testing. Also, you can only register add-ons inside a room where you
are an administrator.
To register your add-on descriptor, navigate to the rooms administration page at
`https://www.hipchat.com/rooms` (or the equivalent URL on your private server, if appropriate). Then select one of your
rooms in the list. On the following page, select `Integrations` in the
sidebar, and then click the "Build and install your own integration" link at the bottom of the page:

Paste your descriptor URL into the `Integration URL` field of the dialog that opens, then click `Add integration`. This
initiates the installation of your add-on for that room.
# Library Features
This library provides help with many aspects of add-on development, such as:
* A choice between a programmatic HipChat add-on descriptor builder and a full or partial descriptor object literal
* High-level conveniences for mounting webhook handlers and configuration pages
* A REST API client with built-in OAuth2 token acquisition and refresh (see the sketch below)
* JWT authentication validation, refresh, and token generation for web UI routes (e.g. the `configurable` capability)
See `test.py` for a very simple example add-on.
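
For example, the bundled REST client in `clients.py` can push glance updates on behalf of the current tenant from within a request context. A minimal sketch (the glance key, room id, and payload contents here are illustrative, not prescribed by the library):

```
from ac_flask.hipchat.clients import addon_client

# Update a room glance for the current tenant; the key, payload, and
# room id below are example values only.
addon_client.update_room_glance(
    glance_key='myaddon.glance',
    glance_data={'label': {'type': 'html', 'value': 'Hello from my add-on'}},
    room_id=42)
```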
### Authenticating requests from the iframe to the add-on
Add-ons typically can't use sessions, because browsers treat cookies set by the add-on as third-party cookies.
You can still make an authenticated call to an endpoint in your add-on, however:
Say there is an endpoint like this:
```
@addon.route(rule='/data')
@addon.json_output
def data():
return {'some': 'data'}
```
You want to call this endpoint from the iframe with the full authentication context. This can be done by rendering
a token into the iframe:
```
@addon.webpanel(key='webpanel.key', name='Panel')
def web_panel():
token = tenant.sign_jwt(sender.id)
return render_template('panel.html', token=token)
```
The template can then render the token into the desired location:
```
var url = '/data?signed_request={{ token }}';
```
or
```
<meta name='token' content='{{ token }}'>
```
You can also include the full context of the original request from HipChat by using:
```
token = tenant.sign_jwt(sender.id, {
'context': dict(context)
})
``` | AC-Flask-HipChat | /AC-Flask-HipChat-0.2.12.tar.gz/AC-Flask-HipChat-0.2.12/readme.md | readme.md |
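
On the server side, the signed context is then available to your routes through the `context` proxy exported by this library's `auth` module. A minimal sketch (the `/report` route name is hypothetical):

```
from ac_flask.hipchat.auth import require_tenant, context, sender

@addon.route(rule='/report')
@require_tenant
@addon.json_output
def report():
    # `context` holds whatever dict was signed into the JWT above
    return {'user_id': sender.id, 'context': dict(context or {})}
```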
"""
AC-Flask-HipChat
-------------
A library to help write a Flask-based HipChat add-on
"""
from setuptools import setup
setup(
name='AC-Flask-HipChat',
version='0.2.12',
url='https://bitbucket.org/atlassianlabs/ac-flask-hipchat',
license='APLv2',
author='Don Brown',
author_email='mrdon@twdata.org',
description='Atlassian Connect library based on Flask for HipChat',
long_description=__doc__,
packages=['ac_flask', 'ac_flask.hipchat'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask',
'pymongo',
'redis',
'requests',
'PyJWT'
],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
        'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| AC-Flask-HipChat | /AC-Flask-HipChat-0.2.12.tar.gz/AC-Flask-HipChat-0.2.12/setup.py | setup.py |
import json
import logging
from ac_flask.hipchat.events import events
from ac_flask.hipchat.db import mongo, redis
from .tenant import Tenant
from flask import request
import requests
_log = logging.getLogger(__name__)
def _invalid_install(msg):
_log.error("Installation failed: %s" % msg)
return msg, 400
def init(addon, allow_global, allow_room, send_events=True, db_name='clients', require_group_id=False):
# noinspection PyUnusedLocal
@addon.app.route('/addon/installable', methods=['POST'])
def on_install():
clients = mongo[db_name]
data = json.loads(request.data)
if not data.get('roomId', None) and not allow_global:
return _invalid_install("This add-on can only be installed in individual rooms. Please visit the " +
"'Add-ons' link in a room's administration area and install from there.")
if data.get('roomId', None) and not allow_room:
return _invalid_install("This add-on cannot be installed in an individual room. Please visit the " +
"'Add-ons' tab in the 'Group Admin' area and install from there.")
_log.info("Retrieving capabilities doc at %s" % data['capabilitiesUrl'])
capdoc = requests.get(data['capabilitiesUrl'], timeout=10).json()
if capdoc['links'].get('self', None) != data['capabilitiesUrl']:
return _invalid_install("The capabilities URL %s doesn't match the resource's self link %s" %
(data['capabilitiesUrl'], capdoc['links'].get('self', None)))
client = Tenant(data['oauthId'], data['oauthSecret'], room_id=data.get('roomId', None), capdoc=capdoc)
try:
session = client.get_token(redis, token_only=False,
scopes=addon.descriptor['capabilities']['hipchatApiConsumer']['scopes'])
except Exception as e:
_log.warn("Error validating installation by receiving token: %s" % e)
return _invalid_install("Unable to retrieve token using the new OAuth information")
_log.info("session: %s" % json.dumps(session))
if require_group_id and int(require_group_id) != int(session['group_id']):
_log.error("Attempted to install for group %s when group %s is only allowed" %
(session['group_id'], require_group_id))
return _invalid_install("Only group %s is allowed to install this add-on" % require_group_id)
client.group_id = session['group_id']
client.group_name = session['group_name']
clients.remove(client.id_query)
clients.insert(client.to_map())
if send_events:
events.fire_event('install', {"client": client})
return '', 201
# noinspection PyUnusedLocal
@addon.app.route('/addon/installable/<string:oauth_id>', methods=['DELETE'])
def on_uninstall(oauth_id):
uninstall_client(oauth_id, db_name, send_events)
return '', 204
addon.descriptor['capabilities']['installable']['callbackUrl'] = "{base}/addon/installable".format(
base=addon.app.config['BASE_URL']
)
def uninstall_client(oauth_id, db_name='clients', send_events=True):
client = Tenant.load(oauth_id)
clients = mongo[db_name]
client_filter = {"id": oauth_id}
clients.remove(client_filter)
if send_events:
events.fire_event('uninstall', {"client": client}) | AC-Flask-HipChat | /AC-Flask-HipChat-0.2.12.tar.gz/AC-Flask-HipChat-0.2.12/ac_flask/hipchat/installable.py | installable.py |
import json
from ac_flask.hipchat.auth import tenant
from ac_flask.hipchat.db import redis
import requests
def post(url, data, token):
return requests.post(url, headers={
"authorization": "Bearer %s" % token,
"content-type": "application/json"
}, data=json.dumps(data), timeout=10)
class RoomClient(object):
@staticmethod
def send_notification(message):
token = tenant.get_token(redis)
return post("%s/room/%s/notification" % (tenant.api_base_url, tenant.room_id), {"message": message}, token)
room_client = RoomClient()
class AddOnClient(object):
@staticmethod
def update_global_glance(glance_key, glance_data):
token = tenant.get_token(redis, scopes=['view_group'])
return post("%s/addon/ui" % tenant.api_base_url, {
"glance": [
{"content": glance_data, "key": glance_key}
]
}, token)
@staticmethod
def update_room_glance(glance_key, glance_data, room_id):
token = tenant.get_token(redis, scopes=['view_room'])
return post("%s/addon/ui/room/%s" % (tenant.api_base_url, room_id), {
"glance": [
{"content": glance_data, "key": glance_key}
]
}, token)
@staticmethod
def update_user_glance(glance_key, glance_data, user_id):
token = tenant.get_token(redis, scopes=['view_group'])
return post("%s/addon/ui/user/%s" % (tenant.api_base_url, user_id), {
"glance": [
{"content": glance_data, "key": glance_key}
]
}, token)
addon_client = AddOnClient() | AC-Flask-HipChat | /AC-Flask-HipChat-0.2.12.tar.gz/AC-Flask-HipChat-0.2.12/ac_flask/hipchat/clients.py | clients.py |
BASE_URL = "http://localhost:5000"
DEBUG = True
CORS_WHITELIST = ('.hipchat.com',) | AC-Flask-HipChat | /AC-Flask-HipChat-0.2.12.tar.gz/AC-Flask-HipChat-0.2.12/ac_flask/hipchat/default_settings.py | default_settings.py |
from ac_flask.hipchat.tenant import Tenant
from flask import _request_ctx_stack as stack, request
from flask import abort
import jwt
from werkzeug.local import LocalProxy
from functools import wraps
def require_tenant(func):
@wraps(func)
def inner(*args, **kwargs):
if not tenant:
abort(401)
return func(*args, **kwargs)
return inner
def _validate_jwt(req):
    # The token may arrive as a form field, a query parameter, or an
    # "Authorization: JWT <token>" header.
    if 'signed_request' in req.form:
        jwt_data = req.form['signed_request']
    else:
        jwt_data = req.args.get('signed_request', None)

    if not jwt_data:
        header = req.headers.get('authorization', '')
        jwt_data = header[4:] if header.startswith('JWT ') else None

    if not jwt_data:
        abort(401)

    try:
        # Decode without verification first to read the issuer, so the
        # tenant's shared secret can be looked up, then verify with it.
        oauth_id = jwt.decode(jwt_data, verify=False)['iss']
        client = Tenant.load(oauth_id)
        data = jwt.decode(jwt_data, client.secret, leeway=10)
        return client, data
    except jwt.DecodeError:
        abort(400)
    except jwt.ExpiredSignature:
        abort(401)
def _get_tenant():
    ctx = stack.top
    if ctx is not None:
        if not hasattr(ctx, 'tenant'):
            # Resolve the tenant once per request: from a signed JWT for
            # iframe/API calls, or from the OAuth client id in a webhook body.
            body = request.json
            cur_sender = cur_context = None
if request.args.get('signed_request', None) or 'authorization' in request.headers:
cur_tenant, data = _validate_jwt(request)
cur_sender = User(data['sub'])
cur_context = data.get('context', None)
elif body and 'oauth_client_id' in body:
tenant_id = body['oauth_client_id']
cur_tenant = Tenant.load(tenant_id)
else:
cur_tenant = None
if body and 'item' in body:
sent_by = _extract_sender(body['item'])
if sent_by:
user = User(user_id=sent_by['id'], name=sent_by['name'], mention_name=sent_by['mention_name'])
# Check if the sender in the webhook matches the one provided in the JWT
if cur_sender and str(cur_sender.id) != str(user.id):
abort(400)
cur_sender = user
ctx.tenant = cur_tenant
ctx.sender = cur_sender
ctx.context = cur_context
return ctx.tenant
def _extract_sender(item):
if 'sender' in item:
return item['sender']
if 'message' in item and 'from' in item['message']:
return item['message']['from']
return None
def _get_sender():
_get_tenant()
if hasattr(stack.top, 'sender'):
return stack.top.sender
else:
return None
def _get_context():
_get_tenant()
if hasattr(stack.top, 'context'):
return stack.top.context
else:
return None
tenant = LocalProxy(_get_tenant)
sender = LocalProxy(_get_sender)
context = LocalProxy(_get_context)
class User(object):
def __init__(self, user_id, name=None, mention_name=None):
super(User, self).__init__()
self.id = user_id
self.name = name
self.mention_name = mention_name
| AC-Flask-HipChat | /AC-Flask-HipChat-0.2.12.tar.gz/AC-Flask-HipChat-0.2.12/ac_flask/hipchat/auth.py | auth.py |
from flask import _app_ctx_stack as stack
import os
from pymongo import MongoClient
import redis as redispy
from werkzeug.local import LocalProxy
def _connect_mongo_db():
if os.environ.get('MONGOHQ_URL'):
c = MongoClient(os.environ['MONGOHQ_URL'])
db = c.get_default_database()
else:
c = MongoClient()
db = c.test
return db
def _connect_redis():
redis_url = os.getenv('REDISTOGO_URL', 'redis://localhost:6379')
return redispy.from_url(redis_url)
def _get_mongo_db():
ctx = stack.top
if ctx is not None:
if not hasattr(ctx, 'mongo_db'):
ctx.mongo_db = _connect_mongo_db()
return ctx.mongo_db
def _get_redis():
ctx = stack.top
if ctx is not None:
if not hasattr(ctx, 'redis'):
ctx.redis = _connect_redis()
return ctx.redis
redis = LocalProxy(_get_redis)
mongo = LocalProxy(_get_mongo_db) | AC-Flask-HipChat | /AC-Flask-HipChat-0.2.12.tar.gz/AC-Flask-HipChat-0.2.12/ac_flask/hipchat/db.py | db.py |
import logging
_log = logging.getLogger(__name__)
class EventBus(object):
def __init__(self):
super(EventBus, self).__init__()
self.events = {}
def fire_event(self, name, obj):
listeners = self.events.get(name, [])
_log.debug("Firing event %s for %s listeners" % (name, len(listeners)))
for listener in listeners:
listener(obj)
def register_event(self, name, func):
_log.debug("Registering event: " + name)
self.events.setdefault(name, []).append(func)
    def unregister_event(self, name, func):
        # Remove a previously registered listener for this event
        self.events.setdefault(name, []).remove(func)
def event_listener(self, func):
self.register_event(func.__name__, func)
return func
events = EventBus() | AC-Flask-HipChat | /AC-Flask-HipChat-0.2.12.tar.gz/AC-Flask-HipChat-0.2.12/ac_flask/hipchat/events.py | events.py |