diff --git a/.gitattributes b/.gitattributes index a6344aac8c09253b3b630fb776ae94478aa0275b..f6d259c7bef650b15357adf5b74c58852c5a18d0 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,28 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text *.zip filter=lfs diff=lfs merge=lfs -text *.zst filter=lfs diff=lfs merge=lfs -text *tfevents* filter=lfs diff=lfs merge=lfs -text +examples/pytorch/faces/face_landmarks.csv filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-cognito-identity.so filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-config.so filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-core.so filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-s3.so filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-36-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-python-sdk/examples/pytorch/faces/face_landmarks.csv filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-python-sdk/petrel_client/cache/mc/petrel_pymc.so filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rados.cpython-36m-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rgw.cpython-36m-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-core.so filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-s3.so filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-36-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-36m-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-37m-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +petrel_client/cache/mc/petrel_pymc.so filter=lfs diff=lfs merge=lfs -text +petrel_client/ceph/librgw/rados.cpython-36m-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +petrel_client/ceph/librgw/rgw.cpython-36m-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-core.so filter=lfs diff=lfs merge=lfs -text +petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-s3.so filter=lfs diff=lfs merge=lfs -text +petrel_client/ceph/s3cpp/pys3client.cpython-36m-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +petrel_client/ceph/s3cpp/pys3client.cpython-37m-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +petrel_client/ceph/s3cpp/pys3client.cpython-38-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..9a20ab336226593de31f22d97c5b818e192b2862 --- /dev/null +++ b/.gitignore @@ -0,0 +1,8 @@ +petrel_client/version.py +.vscode +**/__pycache__ +**/*.pyc +**/*.egg-info +venv +dist +build diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml new file mode 100644 index 
0000000000000000000000000000000000000000..c2cf1d1f6b95ae824a751b72c32fe3a730855fd6
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,24 @@
+image: registry.sensetime.com/library/python:3.6
+
+
+before_script:
+  - python -V
+  - python -m venv venv
+  - source ./venv/bin/activate
+  - python setup.py sdist
+  - pip install dist/*
+
+stages:
+  - flake8
+  - unit_test
+
+flake8:
+  stage: flake8
+  script:
+  - pip install flake8
+  - flake8 --ignore E501 --exclude '.git,.tox,*.egg-info,venv,scripts,tests,examples' .  # todo remove scripts,tests,examples
+
+test:
+  stage: unit_test
+  script:
+  - python tests/run_test.py
\ No newline at end of file
diff --git a/INSTALL b/INSTALL
new file mode 100644
index 0000000000000000000000000000000000000000..0b6fbdebe34f04bf7988f1b8c8b8cc3c139bea58
--- /dev/null
+++ b/INSTALL
@@ -0,0 +1,2 @@
+python setup.py install
+
diff --git a/README.md b/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b0d58c35c020d1e87ec7a33b6e414a2e65e768fa
--- /dev/null
+++ b/README.md
@@ -0,0 +1,304 @@
+Petrel OSS SDK 2.0
+===
+
+Note: this version of the SDK requires a Python 3.6 environment.
+
+If an older version was installed before, first run
+
+```bash
+$ pip uninstall pycephs3client
+$ rm -rf ~/.local/lib/python3.6/site-packages/petrel_client
+```
+
+## Upgrading pip before installing is recommended
+
+```bash
+source /mnt/lustre/share/platform/env/  # source this only if your environment requires it
+python3 -m pip install --upgrade pip    # add `sudo` or `--user` if your environment requires it
+```
+
+## Installing on the training cluster
+
+```bash
+$ source /mnt/lustre/share/platform/env/
+$ python setup.py sdist
+$ pip install --user dist/*
+```
+
+## Installing by modifying PYTHONPATH
+
+```bash
+$ source /mnt/lustre/share/platform/env/
+
+# install the SDK dependencies
+$ python setup.py egg_info
+$ pip install -r *.egg-info/requires.txt
+
+# build the SDK into the ./build directory
+$ python setup.py build
+
+# update the PYTHONPATH environment variable
+$ export PYTHONPATH=/build/lib:$PYTHONPATH
+```
+
+## Installing in a venv
+
+```bash
+$ python3 -m venv your_venv_name  # not needed if the venv already exists
+$ source your_venv_name/bin/activate
+$ python setup.py sdist
+$ pip install dist/*
+```
+
+## Installing into the system environment
+
+```bash
+$ python3 setup.py sdist
+$ python3 -m pip install dist/*  # add `sudo` or `--user` if your environment requires it
+```
+
+## Usage
+
+The SDK provides `get` and `put` interfaces, used as follows:
+
+```python
+data = client.get(url)                     # by default, the config file decides whether MC is used
+data = client.get(url, no_cache=True)      # this get reads directly from ceph
+data = client.get(url, update_cache=True)  # this get reads directly from ceph and caches the data in MC
+```
+
+```python
+client.put(url, data)                     # by default, put does not update MC
+client.put(url, data, update_cache=True)  # this put stores the data in ceph and then updates MC
+```
+
+``Note:`` if `MC` is not enabled in the config file, the `no_cache` and `update_cache` arguments are ignored.
+
+Below is a simple example that uses the SDK to read an image, process it, and store the result:
+
+```python
+import cv2
+import numpy as np
+from os.path import splitext
+from petrel_client.client import Client
+
+conf_path = '~/petreloss.conf'
+client = Client(conf_path)  # if conf_path is not given, the config is read from '~/petreloss.conf'
+img_url = 's3://bucket1/image.jpeg'
+img_gray_url = 's3://bucket1/image_gray.jpeg'
+img_ext = splitext(img_gray_url)[-1]
+
+# read the image
+img_bytes = client.get(img_url)
+assert(img_bytes is not None)
+img_mem_view = memoryview(img_bytes)
+img_array = np.frombuffer(img_mem_view, np.uint8)
+img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
+
+# process the image
+img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+# store the image
+success, img_gray_array = cv2.imencode(img_ext, img_gray)
+assert(success)
+img_gray_bytes = img_gray_array.tobytes()
+client.put(img_gray_url, img_gray_bytes)
+```
+
+For the config file, see [petreloss.conf](./conf/petreloss.conf)
+
+``Please note: in the config file, the key in `key = value` must not be preceded by whitespace, otherwise the line is treated as part of the previous option's value``
+
+For usage examples, see [multi_cluster_test.py](./tests/multi_cluster_test.py)
+
+## Saving and loading `Tensor` and `Json` data
+Usage example: [tensor_json_test.py](./tests/tensor_json_test.py); a rough sketch follows.
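+
+The test above is the authoritative reference. As an illustrative sketch only (assuming nothing beyond the byte-oriented `get`/`put` interfaces shown earlier, and reusing the `client` and placeholder bucket from the previous example), tensors and JSON can be round-tripped by serializing them to bytes first:
+
+```python
+import io
+import json
+
+import torch
+
+# Tensor: serialize into an in-memory buffer, then put/get the raw bytes
+buf = io.BytesIO()
+torch.save(torch.arange(4), buf)
+client.put('s3://bucket1/tensor.pt', buf.getvalue())
+tensor = torch.load(io.BytesIO(client.get('s3://bucket1/tensor.pt')))
+
+# Json: encode to bytes before put, parse after get
+client.put('s3://bucket1/meta.json', json.dumps({'n': 4}).encode('utf-8'))
+meta = json.loads(client.get('s3://bucket1/meta.json'))
+```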
+
+## If the data is too large to upload, use multipart upload
+Usage example: [multipart_test.py](./tests/multipart_test.py)
+
+## Creating a bucket
+```python
+client.create_bucket('s3://mybucket')
+```
+
+## Reading data under a given prefix sequentially
+```python
+cluster = 'cluster1'
+files = client.get_file_iterator('cluster1:s3://lili1.test2/test3')
+for p, k in files:
+    key = '{0}:s3://{1}'.format(cluster, p)
+    data = client.get(key)
+```
+## Accessing data with the anonymous account
+If `access_key` and `secret_key` are not set in the config file, data is accessed as the `anonymous` account.
+
+
+## McKeySizeExceed errors
+
+By default, the maximum `key` length supported by `MC` is 250 bytes. If a path is longer than that, an `McKeySizeExceed` error is raised.
+In that case, the user needs to define a `key` transformation rule to avoid the error.
+
+``Note:`` a Chinese character corresponds to multiple bytes.
+
+For example:
+
+```python
+def trim_key(key):
+    if isinstance(key, str):
+        key = key.encode('utf-8')
+    else:
+        assert isinstance(key, bytes)
+
+    return key[-249:]
+
+client = Client('~/petreloss.conf', mc_key_cb=trim_key)
+```
+
+Alternatively, built-in functions such as `md5` and `sha256` can be used, for example:
+
+```python
+client = Client('~/petreloss.conf', mc_key_cb='sha256')
+```
+
+Or specify one in the config file:
+
+```conf
+[mc]
+mc_key_cb = sha512
+```
+
+``Please note``
+
+- The user must ensure that the transformation results are unique; the built-in functions can also produce hash collisions.
+- If the `key` is of type `str` and contains Chinese characters, be sure to encode it with `encode('utf-8')`.
+
+
+## Using the fake client
+
+Add the following option to the corresponding client section:
+
+```conf
+fake = True
+```
+
+For the config file, see [fake_client.conf](./conf/fake_client.conf)
+
+For a usage example, see [fake_client_test.py](./tests/fake_client_test.py)
+
+## Forcing an MC update
+
+Calling the `get_and_update` interface, or passing `update_cache=True` to `get`, reads the data directly from the storage system and updates MC; see the sketch below.
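+
+The two forms are equivalent in effect (a minimal sketch; the URL is a placeholder):
+
+```python
+# both read from ceph, bypassing the cache, and refresh the MC entry
+data = client.get_and_update('s3://bucket1/image.jpeg')
+data = client.get('s3://bucket1/image.jpeg', update_cache=True)
+```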
+
+## IO statistics
+
+The `log` output frequency of the IO statistics can be changed in three ways, as sketched after this list:
+- via the environment variable `count_disp`
+- via `count_disp` in the config file (has no effect if the environment variable is already set)
+- by calling `client.set_count_disp(count_disp)` (this overrides the two options above), although due to the way `parrots` and `pytorch` run, the change may not take effect in some scenarios.
+
+If `count_disp` is `0`, printing of IO statistics is turned off.
+
+To print IO statistics to the `console`, set `console_log_level` to `INFO` or a lower level, and make sure `count_disp` is greater than `0`.
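+
+A small illustration (the values are hypothetical; the sketch assumes the environment variable is set before the SDK loads its configuration):
+
+```python
+import os
+os.environ['count_disp'] = '10000'  # hypothetical value, read when the config is loaded
+
+from petrel_client.client import Client
+client = Client('~/petreloss.conf')
+client.set_count_disp(0)  # runtime override: turn off IO statistics
+```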
+
+## DataLoader
+
+The `DataLoader` provided by the `SDK` supports the following additional arguments:
+
+- `prefetch_factor`, default 2. The number of `batch`es each `worker` prefetches.
+- `persistent_workers`, default `False`. If `True`, `worker` processes are not shut down after an `epoch` finishes iterating, and the next `epoch` reuses them.
+
+Example:
+
+```python
+from petrel_client.utils.data import DataLoader
+dataloader = DataLoader(dataset=xxx, ..., prefetch_factor=4, persistent_workers=True)
+```
+
+## SSL verification
+
+When the `https` protocol is used, `SSL` is not verified by default. To enable verification, add the following setting to the config file:
+```conf
+verify_ssl = True
+```
+
+## Presigned URL: generating a signed link
+
+```python
+presigned_url = client.generate_presigned_url(url, client_method='get_object', expires_in=3600)
+```
+
+`client_method` is either `get_object` (the default) or `put_object`
+
+`expires_in` is in seconds and defaults to 3600
+
+## Presigned POST: generating a signed POST
+
+```python
+presigned_post = client.generate_presigned_post(url, fields=None, conditions=None, expires_in=3600)
+```
+
+For the parameters and the return value, see [generate_presigned_post](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.generate_presigned_post); the bucket and key parameters are extracted from the url.
+
+## Reading data as a stream
+```python
+stream = client.get(url, enable_stream=True)
+```
+The returned `stream` is a `StreamingBody`; for details on how to use it, see
+https://botocore.amazonaws.com/v1/documentation/api/latest/reference/response.html
+
+## Checking whether an object exists
+```python
+exists = client.contains(url)
+```
+
+## Deleting an object
+```python
+client.delete(url)
+```
+
+## Listing the objects and directories under the current path
+```python
+contents = client.list(url)
+for content in contents:
+    if content.endswith('/'):
+        print('directory:', content)
+    else:
+        print('object:', content)
+```
+
+## Checking whether a directory exists
+```python
+client.isdir(url)
+```
+
+Note: `Ceph` has no notion of directories. When this function returns `True`, it means objects exist with the given `url` as a prefix; in all other cases it returns `False`.
+
+
+## Using the `Python` environment under the `/mnt/cache` directory
+Compared with the `/mnt/lustre` directory, running `Python` from the `/mnt/cache` directory gives a certain performance improvement.
+Usage:
+- `source` a `Python` environment under the `/mnt/cache` directory
+```bash
+### e.g. pt1.3v1
+source /mnt/cache/share/platform/env/pt1.3v1
+### or s0.3.3
+source /mnt/cache/share/spring/s0.3.3
+```
+
+- check that the `Python` path is correct
+```bash
+which python
+### the result should be /mnt/cache/...
+```
+
+- set the `PYTHONUSERBASE` environment variable
+```bash
+export PYTHONUSERBASE=/mnt/cache//.local
+```
+
+- reinstall the required dependency libraries (only needed on first use)
+```
+python -m pip install --user
+```
\ No newline at end of file
diff --git a/conf/ceph.conf b/conf/ceph.conf
new file mode 100644
index 0000000000000000000000000000000000000000..ddda66516e872430fc28869b432cd703c3c9acd3
--- /dev/null
+++ b/conf/ceph.conf
@@ -0,0 +1,24 @@
+[global]
+fsid = 85e75c85-ab98-426d-81cf-9daa534887f9
+mon_initial_members = SZ-OFFICE3-172-30-1-75
+mon_host = 172.30.1.75
+auth_cluster_required = cephx
+auth_service_required = cephx
+auth_client_required = cephx
+
+
+public_network=172.30.1.0/20
+cluster_network=172.30.1.0/20
+
+mon_allow_pool_delete = True
+
+debug_mon = 20/20
+debug_client = 20/20
+
+rgw_enable_gc_threads = False
+rgw_enable_lc_threads = False
+rgw_enable_quota_threads = False
+rgw_run_sync_thread = False
+rgw enable ops log = False
+rgw enable usage log = False
+admin socket = ""
diff --git a/conf/fake_client.conf b/conf/fake_client.conf
new file mode 100644
index 0000000000000000000000000000000000000000..601fec554853434294f4b47dceffdeb897d4ac19
--- /dev/null
+++ b/conf/fake_client.conf
@@ -0,0 +1,17 @@
+[DEFAULT]
+default_cluster = cluster1
+
+[dfs]
+fake = True
+enable_mc = True
+
+[mc]
+fake = True
+
+[cluster1]
+fake = True
+enable_mc = True
+
+[cluster2]
+fake = True
+enable_mc = False
\ No newline at end of file
diff --git a/conf/keyring b/conf/keyring
new file mode 100644
index 0000000000000000000000000000000000000000..442ea1ccc3f3b4435420c75498d6aae111a0ee47
--- /dev/null
+++ b/conf/keyring
@@ -0,0 +1,2 @@
+[client.rgw.train]
+    key = AQBiiqVeaPzSFRAAT1Vc+z8wPI5BkCroB6W/jQ==
diff --git a/conf/petreloss.conf b/conf/petreloss.conf
new file mode 100644
index 0000000000000000000000000000000000000000..e0f120d37aa2f98f0a1cc272dcbaf5c84c2ba7f3
--- /dev/null
+++ b/conf/petreloss.conf
@@ -0,0 +1,88 @@
+# Comments start with '#' or ';', occupy a line of their own, and must not share a line with configuration content
+# The key in `key = value` must not be preceded by whitespace, otherwise the line is treated as part of the previous option's value
+
+[DEFAULT]
+
+# enable Memcached, default False
+# enable_mc = True
+
+# Memcached-related settings; by default there is no need to set these
+# mc_server_list_path = /mnt/lustre/share/memcached_client/server_list.conf
+# mc_client_config_path = /mnt/lustre/share/memcached_client/client.conf
+
+# console log level, default WARNING; the options are CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
+# to print IO statistics to the console, set the level to INFO
+# console_log_level = WARNING
+
+# file log level, default DEBUG; the options are CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
+# file_log_level = DEBUG
+
+# log file path; by default none, i.e. no log file is written
+# if a log file path is configured, make sure the directory given by log_file_path has been created before training runs
+# log_file_path = /mnt/lustre//petrel_log_dir
+
+# maximum log file size, default 1GB
+# file_log_max_bytes = 1073741824
+
+# number of log file backups, default 1
+# file_log_backup_count = 1
+
+# IO statistics are logged after every count_disp get operations. Default 5000
+# if IO statistics are logged too frequently, increase this value
+# to turn IO statistics off, set this value to 0
+# count_disp = 5000
+
+# memory tracing, off by default
+# enable_mem_trace = False
+
+# number of retries allowed after a failed get operation, default 10
+# get_retry_max = 10
+
+# default cluster: when Ceph is accessed without specifying a cluster, data is fetched from default_cluster
+default_cluster = cluster1
+
+[mc]
+# if an accessed path is too long (over 250 bytes), mc raises an McKeySizeExceed error.
+# configuring mc_key_cb transforms the path passed to mc; the available options are:
+# blake2b, blake2s, md5, pbkdf2_hmac, sha1, sha224,
+# sha256, sha384, sha3_224, sha3_256, sha3_384,
+# sha3_512, sha512, shake_128, shake_256
+
+# mc_key_cb = sha256
+
+
+# whether to emit mc debug logs, default True
+# note that whether they actually reach the console and the file is further decided by console_log_level and file_log_level respectively
+# debug_mc = True
+
+
+[dfs]
+enable_mc = True
+
+# at least one cluster must be configured, otherwise ConfigSectionNotFoundError is raised
+[cluster1]
+# for each cluster's specific settings, [DEFAULT] supplies the value when one is not given here
+# e.g. setting 'enable_mc = False' here overrides the default
+enable_mc = True
+
+# enable S3 boto, default True
+# boto = c++ enables the C++ implementation of S3
+boto = True
+
+# if access_key and secret_key are not set, data is accessed as the anonymous account
+access_key = lili1
+secret_key = lili1
+
+host_base = http://127.0.0.1:7480
+
+# if boto = False, the following settings are also needed
+# conf = conf/ceph.conf
+# keyring = conf/keyring
+# name = client.rgw.train
+# cluster = ceph
+
+[cluster2]
+
+access_key = lili1
+secret_key = lili1
+host_base = http://127.0.0.1:7480
diff --git a/dev.sh b/dev.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8072b7fe4058aa50f473de1e09c60ab62dd2658d
--- /dev/null
+++ b/dev.sh
@@ -0,0 +1,4 @@
+python3 -m venv venv
+source venv/bin/activate
+pip install -U autopep8
+pip install -e .
\ No newline at end of file
diff --git a/examples/pytorch/MyDataset.py b/examples/pytorch/MyDataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..18034f75a3fc28d309f3bc5fbe1337d3eb222289
--- /dev/null
+++ b/examples/pytorch/MyDataset.py
@@ -0,0 +1,50 @@
+from __future__ import print_function, division
+import os
+import torch
+import pandas as pd
+from skimage import io, transform
+import matplotlib.pyplot as plt
+from torch.utils.data import Dataset, DataLoader
+from torchvision import transforms, utils
+
+import cv2
+import numpy as np
+import ceph
+
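+# Note: this example uses the legacy `ceph.S3Client` interface. With the
+# petrel_client 2.0 API described in the README, the equivalent read would be
+# (a sketch only, not used below):
+#
+#     from petrel_client.client import Client
+#     client = Client('~/petreloss.conf')
+#     value = client.get(img_name)
+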
+class MyDataset(Dataset):
+    def __init__(self, csv_file, root_dir):
+        """
+        Args:
+            csv_file (string): Path to the csv file with annotations.
+            root_dir (string): Bucket with all the images, such as s3://faces/
+        """
+        self.landmarks_frame = pd.read_csv(csv_file)
+        self.root_dir = root_dir
+
+    def __len__(self):
+        return len(self.landmarks_frame)
+
+    def __getitem__(self, idx):
+        img_name = os.path.join(self.root_dir, self.landmarks_frame.iloc[idx, 0])
+        s3client = ceph.S3Client()
+        value = s3client.get(img_name)
+        if not value:
+            """
+            Picture doesn't exist in ceph, your code here to handle error
+            """
+            return None
+        # decode the raw bytes into a uint8 array (np.fromstring is deprecated)
+        img_array = np.frombuffer(value, np.uint8)
+        # load image
+        #img = cvb.img_from_bytes(value)
+
+
+        string_data = img_array.tobytes()
+        #print(string_data)
+        #print(value)
+        #image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
+
+        landmarks = self.landmarks_frame.iloc[idx, 1:].values
+        landmarks = landmarks.astype('float').reshape(-1, 2)
+        sample = {'image': img_array, 'landmarks': landmarks}
+
+        return sample, string_data
diff --git a/examples/pytorch/MyTest.py b/examples/pytorch/MyTest.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6912783bb0fa30734840e60d31f1cbd0bb56aee
--- /dev/null
+++ b/examples/pytorch/MyTest.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+
+from __future__ import print_function, division
+import threading
+import logging
+
+from MyDataset import MyDataset
+
+import ceph
+
+class testThread(threading.Thread):
+    def __init__(self, threadid):
+        threading.Thread.__init__(self)
+        self.threadid = threadid
+
+    def run(self):
+        self.do_tasks()
+
+    def do_tasks(self):
+        face_dataset = MyDataset(csv_file='faces/face_landmarks.csv', root_dir='s3://yijianliang.test/train-copy0/')
+
+        for i in range(len(face_dataset)):
+            # __getitem__ returns a (sample, string_data) pair
+            sample, string_data = face_dataset[i]
+            #print(i, sample['image'].shape, sample['landmarks'].shape)
+            logging.info('{0} {1} {2}'.format(i, sample['image'].shape, sample['landmarks'].shape))
+
+'''
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.INFO,
+                format='%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s',
+                datefmt='[%Y-%m_%d %H:%M:%S]',
+                filename='log/MyTest.log',
+                filemode='a')
+    threads = []
+    for i in range(0,1):
+        threads.append(testThread(threadid=i))
+    for thread in threads:
+        thread.start()
+    for thread in threads:
+        thread.join()
+'''
+
+if __name__ == '__main__':
+    logging.basicConfig(level=logging.INFO,
+                format='%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s',
+                datefmt='[%Y-%m_%d %H:%M:%S]')
+                #filename='log/MyTest.log',
+                #filemode='a')
+
+    face_dataset = MyDataset(csv_file='faces/face_landmarks.csv', root_dir='s3://yijianliang.ssd.qos/train')
+    for i in range(len(face_dataset)):
+        sample, string_data = face_dataset[i]
+        #print(i, sample['image'].shape, sample['landmarks'].shape)
+        object_name = str(i)
+        if sample and string_data:
+            #s3client = ceph.S3Client()
+            s3client = ceph.S3Client(access_key = "DWD2LKXJHJLGYKRDED7T", secret_key = "tzJ2a0g26deZZux3bLOd29YV9zJlaLM400Fu5tdn")
+            ret = s3client.save_from_string('s3://sensestudytest/save_from_string/', object_name, string_data)
+            if ret:
+                logging.info('Save {0}: {1} bytes'.format(object_name, ret))
+            logging.info('{0} {1} {2}'.format(i, sample['image'].shape, sample['landmarks'].shape))
diff --git a/examples/pytorch/faces/face_landmarks.csv b/examples/pytorch/faces/face_landmarks.csv
new file mode 100644
index 0000000000000000000000000000000000000000..8c7b4bcc655622c48e1cf8b9e43438ff008767a9
--- /dev/null
+++ b/examples/pytorch/faces/face_landmarks.csv
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid
sha256:82bd06902826e8a79658a5946187a4f441b9d21f8edceec4dfa8723ffcfcbad0 +size 63189398 diff --git a/examples/pytorch/log/MyTest.log b/examples/pytorch/log/MyTest.log new file mode 100644 index 0000000000000000000000000000000000000000..13f0c29401dd7320b5914863d97438f2b2145929 --- /dev/null +++ b/examples/pytorch/log/MyTest.log @@ -0,0 +1,3 @@ +[2018-04_18 13:45:16] MyTest.py[line:22] INFO 0 (250, 250, 3) (3, 2) +[2018-04_18 13:45:16] MyTest.py[line:22] INFO 0 (250, 250, 3) (3, 2) +[2018-04_18 13:45:16] MyTest.py[line:22] INFO 0 (250, 250, 3) (3, 2) diff --git a/petrel-sdk-H.tar.gz b/petrel-sdk-H.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..ae0351eb476266b4ec3f0ab39df5db39290c943e --- /dev/null +++ b/petrel-sdk-H.tar.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e4bd0ab2f7ca22aa16c83496358025e136ee2eea3fde591d0574c5b3a6f82d41 +size 21200532 diff --git a/petrel-sdk/petrel-oss-cpp-sdk/.gitignore b/petrel-sdk/petrel-oss-cpp-sdk/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..103c6dfc1c1078bd7923fb9b1a3a84a8e3161b03 --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/.gitignore @@ -0,0 +1,5 @@ +build +venv +pys3client.cpp +pys3client.cpython-36m-x86_64-linux-gnu.so +pys3client.so diff --git a/petrel-sdk/petrel-oss-cpp-sdk/.gitmodules b/petrel-sdk/petrel-oss-cpp-sdk/.gitmodules new file mode 100644 index 0000000000000000000000000000000000000000..e6b829b2cd5e07c32848508abb88b830742f5307 --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/.gitmodules @@ -0,0 +1,3 @@ +[submodule "aws-sdk-cpp"] + path = aws-sdk-cpp + url = http://gitlab.bj.sensetime.com/platform/StorageSystem/aws-sdk-cpp.git diff --git a/petrel-sdk/petrel-oss-cpp-sdk/README.md b/petrel-sdk/petrel-oss-cpp-sdk/README.md new file mode 100644 index 0000000000000000000000000000000000000000..079f172f6e6244e76a07552954aa1fe0a7f3fbb0 --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/README.md @@ -0,0 +1,6 @@ +# petrel-oss-cpp-sdk + +```bash +scl enable devtoolset-8 -- bash +python3 setup.py build_ext --inplace +``` \ No newline at end of file diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-c-common.so b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-c-common.so new file mode 100644 index 0000000000000000000000000000000000000000..1a9e3ccf47c2584f8e6ee1add3c417c4427145b6 Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-c-common.so differ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-c-common.so.1.0.0 b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-c-common.so.1.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..1a9e3ccf47c2584f8e6ee1add3c417c4427145b6 Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-c-common.so.1.0.0 differ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-c-event-stream.so b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-c-event-stream.so new file mode 100644 index 0000000000000000000000000000000000000000..20a53e1a70674ac835f5859c9c7925bc5fe8a477 Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-c-event-stream.so differ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-c-event-stream.so.0unstable b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-c-event-stream.so.0unstable new file mode 100644 index 0000000000000000000000000000000000000000..20a53e1a70674ac835f5859c9c7925bc5fe8a477 Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-c-event-stream.so.0unstable differ diff --git 
a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-c-event-stream.so.1.0.0 b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-c-event-stream.so.1.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..20a53e1a70674ac835f5859c9c7925bc5fe8a477 Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-c-event-stream.so.1.0.0 differ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-checksums.so b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-checksums.so new file mode 100644 index 0000000000000000000000000000000000000000..093a15984ab778d321c481c18dbfebb7b720dfea Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-checksums.so differ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-checksums.so.0unstable b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-checksums.so.0unstable new file mode 100644 index 0000000000000000000000000000000000000000..093a15984ab778d321c481c18dbfebb7b720dfea Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-checksums.so.0unstable differ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-checksums.so.1.0.0 b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-checksums.so.1.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..093a15984ab778d321c481c18dbfebb7b720dfea Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-checksums.so.1.0.0 differ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-cognito-identity.so b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-cognito-identity.so new file mode 100644 index 0000000000000000000000000000000000000000..3985929f697841387191fb649a43bc01d324cf96 --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-cognito-identity.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce0525ae1cf99e3f99352f67cb1dbc5cc3f45e007474e71646e445aff283c167 +size 1203624 diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-config.so b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-config.so new file mode 100644 index 0000000000000000000000000000000000000000..8854a79310d7368f739eaa223f2d9f4a9a02d8de --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-config.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d91cc9c68c489ba0a16e63eddd8354fc9b8de53bb991d5f2e7a911fcc38f81a +size 4489616 diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-core.so b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-core.so new file mode 100644 index 0000000000000000000000000000000000000000..b87d446da34b9c9ba3e16f5e785eb46f19d7e101 --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-core.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7eed1dcb18c4481516fabbbe5d1fe11559af4a7efa196227f75da4f38e8428a1 +size 1465968 diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-identity-management.so b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-identity-management.so new file mode 100644 index 0000000000000000000000000000000000000000..21522f24aab977b7a06e697c7feb3e9ac357ea0c Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-identity-management.so differ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-s3.so b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-s3.so new file mode 100644 index 0000000000000000000000000000000000000000..02978c8e352e0075594c40f8112e34752af10286 --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-s3.so @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:bde1e1ba070640c53b51d885ca4ffd12fcedb031ca3a6231507f499c385d39ef +size 4416312 diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-sts.so b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-sts.so new file mode 100644 index 0000000000000000000000000000000000000000..cf8ca2bafaa143592f8b4110891330096b01cb1b Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-sts.so differ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-sts_duplicate_2023-06-05-17-36-57.so b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-sts_duplicate_2023-06-05-17-36-57.so new file mode 100644 index 0000000000000000000000000000000000000000..cf8ca2bafaa143592f8b4110891330096b01cb1b Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-sts_duplicate_2023-06-05-17-36-57.so differ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-transfer.so b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-transfer.so new file mode 100644 index 0000000000000000000000000000000000000000..877c8a09a6762c174b9ed99f84b35a3d48f69183 Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-transfer.so differ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-transfer_duplicate_2023-06-05-17-36-57.so b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-transfer_duplicate_2023-06-05-17-36-57.so new file mode 100644 index 0000000000000000000000000000000000000000..877c8a09a6762c174b9ed99f84b35a3d48f69183 Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/libs/libaws-cpp-sdk-transfer_duplicate_2023-06-05-17-36-57.so differ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpp b/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpp new file mode 100644 index 0000000000000000000000000000000000000000..71dabd2eaa8652fa768ec274a17aebe055907f5e --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpp @@ -0,0 +1,8335 @@ +/* Generated by Cython 0.29.34 */ + +#ifndef PY_SSIZE_T_CLEAN +#define PY_SSIZE_T_CLEAN +#endif /* PY_SSIZE_T_CLEAN */ +#include "Python.h" +#ifndef Py_PYTHON_H + #error Python headers needed to compile C extensions, please install development version of Python. +#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.6+ or Python 3.3+. 
+#else +#define CYTHON_ABI "0_29_34" +#define CYTHON_HEX_VERSION 0x001D22F0 +#define CYTHON_FUTURE_DIVISION 0 +#include +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #define CYTHON_COMPILING_IN_NOGIL 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 + #endif +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #define CYTHON_COMPILING_IN_NOGIL 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef 
CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 + #endif +#elif defined(PY_NOGIL) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #define CYTHON_COMPILING_IN_NOGIL 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #ifndef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 1 + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 1 + #endif + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #define CYTHON_COMPILING_IN_NOGIL 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS (PY_VERSION_HEX < 0x030C00A5) + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #if PY_VERSION_HEX >= 0x030B00A4 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #elif !defined(CYTHON_FAST_THREAD_STATE) + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL (PY_VERSION_HEX < 0x030A0000) + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef 
CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif + #ifndef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS ((PY_VERSION_HEX >= 0x030600B1) && (PY_VERSION_HEX < 0x030C00A5)) + #endif + #if PY_VERSION_HEX >= 0x030B00A4 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #elif !defined(CYTHON_USE_EXC_INFO_STACK) + #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) + #endif + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 1 + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #if PY_MAJOR_VERSION < 3 + #include "longintrepr.h" + #endif + #undef SHIFT + #undef BASE + #undef MASK + #ifdef SIZEOF_VOID_P + enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; + #endif +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef __cplusplus + #error "Cython files generated with the C++ option must be compiled with a C++ compiler." 
+#endif +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #else + #define CYTHON_INLINE inline + #endif +#endif +template +void __Pyx_call_destructor(T& x) { + x.~T(); +} +template +class __Pyx_FakeReference { + public: + __Pyx_FakeReference() : ptr(NULL) { } + __Pyx_FakeReference(const T& ref) : ptr(const_cast(&ref)) { } + T *operator->() { return ptr; } + T *operator&() { return ptr; } + operator T&() { return *ptr; } + template bool operator ==(U other) { return *ptr == other; } + template bool operator !=(U other) { return *ptr != other; } + private: + T *ptr; +}; + +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" + #define __Pyx_DefaultClassType PyType_Type +#if PY_VERSION_HEX >= 0x030B00A1 + static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int k, int l, int s, int f, + PyObject *code, PyObject *c, PyObject* n, PyObject *v, + PyObject *fv, PyObject *cell, PyObject* fn, + PyObject *name, int fline, PyObject *lnos) { + PyObject *kwds=NULL, *argcount=NULL, *posonlyargcount=NULL, *kwonlyargcount=NULL; + PyObject *nlocals=NULL, *stacksize=NULL, *flags=NULL, *replace=NULL, *call_result=NULL, *empty=NULL; + const char *fn_cstr=NULL; + const char *name_cstr=NULL; + PyCodeObject* co=NULL; + PyObject *type, *value, *traceback; + PyErr_Fetch(&type, &value, &traceback); + if (!(kwds=PyDict_New())) goto end; + if (!(argcount=PyLong_FromLong(a))) goto end; + if (PyDict_SetItemString(kwds, "co_argcount", argcount) != 0) goto end; + if (!(posonlyargcount=PyLong_FromLong(0))) goto end; + if (PyDict_SetItemString(kwds, "co_posonlyargcount", posonlyargcount) != 0) goto end; + if (!(kwonlyargcount=PyLong_FromLong(k))) goto end; + if (PyDict_SetItemString(kwds, "co_kwonlyargcount", kwonlyargcount) != 0) goto end; + if (!(nlocals=PyLong_FromLong(l))) goto end; + if (PyDict_SetItemString(kwds, "co_nlocals", nlocals) != 0) goto end; + if (!(stacksize=PyLong_FromLong(s))) goto end; + if (PyDict_SetItemString(kwds, "co_stacksize", stacksize) != 0) goto end; + if (!(flags=PyLong_FromLong(f))) goto end; + if (PyDict_SetItemString(kwds, "co_flags", flags) != 0) goto end; + if (PyDict_SetItemString(kwds, "co_code", code) != 0) goto end; + if (PyDict_SetItemString(kwds, "co_consts", c) != 0) goto end; + if (PyDict_SetItemString(kwds, "co_names", n) != 0) goto end; + if (PyDict_SetItemString(kwds, "co_varnames", v) != 0) goto end; + if (PyDict_SetItemString(kwds, "co_freevars", fv) != 0) goto end; + if (PyDict_SetItemString(kwds, "co_cellvars", cell) != 0) goto end; + if (PyDict_SetItemString(kwds, "co_linetable", lnos) != 0) goto end; + if (!(fn_cstr=PyUnicode_AsUTF8AndSize(fn, NULL))) goto end; + if (!(name_cstr=PyUnicode_AsUTF8AndSize(name, NULL))) goto end; + if (!(co = PyCode_NewEmpty(fn_cstr, name_cstr, fline))) goto end; + if (!(replace = PyObject_GetAttrString((PyObject*)co, "replace"))) goto cleanup_code_too; + if (!(empty = PyTuple_New(0))) goto cleanup_code_too; // unfortunately __pyx_empty_tuple isn't available here + if (!(call_result = 
PyObject_Call(replace, empty, kwds))) goto cleanup_code_too; + Py_XDECREF((PyObject*)co); + co = (PyCodeObject*)call_result; + call_result = NULL; + if (0) { + cleanup_code_too: + Py_XDECREF((PyObject*)co); + co = NULL; + } + end: + Py_XDECREF(kwds); + Py_XDECREF(argcount); + Py_XDECREF(posonlyargcount); + Py_XDECREF(kwonlyargcount); + Py_XDECREF(nlocals); + Py_XDECREF(stacksize); + Py_XDECREF(replace); + Py_XDECREF(call_result); + Py_XDECREF(empty); + if (type) { + PyErr_Restore(type, value, traceback); + } + return co; + } +#else + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#endif + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef METH_STACKLESS + #define METH_STACKLESS 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 + #define PyMem_RawMalloc(n) PyMem_Malloc(n) + #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) + #define PyMem_RawFree(p) PyMem_Free(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void 
PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +#else +#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #if PY_VERSION_HEX >= 0x030C0000 + #define __Pyx_PyUnicode_READY(op) (0) + #else + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #endif + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #if PY_VERSION_HEX >= 0x030C0000 + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) + #else + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) + #endif + #endif +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#ifndef PyObject_Unicode + #define PyObject_Unicode PyObject_Str +#endif +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#if PY_VERSION_HEX >= 0x030900A4 + #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) +#else + #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) +#endif +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) +#else + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define 
PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? ((void)(klass), PyMethod_New(func, self)) : __Pyx_NewRef(func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif + +#if defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS) + #if !defined(_USE_MATH_DEFINES) + #define _USE_MATH_DEFINES + #endif +#endif +#include +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + +#define __PYX_MARK_ERR_POS(f_index, lineno) \ + { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } +#define __PYX_ERR(f_index, lineno, Ln_error) \ + { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__pys3client +#define __PYX_HAVE_API__pys3client +/* Early includes */ +#include +#include +#include "ios" +#include "new" +#include "stdexcept" +#include "typeinfo" +#include +#include +#include + + #if __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1600) + // move should be defined for these versions of MSVC, but __cplusplus isn't set usefully + #include + + namespace cython_std { + template typename std::remove_reference::type&& move(T& t) noexcept { return std::move(t); } + template typename std::remove_reference::type&& move(T&& t) noexcept { return std::move(t); } + } + + #endif + +#include "s3client.h" +#ifdef _OPENMP +#include +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION 
>= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { + return (size_t) i < (size_t) limit; +} +#if defined (__cplusplus) && __cplusplus >= 201103L + #include + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? -value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) 
__Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? __Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define 
__Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime = NULL; +static PyObject *__pyx_empty_tuple; +static PyObject *__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + + +static const char *__pyx_f[] = { + "stringsource", + "pys3client.pyx", +}; + +/*--- Type declarations ---*/ +struct __pyx_obj_10pys3client_PyS3Client; + +/* "pys3client.pyx":25 + * self.error_message = error_message + * + * cdef class PyS3Client: # <<<<<<<<<<<<<< + * cdef S3Client *client + * + */ +struct __pyx_obj_10pys3client_PyS3Client { + PyObject_HEAD + S3Client *client; +}; + + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) 
__Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* PyFunctionFastCall.proto */ +#if CYTHON_FAST_PYCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs)\ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); +#else +#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) +#endif +#define __Pyx_BUILD_ASSERT_EXPR(cond)\ + (sizeof(char [1 - 2*!(cond)]) - 1) +#ifndef Py_MEMBER_SIZE +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#endif +#if CYTHON_FAST_PYCALL + static size_t __pyx_pyframe_localsplus_offset = 0; + #include "frameobject.h" +#if PY_VERSION_HEX >= 0x030b00a6 + #ifndef Py_BUILD_CORE + #define Py_BUILD_CORE 1 + #endif + #include "internal/pycore_frame.h" +#endif + #define __Pxy_PyFrame_Initialize_Offsets()\ + ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ + (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) + #define __Pyx_PyFrame_GetLocalsplus(frame)\ + (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) +#endif // CYTHON_FAST_PYCALL +#endif + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyObjectCallMethO.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); 
+#endif + +/* PyObjectCallNoArg.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); +#else +#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) +#endif + +/* PyCFunctionFastCall.proto */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); +#else +#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) +#endif + +/* PyObjectCallOneArg.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); + +/* PyObjectCall2Args.proto */ +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* PyObjectSetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +#define __Pyx_PyObject_DelAttrStr(o,n) __Pyx_PyObject_SetAttrStr(o, n, NULL) +static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value); +#else +#define __Pyx_PyObject_DelAttrStr(o,n) PyObject_DelAttr(o,n) +#define __Pyx_PyObject_SetAttrStr(o,n,v) PyObject_SetAttr(o,n,v) +#endif + +/* PyDictVersioning.proto */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ + (version_var) = __PYX_GET_DICT_VERSION(dict);\ + (cache_var) = (value); +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ + (VAR) = __pyx_dict_cached_value;\ + } else {\ + (VAR) = __pyx_dict_cached_value = (LOOKUP);\ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ + }\ +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); +#endif + +/* GetModuleGlobalName.proto */ +#if CYTHON_USE_DICT_VERSIONS +#define __Pyx_GetModuleGlobalName(var, name) do {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ + (likely(__pyx_dict_cached_value) ? 
__Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ + __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} while(0) +#define __Pyx_GetModuleGlobalNameUncached(var, name) do {\ + PY_UINT64_T __pyx_dict_version;\ + PyObject *__pyx_dict_cached_value;\ + (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} while(0) +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); +#else +#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) +#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); +#endif + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + +/* ListAppend.proto */ +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) { + Py_INCREF(x); + PyList_SET_ITEM(list, len, x); + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_PyList_Append(L,x) PyList_Append(L,x) +#endif + +/* ListCompAppend.proto */ +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* 
list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len)) { + Py_INCREF(x); + PyList_SET_ITEM(list, len, x); + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) +#endif + +/* PyObject_GenericGetAttrNoDict.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr +#endif + +/* PyObject_GenericGetAttr.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr +#endif + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* PyObjectGetAttrStrNoError.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name); + +/* SetupReduce.proto */ +static int __Pyx_setup_reduce(PyObject* type_obj); + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* IterFinish.proto */ +static CYTHON_INLINE int __Pyx_IterFinish(void); + +/* UnpackItemEndCheck.proto */ +static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); + +/* CalculateMetaclass.proto */ +static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases); + +/* FetchCommonType.proto */ +static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type); + +/* CythonFunctionShared.proto */ +#define __Pyx_CyFunction_USED 1 +#define __Pyx_CYFUNCTION_STATICMETHOD 0x01 +#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02 +#define __Pyx_CYFUNCTION_CCLASS 0x04 +#define __Pyx_CyFunction_GetClosure(f)\ + (((__pyx_CyFunctionObject *) (f))->func_closure) +#define __Pyx_CyFunction_GetClassObj(f)\ + (((__pyx_CyFunctionObject *) (f))->func_classobj) +#define __Pyx_CyFunction_Defaults(type, f)\ + ((type *)(((__pyx_CyFunctionObject *) (f))->defaults)) +#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\ + ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g) +typedef struct { + PyCFunctionObject func; +#if PY_VERSION_HEX < 0x030500A0 + PyObject *func_weakreflist; +#endif + PyObject *func_dict; + PyObject *func_name; + PyObject *func_qualname; + PyObject *func_doc; + PyObject *func_globals; + PyObject *func_code; + PyObject *func_closure; + PyObject *func_classobj; + void *defaults; + int defaults_pyobjects; + size_t defaults_size; // used by FusedFunction for copying defaults + int flags; + PyObject *defaults_tuple; + PyObject *defaults_kwdict; + PyObject *(*defaults_getter)(PyObject *); + PyObject *func_annotations; +} __pyx_CyFunctionObject; +static PyTypeObject *__pyx_CyFunctionType = 0; +#define __Pyx_CyFunction_Check(obj) (__Pyx_TypeCheck(obj, __pyx_CyFunctionType)) +static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, 
PyMethodDef *ml, + int flags, PyObject* qualname, + PyObject *self, + PyObject *module, PyObject *globals, + PyObject* code); +static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m, + size_t size, + int pyobjects); +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, + PyObject *tuple); +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, + PyObject *dict); +static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, + PyObject *dict); +static int __pyx_CyFunction_init(void); + +/* CythonFunction.proto */ +static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, + int flags, PyObject* qualname, + PyObject *closure, + PyObject *module, PyObject *globals, + PyObject* code); + +/* SetNameInClass.proto */ +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 +#define __Pyx_SetNameInClass(ns, name, value)\ + (likely(PyDict_CheckExact(ns)) ? _PyDict_SetItem_KnownHash(ns, name, value, ((PyASCIIObject *) name)->hash) : PyObject_SetItem(ns, name, value)) +#elif CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_SetNameInClass(ns, name, value)\ + (likely(PyDict_CheckExact(ns)) ? PyDict_SetItem(ns, name, value) : PyObject_SetItem(ns, name, value)) +#else +#define __Pyx_SetNameInClass(ns, name, value) PyObject_SetItem(ns, name, value) +#endif + +/* Py3ClassCreate.proto */ +static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, PyObject *qualname, + PyObject *mkw, PyObject *modname, PyObject *doc); +static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, PyObject *dict, + PyObject *mkw, int calculate_metaclass, int allow_py2_metaclass); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* GCCDiagnostics.proto */ +#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) +#define __Pyx_HAS_GCC_DIAGNOSTIC +#endif + +/* CppExceptionConversion.proto */ +#ifndef __Pyx_CppExn2PyErr +#include <new> +#include <typeinfo> +#include <stdexcept> +#include <ios> +static void __Pyx_CppExn2PyErr() { + try { + if (PyErr_Occurred()) + ; // let the latest Python exn pass through and ignore the current one + else + throw; + } catch (const std::bad_alloc& exn) { + PyErr_SetString(PyExc_MemoryError, exn.what()); + } catch (const std::bad_cast& exn) { + PyErr_SetString(PyExc_TypeError, exn.what()); + } catch (const std::bad_typeid& exn) { + PyErr_SetString(PyExc_TypeError, exn.what()); + } catch (const std::domain_error& exn) { + PyErr_SetString(PyExc_ValueError, exn.what()); + } catch (const std::invalid_argument& exn) { + PyErr_SetString(PyExc_ValueError, exn.what()); + } catch (const std::ios_base::failure& exn) { + PyErr_SetString(PyExc_IOError, exn.what()); + } catch (const std::out_of_range& exn) { + PyErr_SetString(PyExc_IndexError, exn.what()); + } catch (const std::overflow_error& exn) { + PyErr_SetString(PyExc_OverflowError, exn.what()); + } catch (const std::range_error& exn) { + PyErr_SetString(PyExc_ArithmeticError, exn.what()); + } catch (const std::underflow_error& exn) { + PyErr_SetString(PyExc_ArithmeticError, exn.what()); + } catch (const std::exception& exn) { + PyErr_SetString(PyExc_RuntimeError, exn.what()); + } + catch (...)
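+/* __Pyx_CppExn2PyErr is Cython's standard C++-to-Python exception bridge: any
+   C++ exception that escapes an S3Client call is translated into the closest
+   Python exception type (std::bad_alloc -> MemoryError, std::out_of_range ->
+   IndexError, and so on down the ladder above); anything unrecognized falls
+   through to the catch-all below and is re-raised as a plain RuntimeError. */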
+ { + PyErr_SetString(PyExc_RuntimeError, "Unknown exception"); + } +} +#endif + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + + +/* Module declarations from 'libc.string' */ + +/* Module declarations from 'libcpp.string' */ + +/* Module declarations from 'libcpp.vector' */ + +/* Module declarations from 'libcpp.list' */ + +/* Module declarations from 'libcpp.utility' */ + +/* Module declarations from 'libcpp.pair' */ + +/* Module declarations from 's3client' */ + +/* Module declarations from 'pys3client' */ +static PyTypeObject *__pyx_ptype_10pys3client_PyS3Client = 0; +static PyObject *__pyx_7genexpr__pyx_v_10pys3client_k; +static PyObject *__pyx_7genexpr__pyx_v_10pys3client_v; +static CYTHON_INLINE PyObject *__pyx_convert_PyObject_string_to_py_std__in_string(std::string const &); /*proto*/ +static CYTHON_INLINE PyObject *__pyx_convert_PyUnicode_string_to_py_std__in_string(std::string const &); /*proto*/ +static CYTHON_INLINE PyObject *__pyx_convert_PyStr_string_to_py_std__in_string(std::string const &); /*proto*/ +static CYTHON_INLINE PyObject *__pyx_convert_PyBytes_string_to_py_std__in_string(std::string const &); /*proto*/ +static CYTHON_INLINE PyObject *__pyx_convert_PyByteArray_string_to_py_std__in_string(std::string const &); /*proto*/ +static PyObject *__pyx_convert_pair_to_py_int____std_3a__3a_string(std::pair<int,std::string> const &); /*proto*/ +static PyObject *__pyx_convert_list_to_py_std_3a__3a_pair_3c_int_2c_std_3a__3a_string_3e___(std::list<std::pair<int,std::string> > const &); /*proto*/ +static std::string __pyx_convert_string_from_py_std__in_string(PyObject *); /*proto*/ +static PyObject *__pyx_convert_vector_to_py_std_3a__3a_string(const std::vector<std::string> &); /*proto*/ +#define __Pyx_MODULE_NAME "pys3client" +extern int __pyx_module_is_main_pys3client; +int __pyx_module_is_main_pys3client = 0; + +/* Implementation of 'pys3client' */ +static PyObject *__pyx_builtin_TypeError; +static PyObject *__pyx_builtin_range; +static const char __pyx_k_ak[] = "ak"; +static const char __pyx_k_sk[] = "sk"; +static const char __pyx_k_doc[] = "__doc__"; +static const char __pyx_k_get[] = "get"; +static const char __pyx_k_key[] = "key"; +static const char
__pyx_k_data[] = "data"; +static const char __pyx_k_init[] = "__init__"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_name[] = "__name__"; +static const char __pyx_k_self[] = "self"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_lower[] = "lower"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_utf_8[] = "utf-8"; +static const char __pyx_k_bucket[] = "bucket"; +static const char __pyx_k_decode[] = "decode"; +static const char __pyx_k_encode[] = "encode"; +static const char __pyx_k_module[] = "__module__"; +static const char __pyx_k_reduce[] = "__reduce__"; +static const char __pyx_k_S3Error[] = "S3Error"; +static const char __pyx_k_prepare[] = "__prepare__"; +static const char __pyx_k_endpoint[] = "endpoint"; +static const char __pyx_k_filename[] = "filename"; +static const char __pyx_k_getstate[] = "__getstate__"; +static const char __pyx_k_init_api[] = "init_api"; +static const char __pyx_k_qualname[] = "__qualname__"; +static const char __pyx_k_setstate[] = "__setstate__"; +static const char __pyx_k_ERROR_MAP[] = "ERROR_MAP"; +static const char __pyx_k_TypeError[] = "TypeError"; +static const char __pyx_k_Undefined[] = "Undefined"; +static const char __pyx_k_log_level[] = "log_level"; +static const char __pyx_k_metaclass[] = "__metaclass__"; +static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; +static const char __pyx_k_ERROR_LIST[] = "ERROR_LIST"; +static const char __pyx_k_PyS3Client[] = "PyS3Client"; +static const char __pyx_k_error_name[] = "error_name"; +static const char __pyx_k_pys3client[] = "pys3client"; +static const char __pyx_k_verify_ssl[] = "verify_ssl"; +static const char __pyx_k_threads_num[] = "threads_num"; +static const char __pyx_k_enable_https[] = "enable_https"; +static const char __pyx_k_shutdown_api[] = "shutdown_api"; +static const char __pyx_k_error_message[] = "error_message"; +static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; +static const char __pyx_k_S3Error___init[] = "S3Error.__init__"; +static const char __pyx_k_pys3client_pyx[] = "pys3client.pyx"; +static const char __pyx_k_use_dual_stack[] = "use_dual_stack"; +static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; +static PyObject *__pyx_n_s_ERROR_LIST; +static PyObject *__pyx_n_s_ERROR_MAP; +static PyObject *__pyx_n_s_PyS3Client; +static PyObject *__pyx_n_s_S3Error; +static PyObject *__pyx_n_s_S3Error___init; +static PyObject *__pyx_n_s_TypeError; +static PyObject *__pyx_n_s_Undefined; +static PyObject *__pyx_n_s_ak; +static PyObject *__pyx_n_s_bucket; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_n_s_data; +static PyObject *__pyx_n_s_decode; +static PyObject *__pyx_n_s_doc; +static PyObject *__pyx_n_s_enable_https; +static PyObject *__pyx_n_s_encode; +static PyObject *__pyx_n_s_endpoint; +static PyObject *__pyx_n_s_error_message; +static PyObject *__pyx_n_s_error_name; +static PyObject *__pyx_n_s_filename; +static PyObject *__pyx_n_s_get; +static PyObject *__pyx_n_s_getstate; +static PyObject *__pyx_n_s_init; +static PyObject *__pyx_n_s_init_api; +static PyObject *__pyx_n_s_key; +static PyObject *__pyx_n_s_log_level; +static PyObject *__pyx_n_s_lower; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_n_s_metaclass; +static PyObject *__pyx_n_s_module; +static 
PyObject *__pyx_n_s_name; +static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; +static PyObject *__pyx_n_s_prepare; +static PyObject *__pyx_n_s_pys3client; +static PyObject *__pyx_kp_s_pys3client_pyx; +static PyObject *__pyx_n_s_qualname; +static PyObject *__pyx_n_s_range; +static PyObject *__pyx_n_s_reduce; +static PyObject *__pyx_n_s_reduce_cython; +static PyObject *__pyx_n_s_reduce_ex; +static PyObject *__pyx_n_s_self; +static PyObject *__pyx_n_s_setstate; +static PyObject *__pyx_n_s_setstate_cython; +static PyObject *__pyx_n_s_shutdown_api; +static PyObject *__pyx_n_s_sk; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_n_s_threads_num; +static PyObject *__pyx_n_s_use_dual_stack; +static PyObject *__pyx_kp_s_utf_8; +static PyObject *__pyx_n_s_verify_ssl; +static PyObject *__pyx_pf_10pys3client_init_api(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_log_level); /* proto */ +static PyObject *__pyx_pf_10pys3client_2shutdown_api(CYTHON_UNUSED PyObject *__pyx_self); /* proto */ +static PyObject *__pyx_pf_10pys3client_7S3Error___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_error_name, PyObject *__pyx_v_error_message); /* proto */ +static int __pyx_pf_10pys3client_10PyS3Client___cinit__(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_ak, std::string __pyx_v_sk, std::string __pyx_v_endpoint, int __pyx_v_verify_ssl, int __pyx_v_enable_https, int __pyx_v_use_dual_stack, int __pyx_v_threads_num); /* proto */ +static void __pyx_pf_10pys3client_10PyS3Client_2__dealloc__(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_10pys3client_10PyS3Client_4get_object(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_bucket, std::string __pyx_v_key, std::string __pyx_v_range); /* proto */ +static PyObject *__pyx_pf_10pys3client_10PyS3Client_6multipart_download_concurrency(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_bucket, std::string __pyx_v_key, std::string __pyx_v_filename); /* proto */ +static PyObject *__pyx_pf_10pys3client_10PyS3Client_8put_object(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_bucket, std::string __pyx_v_key, std::string __pyx_v_data); /* proto */ +static PyObject *__pyx_pf_10pys3client_10PyS3Client_10multipart_upload_concurrency(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_bucket, std::string __pyx_v_key, std::string __pyx_v_filename); /* proto */ +static PyObject *__pyx_pf_10pys3client_10PyS3Client_12delete(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_bucket, std::string __pyx_v_key); /* proto */ +static PyObject *__pyx_pf_10pys3client_10PyS3Client_14contains(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_bucket, std::string __pyx_v_key); /* proto */ +static PyObject *__pyx_pf_10pys3client_10PyS3Client_16list(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_bucket, std::string __pyx_v_key); /* proto */ +static PyObject *__pyx_pf_10pys3client_10PyS3Client_18__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_10pys3client_10PyS3Client_20__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject *__pyx_tp_new_10pys3client_PyS3Client(PyTypeObject *t, PyObject *a, PyObject 
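+/* The prototypes above are the module's entire Python-visible surface:
+   module-level init_api()/shutdown_api(), the S3Error exception class, and the
+   PyS3Client extension type with get_object, multipart_download_concurrency,
+   put_object, multipart_upload_concurrency, delete, contains and list methods,
+   all taking std::string parameters (pass bytes from Python). A minimal usage
+   sketch; the credentials, endpoint and log level are placeholders, and an
+   empty range argument is assumed to request the whole object:
+
+       import pys3client
+       pys3client.init_api('info')
+       client = pys3client.PyS3Client(b'AK', b'SK', b'127.0.0.1:8080',
+                                      False, False, False, 8)
+       client.put_object(b'mybucket', b'mykey', b'hello')
+       data = client.get_object(b'mybucket', b'mykey', b'')
+       pys3client.shutdown_api()
+*/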
*k); /*proto*/ +static PyObject *__pyx_tuple_; +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__3; +static PyObject *__pyx_tuple__6; +static PyObject *__pyx_codeobj__4; +static PyObject *__pyx_codeobj__5; +static PyObject *__pyx_codeobj__7; +/* Late includes */ + +/* "pys3client.pyx":13 + * ERROR_MAP = {k:v.decode('utf-8') for k,v in ERROR_LIST} + * + * def init_api(log_level): # <<<<<<<<<<<<<< + * log_level = log_level.lower() + * _init_api(log_level.encode('utf-8')) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_10pys3client_1init_api(PyObject *__pyx_self, PyObject *__pyx_v_log_level); /*proto*/ +static PyMethodDef __pyx_mdef_10pys3client_1init_api = {"init_api", (PyCFunction)__pyx_pw_10pys3client_1init_api, METH_O, 0}; +static PyObject *__pyx_pw_10pys3client_1init_api(PyObject *__pyx_self, PyObject *__pyx_v_log_level) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("init_api (wrapper)", 0); + __pyx_r = __pyx_pf_10pys3client_init_api(__pyx_self, ((PyObject *)__pyx_v_log_level)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_10pys3client_init_api(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_log_level) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + std::string __pyx_t_4; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("init_api", 0); + __Pyx_INCREF(__pyx_v_log_level); + + /* "pys3client.pyx":14 + * + * def init_api(log_level): + * log_level = log_level.lower() # <<<<<<<<<<<<<< + * _init_api(log_level.encode('utf-8')) + * + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_log_level, __pyx_n_s_lower); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + } + } + __pyx_t_1 = (__pyx_t_3) ? __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_t_3) : __Pyx_PyObject_CallNoArg(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF_SET(__pyx_v_log_level, __pyx_t_1); + __pyx_t_1 = 0; + + /* "pys3client.pyx":15 + * def init_api(log_level): + * log_level = log_level.lower() + * _init_api(log_level.encode('utf-8')) # <<<<<<<<<<<<<< + * + * def shutdown_api(): + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_log_level, __pyx_n_s_encode); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_2))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_2); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_2, function); + } + } + __pyx_t_1 = (__pyx_t_3) ? 
__Pyx_PyObject_Call2Args(__pyx_t_2, __pyx_t_3, __pyx_kp_s_utf_8) : __Pyx_PyObject_CallOneArg(__pyx_t_2, __pyx_kp_s_utf_8); + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_4 = __pyx_convert_string_from_py_std__in_string(__pyx_t_1); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + init_api(__pyx_t_4); + + /* "pys3client.pyx":13 + * ERROR_MAP = {k:v.decode('utf-8') for k,v in ERROR_LIST} + * + * def init_api(log_level): # <<<<<<<<<<<<<< + * log_level = log_level.lower() + * _init_api(log_level.encode('utf-8')) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("pys3client.init_api", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_log_level); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "pys3client.pyx":17 + * _init_api(log_level.encode('utf-8')) + * + * def shutdown_api(): # <<<<<<<<<<<<<< + * _shutdown_api() + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_10pys3client_3shutdown_api(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyMethodDef __pyx_mdef_10pys3client_3shutdown_api = {"shutdown_api", (PyCFunction)__pyx_pw_10pys3client_3shutdown_api, METH_NOARGS, 0}; +static PyObject *__pyx_pw_10pys3client_3shutdown_api(PyObject *__pyx_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("shutdown_api (wrapper)", 0); + __pyx_r = __pyx_pf_10pys3client_2shutdown_api(__pyx_self); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_10pys3client_2shutdown_api(CYTHON_UNUSED PyObject *__pyx_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("shutdown_api", 0); + + /* "pys3client.pyx":18 + * + * def shutdown_api(): + * _shutdown_api() # <<<<<<<<<<<<<< + * + * class S3Error(Exception): + */ + shutdown_api(); + + /* "pys3client.pyx":17 + * _init_api(log_level.encode('utf-8')) + * + * def shutdown_api(): # <<<<<<<<<<<<<< + * _shutdown_api() + * + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "pys3client.pyx":21 + * + * class S3Error(Exception): + * def __init__(self, error_name, error_message): # <<<<<<<<<<<<<< + * self.error_name = error_name + * self.error_message = error_message + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_10pys3client_7S3Error_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyMethodDef __pyx_mdef_10pys3client_7S3Error_1__init__ = {"__init__", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_10pys3client_7S3Error_1__init__, METH_VARARGS|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_10pys3client_7S3Error_1__init__(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_self = 0; + PyObject *__pyx_v_error_name = 0; + PyObject *__pyx_v_error_message = 0; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + 
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_self,&__pyx_n_s_error_name,&__pyx_n_s_error_message,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_self)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_error_name)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__init__", 1, 3, 3, 1); __PYX_ERR(1, 21, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_error_message)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__init__", 1, 3, 3, 2); __PYX_ERR(1, 21, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 21, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v_self = values[0]; + __pyx_v_error_name = values[1]; + __pyx_v_error_message = values[2]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__init__", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 21, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("pys3client.S3Error.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_10pys3client_7S3Error___init__(__pyx_self, __pyx_v_self, __pyx_v_error_name, __pyx_v_error_message); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_10pys3client_7S3Error___init__(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_self, PyObject *__pyx_v_error_name, PyObject *__pyx_v_error_message) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__init__", 0); + + /* "pys3client.pyx":22 + * class S3Error(Exception): + * def __init__(self, error_name, error_message): + * self.error_name = error_name # <<<<<<<<<<<<<< + * self.error_message = error_message + * + */ + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_error_name, __pyx_v_error_name) < 0) __PYX_ERR(1, 22, __pyx_L1_error) + + /* "pys3client.pyx":23 + * def __init__(self, error_name, error_message): + * self.error_name = error_name + * self.error_message = error_message # <<<<<<<<<<<<<< + * + * cdef class PyS3Client: + */ + if (__Pyx_PyObject_SetAttrStr(__pyx_v_self, __pyx_n_s_error_message, __pyx_v_error_message) < 0) __PYX_ERR(1, 23, __pyx_L1_error) + + /* "pys3client.pyx":21 + * + * class S3Error(Exception): + * def __init__(self, error_name, 
error_message): # <<<<<<<<<<<<<< + * self.error_name = error_name + * self.error_message = error_message + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("pys3client.S3Error.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "pys3client.pyx":28 + * cdef S3Client *client + * + * def __cinit__(self, string ak, string sk, string endpoint, bint verify_ssl, bint enable_https, bint use_dual_stack, int threads_num): # <<<<<<<<<<<<<< + * self.client = new S3Client(ak, sk, endpoint, verify_ssl, enable_https, use_dual_stack, threads_num) + * + */ + +/* Python wrapper */ +static int __pyx_pw_10pys3client_10PyS3Client_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_pw_10pys3client_10PyS3Client_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + std::string __pyx_v_ak; + std::string __pyx_v_sk; + std::string __pyx_v_endpoint; + int __pyx_v_verify_ssl; + int __pyx_v_enable_https; + int __pyx_v_use_dual_stack; + int __pyx_v_threads_num; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_ak,&__pyx_n_s_sk,&__pyx_n_s_endpoint,&__pyx_n_s_verify_ssl,&__pyx_n_s_enable_https,&__pyx_n_s_use_dual_stack,&__pyx_n_s_threads_num,0}; + PyObject* values[7] = {0,0,0,0,0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_ak)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_sk)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 1); __PYX_ERR(1, 28, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_endpoint)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 2); __PYX_ERR(1, 28, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_verify_ssl)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 3); __PYX_ERR(1, 28, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_enable_https)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 4); __PYX_ERR(1, 28, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + 
case 5: + if (likely((values[5] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_use_dual_stack)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 5); __PYX_ERR(1, 28, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 6: + if (likely((values[6] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_threads_num)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, 6); __PYX_ERR(1, 28, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__cinit__") < 0)) __PYX_ERR(1, 28, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 7) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + } + __pyx_v_ak = __pyx_convert_string_from_py_std__in_string(values[0]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 28, __pyx_L3_error) + __pyx_v_sk = __pyx_convert_string_from_py_std__in_string(values[1]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 28, __pyx_L3_error) + __pyx_v_endpoint = __pyx_convert_string_from_py_std__in_string(values[2]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 28, __pyx_L3_error) + __pyx_v_verify_ssl = __Pyx_PyObject_IsTrue(values[3]); if (unlikely((__pyx_v_verify_ssl == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 28, __pyx_L3_error) + __pyx_v_enable_https = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_enable_https == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 28, __pyx_L3_error) + __pyx_v_use_dual_stack = __Pyx_PyObject_IsTrue(values[5]); if (unlikely((__pyx_v_use_dual_stack == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 28, __pyx_L3_error) + __pyx_v_threads_num = __Pyx_PyInt_As_int(values[6]); if (unlikely((__pyx_v_threads_num == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 28, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 7, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 28, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("pys3client.PyS3Client.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_10pys3client_10PyS3Client___cinit__(((struct __pyx_obj_10pys3client_PyS3Client *)__pyx_v_self), __pyx_v_ak, __pyx_v_sk, __pyx_v_endpoint, __pyx_v_verify_ssl, __pyx_v_enable_https, __pyx_v_use_dual_stack, __pyx_v_threads_num); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_10pys3client_10PyS3Client___cinit__(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_ak, std::string __pyx_v_sk, std::string __pyx_v_endpoint, int __pyx_v_verify_ssl, int __pyx_v_enable_https, int __pyx_v_use_dual_stack, int __pyx_v_threads_num) { + int __pyx_r; + __Pyx_RefNannyDeclarations + S3Client *__pyx_t_1; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__cinit__", 0); + + /* "pys3client.pyx":29 + * + * def __cinit__(self, string ak, string sk, string endpoint, bint verify_ssl, bint enable_https, bint use_dual_stack, int threads_num): + * self.client = new S3Client(ak, sk, endpoint, 
verify_ssl, enable_https, use_dual_stack, threads_num) # <<<<<<<<<<<<<< + * + * def __dealloc__(self): + */ + try { + __pyx_t_1 = new S3Client(__pyx_v_ak, __pyx_v_sk, __pyx_v_endpoint, __pyx_v_verify_ssl, __pyx_v_enable_https, __pyx_v_use_dual_stack, __pyx_v_threads_num); + } catch(...) { + __Pyx_CppExn2PyErr(); + __PYX_ERR(1, 29, __pyx_L1_error) + } + __pyx_v_self->client = __pyx_t_1; + + /* "pys3client.pyx":28 + * cdef S3Client *client + * + * def __cinit__(self, string ak, string sk, string endpoint, bint verify_ssl, bint enable_https, bint use_dual_stack, int threads_num): # <<<<<<<<<<<<<< + * self.client = new S3Client(ak, sk, endpoint, verify_ssl, enable_https, use_dual_stack, threads_num) + * + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("pys3client.PyS3Client.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "pys3client.pyx":31 + * self.client = new S3Client(ak, sk, endpoint, verify_ssl, enable_https, use_dual_stack, threads_num) + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * del self.client + * + */ + +/* Python wrapper */ +static void __pyx_pw_10pys3client_10PyS3Client_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_pw_10pys3client_10PyS3Client_3__dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_pf_10pys3client_10PyS3Client_2__dealloc__(((struct __pyx_obj_10pys3client_PyS3Client *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_pf_10pys3client_10PyS3Client_2__dealloc__(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "pys3client.pyx":32 + * + * def __dealloc__(self): + * del self.client # <<<<<<<<<<<<<< + * + * def get_object(self, string bucket, string key, string range): + */ + delete __pyx_v_self->client; + + /* "pys3client.pyx":31 + * self.client = new S3Client(ak, sk, endpoint, verify_ssl, enable_https, use_dual_stack, threads_num) + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * del self.client + * + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "pys3client.pyx":34 + * del self.client + * + * def get_object(self, string bucket, string key, string range): # <<<<<<<<<<<<<< + * cdef string error_message, result + * cdef int error_type + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_5get_object(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_5get_object(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + std::string __pyx_v_bucket; + std::string __pyx_v_key; + std::string __pyx_v_range; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("get_object (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_bucket,&__pyx_n_s_key,&__pyx_n_s_range,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: 
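+/* This switch unpacks positional arguments highest-index-first, relying on the
+   explicit CYTHON_FALLTHROUGH between cases; any parameters still unset are
+   then matched by interned keyword name. Cython generates the same pattern
+   for every Python-callable wrapper in this file. */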
values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bucket)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("get_object", 1, 3, 3, 1); __PYX_ERR(1, 34, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_range)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("get_object", 1, 3, 3, 2); __PYX_ERR(1, 34, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "get_object") < 0)) __PYX_ERR(1, 34, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v_bucket = __pyx_convert_string_from_py_std__in_string(values[0]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 34, __pyx_L3_error) + __pyx_v_key = __pyx_convert_string_from_py_std__in_string(values[1]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 34, __pyx_L3_error) + __pyx_v_range = __pyx_convert_string_from_py_std__in_string(values[2]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 34, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("get_object", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 34, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("pys3client.PyS3Client.get_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_10pys3client_10PyS3Client_4get_object(((struct __pyx_obj_10pys3client_PyS3Client *)__pyx_v_self), __pyx_v_bucket, __pyx_v_key, __pyx_v_range); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_10pys3client_10PyS3Client_4get_object(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_bucket, std::string __pyx_v_key, std::string __pyx_v_range) { + std::string __pyx_v_error_message; + std::string __pyx_v_result; + int __pyx_v_error_type; + int __pyx_v_ret; + PyObject *__pyx_v_error_name = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_t_6; + PyObject *__pyx_t_7 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_object", 0); + + /* "pys3client.pyx":37 + * cdef string error_message, result + * cdef int error_type + * ret = self.client.get_object(bucket, key, error_type, error_message, result, range) # <<<<<<<<<<<<<< + * if ret == 0: + * return result + */ + __pyx_v_ret = __pyx_v_self->client->get_object(__pyx_v_bucket, __pyx_v_key, __pyx_v_error_type, __pyx_v_error_message, __pyx_v_result, __pyx_v_range); + + /* "pys3client.pyx":38 + * cdef int error_type + * ret = self.client.get_object(bucket, key, error_type, error_message, result, range) + * if ret == 0: # 
<<<<<<<<<<<<<< + * return result + * else: + */ + __pyx_t_1 = ((__pyx_v_ret == 0) != 0); + if (likely(__pyx_t_1)) { + + /* "pys3client.pyx":39 + * ret = self.client.get_object(bucket, key, error_type, error_message, result, range) + * if ret == 0: + * return result # <<<<<<<<<<<<<< + * else: + * error_name = ERROR_MAP.get(error_type, 'Undefined') + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_convert_PyBytes_string_to_py_std__in_string(__pyx_v_result); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 39, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "pys3client.pyx":38 + * cdef int error_type + * ret = self.client.get_object(bucket, key, error_type, error_message, result, range) + * if ret == 0: # <<<<<<<<<<<<<< + * return result + * else: + */ + } + + /* "pys3client.pyx":41 + * return result + * else: + * error_name = ERROR_MAP.get(error_type, 'Undefined') # <<<<<<<<<<<<<< + * raise S3Error(error_name, error_message) + * + */ + /*else*/ { + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_ERROR_MAP); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 41, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_get); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 41, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_error_type); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 41, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = NULL; + __pyx_t_6 = 0; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + __pyx_t_6 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_3, __pyx_n_s_Undefined}; + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 41, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_3, __pyx_n_s_Undefined}; + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 41, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } else + #endif + { + __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 41, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + if (__pyx_t_5) { + __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; + } + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_t_3); + __Pyx_INCREF(__pyx_n_s_Undefined); + __Pyx_GIVEREF(__pyx_n_s_Undefined); + PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_n_s_Undefined); + __pyx_t_3 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_7, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 41, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_v_error_name = __pyx_t_2; + __pyx_t_2 = 0; + + /* "pys3client.pyx":42 + * else: + * error_name = ERROR_MAP.get(error_type, 'Undefined') + * 
raise S3Error(error_name, error_message) # <<<<<<<<<<<<<< + * + * def multipart_download_concurrency(self, string bucket, string key, string filename): + */ + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_S3Error); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 42, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_7 = __pyx_convert_PyBytes_string_to_py_std__in_string(__pyx_v_error_message); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 42, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_3 = NULL; + __pyx_t_6 = 0; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + __pyx_t_6 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_error_name, __pyx_t_7}; + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 42, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_error_name, __pyx_t_7}; + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 42, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + } else + #endif + { + __pyx_t_5 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 42, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (__pyx_t_3) { + __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __pyx_t_3 = NULL; + } + __Pyx_INCREF(__pyx_v_error_name); + __Pyx_GIVEREF(__pyx_v_error_name); + PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_6, __pyx_v_error_name); + __Pyx_GIVEREF(__pyx_t_7); + PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_6, __pyx_t_7); + __pyx_t_7 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 42, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 42, __pyx_L1_error) + } + + /* "pys3client.pyx":34 + * del self.client + * + * def get_object(self, string bucket, string key, string range): # <<<<<<<<<<<<<< + * cdef string error_message, result + * cdef int error_type + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("pys3client.PyS3Client.get_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_error_name); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "pys3client.pyx":44 + * raise S3Error(error_name, error_message) + * + * def multipart_download_concurrency(self, string bucket, string key, string filename): # <<<<<<<<<<<<<< + * cdef string error_message, result + * cdef int error_type + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_7multipart_download_concurrency(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject 
*__pyx_kwds); /*proto*/ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_7multipart_download_concurrency(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + std::string __pyx_v_bucket; + std::string __pyx_v_key; + std::string __pyx_v_filename; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("multipart_download_concurrency (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_bucket,&__pyx_n_s_key,&__pyx_n_s_filename,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bucket)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("multipart_download_concurrency", 1, 3, 3, 1); __PYX_ERR(1, 44, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_filename)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("multipart_download_concurrency", 1, 3, 3, 2); __PYX_ERR(1, 44, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "multipart_download_concurrency") < 0)) __PYX_ERR(1, 44, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v_bucket = __pyx_convert_string_from_py_std__in_string(values[0]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 44, __pyx_L3_error) + __pyx_v_key = __pyx_convert_string_from_py_std__in_string(values[1]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 44, __pyx_L3_error) + __pyx_v_filename = __pyx_convert_string_from_py_std__in_string(values[2]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 44, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("multipart_download_concurrency", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 44, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("pys3client.PyS3Client.multipart_download_concurrency", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_10pys3client_10PyS3Client_6multipart_download_concurrency(((struct __pyx_obj_10pys3client_PyS3Client *)__pyx_v_self), __pyx_v_bucket, __pyx_v_key, __pyx_v_filename); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_10pys3client_10PyS3Client_6multipart_download_concurrency(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_bucket, std::string __pyx_v_key, std::string __pyx_v_filename) { 
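+ /* Implementation body for PyS3Client.multipart_download_concurrency (pys3client.pyx:44): it forwards bucket, key and filename to the C++ client's multipart_download_concurrency(), returns the 0 status to Python on success, and otherwise maps error_type through ERROR_MAP and raises S3Error, mirroring get_object above. */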
+ std::string __pyx_v_error_message; + int __pyx_v_error_type; + int __pyx_v_ret; + PyObject *__pyx_v_error_name = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_t_6; + PyObject *__pyx_t_7 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("multipart_download_concurrency", 0); + + /* "pys3client.pyx":47 + * cdef string error_message, result + * cdef int error_type + * ret = self.client.multipart_download_concurrency(bucket, key, filename, error_type, error_message) # <<<<<<<<<<<<<< + * if ret == 0: + * return ret + */ + __pyx_v_ret = __pyx_v_self->client->multipart_download_concurrency(__pyx_v_bucket, __pyx_v_key, __pyx_v_filename, __pyx_v_error_type, __pyx_v_error_message); + + /* "pys3client.pyx":48 + * cdef int error_type + * ret = self.client.multipart_download_concurrency(bucket, key, filename, error_type, error_message) + * if ret == 0: # <<<<<<<<<<<<<< + * return ret + * else: + */ + __pyx_t_1 = ((__pyx_v_ret == 0) != 0); + if (likely(__pyx_t_1)) { + + /* "pys3client.pyx":49 + * ret = self.client.multipart_download_concurrency(bucket, key, filename, error_type, error_message) + * if ret == 0: + * return ret # <<<<<<<<<<<<<< + * else: + * error_name = ERROR_MAP.get(error_type, 'Undefined') + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_ret); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 49, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "pys3client.pyx":48 + * cdef int error_type + * ret = self.client.multipart_download_concurrency(bucket, key, filename, error_type, error_message) + * if ret == 0: # <<<<<<<<<<<<<< + * return ret + * else: + */ + } + + /* "pys3client.pyx":51 + * return ret + * else: + * error_name = ERROR_MAP.get(error_type, 'Undefined') # <<<<<<<<<<<<<< + * raise S3Error(error_name, error_message) + * + */ + /*else*/ { + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_ERROR_MAP); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 51, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_get); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 51, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_error_type); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 51, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = NULL; + __pyx_t_6 = 0; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + __pyx_t_6 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_3, __pyx_n_s_Undefined}; + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 51, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_3, __pyx_n_s_Undefined}; + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, 
__pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 51, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } else + #endif + { + __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 51, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + if (__pyx_t_5) { + __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; + } + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_t_3); + __Pyx_INCREF(__pyx_n_s_Undefined); + __Pyx_GIVEREF(__pyx_n_s_Undefined); + PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_n_s_Undefined); + __pyx_t_3 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_7, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 51, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_v_error_name = __pyx_t_2; + __pyx_t_2 = 0; + + /* "pys3client.pyx":52 + * else: + * error_name = ERROR_MAP.get(error_type, 'Undefined') + * raise S3Error(error_name, error_message) # <<<<<<<<<<<<<< + * + * def put_object(self, string bucket, string key, string data): + */ + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_S3Error); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 52, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_7 = __pyx_convert_PyBytes_string_to_py_std__in_string(__pyx_v_error_message); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 52, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_3 = NULL; + __pyx_t_6 = 0; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + __pyx_t_6 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_error_name, __pyx_t_7}; + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 52, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_error_name, __pyx_t_7}; + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 52, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + } else + #endif + { + __pyx_t_5 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 52, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (__pyx_t_3) { + __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __pyx_t_3 = NULL; + } + __Pyx_INCREF(__pyx_v_error_name); + __Pyx_GIVEREF(__pyx_v_error_name); + PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_6, __pyx_v_error_name); + __Pyx_GIVEREF(__pyx_t_7); + PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_6, __pyx_t_7); + __pyx_t_7 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 52, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 52, 
__pyx_L1_error) + } + + /* "pys3client.pyx":44 + * raise S3Error(error_name, error_message) + * + * def multipart_download_concurrency(self, string bucket, string key, string filename): # <<<<<<<<<<<<<< + * cdef string error_message, result + * cdef int error_type + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("pys3client.PyS3Client.multipart_download_concurrency", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_error_name); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "pys3client.pyx":54 + * raise S3Error(error_name, error_message) + * + * def put_object(self, string bucket, string key, string data): # <<<<<<<<<<<<<< + * cdef string error_message, result + * cdef int error_type + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_9put_object(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_9put_object(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + std::string __pyx_v_bucket; + std::string __pyx_v_key; + std::string __pyx_v_data; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("put_object (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_bucket,&__pyx_n_s_key,&__pyx_n_s_data,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bucket)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("put_object", 1, 3, 3, 1); __PYX_ERR(1, 54, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("put_object", 1, 3, 3, 2); __PYX_ERR(1, 54, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "put_object") < 0)) __PYX_ERR(1, 54, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v_bucket = __pyx_convert_string_from_py_std__in_string(values[0]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 54, __pyx_L3_error) + __pyx_v_key = __pyx_convert_string_from_py_std__in_string(values[1]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 54, __pyx_L3_error) + __pyx_v_data = __pyx_convert_string_from_py_std__in_string(values[2]); if 
(unlikely(PyErr_Occurred())) __PYX_ERR(1, 54, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("put_object", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 54, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("pys3client.PyS3Client.put_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_10pys3client_10PyS3Client_8put_object(((struct __pyx_obj_10pys3client_PyS3Client *)__pyx_v_self), __pyx_v_bucket, __pyx_v_key, __pyx_v_data); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_10pys3client_10PyS3Client_8put_object(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_bucket, std::string __pyx_v_key, std::string __pyx_v_data) { + std::string __pyx_v_error_message; + int __pyx_v_error_type; + int __pyx_v_ret; + PyObject *__pyx_v_error_name = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_t_6; + PyObject *__pyx_t_7 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("put_object", 0); + + /* "pys3client.pyx":57 + * cdef string error_message, result + * cdef int error_type + * ret = self.client.put_object(bucket, key, data, error_type, error_message) # <<<<<<<<<<<<<< + * if ret == 0: + * return data.size() + */ + __pyx_v_ret = __pyx_v_self->client->put_object(__pyx_v_bucket, __pyx_v_key, __pyx_v_data, __pyx_v_error_type, __pyx_v_error_message); + + /* "pys3client.pyx":58 + * cdef int error_type + * ret = self.client.put_object(bucket, key, data, error_type, error_message) + * if ret == 0: # <<<<<<<<<<<<<< + * return data.size() + * else: + */ + __pyx_t_1 = ((__pyx_v_ret == 0) != 0); + if (likely(__pyx_t_1)) { + + /* "pys3client.pyx":59 + * ret = self.client.put_object(bucket, key, data, error_type, error_message) + * if ret == 0: + * return data.size() # <<<<<<<<<<<<<< + * else: + * error_name = ERROR_MAP.get(error_type, 'Undefined') + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyInt_FromSize_t(__pyx_v_data.size()); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 59, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "pys3client.pyx":58 + * cdef int error_type + * ret = self.client.put_object(bucket, key, data, error_type, error_message) + * if ret == 0: # <<<<<<<<<<<<<< + * return data.size() + * else: + */ + } + + /* "pys3client.pyx":61 + * return data.size() + * else: + * error_name = ERROR_MAP.get(error_type, 'Undefined') # <<<<<<<<<<<<<< + * raise S3Error(error_name, error_message) + * + */ + /*else*/ { + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_ERROR_MAP); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_get); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_error_type); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = NULL; + __pyx_t_6 = 0; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); + if 
(likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + __pyx_t_6 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_3, __pyx_n_s_Undefined}; + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 61, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_3, __pyx_n_s_Undefined}; + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 61, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } else + #endif + { + __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + if (__pyx_t_5) { + __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; + } + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_t_3); + __Pyx_INCREF(__pyx_n_s_Undefined); + __Pyx_GIVEREF(__pyx_n_s_Undefined); + PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_n_s_Undefined); + __pyx_t_3 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_7, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_v_error_name = __pyx_t_2; + __pyx_t_2 = 0; + + /* "pys3client.pyx":62 + * else: + * error_name = ERROR_MAP.get(error_type, 'Undefined') + * raise S3Error(error_name, error_message) # <<<<<<<<<<<<<< + * + * + */ + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_S3Error); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 62, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_7 = __pyx_convert_PyBytes_string_to_py_std__in_string(__pyx_v_error_message); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 62, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_3 = NULL; + __pyx_t_6 = 0; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_3)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + __pyx_t_6 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_error_name, __pyx_t_7}; + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 62, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_error_name, __pyx_t_7}; + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 62, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + } else + #endif + { + __pyx_t_5 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_5)) 
__PYX_ERR(1, 62, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (__pyx_t_3) { + __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __pyx_t_3 = NULL; + } + __Pyx_INCREF(__pyx_v_error_name); + __Pyx_GIVEREF(__pyx_v_error_name); + PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_6, __pyx_v_error_name); + __Pyx_GIVEREF(__pyx_t_7); + PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_6, __pyx_t_7); + __pyx_t_7 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 62, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 62, __pyx_L1_error) + } + + /* "pys3client.pyx":54 + * raise S3Error(error_name, error_message) + * + * def put_object(self, string bucket, string key, string data): # <<<<<<<<<<<<<< + * cdef string error_message, result + * cdef int error_type + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("pys3client.PyS3Client.put_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_error_name); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "pys3client.pyx":65 + * + * + * def multipart_upload_concurrency(self, string bucket, string key, string filename): # <<<<<<<<<<<<<< + * cdef string error_message, result + * cdef int error_type + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_11multipart_upload_concurrency(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_11multipart_upload_concurrency(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + std::string __pyx_v_bucket; + std::string __pyx_v_key; + std::string __pyx_v_filename; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("multipart_upload_concurrency (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_bucket,&__pyx_n_s_key,&__pyx_n_s_filename,0}; + PyObject* values[3] = {0,0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bucket)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("multipart_upload_concurrency", 1, 3, 3, 1); __PYX_ERR(1, 65, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_filename)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("multipart_upload_concurrency", 1, 3, 3, 2); __PYX_ERR(1, 65, __pyx_L3_error) + } + } + 
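+ /* Keywords not consumed by the switch above are either unexpected names or duplicates of already-filled positional arguments; __Pyx_ParseOptionalKeywords rejects them, surfacing as a TypeError in Python. */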
if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "multipart_upload_concurrency") < 0)) __PYX_ERR(1, 65, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 3) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + } + __pyx_v_bucket = __pyx_convert_string_from_py_std__in_string(values[0]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 65, __pyx_L3_error) + __pyx_v_key = __pyx_convert_string_from_py_std__in_string(values[1]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 65, __pyx_L3_error) + __pyx_v_filename = __pyx_convert_string_from_py_std__in_string(values[2]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 65, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("multipart_upload_concurrency", 1, 3, 3, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 65, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("pys3client.PyS3Client.multipart_upload_concurrency", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_10pys3client_10PyS3Client_10multipart_upload_concurrency(((struct __pyx_obj_10pys3client_PyS3Client *)__pyx_v_self), __pyx_v_bucket, __pyx_v_key, __pyx_v_filename); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_10pys3client_10PyS3Client_10multipart_upload_concurrency(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_bucket, std::string __pyx_v_key, std::string __pyx_v_filename) { + std::string __pyx_v_error_message; + int __pyx_v_error_type; + int __pyx_v_ret; + PyObject *__pyx_v_error_name = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_t_6; + PyObject *__pyx_t_7 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("multipart_upload_concurrency", 0); + + /* "pys3client.pyx":68 + * cdef string error_message, result + * cdef int error_type + * ret = self.client.multipart_upload_concurrency(bucket, key, filename, error_type, error_message) # <<<<<<<<<<<<<< + * if ret == 0: + * return ret + */ + __pyx_v_ret = __pyx_v_self->client->multipart_upload_concurrency(__pyx_v_bucket, __pyx_v_key, __pyx_v_filename, __pyx_v_error_type, __pyx_v_error_message); + + /* "pys3client.pyx":69 + * cdef int error_type + * ret = self.client.multipart_upload_concurrency(bucket, key, filename, error_type, error_message) + * if ret == 0: # <<<<<<<<<<<<<< + * return ret + * else: + */ + __pyx_t_1 = ((__pyx_v_ret == 0) != 0); + if (likely(__pyx_t_1)) { + + /* "pys3client.pyx":70 + * ret = self.client.multipart_upload_concurrency(bucket, key, filename, error_type, error_message) + * if ret == 0: + * return ret # <<<<<<<<<<<<<< + * else: + * error_name = ERROR_MAP.get(error_type, 'Undefined') + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyInt_From_int(__pyx_v_ret); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 70, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "pys3client.pyx":69 + * cdef int error_type + * ret = 
self.client.multipart_upload_concurrency(bucket, key, filename, error_type, error_message) + * if ret == 0: # <<<<<<<<<<<<<< + * return ret + * else: + */ + } + + /* "pys3client.pyx":72 + * return ret + * else: + * error_name = ERROR_MAP.get(error_type, 'Undefined') # <<<<<<<<<<<<<< + * raise S3Error(error_name, error_message) + * + */ + /*else*/ { + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_ERROR_MAP); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 72, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_get); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 72, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyInt_From_int(__pyx_v_error_type); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 72, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = NULL; + __pyx_t_6 = 0; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_5 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_5)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_5); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + __pyx_t_6 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_3, __pyx_n_s_Undefined}; + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 72, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_5, __pyx_t_3, __pyx_n_s_Undefined}; + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 72, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } else + #endif + { + __pyx_t_7 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 72, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + if (__pyx_t_5) { + __Pyx_GIVEREF(__pyx_t_5); PyTuple_SET_ITEM(__pyx_t_7, 0, __pyx_t_5); __pyx_t_5 = NULL; + } + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_7, 0+__pyx_t_6, __pyx_t_3); + __Pyx_INCREF(__pyx_n_s_Undefined); + __Pyx_GIVEREF(__pyx_n_s_Undefined); + PyTuple_SET_ITEM(__pyx_t_7, 1+__pyx_t_6, __pyx_n_s_Undefined); + __pyx_t_3 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_7, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 72, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_v_error_name = __pyx_t_2; + __pyx_t_2 = 0; + + /* "pys3client.pyx":73 + * else: + * error_name = ERROR_MAP.get(error_type, 'Undefined') + * raise S3Error(error_name, error_message) # <<<<<<<<<<<<<< + * + * def delete(self, string bucket, string key): + */ + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_S3Error); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 73, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_7 = __pyx_convert_PyBytes_string_to_py_std__in_string(__pyx_v_error_message); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 73, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_3 = NULL; + __pyx_t_6 = 0; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_4))) { + __pyx_t_3 = PyMethod_GET_SELF(__pyx_t_4); + if (likely(__pyx_t_3)) { + PyObject* function = 
PyMethod_GET_FUNCTION(__pyx_t_4); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_4, function); + __pyx_t_6 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_error_name, __pyx_t_7}; + __pyx_t_2 = __Pyx_PyFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 73, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_4)) { + PyObject *__pyx_temp[3] = {__pyx_t_3, __pyx_v_error_name, __pyx_t_7}; + __pyx_t_2 = __Pyx_PyCFunction_FastCall(__pyx_t_4, __pyx_temp+1-__pyx_t_6, 2+__pyx_t_6); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 73, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + } else + #endif + { + __pyx_t_5 = PyTuple_New(2+__pyx_t_6); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 73, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (__pyx_t_3) { + __Pyx_GIVEREF(__pyx_t_3); PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_t_3); __pyx_t_3 = NULL; + } + __Pyx_INCREF(__pyx_v_error_name); + __Pyx_GIVEREF(__pyx_v_error_name); + PyTuple_SET_ITEM(__pyx_t_5, 0+__pyx_t_6, __pyx_v_error_name); + __Pyx_GIVEREF(__pyx_t_7); + PyTuple_SET_ITEM(__pyx_t_5, 1+__pyx_t_6, __pyx_t_7); + __pyx_t_7 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_5, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 73, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 73, __pyx_L1_error) + } + + /* "pys3client.pyx":65 + * + * + * def multipart_upload_concurrency(self, string bucket, string key, string filename): # <<<<<<<<<<<<<< + * cdef string error_message, result + * cdef int error_type + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("pys3client.PyS3Client.multipart_upload_concurrency", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_error_name); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "pys3client.pyx":75 + * raise S3Error(error_name, error_message) + * + * def delete(self, string bucket, string key): # <<<<<<<<<<<<<< + * cdef string error_message, result + * cdef int error_type + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_13delete(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_13delete(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + std::string __pyx_v_bucket; + std::string __pyx_v_key; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("delete (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_bucket,&__pyx_n_s_key,0}; + PyObject* values[2] = {0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + 
CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bucket)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("delete", 1, 2, 2, 1); __PYX_ERR(1, 75, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "delete") < 0)) __PYX_ERR(1, 75, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + } + __pyx_v_bucket = __pyx_convert_string_from_py_std__in_string(values[0]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 75, __pyx_L3_error) + __pyx_v_key = __pyx_convert_string_from_py_std__in_string(values[1]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 75, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("delete", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 75, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("pys3client.PyS3Client.delete", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_10pys3client_10PyS3Client_12delete(((struct __pyx_obj_10pys3client_PyS3Client *)__pyx_v_self), __pyx_v_bucket, __pyx_v_key); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_10pys3client_10PyS3Client_12delete(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_bucket, std::string __pyx_v_key) { + std::string __pyx_v_error_message; + int __pyx_v_error_type; + int __pyx_v_ret; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("delete", 0); + + /* "pys3client.pyx":78 + * cdef string error_message, result + * cdef int error_type + * ret = self.client.delete_obj(bucket, key, error_type, error_message) # <<<<<<<<<<<<<< + * return ret + * + */ + __pyx_v_ret = __pyx_v_self->client->delete_obj(__pyx_v_bucket, __pyx_v_key, __pyx_v_error_type, __pyx_v_error_message); + + /* "pys3client.pyx":79 + * cdef int error_type + * ret = self.client.delete_obj(bucket, key, error_type, error_message) + * return ret # <<<<<<<<<<<<<< + * + * def contains(self, string bucket, string key): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_ret); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 79, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "pys3client.pyx":75 + * raise S3Error(error_name, error_message) + * + * def delete(self, string bucket, string key): # <<<<<<<<<<<<<< + * cdef string error_message, result + * cdef int error_type + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("pys3client.PyS3Client.delete", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + 
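+ /* Note that delete() (pys3client.pyx:75) returns the raw status code from delete_obj() without consulting ERROR_MAP, so callers see an int rather than a raised S3Error on failure; contains() below behaves the same way. */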
__Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "pys3client.pyx":81 + * return ret + * + * def contains(self, string bucket, string key): # <<<<<<<<<<<<<< + * cdef string error_message, result + * cdef int error_type + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_15contains(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_15contains(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + std::string __pyx_v_bucket; + std::string __pyx_v_key; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("contains (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_bucket,&__pyx_n_s_key,0}; + PyObject* values[2] = {0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bucket)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("contains", 1, 2, 2, 1); __PYX_ERR(1, 81, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "contains") < 0)) __PYX_ERR(1, 81, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + } + __pyx_v_bucket = __pyx_convert_string_from_py_std__in_string(values[0]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 81, __pyx_L3_error) + __pyx_v_key = __pyx_convert_string_from_py_std__in_string(values[1]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 81, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("contains", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 81, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("pys3client.PyS3Client.contains", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_10pys3client_10PyS3Client_14contains(((struct __pyx_obj_10pys3client_PyS3Client *)__pyx_v_self), __pyx_v_bucket, __pyx_v_key); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_10pys3client_10PyS3Client_14contains(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_bucket, std::string __pyx_v_key) { + std::string __pyx_v_error_message; + int __pyx_v_error_type; + int __pyx_v_ret; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("contains", 0); + + /* "pys3client.pyx":84 + * cdef string error_message, result + * cdef int error_type + * ret = 
self.client.contains(bucket, key, error_type, error_message) # <<<<<<<<<<<<<< + * return ret + * + */ + __pyx_v_ret = __pyx_v_self->client->contains(__pyx_v_bucket, __pyx_v_key, __pyx_v_error_type, __pyx_v_error_message); + + /* "pys3client.pyx":85 + * cdef int error_type + * ret = self.client.contains(bucket, key, error_type, error_message) + * return ret # <<<<<<<<<<<<<< + * + * def list(self, string bucket, string key): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_ret); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 85, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "pys3client.pyx":81 + * return ret + * + * def contains(self, string bucket, string key): # <<<<<<<<<<<<<< + * cdef string error_message, result + * cdef int error_type + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("pys3client.PyS3Client.contains", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "pys3client.pyx":87 + * return ret + * + * def list(self, string bucket, string key): # <<<<<<<<<<<<<< + * cdef string error_message, result + * cdef int error_type + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_17list(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_17list(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + std::string __pyx_v_bucket; + std::string __pyx_v_key; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("list (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_bucket,&__pyx_n_s_key,0}; + PyObject* values[2] = {0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_bucket)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_key)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("list", 1, 2, 2, 1); __PYX_ERR(1, 87, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "list") < 0)) __PYX_ERR(1, 87, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + } + __pyx_v_bucket = __pyx_convert_string_from_py_std__in_string(values[0]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 87, __pyx_L3_error) + __pyx_v_key = __pyx_convert_string_from_py_std__in_string(values[1]); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 87, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("list", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 87, 
__pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("pys3client.PyS3Client.list", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_10pys3client_10PyS3Client_16list(((struct __pyx_obj_10pys3client_PyS3Client *)__pyx_v_self), __pyx_v_bucket, __pyx_v_key); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_10pys3client_10PyS3Client_16list(struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, std::string __pyx_v_bucket, std::string __pyx_v_key) { + std::string __pyx_v_error_message; + int __pyx_v_error_type; + std::vector<std::string> __pyx_v_ret; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("list", 0); + + /* "pys3client.pyx":90 + * cdef string error_message, result + * cdef int error_type + * ret = self.client.list(bucket, key, error_type, error_message) # <<<<<<<<<<<<<< + * return ret + */ + __pyx_v_ret = __pyx_v_self->client->list(__pyx_v_bucket, __pyx_v_key, __pyx_v_error_type, __pyx_v_error_message); + + /* "pys3client.pyx":91 + * cdef int error_type + * ret = self.client.list(bucket, key, error_type, error_message) + * return ret # <<<<<<<<<<<<<< + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __pyx_convert_vector_to_py_std_3a__3a_string(__pyx_v_ret); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 91, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "pys3client.pyx":87 + * return ret + * + * def list(self, string bucket, string key): # <<<<<<<<<<<<<< + * cdef string error_message, result + * cdef int error_type + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("pys3client.PyS3Client.list", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_19__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_19__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf_10pys3client_10PyS3Client_18__reduce_cython__(((struct __pyx_obj_10pys3client_PyS3Client *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_10pys3client_10PyS3Client_18__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial 
__cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(0, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("pys3client.PyS3Client.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_21__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static PyObject *__pyx_pw_10pys3client_10PyS3Client_21__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf_10pys3client_10PyS3Client_20__setstate_cython__(((struct __pyx_obj_10pys3client_PyS3Client *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_10pys3client_10PyS3Client_20__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_10pys3client_PyS3Client *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(0, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("pys3client.PyS3Client.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "string.to_py":31 + * + * @cname("__pyx_convert_PyObject_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyObject_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyObject_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + +static CYTHON_INLINE 
PyObject *__pyx_convert_PyObject_string_to_py_std__in_string(std::string const &__pyx_v_s) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_PyObject_string_to_py_std__in_string", 0); + + /* "string.to_py":32 + * @cname("__pyx_convert_PyObject_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyObject_string_to_py_std__in_string(const string& s): + * return __Pyx_PyObject_FromStringAndSize(s.data(), s.size()) # <<<<<<<<<<<<<< + * cdef extern from *: + * cdef object __Pyx_PyUnicode_FromStringAndSize(const char*, size_t) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_FromStringAndSize(__pyx_v_s.data(), __pyx_v_s.size()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 32, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "string.to_py":31 + * + * @cname("__pyx_convert_PyObject_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyObject_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyObject_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("string.to_py.__pyx_convert_PyObject_string_to_py_std__in_string", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "string.to_py":37 + * + * @cname("__pyx_convert_PyUnicode_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyUnicode_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyUnicode_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + +static CYTHON_INLINE PyObject *__pyx_convert_PyUnicode_string_to_py_std__in_string(std::string const &__pyx_v_s) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_PyUnicode_string_to_py_std__in_string", 0); + + /* "string.to_py":38 + * @cname("__pyx_convert_PyUnicode_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyUnicode_string_to_py_std__in_string(const string& s): + * return __Pyx_PyUnicode_FromStringAndSize(s.data(), s.size()) # <<<<<<<<<<<<<< + * cdef extern from *: + * cdef object __Pyx_PyStr_FromStringAndSize(const char*, size_t) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyUnicode_FromStringAndSize(__pyx_v_s.data(), __pyx_v_s.size()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 38, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "string.to_py":37 + * + * @cname("__pyx_convert_PyUnicode_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyUnicode_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyUnicode_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("string.to_py.__pyx_convert_PyUnicode_string_to_py_std__in_string", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "string.to_py":43 + * + * 
@cname("__pyx_convert_PyStr_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyStr_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyStr_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + +static CYTHON_INLINE PyObject *__pyx_convert_PyStr_string_to_py_std__in_string(std::string const &__pyx_v_s) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_PyStr_string_to_py_std__in_string", 0); + + /* "string.to_py":44 + * @cname("__pyx_convert_PyStr_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyStr_string_to_py_std__in_string(const string& s): + * return __Pyx_PyStr_FromStringAndSize(s.data(), s.size()) # <<<<<<<<<<<<<< + * cdef extern from *: + * cdef object __Pyx_PyBytes_FromStringAndSize(const char*, size_t) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyStr_FromStringAndSize(__pyx_v_s.data(), __pyx_v_s.size()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 44, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "string.to_py":43 + * + * @cname("__pyx_convert_PyStr_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyStr_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyStr_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("string.to_py.__pyx_convert_PyStr_string_to_py_std__in_string", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "string.to_py":49 + * + * @cname("__pyx_convert_PyBytes_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyBytes_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyBytes_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + +static CYTHON_INLINE PyObject *__pyx_convert_PyBytes_string_to_py_std__in_string(std::string const &__pyx_v_s) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_PyBytes_string_to_py_std__in_string", 0); + + /* "string.to_py":50 + * @cname("__pyx_convert_PyBytes_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyBytes_string_to_py_std__in_string(const string& s): + * return __Pyx_PyBytes_FromStringAndSize(s.data(), s.size()) # <<<<<<<<<<<<<< + * cdef extern from *: + * cdef object __Pyx_PyByteArray_FromStringAndSize(const char*, size_t) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_s.data(), __pyx_v_s.size()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 50, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "string.to_py":49 + * + * @cname("__pyx_convert_PyBytes_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyBytes_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyBytes_FromStringAndSize(s.data(), s.size()) + * cdef extern from *: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + 
__Pyx_AddTraceback("string.to_py.__pyx_convert_PyBytes_string_to_py_std__in_string", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "string.to_py":55 + * + * @cname("__pyx_convert_PyByteArray_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyByteArray_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyByteArray_FromStringAndSize(s.data(), s.size()) + * + */ + +static CYTHON_INLINE PyObject *__pyx_convert_PyByteArray_string_to_py_std__in_string(std::string const &__pyx_v_s) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_PyByteArray_string_to_py_std__in_string", 0); + + /* "string.to_py":56 + * @cname("__pyx_convert_PyByteArray_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyByteArray_string_to_py_std__in_string(const string& s): + * return __Pyx_PyByteArray_FromStringAndSize(s.data(), s.size()) # <<<<<<<<<<<<<< + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyByteArray_FromStringAndSize(__pyx_v_s.data(), __pyx_v_s.size()); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 56, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "string.to_py":55 + * + * @cname("__pyx_convert_PyByteArray_string_to_py_std__in_string") + * cdef inline object __pyx_convert_PyByteArray_string_to_py_std__in_string(const string& s): # <<<<<<<<<<<<<< + * return __Pyx_PyByteArray_FromStringAndSize(s.data(), s.size()) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("string.to_py.__pyx_convert_PyByteArray_string_to_py_std__in_string", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "pair.to_py":158 + * + * @cname("__pyx_convert_pair_to_py_int____std_3a__3a_string") + * cdef object __pyx_convert_pair_to_py_int____std_3a__3a_string(const pair[X,Y]& p): # <<<<<<<<<<<<<< + * return p.first, p.second + * + */ + +static PyObject *__pyx_convert_pair_to_py_int____std_3a__3a_string(std::pair const &__pyx_v_p) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_pair_to_py_int____std_3a__3a_string", 0); + + /* "pair.to_py":159 + * @cname("__pyx_convert_pair_to_py_int____std_3a__3a_string") + * cdef object __pyx_convert_pair_to_py_int____std_3a__3a_string(const pair[X,Y]& p): + * return p.first, p.second # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_p.first); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __pyx_convert_PyBytes_string_to_py_std__in_string(__pyx_v_p.second); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_1); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2); + __pyx_t_1 = 0; + 
__pyx_t_2 = 0; + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "pair.to_py":158 + * + * @cname("__pyx_convert_pair_to_py_int____std_3a__3a_string") + * cdef object __pyx_convert_pair_to_py_int____std_3a__3a_string(const pair[X,Y]& p): # <<<<<<<<<<<<<< + * return p.first, p.second + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("pair.to_py.__pyx_convert_pair_to_py_int____std_3a__3a_string", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "list.to_py":92 + * + * @cname("__pyx_convert_list_to_py_std_3a__3a_pair_3c_int_2c_std_3a__3a_string_3e___") + * cdef object __pyx_convert_list_to_py_std_3a__3a_pair_3c_int_2c_std_3a__3a_string_3e___(const cpp_list[X]& v): # <<<<<<<<<<<<<< + * o = [] + * cdef cpp_list[X].const_iterator iter = v.begin() + */ + +static PyObject *__pyx_convert_list_to_py_std_3a__3a_pair_3c_int_2c_std_3a__3a_string_3e___(std::list<std::pair<int,std::string> > const &__pyx_v_v) { + PyObject *__pyx_v_o = NULL; + std::list<std::pair<int,std::string> > ::const_iterator __pyx_v_iter; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_list_to_py_std_3a__3a_pair_3c_int_2c_std_3a__3a_string_3e___", 0); + + /* "list.to_py":93 + * @cname("__pyx_convert_list_to_py_std_3a__3a_pair_3c_int_2c_std_3a__3a_string_3e___") + * cdef object __pyx_convert_list_to_py_std_3a__3a_pair_3c_int_2c_std_3a__3a_string_3e___(const cpp_list[X]& v): + * o = [] # <<<<<<<<<<<<<< + * cdef cpp_list[X].const_iterator iter = v.begin() + * while iter != v.end(): + */ + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 93, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_o = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "list.to_py":94 + * cdef object __pyx_convert_list_to_py_std_3a__3a_pair_3c_int_2c_std_3a__3a_string_3e___(const cpp_list[X]& v): + * o = [] + * cdef cpp_list[X].const_iterator iter = v.begin() # <<<<<<<<<<<<<< + * while iter != v.end(): + * o.append(cython.operator.dereference(iter)) + */ + __pyx_v_iter = __pyx_v_v.begin(); + + /* "list.to_py":95 + * o = [] + * cdef cpp_list[X].const_iterator iter = v.begin() + * while iter != v.end(): # <<<<<<<<<<<<<< + * o.append(cython.operator.dereference(iter)) + * cython.operator.preincrement(iter) + */ + while (1) { + __pyx_t_2 = ((__pyx_v_iter != __pyx_v_v.end()) != 0); + if (!__pyx_t_2) break; + + /* "list.to_py":96 + * cdef cpp_list[X].const_iterator iter = v.begin() + * while iter != v.end(): + * o.append(cython.operator.dereference(iter)) # <<<<<<<<<<<<<< + * cython.operator.preincrement(iter) + * return o + */ + __pyx_t_1 = __pyx_convert_pair_to_py_int____std_3a__3a_string((*__pyx_v_iter)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 96, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyList_Append(__pyx_v_o, __pyx_t_1); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 96, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "list.to_py":97 + * while iter != v.end(): + * o.append(cython.operator.dereference(iter)) + * cython.operator.preincrement(iter) # <<<<<<<<<<<<<< + * return o + * + */ + (void)((++__pyx_v_iter)); + } + + /* "list.to_py":98 + * o.append(cython.operator.dereference(iter)) + * cython.operator.preincrement(iter) 
+ * return o # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_o); + __pyx_r = __pyx_v_o; + goto __pyx_L0; + + /* "list.to_py":92 + * + * @cname("__pyx_convert_list_to_py_std_3a__3a_pair_3c_int_2c_std_3a__3a_string_3e___") + * cdef object __pyx_convert_list_to_py_std_3a__3a_pair_3c_int_2c_std_3a__3a_string_3e___(const cpp_list[X]& v): # <<<<<<<<<<<<<< + * o = [] + * cdef cpp_list[X].const_iterator iter = v.begin() + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("list.to_py.__pyx_convert_list_to_py_std_3a__3a_pair_3c_int_2c_std_3a__3a_string_3e___", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_o); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "string.from_py":13 + * + * @cname("__pyx_convert_string_from_py_std__in_string") + * cdef string __pyx_convert_string_from_py_std__in_string(object o) except *: # <<<<<<<<<<<<<< + * cdef Py_ssize_t length = 0 + * cdef const char* data = __Pyx_PyObject_AsStringAndSize(o, &length) + */ + +static std::string __pyx_convert_string_from_py_std__in_string(PyObject *__pyx_v_o) { + Py_ssize_t __pyx_v_length; + char const *__pyx_v_data; + std::string __pyx_r; + __Pyx_RefNannyDeclarations + char const *__pyx_t_1; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_string_from_py_std__in_string", 0); + + /* "string.from_py":14 + * @cname("__pyx_convert_string_from_py_std__in_string") + * cdef string __pyx_convert_string_from_py_std__in_string(object o) except *: + * cdef Py_ssize_t length = 0 # <<<<<<<<<<<<<< + * cdef const char* data = __Pyx_PyObject_AsStringAndSize(o, &length) + * return string(data, length) + */ + __pyx_v_length = 0; + + /* "string.from_py":15 + * cdef string __pyx_convert_string_from_py_std__in_string(object o) except *: + * cdef Py_ssize_t length = 0 + * cdef const char* data = __Pyx_PyObject_AsStringAndSize(o, &length) # <<<<<<<<<<<<<< + * return string(data, length) + * + */ + __pyx_t_1 = __Pyx_PyObject_AsStringAndSize(__pyx_v_o, (&__pyx_v_length)); if (unlikely(__pyx_t_1 == ((char const *)NULL))) __PYX_ERR(0, 15, __pyx_L1_error) + __pyx_v_data = __pyx_t_1; + + /* "string.from_py":16 + * cdef Py_ssize_t length = 0 + * cdef const char* data = __Pyx_PyObject_AsStringAndSize(o, &length) + * return string(data, length) # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = std::string(__pyx_v_data, __pyx_v_length); + goto __pyx_L0; + + /* "string.from_py":13 + * + * @cname("__pyx_convert_string_from_py_std__in_string") + * cdef string __pyx_convert_string_from_py_std__in_string(object o) except *: # <<<<<<<<<<<<<< + * cdef Py_ssize_t length = 0 + * cdef const char* data = __Pyx_PyObject_AsStringAndSize(o, &length) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("string.from_py.__pyx_convert_string_from_py_std__in_string", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_pretend_to_initialize(&__pyx_r); + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "vector.to_py":60 + * + * @cname("__pyx_convert_vector_to_py_std_3a__3a_string") + * cdef object __pyx_convert_vector_to_py_std_3a__3a_string(vector[X]& v): # <<<<<<<<<<<<<< + * return [v[i] for i in range(v.size())] + * + */ + +static PyObject *__pyx_convert_vector_to_py_std_3a__3a_string(const std::vector<std::string> &__pyx_v_v) { + size_t __pyx_v_i; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + 
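+ /* NOTE: string.from_py above covers the opposite direction, coercing
+  * Python arguments to std::string via __Pyx_PyObject_AsStringAndSize --
+  * which is why the .pyx source encodes text before crossing into C++,
+  * e.g. init_api() calls _init_api(log_level.encode('utf-8')), as quoted
+  * later in this file. vector.to_py, beginning here, maps a
+  * std::vector<std::string> back to a Python list of bytes. */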
PyObject *__pyx_t_1 = NULL; + size_t __pyx_t_2; + size_t __pyx_t_3; + size_t __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_convert_vector_to_py_std_3a__3a_string", 0); + + /* "vector.to_py":61 + * @cname("__pyx_convert_vector_to_py_std_3a__3a_string") + * cdef object __pyx_convert_vector_to_py_std_3a__3a_string(vector[X]& v): + * return [v[i] for i in range(v.size())] # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __pyx_v_v.size(); + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + __pyx_t_5 = __pyx_convert_PyBytes_string_to_py_std__in_string((__pyx_v_v[__pyx_v_i])); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "vector.to_py":60 + * + * @cname("__pyx_convert_vector_to_py_std_3a__3a_string") + * cdef object __pyx_convert_vector_to_py_std_3a__3a_string(vector[X]& v): # <<<<<<<<<<<<<< + * return [v[i] for i in range(v.size())] + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("vector.to_py.__pyx_convert_vector_to_py_std_3a__3a_string", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_tp_new_10pys3client_PyS3Client(PyTypeObject *t, PyObject *a, PyObject *k) { + PyObject *o; + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + if (unlikely(__pyx_pw_10pys3client_10PyS3Client_1__cinit__(o, a, k) < 0)) goto bad; + return o; + bad: + Py_DECREF(o); o = 0; + return NULL; +} + +static void __pyx_tp_dealloc_10pys3client_PyS3Client(PyObject *o) { + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && (!PyType_IS_GC(Py_TYPE(o)) || !_PyGC_FINALIZED(o))) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_pw_10pys3client_10PyS3Client_3__dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + (*Py_TYPE(o)->tp_free)(o); +} + +static PyMethodDef __pyx_methods_10pys3client_PyS3Client[] = { + {"get_object", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_10pys3client_10PyS3Client_5get_object, METH_VARARGS|METH_KEYWORDS, 0}, + {"multipart_download_concurrency", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_10pys3client_10PyS3Client_7multipart_download_concurrency, METH_VARARGS|METH_KEYWORDS, 0}, + {"put_object", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_10pys3client_10PyS3Client_9put_object, METH_VARARGS|METH_KEYWORDS, 0}, + {"multipart_upload_concurrency", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_10pys3client_10PyS3Client_11multipart_upload_concurrency, METH_VARARGS|METH_KEYWORDS, 0}, + {"delete", 
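+ /* NOTE: this method table is the Python-visible surface of PyS3Client:
+  * get_object, multipart_download_concurrency, put_object,
+  * multipart_upload_concurrency, delete, contains and list. A speculative
+  * usage sketch (argument names are guesses based on the interned names
+  * further down, not a documented signature):
+  *
+  *   data = client.get_object(bucket, key)
+  *   client.put_object(bucket, key, data)
+  */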
(PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_10pys3client_10PyS3Client_13delete, METH_VARARGS|METH_KEYWORDS, 0}, + {"contains", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_10pys3client_10PyS3Client_15contains, METH_VARARGS|METH_KEYWORDS, 0}, + {"list", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_10pys3client_10PyS3Client_17list, METH_VARARGS|METH_KEYWORDS, 0}, + {"__reduce_cython__", (PyCFunction)__pyx_pw_10pys3client_10PyS3Client_19__reduce_cython__, METH_NOARGS, 0}, + {"__setstate_cython__", (PyCFunction)__pyx_pw_10pys3client_10PyS3Client_21__setstate_cython__, METH_O, 0}, + {0, 0, 0, 0} +}; + +static PyTypeObject __pyx_type_10pys3client_PyS3Client = { + PyVarObject_HEAD_INIT(0, 0) + "pys3client.PyS3Client", /*tp_name*/ + sizeof(struct __pyx_obj_10pys3client_PyS3Client), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_10pys3client_PyS3Client, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE, /*tp_flags*/ + 0, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_10pys3client_PyS3Client, /*tp_methods*/ + 0, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_10pys3client_PyS3Client, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif + #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, /*tp_print*/ + #endif + #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 + 0, /*tp_pypy_flags*/ + #endif +}; + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_pys3client(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_pys3client}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "pys3client", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 
3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_n_s_ERROR_LIST, __pyx_k_ERROR_LIST, sizeof(__pyx_k_ERROR_LIST), 0, 0, 1, 1}, + {&__pyx_n_s_ERROR_MAP, __pyx_k_ERROR_MAP, sizeof(__pyx_k_ERROR_MAP), 0, 0, 1, 1}, + {&__pyx_n_s_PyS3Client, __pyx_k_PyS3Client, sizeof(__pyx_k_PyS3Client), 0, 0, 1, 1}, + {&__pyx_n_s_S3Error, __pyx_k_S3Error, sizeof(__pyx_k_S3Error), 0, 0, 1, 1}, + {&__pyx_n_s_S3Error___init, __pyx_k_S3Error___init, sizeof(__pyx_k_S3Error___init), 0, 0, 1, 1}, + {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, + {&__pyx_n_s_Undefined, __pyx_k_Undefined, sizeof(__pyx_k_Undefined), 0, 0, 1, 1}, + {&__pyx_n_s_ak, __pyx_k_ak, sizeof(__pyx_k_ak), 0, 0, 1, 1}, + {&__pyx_n_s_bucket, __pyx_k_bucket, sizeof(__pyx_k_bucket), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_data, __pyx_k_data, sizeof(__pyx_k_data), 0, 0, 1, 1}, + {&__pyx_n_s_decode, __pyx_k_decode, sizeof(__pyx_k_decode), 0, 0, 1, 1}, + {&__pyx_n_s_doc, __pyx_k_doc, sizeof(__pyx_k_doc), 0, 0, 1, 1}, + {&__pyx_n_s_enable_https, __pyx_k_enable_https, sizeof(__pyx_k_enable_https), 0, 0, 1, 1}, + {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, + {&__pyx_n_s_endpoint, __pyx_k_endpoint, sizeof(__pyx_k_endpoint), 0, 0, 1, 1}, + {&__pyx_n_s_error_message, __pyx_k_error_message, sizeof(__pyx_k_error_message), 0, 0, 1, 1}, + {&__pyx_n_s_error_name, __pyx_k_error_name, sizeof(__pyx_k_error_name), 0, 0, 1, 1}, + {&__pyx_n_s_filename, __pyx_k_filename, sizeof(__pyx_k_filename), 0, 0, 1, 1}, + {&__pyx_n_s_get, __pyx_k_get, sizeof(__pyx_k_get), 0, 0, 1, 1}, + {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, + {&__pyx_n_s_init, __pyx_k_init, sizeof(__pyx_k_init), 0, 0, 1, 1}, + {&__pyx_n_s_init_api, __pyx_k_init_api, sizeof(__pyx_k_init_api), 0, 0, 1, 1}, + {&__pyx_n_s_key, __pyx_k_key, sizeof(__pyx_k_key), 0, 0, 1, 1}, + {&__pyx_n_s_log_level, __pyx_k_log_level, sizeof(__pyx_k_log_level), 0, 0, 1, 1}, + {&__pyx_n_s_lower, __pyx_k_lower, sizeof(__pyx_k_lower), 0, 0, 1, 1}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_metaclass, __pyx_k_metaclass, sizeof(__pyx_k_metaclass), 0, 0, 1, 1}, + {&__pyx_n_s_module, __pyx_k_module, sizeof(__pyx_k_module), 0, 0, 1, 1}, + {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, + {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, + {&__pyx_n_s_prepare, __pyx_k_prepare, sizeof(__pyx_k_prepare), 0, 0, 1, 1}, + {&__pyx_n_s_pys3client, __pyx_k_pys3client, sizeof(__pyx_k_pys3client), 0, 0, 1, 1}, + {&__pyx_kp_s_pys3client_pyx, __pyx_k_pys3client_pyx, sizeof(__pyx_k_pys3client_pyx), 0, 0, 1, 0}, + {&__pyx_n_s_qualname, __pyx_k_qualname, sizeof(__pyx_k_qualname), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, + {&__pyx_n_s_self, __pyx_k_self, sizeof(__pyx_k_self), 0, 0, 1, 1}, + {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, + 
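+ /* NOTE: this interned-string table doubles as an index of the module's
+  * identifiers; entries such as ak, sk, endpoint, enable_https, verify_ssl,
+  * use_dual_stack, threads_num and log_level strongly suggest the keyword
+  * arguments accepted by PyS3Client.__cinit__. A speculative constructor
+  * sketch (names taken from this table; order, types and defaults unknown):
+  *
+  *   client = PyS3Client(ak='...', sk='...', endpoint='...',
+  *                       enable_https=False, verify_ssl=False,
+  *                       use_dual_stack=False, threads_num=8,
+  *                       log_level='info')
+  */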
{&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, + {&__pyx_n_s_shutdown_api, __pyx_k_shutdown_api, sizeof(__pyx_k_shutdown_api), 0, 0, 1, 1}, + {&__pyx_n_s_sk, __pyx_k_sk, sizeof(__pyx_k_sk), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_n_s_threads_num, __pyx_k_threads_num, sizeof(__pyx_k_threads_num), 0, 0, 1, 1}, + {&__pyx_n_s_use_dual_stack, __pyx_k_use_dual_stack, sizeof(__pyx_k_use_dual_stack), 0, 0, 1, 1}, + {&__pyx_kp_s_utf_8, __pyx_k_utf_8, sizeof(__pyx_k_utf_8), 0, 0, 1, 0}, + {&__pyx_n_s_verify_ssl, __pyx_k_verify_ssl, sizeof(__pyx_k_verify_ssl), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(0, 2, __pyx_L1_error) + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 61, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple_); + __Pyx_GIVEREF(__pyx_tuple_); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* "pys3client.pyx":13 + * ERROR_MAP = {k:v.decode('utf-8') for k,v in ERROR_LIST} + * + * def init_api(log_level): # <<<<<<<<<<<<<< + * log_level = log_level.lower() + * _init_api(log_level.encode('utf-8')) + */ + __pyx_tuple__3 = PyTuple_Pack(1, __pyx_n_s_log_level); if (unlikely(!__pyx_tuple__3)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__3); + __Pyx_GIVEREF(__pyx_tuple__3); + __pyx_codeobj__4 = (PyObject*)__Pyx_PyCode_New(1, 0, 1, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__3, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pys3client_pyx, __pyx_n_s_init_api, 13, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__4)) __PYX_ERR(1, 13, __pyx_L1_error) + + /* "pys3client.pyx":17 + * _init_api(log_level.encode('utf-8')) + * + * def shutdown_api(): # <<<<<<<<<<<<<< + * _shutdown_api() + * + */ + __pyx_codeobj__5 = (PyObject*)__Pyx_PyCode_New(0, 0, 0, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pys3client_pyx, __pyx_n_s_shutdown_api, 17, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__5)) __PYX_ERR(1, 17, __pyx_L1_error) + + /* "pys3client.pyx":21 + * + * class S3Error(Exception): + * def __init__(self, error_name, error_message): # <<<<<<<<<<<<<< + * self.error_name = error_name + * 
self.error_message = error_message + */ + __pyx_tuple__6 = PyTuple_Pack(3, __pyx_n_s_self, __pyx_n_s_error_name, __pyx_n_s_error_message); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(1, 21, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__6); + __Pyx_GIVEREF(__pyx_tuple__6); + __pyx_codeobj__7 = (PyObject*)__Pyx_PyCode_New(3, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__6, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_pys3client_pyx, __pyx_n_s_init, 21, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__7)) __PYX_ERR(1, 21, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(1, 1, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + __pyx_7genexpr__pyx_v_10pys3client_k = Py_None; Py_INCREF(Py_None); + __pyx_7genexpr__pyx_v_10pys3client_v = Py_None; Py_INCREF(Py_None); + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + if (PyType_Ready(&__pyx_type_10pys3client_PyS3Client) < 0) __PYX_ERR(1, 25, __pyx_L1_error) + #if PY_VERSION_HEX < 0x030800B1 + __pyx_type_10pys3client_PyS3Client.tp_print = 0; + #endif + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_10pys3client_PyS3Client.tp_dictoffset && __pyx_type_10pys3client_PyS3Client.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type_10pys3client_PyS3Client.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_PyS3Client, (PyObject *)&__pyx_type_10pys3client_PyS3Client) < 0) __PYX_ERR(1, 25, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type_10pys3client_PyS3Client) < 0) __PYX_ERR(1, 25, __pyx_L1_error) + __pyx_ptype_10pys3client_PyS3Client = &__pyx_type_10pys3client_PyS3Client; + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_type_import_code(void) { + 
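+ /* NOTE: the type-init code just above is what makes the extension type
+  * importable: PyType_Ready finalizes PyS3Client, PyObject_SetAttr
+  * publishes it on the module, and __Pyx_setup_reduce wires up the
+  * pickle-blocking stubs defined earlier. After module init, Python code
+  * can simply do:
+  *
+  *   from pys3client import PyS3Client
+  */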
__Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + + +#ifndef CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#elif PY_MAJOR_VERSION < 3 +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" void +#else +#define __Pyx_PyMODINIT_FUNC void +#endif +#else +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * +#else +#define __Pyx_PyMODINIT_FUNC PyObject * +#endif +#endif + + +#if PY_MAJOR_VERSION < 3 +__Pyx_PyMODINIT_FUNC initpys3client(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC initpys3client(void) +#else +__Pyx_PyMODINIT_FUNC PyInit_pys3client(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit_pys3client(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + #if PY_VERSION_HEX >= 0x030700A1 + static PY_INT64_T main_interpreter_id = -1; + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return (unlikely(current_id == -1)) ? -1 : 0; + } else if (unlikely(main_interpreter_id != current_id)) + #else + static PyInterpreterState *main_interpreter = NULL; + PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; + if (!main_interpreter) { + main_interpreter = current_interpreter; + } else if (unlikely(main_interpreter != current_interpreter)) + #endif + { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + if (allow_none || value != Py_None) { + result = PyDict_SetItemString(moddict, to_name, value); + } + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__Pyx_check_single_interpreter()) + return NULL; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto 
bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static CYTHON_SMALL_CODE int __pyx_pymod_exec_pys3client(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + Py_ssize_t __pyx_t_4; + PyObject *(*__pyx_t_5)(PyObject *); + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + PyObject *(*__pyx_t_9)(PyObject *); + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m) { + if (__pyx_m == __pyx_pyinit_module) return 0; + PyErr_SetString(PyExc_RuntimeError, "Module 'pys3client' has already been imported. Re-initialisation is not supported."); + return -1; + } + #elif PY_MAJOR_VERSION >= 3 + if (__pyx_m) return __Pyx_NewRef(__pyx_m); + #endif + #if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_pys3client(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(1, 1, __pyx_L1_error) + #ifdef __Pxy_PyFrame_Initialize_Offsets + __Pxy_PyFrame_Initialize_Offsets(); + #endif + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(1, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(1, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(1, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(1, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + PyEval_InitThreads(); + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("pys3client", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(1, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(1, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(1, 1, __pyx_L1_error) + Py_INCREF(__pyx_b); + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) 
__PYX_ERR(1, 1, __pyx_L1_error) + Py_INCREF(__pyx_cython_runtime); + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(1, 1, __pyx_L1_error) + /*--- Initialize various global constants etc. ---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(1, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(1, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_pys3client) { + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(1, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(1, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "pys3client")) { + if (unlikely(PyDict_SetItemString(modules, "pys3client", __pyx_m) < 0)) __PYX_ERR(1, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(1, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(1, 1, __pyx_L1_error) + /*--- Global type/function init code ---*/ + (void)__Pyx_modinit_global_init_code(); + (void)__Pyx_modinit_variable_export_code(); + (void)__Pyx_modinit_function_export_code(); + if (unlikely(__Pyx_modinit_type_init_code() < 0)) __PYX_ERR(1, 1, __pyx_L1_error) + (void)__Pyx_modinit_type_import_code(); + (void)__Pyx_modinit_variable_import_code(); + (void)__Pyx_modinit_function_import_code(); + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) __PYX_ERR(1, 1, __pyx_L1_error) + #endif + + /* "pys3client.pyx":10 + * + * + * ERROR_LIST = get_error_list() # <<<<<<<<<<<<<< + * ERROR_MAP = {k:v.decode('utf-8') for k,v in ERROR_LIST} + * + */ + __pyx_t_1 = __pyx_convert_list_to_py_std_3a__3a_pair_3c_int_2c_std_3a__3a_string_3e___(get_error_list()); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 10, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_ERROR_LIST, __pyx_t_1) < 0) __PYX_ERR(1, 10, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "pys3client.pyx":11 + * + * ERROR_LIST = get_error_list() + * ERROR_MAP = {k:v.decode('utf-8') for k,v in ERROR_LIST} # <<<<<<<<<<<<<< + * + * def init_api(log_level): + */ + { /* enter inner scope */ + __pyx_t_1 = PyDict_New(); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 11, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_ERROR_LIST); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 11, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_2); + if (likely(PyList_CheckExact(__pyx_t_2)) || PyTuple_CheckExact(__pyx_t_2)) { + __pyx_t_3 = __pyx_t_2; __Pyx_INCREF(__pyx_t_3); __pyx_t_4 = 0; + __pyx_t_5 = NULL; + } else { + __pyx_t_4 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 11, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 11, __pyx_L4_error) + } + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + for (;;) { + if (likely(!__pyx_t_5)) { + if (likely(PyList_CheckExact(__pyx_t_3))) { + if (__pyx_t_4 >= PyList_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_2 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(1, 11, __pyx_L4_error) + #else + __pyx_t_2 = 
PySequence_ITEM(__pyx_t_3, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 11, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_2); + #endif + } else { + if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_2 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_4); __Pyx_INCREF(__pyx_t_2); __pyx_t_4++; if (unlikely(0 < 0)) __PYX_ERR(1, 11, __pyx_L4_error) + #else + __pyx_t_2 = PySequence_ITEM(__pyx_t_3, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 11, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_2); + #endif + } + } else { + __pyx_t_2 = __pyx_t_5(__pyx_t_3); + if (unlikely(!__pyx_t_2)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(1, 11, __pyx_L4_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_2); + } + if ((likely(PyTuple_CheckExact(__pyx_t_2))) || (PyList_CheckExact(__pyx_t_2))) { + PyObject* sequence = __pyx_t_2; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 11, __pyx_L4_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + if (likely(PyTuple_CheckExact(sequence))) { + __pyx_t_6 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_7 = PyTuple_GET_ITEM(sequence, 1); + } else { + __pyx_t_6 = PyList_GET_ITEM(sequence, 0); + __pyx_t_7 = PyList_GET_ITEM(sequence, 1); + } + __Pyx_INCREF(__pyx_t_6); + __Pyx_INCREF(__pyx_t_7); + #else + __pyx_t_6 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 11, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 11, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_7); + #endif + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } else { + Py_ssize_t index = -1; + __pyx_t_8 = PyObject_GetIter(__pyx_t_2); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 11, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_9 = Py_TYPE(__pyx_t_8)->tp_iternext; + index = 0; __pyx_t_6 = __pyx_t_9(__pyx_t_8); if (unlikely(!__pyx_t_6)) goto __pyx_L7_unpacking_failed; + __Pyx_GOTREF(__pyx_t_6); + index = 1; __pyx_t_7 = __pyx_t_9(__pyx_t_8); if (unlikely(!__pyx_t_7)) goto __pyx_L7_unpacking_failed; + __Pyx_GOTREF(__pyx_t_7); + if (__Pyx_IternextUnpackEndCheck(__pyx_t_9(__pyx_t_8), 2) < 0) __PYX_ERR(1, 11, __pyx_L4_error) + __pyx_t_9 = NULL; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + goto __pyx_L8_unpacking_done; + __pyx_L7_unpacking_failed:; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_9 = NULL; + if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); + __PYX_ERR(1, 11, __pyx_L4_error) + __pyx_L8_unpacking_done:; + } + __Pyx_XGOTREF(__pyx_7genexpr__pyx_v_10pys3client_k); + __Pyx_DECREF_SET(__pyx_7genexpr__pyx_v_10pys3client_k, __pyx_t_6); + __Pyx_GIVEREF(__pyx_t_6); + __pyx_t_6 = 0; + __Pyx_XGOTREF(__pyx_7genexpr__pyx_v_10pys3client_v); + __Pyx_DECREF_SET(__pyx_7genexpr__pyx_v_10pys3client_v, __pyx_t_7); + __Pyx_GIVEREF(__pyx_t_7); + __pyx_t_7 = 0; + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_7genexpr__pyx_v_10pys3client_v, __pyx_n_s_decode); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 11, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_6 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_7))) { + __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_7); + if (likely(__pyx_t_6)) 
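+ /* NOTE: the unrolled unpack-and-decode loop around this point is the
+  * compiled form of the one-line dict comprehension quoted above:
+  *
+  *   ERROR_MAP = {k: v.decode('utf-8') for k, v in ERROR_LIST}
+  *
+  * i.e. for each (code, bytes) pair, decode the name and key it by the
+  * integer code. */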
{ + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_7); + __Pyx_INCREF(__pyx_t_6); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_7, function); + } + } + __pyx_t_2 = (__pyx_t_6) ? __Pyx_PyObject_Call2Args(__pyx_t_7, __pyx_t_6, __pyx_kp_s_utf_8) : __Pyx_PyObject_CallOneArg(__pyx_t_7, __pyx_kp_s_utf_8); + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 11, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + if (unlikely(PyDict_SetItem(__pyx_t_1, (PyObject*)__pyx_7genexpr__pyx_v_10pys3client_k, (PyObject*)__pyx_t_2))) __PYX_ERR(1, 11, __pyx_L4_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_GOTREF(__pyx_7genexpr__pyx_v_10pys3client_k); + __Pyx_DECREF_SET(__pyx_7genexpr__pyx_v_10pys3client_k, Py_None); + __Pyx_GOTREF(__pyx_7genexpr__pyx_v_10pys3client_v); + __Pyx_DECREF_SET(__pyx_7genexpr__pyx_v_10pys3client_v, Py_None); + goto __pyx_L9_exit_scope; + __pyx_L4_error:; + __Pyx_GOTREF(__pyx_7genexpr__pyx_v_10pys3client_k); + __Pyx_DECREF_SET(__pyx_7genexpr__pyx_v_10pys3client_k, Py_None); + __Pyx_GOTREF(__pyx_7genexpr__pyx_v_10pys3client_v); + __Pyx_DECREF_SET(__pyx_7genexpr__pyx_v_10pys3client_v, Py_None); + goto __pyx_L1_error; + __pyx_L9_exit_scope:; + } /* exit inner scope */ + if (PyDict_SetItem(__pyx_d, __pyx_n_s_ERROR_MAP, __pyx_t_1) < 0) __PYX_ERR(1, 11, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "pys3client.pyx":13 + * ERROR_MAP = {k:v.decode('utf-8') for k,v in ERROR_LIST} + * + * def init_api(log_level): # <<<<<<<<<<<<<< + * log_level = log_level.lower() + * _init_api(log_level.encode('utf-8')) + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_10pys3client_1init_api, NULL, __pyx_n_s_pys3client); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_init_api, __pyx_t_1) < 0) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "pys3client.pyx":17 + * _init_api(log_level.encode('utf-8')) + * + * def shutdown_api(): # <<<<<<<<<<<<<< + * _shutdown_api() + * + */ + __pyx_t_1 = PyCFunction_NewEx(&__pyx_mdef_10pys3client_3shutdown_api, NULL, __pyx_n_s_pys3client); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_shutdown_api, __pyx_t_1) < 0) __PYX_ERR(1, 17, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "pys3client.pyx":20 + * _shutdown_api() + * + * class S3Error(Exception): # <<<<<<<<<<<<<< + * def __init__(self, error_name, error_message): + * self.error_name = error_name + */ + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 20, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + __Pyx_GIVEREF(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + __pyx_t_3 = __Pyx_CalculateMetaclass(NULL, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 20, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = __Pyx_Py3MetaclassPrepare(__pyx_t_3, __pyx_t_1, __pyx_n_s_S3Error, __pyx_n_s_S3Error, (PyObject *) NULL, __pyx_n_s_pys3client, (PyObject *) NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 20, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + + /* "pys3client.pyx":21 + * + * class S3Error(Exception): + * def __init__(self, error_name, error_message): # <<<<<<<<<<<<<< + * self.error_name = 
error_name + * self.error_message = error_message + */ + __pyx_t_7 = __Pyx_CyFunction_New(&__pyx_mdef_10pys3client_7S3Error_1__init__, 0, __pyx_n_s_S3Error___init, NULL, __pyx_n_s_pys3client, __pyx_d, ((PyObject *)__pyx_codeobj__7)); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 21, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + if (__Pyx_SetNameInClass(__pyx_t_2, __pyx_n_s_init, __pyx_t_7) < 0) __PYX_ERR(1, 21, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + + /* "pys3client.pyx":20 + * _shutdown_api() + * + * class S3Error(Exception): # <<<<<<<<<<<<<< + * def __init__(self, error_name, error_message): + * self.error_name = error_name + */ + __pyx_t_7 = __Pyx_Py3ClassCreate(__pyx_t_3, __pyx_n_s_S3Error, __pyx_t_1, __pyx_t_2, NULL, 0, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 20, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_S3Error, __pyx_t_7) < 0) __PYX_ERR(1, 20, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "pys3client.pyx":1 + * # distutils: language = c++ # <<<<<<<<<<<<<< + * + * from libcpp.string cimport string + */ + __pyx_t_1 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_1) < 0) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "vector.to_py":60 + * + * @cname("__pyx_convert_vector_to_py_std_3a__3a_string") + * cdef object __pyx_convert_vector_to_py_std_3a__3a_string(vector[X]& v): # <<<<<<<<<<<<<< + * return [v[i] for i in range(v.size())] + * + */ + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init pys3client", __pyx_clineno, __pyx_lineno, __pyx_filename); + } + Py_CLEAR(__pyx_m); + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init pys3client"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 
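+ /* NOTE: by this point module init has published ERROR_LIST, ERROR_MAP,
+  * init_api, shutdown_api and the S3Error exception, whose
+  * __init__(self, error_name, error_message) stores both arguments as
+  * attributes. A minimal handling sketch (method and argument names
+  * illustrative):
+  *
+  *   try:
+  *       data = client.get_object(bucket, key)
+  *   except S3Error as e:
+  *       print(e.error_name, e.error_message)
+  */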
0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule(modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, "RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* PyObjectGetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* PyFunctionFastCall */ +#if CYTHON_FAST_PYCALL +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking them. + */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + fastlocals = __Pyx_PyFrame_GetLocalsplus(f); + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + return result; +} +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? 
PyDict_Size(kwargs) : 0; + if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { + return NULL; + } + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); +#endif + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); +done: + Py_LeaveRecursiveCall(); + return result; +} +#endif +#endif + +/* PyObjectCall */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = Py_TYPE(func)->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallMethO */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = PyCFunction_GET_FUNCTION(func); + self = PyCFunction_GET_SELF(func); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallNoArg */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, NULL, 0); + } +#endif +#if defined(__Pyx_CyFunction_USED) && defined(NDEBUG) + if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) +#else + if (likely(PyCFunction_Check(func))) +#endif + { + if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { + return __Pyx_PyObject_CallMethO(func, NULL); + } + } + return 
__Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); +} +#endif + +/* PyCFunctionFastCall */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { + PyCFunctionObject *func = (PyCFunctionObject*)func_obj; + PyCFunction meth = PyCFunction_GET_FUNCTION(func); + PyObject *self = PyCFunction_GET_SELF(func); + int flags = PyCFunction_GET_FLAGS(func); + assert(PyCFunction_Check(func)); + assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); + assert(nargs >= 0); + assert(nargs == 0 || args != NULL); + /* _PyCFunction_FastCallDict() must not be called with an exception set, + because it may clear it (directly or indirectly) and so the + caller loses its exception */ + assert(!PyErr_Occurred()); + if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { + return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); + } else { + return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); + } +} +#endif + +/* PyObjectCallOneArg */ +#if CYTHON_COMPILING_IN_CPYTHON +static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_New(1); + if (unlikely(!args)) return NULL; + Py_INCREF(arg); + PyTuple_SET_ITEM(args, 0, arg); + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, &arg, 1); + } +#endif + if (likely(PyCFunction_Check(func))) { + if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { + return __Pyx_PyObject_CallMethO(func, arg); +#if CYTHON_FAST_PYCCALL + } else if (__Pyx_PyFastCFunction_Check(func)) { + return __Pyx_PyCFunction_FastCall(func, &arg, 1); +#endif + } + } + return __Pyx__PyObject_CallOneArg(func, arg); +} +#else +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_Pack(1, arg); + if (unlikely(!args)) return NULL; + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +#endif + +/* PyObjectCall2Args */ +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { + PyObject *args, *result = NULL; + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyFunction_FastCall(function, args, 2); + } + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyCFunction_FastCall(function, args, 2); + } + #endif + args = PyTuple_New(2); + if (unlikely(!args)) goto done; + Py_INCREF(arg1); + PyTuple_SET_ITEM(args, 0, arg1); + Py_INCREF(arg2); + PyTuple_SET_ITEM(args, 1, arg2); + Py_INCREF(function); + result = __Pyx_PyObject_Call(function, args, NULL); + Py_DECREF(args); + Py_DECREF(function); +done: + return result; +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less 
= "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? "" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* PyObjectSetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE int __Pyx_PyObject_SetAttrStr(PyObject* obj, PyObject* attr_name, PyObject* value) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_setattro)) + return tp->tp_setattro(obj, attr_name, value); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_setattr)) + return tp->tp_setattr(obj, PyString_AS_STRING(attr_name), value); +#endif + return PyObject_SetAttr(obj, attr_name, value); +} +#endif + +/* PyDictVersioning */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; +} +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif + +/* GetModuleGlobalName */ +#if CYTHON_USE_DICT_VERSIONS +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) +#else +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) +#endif +{ + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 + result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } else if (unlikely(PyErr_Occurred())) { + return NULL; + } +#else + result = PyDict_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } +#endif +#else + result = PyObject_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } + PyErr_Clear(); +#endif + return __Pyx_GetBuiltinName(name); +} + +/* PyErrFetchRestore */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* RaiseException */ +#if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, + CYTHON_UNUSED PyObject *cause) { + __Pyx_PyThreadState_declare + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + 
Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { +#if CYTHON_FAST_THREAD_STATE + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#else + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#endif + } +bad: + Py_XDECREF(owned_instance); + return; +} +#endif + +/* PyObject_GenericGetAttrNoDict */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { + PyErr_Format(PyExc_AttributeError, +#if PY_MAJOR_VERSION >= 3 + "'%.50s' object has no attribute '%U'", + tp->tp_name, attr_name); +#else + "'%.50s' object has no attribute '%.400s'", + tp->tp_name, PyString_AS_STRING(attr_name)); +#endif + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { + PyObject *descr; + PyTypeObject *tp = Py_TYPE(obj); + if (unlikely(!PyString_Check(attr_name))) { + return PyObject_GenericGetAttr(obj, attr_name); + } + assert(!tp->tp_dictoffset); + descr = _PyType_Lookup(tp, attr_name); + if (unlikely(!descr)) { + return 
__Pyx_RaiseGenericGetAttributeError(tp, attr_name); + } + Py_INCREF(descr); + #if PY_MAJOR_VERSION < 3 + if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) + #endif + { + descrgetfunc f = Py_TYPE(descr)->tp_descr_get; + if (unlikely(f)) { + PyObject *res = f(descr, obj, (PyObject *)tp); + Py_DECREF(descr); + return res; + } + } + return descr; +} +#endif + +/* PyObject_GenericGetAttr */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { + if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { + return PyObject_GenericGetAttr(obj, attr_name); + } + return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); +} +#endif + +/* PyErrExceptionMatches */ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; i<n; i++) { + if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; + } +#endif + for (i=0; i<n; i++) { + if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; + } + return 0; +} +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { + PyObject *exc_type = tstate->curexc_type; + if (exc_type == err) return 1; + if (unlikely(!exc_type)) return 0; + if (unlikely(PyTuple_Check(err))) + return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); + return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); +} +#endif + +/* PyObjectGetAttrStrNoError */ +static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + __Pyx_PyErr_Clear(); +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { + PyObject *result; +#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { + return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); + } +#endif + result = __Pyx_PyObject_GetAttrStr(obj, attr_name); + if (unlikely(!result)) { + __Pyx_PyObject_GetAttrStr_ClearAttributeError(); + } + return result; +} + +/* SetupReduce */ +static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { + int ret; + PyObject *name_attr; + name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name); + if (likely(name_attr)) { + ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); + } else { + ret = -1; + } + if (unlikely(ret < 0)) { + PyErr_Clear(); + ret = 0; + } + Py_XDECREF(name_attr); + return ret; +} +static int __Pyx_setup_reduce(PyObject* type_obj) { + int ret = 0; + PyObject *object_reduce = NULL; + PyObject *object_getstate = NULL; + PyObject *object_reduce_ex = NULL; + PyObject *reduce = NULL; + PyObject *reduce_ex = NULL; + PyObject *reduce_cython = NULL; + PyObject *setstate = NULL; + PyObject *setstate_cython = NULL; + PyObject *getstate = NULL; +#if CYTHON_USE_PYTYPE_LOOKUP + getstate = _PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate); +#else + getstate = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_getstate); + if (!getstate && PyErr_Occurred()) { + goto __PYX_BAD; + } +#endif + if (getstate) { +#if CYTHON_USE_PYTYPE_LOOKUP + object_getstate = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_getstate); +#else + object_getstate = __Pyx_PyObject_GetAttrStrNoError((PyObject*)&PyBaseObject_Type, __pyx_n_s_getstate); + if (!object_getstate && PyErr_Occurred()) { + goto __PYX_BAD; + } +#endif + if (object_getstate != getstate) { + goto __PYX_GOOD; + } + } +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if
(!object_reduce_ex) goto __PYX_BAD; +#else + object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; +#endif + reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; + if (reduce_ex == object_reduce_ex) { +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; +#else + object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; +#endif + reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; + if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { + reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); + if (likely(reduce_cython)) { + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + } else if (reduce == object_reduce || PyErr_Occurred()) { + goto __PYX_BAD; + } + setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); + if (!setstate) PyErr_Clear(); + if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { + setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); + if (likely(setstate_cython)) { + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + } else if (!setstate || PyErr_Occurred()) { + goto __PYX_BAD; + } + } + PyType_Modified((PyTypeObject*)type_obj); + } + } + goto __PYX_GOOD; +__PYX_BAD: + if (!PyErr_Occurred()) + PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); + ret = -1; +__PYX_GOOD: +#if !CYTHON_USE_PYTYPE_LOOKUP + Py_XDECREF(object_reduce); + Py_XDECREF(object_reduce_ex); + Py_XDECREF(object_getstate); + Py_XDECREF(getstate); +#endif + Py_XDECREF(reduce); + Py_XDECREF(reduce_ex); + Py_XDECREF(reduce_cython); + Py_XDECREF(setstate); + Py_XDECREF(setstate_cython); + return ret; +} + +/* RaiseTooManyValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { + PyErr_Format(PyExc_ValueError, + "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); +} + +/* RaiseNeedMoreValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { + PyErr_Format(PyExc_ValueError, + "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", + index, (index == 1) ? 
"" : "s"); +} + +/* IterFinish */ +static CYTHON_INLINE int __Pyx_IterFinish(void) { +#if CYTHON_FAST_THREAD_STATE + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* exc_type = tstate->curexc_type; + if (unlikely(exc_type)) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) { + PyObject *exc_value, *exc_tb; + exc_value = tstate->curexc_value; + exc_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; + Py_DECREF(exc_type); + Py_XDECREF(exc_value); + Py_XDECREF(exc_tb); + return 0; + } else { + return -1; + } + } + return 0; +#else + if (unlikely(PyErr_Occurred())) { + if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { + PyErr_Clear(); + return 0; + } else { + return -1; + } + } + return 0; +#endif +} + +/* UnpackItemEndCheck */ +static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { + if (unlikely(retval)) { + Py_DECREF(retval); + __Pyx_RaiseTooManyValuesError(expected); + return -1; + } + return __Pyx_IterFinish(); +} + +/* CalculateMetaclass */ +static PyObject *__Pyx_CalculateMetaclass(PyTypeObject *metaclass, PyObject *bases) { + Py_ssize_t i, nbases = PyTuple_GET_SIZE(bases); + for (i=0; i < nbases; i++) { + PyTypeObject *tmptype; + PyObject *tmp = PyTuple_GET_ITEM(bases, i); + tmptype = Py_TYPE(tmp); +#if PY_MAJOR_VERSION < 3 + if (tmptype == &PyClass_Type) + continue; +#endif + if (!metaclass) { + metaclass = tmptype; + continue; + } + if (PyType_IsSubtype(metaclass, tmptype)) + continue; + if (PyType_IsSubtype(tmptype, metaclass)) { + metaclass = tmptype; + continue; + } + PyErr_SetString(PyExc_TypeError, + "metaclass conflict: " + "the metaclass of a derived class " + "must be a (non-strict) subclass " + "of the metaclasses of all its bases"); + return NULL; + } + if (!metaclass) { +#if PY_MAJOR_VERSION < 3 + metaclass = &PyClass_Type; +#else + metaclass = &PyType_Type; +#endif + } + Py_INCREF((PyObject*) metaclass); + return (PyObject*) metaclass; +} + +/* FetchCommonType */ +static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) { + PyObject* fake_module; + PyTypeObject* cached_type = NULL; + fake_module = PyImport_AddModule((char*) "_cython_" CYTHON_ABI); + if (!fake_module) return NULL; + Py_INCREF(fake_module); + cached_type = (PyTypeObject*) PyObject_GetAttrString(fake_module, type->tp_name); + if (cached_type) { + if (!PyType_Check((PyObject*)cached_type)) { + PyErr_Format(PyExc_TypeError, + "Shared Cython type %.200s is not a type object", + type->tp_name); + goto bad; + } + if (cached_type->tp_basicsize != type->tp_basicsize) { + PyErr_Format(PyExc_TypeError, + "Shared Cython type %.200s has the wrong size, try recompiling", + type->tp_name); + goto bad; + } + } else { + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; + PyErr_Clear(); + if (PyType_Ready(type) < 0) goto bad; + if (PyObject_SetAttrString(fake_module, type->tp_name, (PyObject*) type) < 0) + goto bad; + Py_INCREF(type); + cached_type = type; + } +done: + Py_DECREF(fake_module); + return cached_type; +bad: + Py_XDECREF(cached_type); + cached_type = NULL; + goto done; +} + +/* CythonFunctionShared */ +#include +static PyObject * +__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *closure) +{ + if (unlikely(op->func_doc == NULL)) { + if (op->func.m_ml->ml_doc) { +#if PY_MAJOR_VERSION >= 3 + op->func_doc = PyUnicode_FromString(op->func.m_ml->ml_doc); +#else + op->func_doc = PyString_FromString(op->func.m_ml->ml_doc); +#endif + 
if (unlikely(op->func_doc == NULL)) + return NULL; + } else { + Py_INCREF(Py_None); + return Py_None; + } + } + Py_INCREF(op->func_doc); + return op->func_doc; +} +static int +__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) +{ + PyObject *tmp = op->func_doc; + if (value == NULL) { + value = Py_None; + } + Py_INCREF(value); + op->func_doc = value; + Py_XDECREF(tmp); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) +{ + if (unlikely(op->func_name == NULL)) { +#if PY_MAJOR_VERSION >= 3 + op->func_name = PyUnicode_InternFromString(op->func.m_ml->ml_name); +#else + op->func_name = PyString_InternFromString(op->func.m_ml->ml_name); +#endif + if (unlikely(op->func_name == NULL)) + return NULL; + } + Py_INCREF(op->func_name); + return op->func_name; +} +static int +__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) +{ + PyObject *tmp; +#if PY_MAJOR_VERSION >= 3 + if (unlikely(value == NULL || !PyUnicode_Check(value))) +#else + if (unlikely(value == NULL || !PyString_Check(value))) +#endif + { + PyErr_SetString(PyExc_TypeError, + "__name__ must be set to a string object"); + return -1; + } + tmp = op->func_name; + Py_INCREF(value); + op->func_name = value; + Py_XDECREF(tmp); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) +{ + Py_INCREF(op->func_qualname); + return op->func_qualname; +} +static int +__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) +{ + PyObject *tmp; +#if PY_MAJOR_VERSION >= 3 + if (unlikely(value == NULL || !PyUnicode_Check(value))) +#else + if (unlikely(value == NULL || !PyString_Check(value))) +#endif + { + PyErr_SetString(PyExc_TypeError, + "__qualname__ must be set to a string object"); + return -1; + } + tmp = op->func_qualname; + Py_INCREF(value); + op->func_qualname = value; + Py_XDECREF(tmp); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_self(__pyx_CyFunctionObject *m, CYTHON_UNUSED void *closure) +{ + PyObject *self; + self = m->func_closure; + if (self == NULL) + self = Py_None; + Py_INCREF(self); + return self; +} +static PyObject * +__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) +{ + if (unlikely(op->func_dict == NULL)) { + op->func_dict = PyDict_New(); + if (unlikely(op->func_dict == NULL)) + return NULL; + } + Py_INCREF(op->func_dict); + return op->func_dict; +} +static int +__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, CYTHON_UNUSED void *context) +{ + PyObject *tmp; + if (unlikely(value == NULL)) { + PyErr_SetString(PyExc_TypeError, + "function's dictionary may not be deleted"); + return -1; + } + if (unlikely(!PyDict_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "setting function's dictionary to a non-dict"); + return -1; + } + tmp = op->func_dict; + Py_INCREF(value); + op->func_dict = value; + Py_XDECREF(tmp); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) +{ + Py_INCREF(op->func_globals); + return op->func_globals; +} +static PyObject * +__Pyx_CyFunction_get_closure(CYTHON_UNUSED __pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) +{ + Py_INCREF(Py_None); + return Py_None; +} +static PyObject * +__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) +{ + PyObject* result = 
(op->func_code) ? op->func_code : Py_None; + Py_INCREF(result); + return result; +} +static int +__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { + int result = 0; + PyObject *res = op->defaults_getter((PyObject *) op); + if (unlikely(!res)) + return -1; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + op->defaults_tuple = PyTuple_GET_ITEM(res, 0); + Py_INCREF(op->defaults_tuple); + op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); + Py_INCREF(op->defaults_kwdict); + #else + op->defaults_tuple = PySequence_ITEM(res, 0); + if (unlikely(!op->defaults_tuple)) result = -1; + else { + op->defaults_kwdict = PySequence_ITEM(res, 1); + if (unlikely(!op->defaults_kwdict)) result = -1; + } + #endif + Py_DECREF(res); + return result; +} +static int +__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { + PyObject* tmp; + if (!value) { + value = Py_None; + } else if (value != Py_None && !PyTuple_Check(value)) { + PyErr_SetString(PyExc_TypeError, + "__defaults__ must be set to a tuple object"); + return -1; + } + Py_INCREF(value); + tmp = op->defaults_tuple; + op->defaults_tuple = value; + Py_XDECREF(tmp); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { + PyObject* result = op->defaults_tuple; + if (unlikely(!result)) { + if (op->defaults_getter) { + if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; + result = op->defaults_tuple; + } else { + result = Py_None; + } + } + Py_INCREF(result); + return result; +} +static int +__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { + PyObject* tmp; + if (!value) { + value = Py_None; + } else if (value != Py_None && !PyDict_Check(value)) { + PyErr_SetString(PyExc_TypeError, + "__kwdefaults__ must be set to a dict object"); + return -1; + } + Py_INCREF(value); + tmp = op->defaults_kwdict; + op->defaults_kwdict = value; + Py_XDECREF(tmp); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { + PyObject* result = op->defaults_kwdict; + if (unlikely(!result)) { + if (op->defaults_getter) { + if (__Pyx_CyFunction_init_defaults(op) < 0) return NULL; + result = op->defaults_kwdict; + } else { + result = Py_None; + } + } + Py_INCREF(result); + return result; +} +static int +__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, CYTHON_UNUSED void *context) { + PyObject* tmp; + if (!value || value == Py_None) { + value = NULL; + } else if (!PyDict_Check(value)) { + PyErr_SetString(PyExc_TypeError, + "__annotations__ must be set to a dict object"); + return -1; + } + Py_XINCREF(value); + tmp = op->func_annotations; + op->func_annotations = value; + Py_XDECREF(tmp); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, CYTHON_UNUSED void *context) { + PyObject* result = op->func_annotations; + if (unlikely(!result)) { + result = PyDict_New(); + if (unlikely(!result)) return NULL; + op->func_annotations = result; + } + Py_INCREF(result); + return result; +} +static PyGetSetDef __pyx_CyFunction_getsets[] = { + {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, + {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, + {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, + {(char *) "__name__", 
(getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, + {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, + {(char *) "__self__", (getter)__Pyx_CyFunction_get_self, 0, 0, 0}, + {(char *) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, + {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, + {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, + {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, + {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, + {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, + {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, + {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, + {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, + {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, + {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, + {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, + {0, 0, 0, 0, 0} +}; +static PyMemberDef __pyx_CyFunction_members[] = { + {(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), PY_WRITE_RESTRICTED, 0}, + {0, 0, 0, 0, 0} +}; +static PyObject * +__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, CYTHON_UNUSED PyObject *args) +{ +#if PY_MAJOR_VERSION >= 3 + Py_INCREF(m->func_qualname); + return m->func_qualname; +#else + return PyString_FromString(m->func.m_ml->ml_name); +#endif +} +static PyMethodDef __pyx_CyFunction_methods[] = { + {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, + {0, 0, 0, 0} +}; +#if PY_VERSION_HEX < 0x030500A0 +#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) +#else +#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func.m_weakreflist) +#endif +static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname, + PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { + if (unlikely(op == NULL)) + return NULL; + op->flags = flags; + __Pyx_CyFunction_weakreflist(op) = NULL; + op->func.m_ml = ml; + op->func.m_self = (PyObject *) op; + Py_XINCREF(closure); + op->func_closure = closure; + Py_XINCREF(module); + op->func.m_module = module; + op->func_dict = NULL; + op->func_name = NULL; + Py_INCREF(qualname); + op->func_qualname = qualname; + op->func_doc = NULL; + op->func_classobj = NULL; + op->func_globals = globals; + Py_INCREF(op->func_globals); + Py_XINCREF(code); + op->func_code = code; + op->defaults_pyobjects = 0; + op->defaults_size = 0; + op->defaults = NULL; + op->defaults_tuple = NULL; + op->defaults_kwdict = NULL; + op->defaults_getter = NULL; + op->func_annotations = NULL; + return (PyObject *) op; +} +static int +__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) +{ + Py_CLEAR(m->func_closure); + Py_CLEAR(m->func.m_module); + Py_CLEAR(m->func_dict); + Py_CLEAR(m->func_name); + Py_CLEAR(m->func_qualname); + Py_CLEAR(m->func_doc); + Py_CLEAR(m->func_globals); + Py_CLEAR(m->func_code); + Py_CLEAR(m->func_classobj); + Py_CLEAR(m->defaults_tuple); + Py_CLEAR(m->defaults_kwdict); + Py_CLEAR(m->func_annotations); + if (m->defaults) { + 
PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); + int i; + for (i = 0; i < m->defaults_pyobjects; i++) + Py_XDECREF(pydefaults[i]); + PyObject_Free(m->defaults); + m->defaults = NULL; + } + return 0; +} +static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m) +{ + if (__Pyx_CyFunction_weakreflist(m) != NULL) + PyObject_ClearWeakRefs((PyObject *) m); + __Pyx_CyFunction_clear(m); + PyObject_GC_Del(m); +} +static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m) +{ + PyObject_GC_UnTrack(m); + __Pyx__CyFunction_dealloc(m); +} +static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg) +{ + Py_VISIT(m->func_closure); + Py_VISIT(m->func.m_module); + Py_VISIT(m->func_dict); + Py_VISIT(m->func_name); + Py_VISIT(m->func_qualname); + Py_VISIT(m->func_doc); + Py_VISIT(m->func_globals); + Py_VISIT(m->func_code); + Py_VISIT(m->func_classobj); + Py_VISIT(m->defaults_tuple); + Py_VISIT(m->defaults_kwdict); + if (m->defaults) { + PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); + int i; + for (i = 0; i < m->defaults_pyobjects; i++) + Py_VISIT(pydefaults[i]); + } + return 0; +} +static PyObject *__Pyx_CyFunction_descr_get(PyObject *func, PyObject *obj, PyObject *type) +{ +#if PY_MAJOR_VERSION < 3 + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + if (m->flags & __Pyx_CYFUNCTION_STATICMETHOD) { + Py_INCREF(func); + return func; + } + if (m->flags & __Pyx_CYFUNCTION_CLASSMETHOD) { + if (type == NULL) + type = (PyObject *)(Py_TYPE(obj)); + return __Pyx_PyMethod_New(func, type, (PyObject *)(Py_TYPE(type))); + } + if (obj == Py_None) + obj = NULL; +#endif + return __Pyx_PyMethod_New(func, obj, type); +} +static PyObject* +__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op) +{ +#if PY_MAJOR_VERSION >= 3 + return PyUnicode_FromFormat("<cyfunction %U at %p>", + op->func_qualname, (void *)op); +#else + return PyString_FromFormat("<cyfunction %s at %p>", + PyString_AsString(op->func_qualname), (void *)op); +#endif +} +static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) { + PyCFunctionObject* f = (PyCFunctionObject*)func; + PyCFunction meth = f->m_ml->ml_meth; + Py_ssize_t size; + switch (f->m_ml->ml_flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) { + case METH_VARARGS: + if (likely(kw == NULL || PyDict_Size(kw) == 0)) + return (*meth)(self, arg); + break; + case METH_VARARGS | METH_KEYWORDS: + return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw); + case METH_NOARGS: + if (likely(kw == NULL || PyDict_Size(kw) == 0)) { + size = PyTuple_GET_SIZE(arg); + if (likely(size == 0)) + return (*meth)(self, NULL); + PyErr_Format(PyExc_TypeError, + "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", + f->m_ml->ml_name, size); + return NULL; + } + break; + case METH_O: + if (likely(kw == NULL || PyDict_Size(kw) == 0)) { + size = PyTuple_GET_SIZE(arg); + if (likely(size == 1)) { + PyObject *result, *arg0; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + arg0 = PyTuple_GET_ITEM(arg, 0); + #else + arg0 = PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; + #endif + result = (*meth)(self, arg0); + #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_DECREF(arg0); + #endif + return result; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", + f->m_ml->ml_name, size); + return NULL; + } + break; + default: + PyErr_SetString(PyExc_SystemError, "Bad call flags in " + "__Pyx_CyFunction_Call. 
METH_OLDARGS is no " + "longer supported!"); + return NULL; + } + PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", + f->m_ml->ml_name); + return NULL; +} +static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { + return __Pyx_CyFunction_CallMethod(func, ((PyCFunctionObject*)func)->m_self, arg, kw); +} +static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) { + PyObject *result; + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; + if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { + Py_ssize_t argc; + PyObject *new_args; + PyObject *self; + argc = PyTuple_GET_SIZE(args); + new_args = PyTuple_GetSlice(args, 1, argc); + if (unlikely(!new_args)) + return NULL; + self = PyTuple_GetItem(args, 0); + if (unlikely(!self)) { + Py_DECREF(new_args); +#if PY_MAJOR_VERSION > 2 + PyErr_Format(PyExc_TypeError, + "unbound method %.200S() needs an argument", + cyfunc->func_qualname); +#else + PyErr_SetString(PyExc_TypeError, + "unbound method needs an argument"); +#endif + return NULL; + } + result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw); + Py_DECREF(new_args); + } else { + result = __Pyx_CyFunction_Call(func, args, kw); + } + return result; +} +static PyTypeObject __pyx_CyFunctionType_type = { + PyVarObject_HEAD_INIT(0, 0) + "cython_function_or_method", + sizeof(__pyx_CyFunctionObject), + 0, + (destructor) __Pyx_CyFunction_dealloc, + 0, + 0, + 0, +#if PY_MAJOR_VERSION < 3 + 0, +#else + 0, +#endif + (reprfunc) __Pyx_CyFunction_repr, + 0, + 0, + 0, + 0, + __Pyx_CyFunction_CallAsMethod, + 0, + 0, + 0, + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC, + 0, + (traverseproc) __Pyx_CyFunction_traverse, + (inquiry) __Pyx_CyFunction_clear, + 0, +#if PY_VERSION_HEX < 0x030500A0 + offsetof(__pyx_CyFunctionObject, func_weakreflist), +#else + offsetof(PyCFunctionObject, m_weakreflist), +#endif + 0, + 0, + __pyx_CyFunction_methods, + __pyx_CyFunction_members, + __pyx_CyFunction_getsets, + 0, + 0, + __Pyx_CyFunction_descr_get, + 0, + offsetof(__pyx_CyFunctionObject, func_dict), + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, +#if PY_VERSION_HEX >= 0x030400a1 + 0, +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, +#endif +#if PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000 + 0, +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 + 0, +#endif +}; +static int __pyx_CyFunction_init(void) { + __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type); + if (unlikely(__pyx_CyFunctionType == NULL)) { + return -1; + } + return 0; +} +static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->defaults = PyObject_Malloc(size); + if (unlikely(!m->defaults)) + return PyErr_NoMemory(); + memset(m->defaults, 0, size); + m->defaults_pyobjects = pyobjects; + m->defaults_size = size; + return m->defaults; +} +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->defaults_tuple = tuple; + Py_INCREF(tuple); +} +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->defaults_kwdict = dict; + Py_INCREF(dict); +} +static 
CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->func_annotations = dict; + Py_INCREF(dict); +} + +/* CythonFunction */ +static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname, + PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { + PyObject *op = __Pyx_CyFunction_Init( + PyObject_GC_New(__pyx_CyFunctionObject, __pyx_CyFunctionType), + ml, flags, qualname, closure, module, globals, code + ); + if (likely(op)) { + PyObject_GC_Track(op); + } + return op; +} + +/* Py3ClassCreate */ +static PyObject *__Pyx_Py3MetaclassPrepare(PyObject *metaclass, PyObject *bases, PyObject *name, + PyObject *qualname, PyObject *mkw, PyObject *modname, PyObject *doc) { + PyObject *ns; + if (metaclass) { + PyObject *prep = __Pyx_PyObject_GetAttrStr(metaclass, __pyx_n_s_prepare); + if (prep) { + PyObject *pargs = PyTuple_Pack(2, name, bases); + if (unlikely(!pargs)) { + Py_DECREF(prep); + return NULL; + } + ns = PyObject_Call(prep, pargs, mkw); + Py_DECREF(prep); + Py_DECREF(pargs); + } else { + if (unlikely(!PyErr_ExceptionMatches(PyExc_AttributeError))) + return NULL; + PyErr_Clear(); + ns = PyDict_New(); + } + } else { + ns = PyDict_New(); + } + if (unlikely(!ns)) + return NULL; + if (unlikely(PyObject_SetItem(ns, __pyx_n_s_module, modname) < 0)) goto bad; + if (unlikely(PyObject_SetItem(ns, __pyx_n_s_qualname, qualname) < 0)) goto bad; + if (unlikely(doc && PyObject_SetItem(ns, __pyx_n_s_doc, doc) < 0)) goto bad; + return ns; +bad: + Py_DECREF(ns); + return NULL; +} +static PyObject *__Pyx_Py3ClassCreate(PyObject *metaclass, PyObject *name, PyObject *bases, + PyObject *dict, PyObject *mkw, + int calculate_metaclass, int allow_py2_metaclass) { + PyObject *result, *margs; + PyObject *owned_metaclass = NULL; + if (allow_py2_metaclass) { + owned_metaclass = PyObject_GetItem(dict, __pyx_n_s_metaclass); + if (owned_metaclass) { + metaclass = owned_metaclass; + } else if (likely(PyErr_ExceptionMatches(PyExc_KeyError))) { + PyErr_Clear(); + } else { + return NULL; + } + } + if (calculate_metaclass && (!metaclass || PyType_Check(metaclass))) { + metaclass = __Pyx_CalculateMetaclass((PyTypeObject*) metaclass, bases); + Py_XDECREF(owned_metaclass); + if (unlikely(!metaclass)) + return NULL; + owned_metaclass = metaclass; + } + margs = PyTuple_Pack(3, name, bases, dict); + if (unlikely(!margs)) { + result = NULL; + } else { + result = PyObject_Call(metaclass, margs, mkw); + Py_DECREF(margs); + } + Py_XDECREF(owned_metaclass); + return result; +} + +/* CLineInTraceback */ +#ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(CYTHON_UNUSED PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + if (unlikely(!__pyx_cython_runtime)) { + return c_line; + } + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + __PYX_PY_DICT_LOOKUP_IF_MODIFIED( + use_cline, *cython_runtime_dict, + __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + (void) PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, ((size_t)new_max) * sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ +#include "compile.h" +#include "frameobject.h" +#include "traceback.h" +#if PY_VERSION_HEX >= 0x030b00a6 + #ifndef Py_BUILD_CORE + #define Py_BUILD_CORE 1 + #endif + #include "internal/pycore_frame.h" +#endif +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = NULL; + PyObject *py_funcname = NULL; + #if PY_MAJOR_VERSION < 3 + PyObject *py_srcfile = NULL; + py_srcfile = PyString_FromString(filename); + if 
(!py_srcfile) goto bad; + #endif + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + if (!py_funcname) goto bad; + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + if (!py_funcname) goto bad; + funcname = PyUnicode_AsUTF8(py_funcname); + if (!funcname) goto bad; + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + if (!py_funcname) goto bad; + #endif + } + #if PY_MAJOR_VERSION < 3 + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + #else + py_code = PyCode_NewEmpty(filename, funcname, py_line); + #endif + Py_XDECREF(py_funcname); // XDECREF since it's only set on Py3 if cline + return py_code; +bad: + Py_XDECREF(py_funcname); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_srcfile); + #endif + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject *ptype, *pvalue, *ptraceback; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) { + /* If the code object creation fails, then we should clear the + fetched exception references and propagate the new exception */ + Py_XDECREF(ptype); + Py_XDECREF(pvalue); + Py_XDECREF(ptraceback); + goto bad; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +/* CIntFromPyVerify */ +#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* CIntToPy */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const int neg_one = (int) -1, const_zero = (int) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); + } +} + +/* CIntFromPy */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const int neg_one = (int) -1, const_zero = (int) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) 
(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) 
((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CIntFromPy */ +static CYTHON_INLINE size_t __Pyx_PyInt_As_size_t(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const size_t neg_one = (size_t) -1, const_zero = (size_t) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(size_t) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(size_t, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (size_t) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (size_t) 0; + case 1: __PYX_VERIFY_RETURN_INT(size_t, digit, digits[0]) + 
case 2: + if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) >= 2 * PyLong_SHIFT) { + return (size_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) >= 3 * PyLong_SHIFT) { + return (size_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) >= 4 * PyLong_SHIFT) { + return (size_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (size_t) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(size_t) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(size_t) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(size_t, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (size_t) 0; + case -1: __PYX_VERIFY_RETURN_INT(size_t, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(size_t, digit, +digits[0]) + case -2: + if (8 * sizeof(size_t) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) { + return (size_t) (((size_t)-1)*(((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(size_t) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) { + return (size_t) ((((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(size_t) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) { + return (size_t) (((size_t)-1)*(((((((size_t)digits[2]) << 
PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(size_t) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) { + return (size_t) ((((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(size_t) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) { + return (size_t) (((size_t)-1)*(((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(size_t) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(size_t, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(size_t) - 1 > 4 * PyLong_SHIFT) { + return (size_t) ((((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]))); + } + } + break; + } +#endif + if (sizeof(size_t) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(size_t, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(size_t) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(size_t, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + size_t val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (size_t) -1; + } + } else { + size_t val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (size_t) -1; + val = __Pyx_PyInt_As_size_t(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to size_t"); + return (size_t) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to size_t"); + return (size_t) -1; +} + +/* CIntToPy */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const long neg_one = (long) -1, const_zero = (long) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if 
(sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* CIntFromPy */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const long neg_one = (long) -1, const_zero = (long) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if 
(sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) 
+#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* FastTypeChecks */ +#if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
+    if (!res) {
+        res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
+    }
+    return res;
+}
+#endif
+static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+    Py_ssize_t i, n;
+    assert(PyExceptionClass_Check(exc_type));
+    n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+    for (i=0; i<n; i++) {
+        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+    }
+#endif
+    for (i=0; i<n; i++) {
+        PyObject *t = PyTuple_GET_ITEM(tuple, i);
+        #if PY_MAJOR_VERSION < 3
+        if (likely(exc_type == t)) return 1;
+        #endif
+        if (likely(PyExceptionClass_Check(t))) {
+            if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1;
+        }
+    }
+    return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
+    if (likely(err == exc_type)) return 1;
+    if (likely(PyExceptionClass_Check(err))) {
+        if (likely(PyExceptionClass_Check(exc_type))) {
+            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
+        } else if (likely(PyTuple_Check(exc_type))) {
+            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
+        }
+    }
+    return PyErr_GivenExceptionMatches(err, exc_type);
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+    assert(PyExceptionClass_Check(exc_type1));
+    assert(PyExceptionClass_Check(exc_type2));
+    if (likely(err == exc_type1 || err == exc_type2)) return 1;
+    if (likely(PyExceptionClass_Check(err))) {
+        return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
+    }
+    return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
+}
+#else
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+    return PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2);
+}
+#endif
+
+/* CheckBinaryVersion */
+static int __Pyx_check_binary_version(void) {
+    char ctversion[5];
+    int same=1, i, found_dot;
+    const char* rt_from_call = Py_GetVersion();
+    PyOS_snprintf(ctversion, 5, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
+    found_dot = 0;
+    for (i = 0; i < 4; i++) {
+        if (!ctversion[i]) {
+            same = (rt_from_call[i] < '0' || rt_from_call[i] > '9');
+            break;
+        }
+        if (rt_from_call[i] != ctversion[i]) {
+            same = 0;
+            break;
+        }
+    }
+    if (!same) {
+        char rtversion[5] = {'\0'};
+        char message[200];
+        for (i=0; i<4; ++i) {
+            if (rt_from_call[i] == '.') {
+                if (found_dot) break;
+                found_dot = 1;
+            } else if (rt_from_call[i] < '0' || rt_from_call[i] > '9') {
+                break;
+            }
+            rtversion[i] = rt_from_call[i];
+        }
+        PyOS_snprintf(message, sizeof(message),
+                      "compiletime version %s of module '%.100s' "
+                      "does not match runtime version %s",
+                      ctversion, __Pyx_MODULE_NAME, rtversion);
+        return PyErr_WarnEx(NULL, message, 1);
+    }
+    return 0;
+}
+
+/* InitStrings */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+    while (t->p) {
+        #if PY_MAJOR_VERSION < 3
+        if (t->is_unicode) {
+            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+        } else if (t->intern) {
+            *t->p = PyString_InternFromString(t->s);
+        } else {
+            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+        }
+        #else
+        if (t->is_unicode | t->is_str) {
+            if (t->intern) {
+                *t->p = PyUnicode_InternFromString(t->s);
+            } else if (t->encoding) {
+                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+            } else {
+                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+            }
+        } else {
+            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+        }
+        #endif
+        if (!*t->p)
+            return -1;
+        if (PyObject_Hash(*t->p) == -1)
+            return -1;
+        ++t;
+    }
+    return 0;
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
+    return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
+}
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
+    Py_ssize_t ignore;
+    return __Pyx_PyObject_AsStringAndSize(o, &ignore);
+}
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+#if !CYTHON_PEP393_ENABLED
+static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+    char* defenc_c;
+    PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
+    if (!defenc) return NULL;
+    defenc_c = PyBytes_AS_STRING(defenc);
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+    {
+        char* end = defenc_c + PyBytes_GET_SIZE(defenc);
+        char* c;
+        for (c = defenc_c; c < end; c++) {
+            if ((unsigned char) (*c) >= 128) {
+                PyUnicode_AsASCIIString(o);
+                return NULL;
+            }
+        }
+    }
+#endif
+    *length = PyBytes_GET_SIZE(defenc);
+    return defenc_c;
+}
+#else
+static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+    if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+    if (likely(PyUnicode_IS_ASCII(o))) {
+        *length = PyUnicode_GET_LENGTH(o);
+        return PyUnicode_AsUTF8(o);
+    } else {
+        PyUnicode_AsASCIIString(o);
+        return NULL;
+    }
+#else
+    return PyUnicode_AsUTF8AndSize(o, length);
+#endif
+}
+#endif
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+    if (
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+            __Pyx_sys_getdefaultencoding_not_ascii &&
+#endif
+ 
PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). " + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? 
digits[0] : 0; + if (size == -1) ival = -ival; + return ival; + } else { + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + return ival; +} +static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) { + if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) { + return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o); +#if PY_MAJOR_VERSION < 3 + } else if (likely(PyInt_CheckExact(o))) { + return PyInt_AS_LONG(o); +#endif + } else { + Py_ssize_t ival; + PyObject *x; + x = PyNumber_Index(o); + if (!x) return -1; + ival = PyInt_AsLong(x); + Py_DECREF(x); + return ival; + } +} +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? 
__Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +#endif /* Py_PYTHON_H */ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-310-x86_64-linux-gnu.so b/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..785ba2e9e5a0685d2fff8e8e580bd61ed041054d Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-310-x86_64-linux-gnu.so differ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-36-x86_64-linux-gnu.so b/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-36-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0d34fa9ba0e7e5048923b7d305acdb30af31d098 --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-36-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8484af53fea398ba08cd78502a0400b61aeae73315ffed5c2900d17abbc067d +size 1109320 diff --git a/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-37-x86_64-linux-gnu.so b/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-37-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a85ca22aaec0cb380858f2cb25e81a9a40ed7302 Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-37-x86_64-linux-gnu.so differ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-38-x86_64-linux-gnu.so b/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-38-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..bfebf846b1a7720208f3c45902cd99ec5090fe87 --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-38-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:629201bfbedb0b1cddadbe6ec2f982fab941ee1cb04697d2407e596b2fda3cc5 +size 1145912 diff --git a/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-39-x86_64-linux-gnu.so b/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1ea6d982101c6e2fc91f521c7360bdab120388f2 Binary files /dev/null and b/petrel-sdk/petrel-oss-cpp-sdk/pys3client.cpython-39-x86_64-linux-gnu.so differ diff --git a/petrel-sdk/petrel-oss-cpp-sdk/pys3client.pyx b/petrel-sdk/petrel-oss-cpp-sdk/pys3client.pyx new file mode 100644 index 0000000000000000000000000000000000000000..59791798f32a0c8783d9a9d7a5ec624ac128bc47 --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/pys3client.pyx @@ -0,0 +1,91 @@ +# distutils: language = c++ + +from libcpp.string cimport string +from libcpp.vector cimport vector +from s3client cimport S3Client, get_error_list +from s3client cimport init_api as _init_api +from s3client cimport shutdown_api as _shutdown_api + + +ERROR_LIST = get_error_list() +ERROR_MAP = {k:v.decode('utf-8') for k,v in ERROR_LIST} + +def init_api(log_level): + log_level = log_level.lower() + _init_api(log_level.encode('utf-8')) + +def shutdown_api(): + _shutdown_api() + +class S3Error(Exception): + def __init__(self, error_name, error_message): + self.error_name = error_name + self.error_message = error_message + +cdef class PyS3Client: + cdef S3Client *client + + def __cinit__(self, string ak, string sk, string endpoint, bint verify_ssl, bint enable_https, bint use_dual_stack, int threads_num): + self.client = new S3Client(ak, sk, endpoint, verify_ssl, enable_https, use_dual_stack, 
threads_num)
+
+    def __dealloc__(self):
+        del self.client
+
+    def get_object(self, string bucket, string key, string range):
+        cdef string error_message, result
+        cdef int error_type
+        ret = self.client.get_object(bucket, key, error_type, error_message, result, range)
+        if ret == 0:
+            return result
+        else:
+            error_name = ERROR_MAP.get(error_type, 'Undefined')
+            raise S3Error(error_name, error_message)
+
+    def multipart_download_concurrency(self, string bucket, string key, string filename):
+        cdef string error_message, result
+        cdef int error_type
+        ret = self.client.multipart_download_concurrency(bucket, key, filename, error_type, error_message)
+        if ret == 0:
+            return ret
+        else:
+            error_name = ERROR_MAP.get(error_type, 'Undefined')
+            raise S3Error(error_name, error_message)
+
+    def put_object(self, string bucket, string key, string data):
+        cdef string error_message, result
+        cdef int error_type
+        ret = self.client.put_object(bucket, key, data, error_type, error_message)
+        if ret == 0:
+            return data.size()
+        else:
+            error_name = ERROR_MAP.get(error_type, 'Undefined')
+            raise S3Error(error_name, error_message)
+
+    def multipart_upload_concurrency(self, string bucket, string key, string filename):
+        cdef string error_message, result
+        cdef int error_type
+        ret = self.client.multipart_upload_concurrency(bucket, key, filename, error_type, error_message)
+        if ret == 0:
+            return ret
+        else:
+            error_name = ERROR_MAP.get(error_type, 'Undefined')
+            raise S3Error(error_name, error_message)
+
+    def delete(self, string bucket, string key):
+        cdef string error_message, result
+        cdef int error_type
+        ret = self.client.delete_obj(bucket, key, error_type, error_message)
+        return ret
+
+    def contains(self, string bucket, string key):
+        cdef string error_message, result
+        cdef int error_type
+        ret = self.client.contains(bucket, key, error_type, error_message)
+        return ret
+
+    def list(self, string bucket, string key):
+        cdef string error_message, result
+        cdef int error_type
+        ret = self.client.list(bucket, key, error_type, error_message)
+        return ret
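+
+# For orientation, a hypothetical usage sketch of this extension (endpoint,
+# credentials and object names are made up; the call signatures follow the
+# definitions above, and init_api's level maps to the AWS SDK log levels):
+#
+#     import pys3client
+#
+#     pys3client.init_api('warn')  # one of: off, fatal, error, warn, info, debug, trace
+#     client = pys3client.PyS3Client(
+#         b'ACCESS_KEY', b'SECRET_KEY',  # ak, sk
+#         b'127.0.0.1:7480',             # endpoint
+#         False,                         # verify_ssl
+#         False,                         # enable_https
+#         False,                         # use_dual_stack
+#         8,                             # threads_num, used by the multipart helpers
+#     )
+#     try:
+#         data = client.get_object(b'bucket1', b'key1', b'')  # empty range reads the whole object
+#         client.put_object(b'bucket1', b'key1.copy', data)
+#     except pys3client.S3Error as e:
+#         print(e.error_name, e.error_message)
+#     finally:
+#         pys3client.shutdown_api()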
diff --git a/petrel-sdk/petrel-oss-cpp-sdk/s3client.cpp b/petrel-sdk/petrel-oss-cpp-sdk/s3client.cpp new file mode 100644 index 0000000000000000000000000000000000000000..6ca93ed063ab0501a3915d85d1a552b8432340d3 --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/s3client.cpp @@ -0,0 +1,323 @@
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include "s3client.h"
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+
+#include 
+#include 
+
+// multipart-related includes
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+
+#define ERROR_ITEM(error_name) \
+    { \
+        static_cast<int>(Aws::S3::S3Errors::error_name), #error_name \
+    }
+
+static std::list<std::pair<int, std::string>> ERROR_LIST = {
+    ERROR_ITEM(INCOMPLETE_SIGNATURE),
+    ERROR_ITEM(INTERNAL_FAILURE),
+    ERROR_ITEM(INVALID_ACTION),
+    ERROR_ITEM(INVALID_CLIENT_TOKEN_ID),
+    ERROR_ITEM(INVALID_PARAMETER_COMBINATION),
+    ERROR_ITEM(INVALID_QUERY_PARAMETER),
+    ERROR_ITEM(INVALID_PARAMETER_VALUE),
+    ERROR_ITEM(MISSING_ACTION),
+    ERROR_ITEM(MISSING_AUTHENTICATION_TOKEN),
+    ERROR_ITEM(MISSING_PARAMETER),
+    ERROR_ITEM(OPT_IN_REQUIRED),
+    ERROR_ITEM(REQUEST_EXPIRED),
+    ERROR_ITEM(SERVICE_UNAVAILABLE),
+    ERROR_ITEM(THROTTLING),
+    ERROR_ITEM(VALIDATION),
+    ERROR_ITEM(ACCESS_DENIED),
+    ERROR_ITEM(RESOURCE_NOT_FOUND),
+    ERROR_ITEM(UNRECOGNIZED_CLIENT),
+    ERROR_ITEM(MALFORMED_QUERY_STRING),
+    ERROR_ITEM(SLOW_DOWN),
+    ERROR_ITEM(REQUEST_TIME_TOO_SKEWED),
+    ERROR_ITEM(INVALID_SIGNATURE),
+    ERROR_ITEM(SIGNATURE_DOES_NOT_MATCH),
+    ERROR_ITEM(INVALID_ACCESS_KEY_ID),
+    ERROR_ITEM(REQUEST_TIMEOUT),
+    ERROR_ITEM(NETWORK_CONNECTION),
+    ERROR_ITEM(UNKNOWN),
+    ERROR_ITEM(BUCKET_ALREADY_EXISTS),
+    ERROR_ITEM(BUCKET_ALREADY_OWNED_BY_YOU),
+    ERROR_ITEM(NO_SUCH_BUCKET),
+    ERROR_ITEM(NO_SUCH_KEY),
+    ERROR_ITEM(NO_SUCH_UPLOAD),
+    ERROR_ITEM(OBJECT_ALREADY_IN_ACTIVE_TIER),
+    ERROR_ITEM(OBJECT_NOT_IN_ACTIVE_TIER),
+};
+
+std::list<std::pair<int, std::string>> get_error_list()
+{
+    return ERROR_LIST;
+}
+static Aws::SDKOptions options;
+
+static std::unordered_map<std::string, Aws::Utils::Logging::LogLevel> log_level_map = {
+    {"off", Aws::Utils::Logging::LogLevel::Off},
+    {"fatal", Aws::Utils::Logging::LogLevel::Fatal},
+    {"error", Aws::Utils::Logging::LogLevel::Error},
+    {"warn", Aws::Utils::Logging::LogLevel::Warn},
+    {"info", Aws::Utils::Logging::LogLevel::Info},
+    {"debug", Aws::Utils::Logging::LogLevel::Debug},
+    {"trace", Aws::Utils::Logging::LogLevel::Trace},
+};
+
+void init_api(const std::string &level)
+{
+    auto itr = log_level_map.find(level);
+    if (itr != log_level_map.end())
+    {
+        options.loggingOptions.logLevel = itr->second;
+    }
+    Aws::InitAPI(options);
+}
+
+void shutdown_api()
+{
+    Aws::ShutdownAPI(options);
+}
+
+S3Client::S3Client(const std::string &ak, const std::string &sk, const std::string &endpoint, bool verify_ssl, bool enable_https, bool use_dual_stack, int threads_num)
+{
+    Aws::Client::ClientConfiguration config;
+    config.endpointOverride = endpoint.c_str();
+    config.verifySSL = verify_ssl;
+    if (enable_https)
+    {
+        config.scheme = Aws::Http::Scheme::HTTPS;
+    }
+    else
+    {
+        config.scheme = Aws::Http::Scheme::HTTP;
+    }
+
+    config.useDualStack = use_dual_stack;
+    Aws::Auth::AWSCredentials cred = Aws::Auth::AWSCredentials(ak.c_str(), sk.c_str());
+    // this->client = new Aws::S3::S3Client(cred, config, Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, false);
+    this->client = Aws::MakeShared<Aws::S3::S3Client>("S3Client", cred, config, Aws::Client::AWSAuthV4Signer::PayloadSigningPolicy::Never, false);
+
+    // used by the multipart transfer helpers
+    this->threads_num = threads_num;
+
+    return;
+}
+
+S3Client::~S3Client()
+{
+    // delete this->client;
+}
+
+int S3Client::get_object(const std::string &bucket, const std::string &key, int &error_type, std::string &error_message, std::string &result, std::string &range)
+{
+    Aws::S3::Model::GetObjectRequest object_request;
+    object_request.SetBucket(bucket.c_str());
+    object_request.SetKey(key.c_str());
+    if ("" != range)
+        object_request.SetRange(std::string("bytes="+range).c_str());
+
+    /* this is where the data is actually requested */
+    auto get_object_outcome = this->client->GetObject(object_request);
+
+    if (get_object_outcome.IsSuccess())
+    {
+        auto &&get_result = get_object_outcome.GetResultWithOwnership();
+        auto content_length = get_result.GetContentLength();
+        auto &retrieved_object = get_result.GetBody();
+
+        result.resize(content_length);
+        long read_offset = 0;
+        while (true)
+        {
+            retrieved_object.read(&result[read_offset], content_length - read_offset);
+            auto read_count = retrieved_object.gcount();
+            read_offset += read_count;
+            if (read_offset >= content_length || read_count == 0)
+            {
+                break;
+            }
+        }
+        if (read_offset != content_length)
+        {
+            // todo: report short reads instead of returning a partially filled buffer
+        }
+
+        return 0;
+    }
+    else
+    {
+        auto error = get_object_outcome.GetError();
+        auto message = error.GetMessage();
+        if (!message.empty())
+        {
+            error_message = message.c_str();
+        }
+        error_type = static_cast<int>(error.GetErrorType());
+        return -1;
+    }
+    return 0;
+}
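+
+// Error-reporting convention: get_object above, and put_object plus the multipart
+// helpers below, return 0 on success; on failure they return -1 and fill
+// error_type (an Aws::S3::S3Errors value cast to int, cf. ERROR_LIST) and
+// error_message, from which the Cython layer raises S3Error. delete_obj and
+// contains instead report success directly through their return value.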
+
+int S3Client::put_object(const std::string &bucket, const std::string &key, const std::string &data, int &error_type, std::string &error_message)
+{
+    const std::shared_ptr<Aws::IOStream> input_data = Aws::MakeShared<Aws::StringStream>("");
+    *input_data << data;
+
+    Aws::S3::Model::PutObjectRequest request;
+    request.SetBucket(bucket.c_str());
+    request.SetKey(key.c_str());
+    request.SetBody(input_data);
+    auto outcome = this->client->PutObject(request);
+    if (outcome.IsSuccess())
+    {
+        return 0;
+    }
+    else
+    {
+        auto error = outcome.GetError();
+        auto message = error.GetMessage();
+        if (!message.empty())
+        {
+            error_message = message.c_str();
+        }
+        error_type = static_cast<int>(error.GetErrorType());
+        return -1;
+    }
+}
+
+
+int S3Client::multipart_download_concurrency(const std::string &bucket, const std::string &key, const std::string &filename, int &error_type, std::string &error_message)
+{
+    auto executor = Aws::MakeShared<Aws::Utils::Threading::PooledThreadExecutor>("executor", this->threads_num);
+    Aws::Transfer::TransferManagerConfiguration transfer_config(executor.get());
+    transfer_config.s3Client = this->client;
+    auto transfer_manager = Aws::Transfer::TransferManager::Create(transfer_config);
+
+    auto downloadHandle = transfer_manager->DownloadFile(bucket.c_str(),
+                                                         key.c_str(),
+                                                         [=](){
+        return Aws::New<Aws::FStream>("S3_DOWNLOAD", filename.c_str(), std::ios_base::out | std::ios_base::binary);
+    });
+
+    downloadHandle->WaitUntilFinished(); // Block calling thread until download is complete.
+    auto downStat = downloadHandle->GetStatus();
+    if (downStat != Aws::Transfer::TransferStatus::COMPLETED)
+    {
+        auto error = downloadHandle->GetLastError();
+        error_message = error.GetMessage().c_str();
+        error_type = static_cast<int>(error.GetErrorType());
+        return -1;
+    }
+
+    return 0;
+}
+
+int S3Client::multipart_upload_concurrency(const std::string bucket, const std::string key, const std::string filename, int &error_type, std::string &error_message)
+{
+    auto executor = Aws::MakeShared<Aws::Utils::Threading::PooledThreadExecutor>("executor", this->threads_num);
+    Aws::Transfer::TransferManagerConfiguration transfer_config(executor.get());
+    transfer_config.s3Client = this->client;
+    auto transfer_manager = Aws::Transfer::TransferManager::Create(transfer_config);
+
+    auto uploadHandle = transfer_manager->UploadFile(filename.c_str(), bucket.c_str(), key.c_str(), "text/plain", Aws::Map<Aws::String, Aws::String>());
+    uploadHandle->WaitUntilFinished();
+    bool success = uploadHandle->GetStatus() == Aws::Transfer::TransferStatus::COMPLETED;
+
+    if (!success)
+    {
+        auto error = uploadHandle->GetLastError();
+        error_message = error.GetMessage().c_str();
+        error_type = static_cast<int>(error.GetErrorType());
+        return -1;
+    }
+    else
+    {
+        return 0;
+    }
+}
+
+int S3Client::delete_obj(const std::string &bucket, const std::string &key, int error_type, std::string &error_message)
+{
+    Aws::S3::Model::DeleteObjectRequest request;
+    request.WithBucket(bucket.c_str()).WithKey(key.c_str());
+    auto outcome = this->client->DeleteObject(request);
+
+    return outcome.IsSuccess();
+}
+
+int S3Client::contains(const std::string &bucket, const std::string &key, int error_type, std::string &error_message)
+{
+    Aws::S3::Model::HeadObjectRequest request;
+    request.WithBucket(bucket.c_str()).WithKey(key.c_str());
+    auto outcome = this->client->HeadObject(request); // a single HEAD request is enough
+
+    return outcome.IsSuccess();
+}
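+
+// Note on list() below: for each object key returned under the prefix, it keeps
+// only the path segment that follows `key`. When the object sits more than one
+// level deep, the segment keeps its trailing '/', so callers can tell
+// "directories" from objects; a segment is pushed once per matching object.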
+std::vector<std::string> S3Client::list(const std::string &bucket, const std::string &key, int error_type, std::string &error_message)
+{
+    Aws::S3::Model::ListObjectsRequest request;
+    request.WithBucket(bucket.c_str()).WithPrefix(key.c_str());
+    std::vector<std::string> res;
+    auto outcome = this->client->ListObjects(request);
+
+    if (!outcome.IsSuccess()) {
+        auto error = outcome.GetError();
+        auto message = error.GetMessage();
+        if (!message.empty())
+        {
+            error_message = message.c_str();
+        }
+        error_type = static_cast<int>(error.GetErrorType());
+        return res;
+    }
+    else {
+        Aws::Vector<Aws::S3::Model::Object> objects =
+            outcome.GetResult().GetContents();
+
+        for (Aws::S3::Model::Object &object: objects) {
+            std::string full_path = object.GetKey().c_str();
+            int pos = full_path.find('/', key.size());
+            int len = pos - key.size();
+            if (-1 != pos) {
+                len += 1;
+            }
+            std::string first_path = full_path.substr(key.size(), len);
+            res.push_back(first_path);
+        }
+    }
+
+    return res;
+}
diff --git a/petrel-sdk/petrel-oss-cpp-sdk/s3client.h b/petrel-sdk/petrel-oss-cpp-sdk/s3client.h new file mode 100644 index 0000000000000000000000000000000000000000..130fcd38b806023edd0637eb817e776f41a22cab --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/s3client.h @@ -0,0 +1,38 @@
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+class S3Client
+{
+public:
+    S3Client(const std::string &ak, const std::string &sk, const std::string &endpoint, bool verify_ssl, bool enable_https, bool use_dual_stack, int threads_num);
+    ~S3Client();
+
+    int get_object(const std::string &bucket, const std::string &key, int &error_type, std::string &error_message, std::string &result, std::string &range);
+    int multipart_download_concurrency(const std::string &bucket, const std::string &key, const std::string &filename, int &error_type, std::string &error_message);
+    int put_object(const std::string &bucket, const std::string &key, const std::string &data, int &error_type, std::string &error_message);
+    int multipart_upload_concurrency(const std::string bucket, const std::string key, const std::string filename, int &error_type, std::string &error_message);
+    int delete_obj(const std::string &bucket, const std::string &key, int error_type, std::string &error_message);
+    int contains(const std::string &bucket, const std::string &key, int error_type, std::string &error_message);
+    std::vector<std::string> list(const std::string &bucket, const std::string &key, int error_type, std::string &error_message);
+
+private:
+    std::shared_ptr<Aws::S3::S3Client> client;
+    int threads_num;
+};
+
+void init_api(const std::string &level);
+
+void shutdown_api();
+
+std::list<std::pair<int, std::string>> get_error_list();
diff --git a/petrel-sdk/petrel-oss-cpp-sdk/s3client.pxd b/petrel-sdk/petrel-oss-cpp-sdk/s3client.pxd new file mode 100644 index 0000000000000000000000000000000000000000..2201191f45b59e6f7db1cc33fed4186706c99448 --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/s3client.pxd @@ -0,0 +1,33 @@
+from libcpp.string cimport string
+from libcpp.list cimport list
+from libcpp.pair cimport pair
+from libcpp.vector cimport vector
+
+
+cdef extern from "s3client.h":
+    cdef cppclass S3Client:
+        S3Client(
+            const string &ak,
+            const string &sk,
+            const string &endpoint,
+            bint verify_ssl,
+            bint enable_https,
+            bint use_dual_stack,
+            int threads_num,
+        ) except +
+
+        int get_object(const string &bucket, const string &key, int &error_type, string &error_message, string &result, string &range) nogil
+        int multipart_download_concurrency(const string &bucket, const string &key, const string &filename, int &error_type, string &error_message) nogil
+        int put_object(const string &bucket, const string &key, const string &data, int &error_type, string &error_message) nogil
+        int multipart_upload_concurrency(const string &bucket, const string 
&key, const string &filename, int &error_type, string &error_message) nogil + int delete_obj(const string &bucket, const string &key, int error_type, const string &error_message) nogil + int contains(const string &bucket, const string &key, int error_type, const string &error_message) nogil + vector[string] list(const string &bucket, const string &key, int error_type, const string &error_message) nogil + + + + cdef void init_api(const string &bucket) + + cdef void shutdown_api() + + cdef list[pair[int, string]] get_error_list() diff --git a/petrel-sdk/petrel-oss-cpp-sdk/setup.py b/petrel-sdk/petrel-oss-cpp-sdk/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..360b22481471a93d7788dc73fc353231a09800ef --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/setup.py @@ -0,0 +1,24 @@ +from setuptools import setup +from Cython.Build import cythonize +from distutils.extension import Extension +from Cython.Distutils import build_ext +from glob import glob +from os.path import basename + +include_dirs = glob('aws-sdk-cpp/aws-cpp-sdk-*/include') +libraries = [':{0}'.format(basename(i)) for i in glob('libs/libaws-*.so*')] + +setup( + cmdclass={'build_ext': build_ext}, + include_dirs=include_dirs, + ext_modules=[ + Extension( + 'pys3client', + ['pys3client.pyx', 's3client.cpp'], + language="c++", + libraries=libraries, + # extra_compile_args=["-O3"], + extra_link_args=["-Wl,-rpath,./libs", "-L./libs"] + ) + ] +) diff --git a/petrel-sdk/petrel-oss-cpp-sdk/setup.py.bak b/petrel-sdk/petrel-oss-cpp-sdk/setup.py.bak new file mode 100644 index 0000000000000000000000000000000000000000..674cf87b364e07254253c3a5f46f76731c6c454c --- /dev/null +++ b/petrel-sdk/petrel-oss-cpp-sdk/setup.py.bak @@ -0,0 +1,28 @@ +from setuptools import setup +from Cython.Build import cythonize +from distutils.extension import Extension +from Cython.Distutils import build_ext +from glob import glob +from os.path import basename + +include_dirs = glob('aws-sdk-cpp/aws-cpp-sdk-*/include') +# print(include_dirs) +# include_dirs = ["~/anaconda3/include"] +# include_dirs=["/home/PJLAB/huipeng/anaconda3/include"] +libraries = [':{0}'.format(basename(i)) for i in glob('libs/libaws-*.so*')] +# print(libraries) + +setup( + cmdclass={'build_ext': build_ext}, + include_dirs=include_dirs, + ext_modules=[ + Extension( + 'pys3client', + ['pys3client.pyx', 's3client.cpp'], + language="c++", + libraries=libraries, + # extra_compile_args=["-O3"], + extra_link_args=["-Wl,-rpath,./libs", "-L./libs"] + ) + ] +) diff --git a/petrel-sdk/petrel-oss-python-sdk/.gitignore b/petrel-sdk/petrel-oss-python-sdk/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..9a20ab336226593de31f22d97c5b818e192b2862 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/.gitignore @@ -0,0 +1,8 @@ +petrel_client/version.py +.vscode +**/__pycache__ +**/*.pyc +**/*.egg-info +venv +dist +build diff --git a/petrel-sdk/petrel-oss-python-sdk/.gitlab-ci.yml b/petrel-sdk/petrel-oss-python-sdk/.gitlab-ci.yml new file mode 100644 index 0000000000000000000000000000000000000000..c2cf1d1f6b95ae824a751b72c32fe3a730855fd6 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/.gitlab-ci.yml @@ -0,0 +1,24 @@ +image: registry.sensetime.com/library/python:3.6 + + +before_script: + - python -V + - python -m venv venv + - source ./venv/bin/activate + - python setup.py sdist + - pip install dist/* + +stages: + - flake8 + - unit_test + +flake8: + stage: flake8 + script: + - pip install flake8 + - flake8 --ignore E501 --exclude 
'.git,.tox,*.egg-info,venv,scripts,tests,examples' . # todo remove scripts,tests,examples
+
+test:
+  stage: unit_test
+  script:
+    - python tests/run_test.py
\ No newline at end of file
diff --git a/petrel-sdk/petrel-oss-python-sdk/INSTALL b/petrel-sdk/petrel-oss-python-sdk/INSTALL new file mode 100644 index 0000000000000000000000000000000000000000..0b6fbdebe34f04bf7988f1b8c8b8cc3c139bea58 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/INSTALL @@ -0,0 +1,2 @@
+python setup.py install
+
diff --git a/petrel-sdk/petrel-oss-python-sdk/README.md b/petrel-sdk/petrel-oss-python-sdk/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d0b15fc2f66f500e1ddd686400c943df9d5afd66 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/README.md @@ -0,0 +1,336 @@
+Petrel OSS SDK 2.0
+===
+
+Note: this version of the SDK requires a Python 3.6 environment.
+
+If an older version was installed before, first run
+
+```bash
+$ pip uninstall pycephs3client
+$ rm -rf ~/.local/lib/python3.6/site-packages/petrel_client
+```
+
+## Upgrading pip before installation is recommended
+
+```bash
+source /mnt/lustre/share/platform/env/ # source only if your environment requires it
+python3 -m pip install --upgrade pip # add `sudo` or the `--user` flag as your environment requires
+```
+
+## Installing on the training cluster
+
+```bash
+$ source /mnt/lustre/share/platform/env/
+$ python setup.py sdist
+$ pip install --user dist/*
+```
+
+## Installing via PYTHONPATH
+
+```bash
+$ source /mnt/lustre/share/platform/env/
+
+# install the SDK dependencies
+$ python setup.py egg_info
+$ pip install -r *.egg-info/requires.txt
+
+# build the SDK into the ./build directory
+$ python setup.py build
+
+# update the PYTHONPATH environment variable
+$ export PYTHONPATH=<sdk_path>/build/lib:$PYTHONPATH
+```
+
+## Installing in a venv
+
+```bash
+$ python3 -m venv your_venv_name # skip if the venv already exists
+$ source your_venv_name/bin/activate
+$ python setup.py sdist
+$ pip install dist/*
+```
+
+## Installing into the system environment
+
+```bash
+$ python3 setup.py sdist
+$ python3 -m pip install dist/* # add `sudo` or the `--user` flag as your environment requires
+```
+
+## Usage
+
+The SDK provides `get` and `put` interfaces, used as follows
+
+```python
+data = client.get(url) # by default, the config file decides whether MC is used
+data = client.get(url, no_cache=True) # this get reads directly from ceph
+data = client.get(url, update_cache=True) # this get reads directly from ceph and caches the data in MC
+```
+
+```python
+client.put(url, data) # by default, put does not update MC
+client.put(url, data, update_cache=True) # this put stores the data in ceph and then updates MC
+```
+
+``Note:`` if `MC` is not enabled in the config file, the `no_cache` and `update_cache` arguments are ignored.
+
+Below is a simple example that uses the SDK to read an image, process it, and save the result
+
+```python
+import cv2
+import numpy as np
+from os.path import splitext
+from petrel_client.client import Client
+
+conf_path = '~/petreloss.conf'
+client = Client(conf_path) # if conf_path is not given, the config is read from '~/petreloss.conf'
+img_url = 's3://bucket1/image.jpeg'
+img_gray_url = 's3://bucket1/image_gray.jpeg'
+img_ext = splitext(img_gray_url)[-1]
+
+# read the image
+img_bytes = client.get(img_url)
+assert(img_bytes is not None)
+img_mem_view = memoryview(img_bytes)
+img_array = np.frombuffer(img_mem_view, np.uint8)
+img = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
+
+# process the image
+img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
+
+# save the image
+success, img_gray_array = cv2.imencode(img_ext, img_gray)
+assert(success)
+img_gray_bytes = img_gray_array.tostring()
+client.put(img_gray_url, img_gray_bytes)
+```
+
+For the configuration file, see [petreloss.conf](./conf/petreloss.conf)
+
+``Please note: in the config file, the key in `key = value` must not be preceded by whitespace, otherwise the line is treated as part of the previous option's value``
+
+For usage examples, see [multi_cluster_test.py](./tests/multi_cluster_test.py)
+
+## Saving and loading `Tensor` and `Json` data
+Example: [tensor_json_test.py](./tests/tensor_json_test.py)
+
+## Saving and loading `Pillow Image` data
+Example: [pillow_image_test.py](./tests/pillow_image_test.py)
+
+## Multipart upload for data too large to upload in one request
+Example: [multipart_test.py](./tests/multipart_test.py)
+
+## Creating a bucket
+```python
+client.create_bucket('s3://mybucket')
+```
+
+## Reading objects under a prefix sequentially
+```python
+cluster = 'cluster1'
+files = client.get_file_iterator('cluster1:s3://lili1.test2/test3')
+for p, k in files:
+    key = '{0}:s3://{1}'.format(cluster, p)
+    data = client.get(key)
+```
+## Accessing data with the anonymous account
+If `access_key` and `secret_key` are not set in the configuration file, data is accessed as the `anonymous` account.
+
+
+## McKeySizeExceed errors
+
+By default, the maximum `key` length supported by `MC` is 250 bytes. If a path is too long, a `McKeySizeExceed` error is raised.
+In that case a `key` transformation rule must be defined to avoid the error.
+
+``Note:`` a Chinese character occupies multiple bytes.
+
+For example:
+
+```python
+def trim_key(key):
+    if isinstance(key, str):
+        key = key.encode('utf-8')
+    else:
+        assert isinstance(key, bytes)
+
+    return key[-249:]
+
+client = Client('~/petreloss.conf', mc_key_cb=trim_key)
+```
+
+Built-in functions such as `md5` and `sha256` can also be used, for example:
+
+```python
+client = Client('~/petreloss.conf', mc_key_cb='sha256')
+```
+
+or specified in the configuration file:
+
+```conf
+[mc]
+mc_key_cb = sha512
+```
+
+``Please note``
+
+- The user must ensure that the transformation results are unique; the built-in transformation functions can also suffer hash collisions.
+- If the `key` is of type `str` and contains Chinese characters, be sure to encode it with `encode('utf-8')`.
+
+
+## Using the fake client
+
+Add the following to the corresponding client section:
+
+```conf
+fake = True
+```
+
+For the configuration file, see [fake_client.conf](./conf/fake_client.conf)
+
+For a usage example, see [fake_client_test.py](./tests/fake_client_test.py)
+
+## Force-updating MC
+
+Use the `get_and_update` interface, or pass `update_cache=True` to `get`, to read data directly from the storage system and update MC.
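+
+A minimal sketch of the two equivalent forms (assuming `client` and `url` are set up as in the earlier examples):
+
+```python
+# both calls read from the storage system, bypassing MC, then refresh the cached entry
+data = client.get_and_update(url)
+data = client.get(url, update_cache=True)
+```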
+## Creating a bucket +```python +client.create_bucket('s3://mybucket') +``` + +## Reading objects under a prefix sequentially +```python +cluster = 'cluster1' +files = client.get_file_iterator('cluster1:s3://lili1.test2/test3') +for p, k in files: +    key = '{0}:s3://{1}'.format(cluster, p) +    data = client.get(key) +``` +## Accessing data with the anonymous account +If `access_key` and `secret_key` are not set in the config file, data is accessed with the `anonymous` account. + 
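+For example, a cluster section that simply omits the two keys is accessed anonymously (an illustrative sketch; the section name is hypothetical and the endpoint is the placeholder used elsewhere in this document): + +```conf +[cluster_anon] +host_base = http://127.0.0.1:7480 +``` + 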
+## McKeySizeExceed errors + +By default, the maximum `key` length supported by `MC` is 250 bytes. If a path is too long, a `McKeySizeExceed` error occurs. +In that case the user must define a `key` transformation rule to avoid the error. + +``Note:`` a Chinese character corresponds to multiple bytes. + +For example: + +```python +def trim_key(key): +    if isinstance(key, str): +        key = key.encode('utf-8') +    else: +        assert isinstance(key, bytes) + +    return key[-249:] + +client = Client('~/petreloss.conf', mc_key_cb=trim_key) +``` + +Alternatively, built-in functions such as `md5` and `sha256` can be used, for example: + +```python +client = Client('~/petreloss.conf', mc_key_cb='sha256') +``` + +Or specify it in the config file: + +```conf +[mc] +mc_key_cb = sha512 +``` + +``Please note`` + +- Users must ensure that the transformation produces unique results; even the built-in hash functions can collide. +- If the `key` is a `str` containing Chinese characters, be sure to encode it with `encode('utf-8')`. + + +## Using the fake client + +Add the following to the corresponding client section: + +```conf +fake = True +``` + +For the configuration file, see [fake_client.conf](./conf/fake_client.conf) + +For usage examples, see [fake_client_test.py](./tests/fake_client_test.py) + +## Forcing an MC update + +Using the `get_and_update` interface, or passing `update_cache=True` to `get`, reads the data directly from the storage system and updates MC. + +## IO statistics + +The `log` output frequency of IO statistics can be changed in three ways: +- via the `count_disp` environment variable +- via `count_disp` in the config file (ineffective if the environment variable is set) +- by calling `client.set_count_disp(count_disp)` (this overrides the two methods above), although due to the runtime mechanics of `parrots` and `pytorch` it may not take effect in some scenarios. + +If `count_disp` is `0`, printing of IO statistics is disabled. + +To print IO statistics on the `console`, `console_log_level` must be set to `INFO` or a lower level, and `count_disp` must be greater than `0`. + + +## DataLoader + +The `DataLoader` provided by the `SDK` supports the following additional arguments: + +- `prefetch_factor`, default 2: the number of `batch`es each `worker` prefetches. +- `persistent_workers`, default `False`: if `True`, `worker` processes are not shut down when an `epoch` finishes and are reused in the next `epoch`. + +Example: + +```python +from petrel_client.utils.data import DataLoader +dataloader = DataLoader(dataset=xxx, ..., prefetch_factor=4, persistent_workers=True) +``` + +## SSL verification + +When the `https` protocol is used, `SSL` is not verified by default. To enable verification, add the following to the config file +```conf +verify_ssl = True +``` + +## Presigned URL: generating a signed link + +```python +presigned_url = client.generate_presigned_url(url, client_method ='get_object', expires_in=3600) +``` + +`client_method` is `get_object` (the default) or `put_object` + +`expires_in` is in seconds and defaults to 3600 + +## Presigned POST: generating a signed POST + +```python +presigned_post = client.generate_presigned_post(url, fields=None, conditions=None, expires_in=3600) +``` + +See [generate_presigned_post](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.generate_presigned_post) for the parameters and return value; the bucket and key parameters are extracted from the url. + +## Range reads +A range takes one of the following forms: +- '`first_byte_pos`-': read everything from `first_byte_pos` onward +- '`first_byte_pos`-`last_byte_pos`': read the data from `first_byte_pos` through `last_byte_pos` (inclusive) +- '-`suffix_length`': read the last `suffix_length` bytes + +See the definition of Range in HTTP for details: https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.35 ; note that ceph currently does not support reading multiple ranges at once. + +```python +# assume the data is 10000 bytes long + +# read the first 500-byte segment +data1 = client.get(url, range='0-499') + +# read the second 500-byte segment +data2 = client.get(url, range='500-999') + +# read the last 500 bytes +data_last = client.get(url, range='-500') +# or +data_last = client.get(url, range='9500-') + +``` +## Reading data as a stream +```python +stream = client.get(url, enable_stream=True) +``` +The returned `stream` is a `StreamingBody`; for usage see +https://botocore.amazonaws.com/v1/documentation/api/latest/reference/response.html + 
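+For instance, the body can be consumed in chunks so the whole object never sits in memory at once (an illustrative sketch, not from the original README; `iter_chunks` is botocore's `StreamingBody` API and the local filename is made up): + +```python +stream = client.get(url, enable_stream=True) +with open('local_copy', 'wb') as f: +    for chunk in stream.iter_chunks(chunk_size=1024 * 1024): +        f.write(chunk) +``` + 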
+## Checking whether an object exists +```python +exists = client.contains(url) +``` + +## Querying object size +```python +size = client.size(url) +``` +If the object does not exist, a `NoSuchKeyError` is raised + +## Deleting an object +```python +client.delete(url) +``` + +## Listing the objects and directories under a path +```python +contents = client.list(url) +for content in contents: +    if content.endswith('/'): +        print('directory:', content) +    else: +        print('object:', content) +``` + +## Checking whether a directory exists +```python +client.isdir(url) +``` + +Note: `Ceph` has no concept of directories; when this function returns `True` it means objects exist whose names have the `url` as a prefix, and in all other cases it returns `False`. + + +## Using the `Python` environment under `/mnt/cache` +Compared with the `/mnt/lustre` directory, running `Python` from the `/mnt/cache` directory gives a certain performance improvement. +Usage: +- `source` a `Python` environment under `/mnt/cache` +```bash +### e.g. pt1.3v1 +source /mnt/cache/share/platform/env/pt1.3v1 +### or s0.3.3 +source /mnt/cache/share/spring/s0.3.3 +``` + +- check that the `Python` path is correct +```bash +which python +### the result should be /mnt/cache/... +``` + +- set the `PYTHONUSERBASE` environment variable +```bash +export PYTHONUSERBASE=/mnt/cache//.local +``` + +- reinstall the required dependency libraries (only needed on first use) +``` +python -m pip install --user +``` diff --git a/petrel-sdk/petrel-oss-python-sdk/conf/ceph.conf b/petrel-sdk/petrel-oss-python-sdk/conf/ceph.conf new file mode 100644 index 0000000000000000000000000000000000000000..ddda66516e872430fc28869b432cd703c3c9acd3 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/conf/ceph.conf @@ -0,0 +1,24 @@ +[global] +fsid = 85e75c85-ab98-426d-81cf-9daa534887f9 +mon_initial_members = SZ-OFFICE3-172-30-1-75 +mon_host = 172.30.1.75 +auth_cluster_required = cephx +auth_service_required = cephx +auth_client_required = cephx + + +public_network=172.30.1.0/20 +cluster_network=172.30.1.0/20 + +mon_allow_pool_delete = True + +debug_mon = 20/20 +debug_client = 20/20 + +rgw_enable_gc_threads = False +rgw_enable_lc_threads = False +rgw_enable_quota_threads = False +rgw_run_sync_thread = False +rgw enable ops log = False +rgw enable usage log = False +admin socket = "" diff --git a/petrel-sdk/petrel-oss-python-sdk/conf/fake_client.conf b/petrel-sdk/petrel-oss-python-sdk/conf/fake_client.conf new file mode 100644 index 0000000000000000000000000000000000000000..601fec554853434294f4b47dceffdeb897d4ac19 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/conf/fake_client.conf @@ -0,0 +1,17 @@ +[DEFAULT] +default_cluster = cluster1 + +[dfs] +fake = True +enable_mc = True + +[mc] +fake = True + +[cluster1] +fake = True +enable_mc = True + +[cluster2] +fake = True +enable_mc = False \ No newline at end of file diff --git a/petrel-sdk/petrel-oss-python-sdk/conf/keyring b/petrel-sdk/petrel-oss-python-sdk/conf/keyring new file mode 100644 index 0000000000000000000000000000000000000000..442ea1ccc3f3b4435420c75498d6aae111a0ee47 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/conf/keyring @@ -0,0 +1,2 @@ +[client.rgw.train] + key = AQBiiqVeaPzSFRAAT1Vc+z8wPI5BkCroB6W/jQ== diff --git a/petrel-sdk/petrel-oss-python-sdk/conf/petreloss.conf b/petrel-sdk/petrel-oss-python-sdk/conf/petreloss.conf new file mode 100644 index 0000000000000000000000000000000000000000..e0f120d37aa2f98f0a1cc272dcbaf5c84c2ba7f3 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/conf/petreloss.conf @@ -0,0 +1,88 @@ +# Comments start with '#' or ';', occupy a line of their own, and must not share a line with configuration content +# The key in `key = value` must not be preceded by whitespace; otherwise the line is treated as part of the previous option's value + +[DEFAULT] + +# enable Memcached, default False +# enable_mc = True + +# Memcached-related settings; normally there is no need to set them +# mc_server_list_path = /mnt/lustre/share/memcached_client/server_list.conf +# mc_client_config_path = /mnt/lustre/share/memcached_client/client.conf + +# console log level, default WARNING; options are CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET +# to print IO statistics on the console, set the level to INFO +# console_log_level = WARNING + +# file log level, default DEBUG; options are CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET +# file_log_level = DEBUG + +# log file path, default none, i.e. no log file is written +# if a log file path is configured, make sure the directory specified by log_file_path has been created before training runs +# log_file_path = /mnt/lustre//petrel_log_dir + +# maximum log file size, default 1GB +# file_log_max_bytes = 1073741824 + +# number of log file backups, default 1 +# file_log_backup_count = 1 + +# after every count_disp get operations, IO statistics are logged; default 5000 +# increase this value if IO statistics are logged too frequently +# set it to 0 to disable IO statistics +# count_disp = 5000 + +# memory tracing, disabled by default +# enable_mem_trace = False + +# number of retries allowed after a failed get operation, default 10 +# get_retry_max = 10 + +# default cluster: when Ceph is accessed without specifying a cluster, data is fetched from default_cluster +default_cluster = cluster1 + +[mc] +# If a path is too long (over 250 bytes), mc raises a McKeySizeExceed error. +# Setting mc_key_cb transforms the path passed to mc; the available options are: +# blake2b, blake2s, md5, pbkdf2_hmac, sha1, sha224, +# sha256, sha384, sha3_224, sha3_256, sha3_384, +# sha3_512, sha512, shake_128, shake_256 + +# mc_key_cb = sha256 + + +# whether to emit mc debug logs, default True +# note that whether they actually reach the console and the file is still decided by console_log_level and file_log_level respectively +# debug_mc = True + + +[dfs] +enable_mc = True + +# at least one cluster must be configured, otherwise ConfigSectionNotFoundError is raised +[cluster1] +# for each cluster's specific settings, values fall back to [DEFAULT] when not specified +# e.g. setting 'enable_mc = False' here overrides the default +enable_mc = True + +# enable S3 boto, default True +# boto = c++ enables the C++ implementation of S3 +boto = True + +# if access_key and secret_key are not set, data is accessed with the anonymous account +access_key = lili1 +secret_key = lili1 + +host_base = http://127.0.0.1:7480 + +# if boto = False, the following settings must be added +# conf = conf/ceph.conf +# keyring = conf/keyring +# name = client.rgw.train +# cluster = ceph + +[cluster2] + +access_key = lili1 +secret_key = lili1 +host_base = http://127.0.0.1:7480 diff --git a/petrel-sdk/petrel-oss-python-sdk/dev.sh b/petrel-sdk/petrel-oss-python-sdk/dev.sh new file mode 100644 index 0000000000000000000000000000000000000000..8072b7fe4058aa50f473de1e09c60ab62dd2658d --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/dev.sh @@ -0,0 +1,4 @@ +python3 -m venv venv +source venv/bin/activate +pip install -U autopep8 +pip install -e . \ No newline at end of file diff --git a/petrel-sdk/petrel-oss-python-sdk/examples/pytorch/MyDataset.py b/petrel-sdk/petrel-oss-python-sdk/examples/pytorch/MyDataset.py new file mode 100644 index 0000000000000000000000000000000000000000..18034f75a3fc28d309f3bc5fbe1337d3eb222289 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/examples/pytorch/MyDataset.py @@ -0,0 +1,50 @@ +from __future__ import print_function, division +import os +import torch +import pandas as pd +from skimage import io, transform +import matplotlib.pyplot as plt +from torch.utils.data import Dataset, DataLoader +from torchvision import transforms, utils + +import cv2 +import numpy as np +import ceph + +class MyDataset(Dataset): + def __init__(self, csv_file, root_dir): + """ + Args: + csv_file (string): Path to the csv file with annotations. 
+ root_dir (string): Bucket with all the images, such as s3://faces/ + """ + self.landmarks_frame = pd.read_csv(csv_file) + self.root_dir = root_dir + + def __len__(self): + return len(self.landmarks_frame) + + def __getitem__(self, idx): + img_name = os.path.join(self.root_dir, self.landmarks_frame.iloc[idx, 0]) + s3client = ceph.S3Client() + value = s3client.get(img_name) + if not value: + """ + The picture doesn't exist in ceph; add your error handling here + """ + return None + img_array = np.frombuffer(value, np.uint8) # decode the raw bytes without copying + # load image + #img = cvb.img_from_bytes(value) + + + string_data = img_array.tobytes() + #print(string_data) + #print(value) + #image = cv2.imdecode(img_array, cv2.CV_LOAD_IMAGE_COLOR) + + landmarks = self.landmarks_frame.iloc[idx, 1:].to_numpy() + landmarks = landmarks.astype('float').reshape(-1, 2) + sample = {'image': img_array, 'landmarks': landmarks} + + return sample, string_data diff --git a/petrel-sdk/petrel-oss-python-sdk/examples/pytorch/MyTest.py b/petrel-sdk/petrel-oss-python-sdk/examples/pytorch/MyTest.py new file mode 100644 index 0000000000000000000000000000000000000000..e6912783bb0fa30734840e60d31f1cbd0bb56aee --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/examples/pytorch/MyTest.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- + +from __future__ import print_function, division +import threading +import logging + +from MyDataset import MyDataset + +import ceph + +class testThread(threading.Thread): + def __init__(self, threadid): + threading.Thread.__init__(self) + self.threadid = threadid + + def run(self): + self.do_tasks() + + def do_tasks(self): + face_dataset = MyDataset(csv_file='faces/face_landmarks.csv',root_dir='s3://yijianliang.test/train-copy0/') + + for i in range(len(face_dataset)): + sample = face_dataset[i] + #print(i, sample['image'].shape, sample['landmarks'].shape) + logging.info('{0} {1} {2}'.format(i, sample['image'].shape, sample['landmarks'].shape)) + +''' +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO, + format='%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s', + datefmt='[%Y-%m_%d %H:%M:%S]', + filename='log/MyTest.log', + filemode='a') + threads = [] + for i in range(0,1): + threads.append(testThread(threadid=i)) + for thread in threads: + thread.start() + for thread in threads: + thread.join() +''' + +if __name__ == '__main__': + logging.basicConfig(level=logging.INFO, + format='%(asctime)s %(filename)s:%(lineno)d %(levelname)s %(message)s', + datefmt='[%Y-%m_%d %H:%M:%S]') + #filename='log/MyTest.log', + #filemode='a') + + face_dataset = MyDataset(csv_file='faces/face_landmarks.csv',root_dir='s3://yijianliang.ssd.qos/train') + for i in range(len(face_dataset)): + sample, string_data = face_dataset[i] + #print(i, sample['image'].shape, sample['landmarks'].shape) + object_name = str(i) + if sample and string_data: + #s3client = ceph.S3Client() + s3client = ceph.S3Client(access_key = "DWD2LKXJHJLGYKRDED7T", secret_key = "tzJ2a0g26deZZux3bLOd29YV9zJlaLM400Fu5tdn") + ret = s3client.save_from_string('s3://sensestudytest/save_from_string/', object_name, string_data) + if ret: + logging.info('Save {0}: {1} bytes'.format(object_name, ret)) + logging.info('{0} {1} {2}'.format(i, sample['image'].shape, sample['landmarks'].shape)) diff --git a/petrel-sdk/petrel-oss-python-sdk/examples/pytorch/faces/face_landmarks.csv b/petrel-sdk/petrel-oss-python-sdk/examples/pytorch/faces/face_landmarks.csv new file mode 100644 index 
0000000000000000000000000000000000000000..8c7b4bcc655622c48e1cf8b9e43438ff008767a9 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/examples/pytorch/faces/face_landmarks.csv @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:82bd06902826e8a79658a5946187a4f441b9d21f8edceec4dfa8723ffcfcbad0 +size 63189398 diff --git a/petrel-sdk/petrel-oss-python-sdk/examples/pytorch/log/MyTest.log b/petrel-sdk/petrel-oss-python-sdk/examples/pytorch/log/MyTest.log new file mode 100644 index 0000000000000000000000000000000000000000..13f0c29401dd7320b5914863d97438f2b2145929 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/examples/pytorch/log/MyTest.log @@ -0,0 +1,3 @@ +[2018-04_18 13:45:16] MyTest.py[line:22] INFO 0 (250, 250, 3) (3, 2) +[2018-04_18 13:45:16] MyTest.py[line:22] INFO 0 (250, 250, 3) (3, 2) +[2018-04_18 13:45:16] MyTest.py[line:22] INFO 0 (250, 250, 3) (3, 2) diff --git a/petrel-sdk/petrel-oss-python-sdk/linkcpp.py b/petrel-sdk/petrel-oss-python-sdk/linkcpp.py new file mode 100644 index 0000000000000000000000000000000000000000..19b08f3a9c945e6e5edbebe8703a137c7f882522 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/linkcpp.py @@ -0,0 +1,22 @@ +import os + +cpp_dir = '../petrel-oss-cpp-sdk/' +dst_dir = './petrel_client/ceph/s3cpp/' + +so_file = 'pys3client.cpython-3*-x86_64-linux-gnu.so' +cpp_so = cpp_dir + so_file + +os.system('cp -f '+cpp_so+' '+dst_dir) + +# src_lib = cpp_dir + lib_dir +# dst_lib = dst_dir + 'libs' +# os.system('ln -sf '+src_lib+' '+dst_dir) + +# find aws associated .so +src_libs = '~/anaconda3/lib/' +need_so = ['libaws-c-common.so','libaws-c-event-stream.so','libaws-checksums.so','libaws-cpp-sdk-core.so', 'libaws-cpp-sdk-s3.so', 'libaws-cpp-sdk-transfer.so'] + +aws_dst_so = dst_dir + 'libs/' +for aws_so in need_so: + src_so = src_libs + aws_so + os.system('cp -f '+src_so+' '+aws_dst_so) diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/__init__.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/cache/__init__.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/cache/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/cache/cache.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/cache/cache.py new file mode 100644 index 0000000000000000000000000000000000000000..558fc6547319e7a82009cdeb7beedc169d1c3592 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/cache/cache.py @@ -0,0 +1,53 @@ +import logging +import socket +import re +import sys +from petrel_client.client_base import ClientBase +from petrel_client.common.exception import InvalidMcUriError + +LOG = logging.getLogger(__name__) + +_MC_URI_PATTERN = re.compile(r'^mc://(.+)') + +import_map = {'memcached': 'petrel_client.cache.mc.mc.MC'} + + +class Cache(ClientBase): + @staticmethod + def parse_uri(uri): + m = _MC_URI_PATTERN.match(uri) + if m: + return m.group(1) + else: + raise InvalidMcUriError(uri) + + @staticmethod + def get_engine_cls(engine_type): + import_name = import_map.get(engine_type) + module_name, callable_name = import_name.rsplit('.', 1) + __import__(module_name) + module = sys.modules[module_name] + return getattr(module, callable_name) + + @staticmethod + def create(conf, *args, **kwargs): + fake = conf.get_boolean('fake') + if fake: + 
from petrel_client.fake_client import FakeClient + name = f'MC: {socket.gethostname()}' + return FakeClient(client_type='mc', conf=conf, name=name, **kwargs) + + engine_type = conf.get('cache_engine', 'memcached') + try: + engine_cls = Cache.get_engine_cls(engine_type) + instance = engine_cls(conf, *args, **kwargs) + if not hasattr(instance, 'log'): + setattr(instance, 'log', LOG) + return instance + except Exception as err: + LOG.warning('cannot init cache client') + LOG.exception(err) + return None + + def __init__(self, *args, **kwargs): + super(Cache, self).__init__(*args, **kwargs) diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/cache/mc/__init__.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/cache/mc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/cache/mc/mc.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/cache/mc/mc.py new file mode 100644 index 0000000000000000000000000000000000000000..47638cca8bc2552557bd10bd4c7a5efa5652f769 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/cache/mc/mc.py @@ -0,0 +1,116 @@ +import functools +import logging +from functools import partial +from collections import defaultdict + +from petrel_client.cache.cache import Cache +from petrel_client.common.io_profile import profile +from petrel_client.common import exception +from petrel_client.cache.mc.petrel_pymc import McClient +from petrel_client.common import hash + +LOG = logging.getLogger(__name__) + +_STATUS_SUCCESS = 'SUCCESS' +_STATUS_NOT_FOUND = 'NOT FOUND' + +_MAX_KEY_SIZE = 250 +_ITEM_SIZE_RESERVED = 128 + +_EXCEPTION_MAP = defaultdict(lambda: exception.McClientError, { + 'A TIMEOUT OCCURRED': exception.McTimeoutOccur, + 'CONNECTION FAILURE': exception.McConnFailed, + 'FAILURE': exception.McServerDisable, + 'CLIENT ERROR': exception.McServerDisable, + 'SERVER ERROR': exception.McServerDisable, + 'ERROR was returned by server': exception.McServerDisable, + 'SYSTEM ERROR': exception.McServerFailed, + 'A KEY LENGTH OF ZERO WAS PROVIDED': exception.McBadKeyProvided, + 'A BAD KEY WAS PROVIDED/CHARACTERS OUT OF RANGE': exception.McBadKeyProvided, + 'SERVER IS MARKED DEAD': exception.McServerDead, + 'ITEM TOO BIG': exception.McObjectSizeExceed, + 'SERVER HAS FAILED AND IS DISABLED UNTIL TIMED RETRY': exception.McServerFailed, +}) + + +def wrap_io(fn): + @functools.wraps(fn) + def new_fn(self, key, *args, **kwargs): + if self.mc_key_cb: + key = self.mc_key_cb(key) + self.check_key_size(key) + + value, status = fn(self, key, *args, **kwargs) + + if status == _STATUS_SUCCESS: + return value + elif status == _STATUS_NOT_FOUND: + raise exception.McObjectNotFoundError(key) + else: + server, _ = self._mc.get_server(key) + raise _EXCEPTION_MAP[status](key, status, server) + + return new_fn + + +class MC(Cache): + def __init__(self, conf, *args, **kwargs): + mc_server_list_path = conf['mc_server_list_path'] + mc_client_config_path = conf['mc_client_config_path'] + debug_mc = conf.get_boolean('debug_mc') + if debug_mc: + LOG.setLevel(logging.DEBUG) + else: + LOG.setLevel(logging.WARNING) + + self.log = LOG + LOG.debug('init MC, server list path: %s, client config path: %s', + mc_server_list_path, mc_client_config_path) + super(MC, self).__init__(*args, conf=conf, **kwargs) + + self._mc = McClient.GetInstance( + mc_server_list_path, mc_client_config_path) + self._max_item_size = self._mc.max_item_size() - _MAX_KEY_SIZE - \ + _ITEM_SIZE_RESERVED 
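+        # Illustrative budget, with assumed numbers: if the server reports a 1 MiB +        # max item size, the usable payload is 1048576 - 250 (max key) - 128 (reserved) bytes; +        # the real limit always comes from max_item_size() above.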
+ self._max_key_size = _MAX_KEY_SIZE + + mc_key_cb = kwargs.get('mc_key_cb', None) or conf.get('mc_key_cb') + if mc_key_cb == 'identity': + self.mc_key_cb = None + elif isinstance(mc_key_cb, str): + hash_fn = hash.get_hash_fn(mc_key_cb) + self.mc_key_cb = partial(hash.hexdigest, hash_fn=hash_fn) + LOG.debug('mc: using mc_key_cb %s', mc_key_cb) + elif not callable(mc_key_cb): + raise Exception("argument 'mc_key_cb' should be callable.") + else: + self.mc_key_cb = mc_key_cb + LOG.debug('mc: using user defined mc_key_cb') + + def check_key_size(self, key): + if isinstance(key, str): + key_len = len(key.encode('utf-8')) + elif isinstance(key, bytes): + key_len = len(key) + else: + raise Exception( + 'mc key type is not supported: {}, value: {}'.format(type(key), key)) + + if key_len > self._max_key_size: + raise exception.McKeySizeExceed( + 'size of key must <= {}'.format(self._max_key_size), key) + + @profile('get') + @wrap_io + def get(self, key, **kwargs): + return self._mc.get(key) + + @profile('put') + @wrap_io + def put(self, key, content, **kwargs): + size = len(content) + if size > self._max_item_size: + raise exception.McObjectSizeExceed( + key, 'size of object must <= {}, actual size: {}'.format(self._max_item_size, size)) + status = self._mc.set(key, content) + return size, status diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/cache/mc/petrel_pymc.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/cache/mc/petrel_pymc.so new file mode 100644 index 0000000000000000000000000000000000000000..18b04af64be592c2087d47db273efe0c098a8487 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/cache/mc/petrel_pymc.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a7064faa9f52205d9263333016c32f44bfbc5c43db5a5e32776d0f6a6659ec1 +size 1412352 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/__init__.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/ceph.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/ceph.py new file mode 100644 index 0000000000000000000000000000000000000000..6a6bec260d990fb301b53889625d249b7cf91d98 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/ceph.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- + + +import re +from petrel_client.common.exception import InvalidClusterNameError, InvalidS3UriError, NoDefaultClusterNameError +from petrel_client.client_base import ClientBase + + +# (?:...) +# A non-capturing version of regular parentheses. Matches whatever regular expression is inside the parentheses, but the substring matched by the group cannot be retrieved after performing a match or referenced later in the pattern. + +# *?, +?, ?? +# The '*', '+', and '?' qualifiers are all greedy; they match as much text as possible. Sometimes this behaviour isn't desired; if the RE <.*> is matched against '<a> b <c>', it will match the entire string, and not just '<a>'. Adding ? after the qualifier makes it perform the match in non-greedy or minimal fashion; as few characters as possible will be matched. Using the RE <.*?> will match only '<a>'. + +# re.I +# re.IGNORECASE +# Perform case-insensitive matching; expressions like [A-Z] will match lowercase letters, too. This is not affected by the current locale. To get this effect on non-ASCII Unicode characters such as ü and Ü, add the UNICODE flag. 
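+# Illustrative parses for the pattern defined below (the example URIs are made up): +#   'cluster1:s3://bucket1/a/b.jpg' -> cluster='cluster1', bucket='bucket1', key='a/b.jpg' +#   's3://bucket1/a/b.jpg'          -> cluster=None (default_cluster is used), bucket='bucket1', key='a/b.jpg'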
+# _S3_URI_PATTERN = re.compile(r'^(?:([^:]+):)?s3://([^/]+)/(.+?)/?$', re.I) + +_S3_URI_PATTERN = re.compile( + r'^(?:(?P<cluster>[^:]+):)?s3://(?P<bucket>[^/]+)/?(?P<key>(?:.+?)/?$)?', re.I) + + +class Ceph(ClientBase): + + @staticmethod + def parse_uri(uri, ceph_dict, default_cluster=None): + m = _S3_URI_PATTERN.match(uri) + if m: + cluster, bucket, key = m.group( + 'cluster'), m.group('bucket'), m.group('key') + cluster = cluster or default_cluster + if not cluster: + raise NoDefaultClusterNameError(uri) + + try: + client = ceph_dict[cluster] + enable_cache = client.enable_cache() + return cluster, bucket, key, enable_cache + except KeyError: + raise InvalidClusterNameError(cluster) + else: + raise InvalidS3UriError(uri) + + @staticmethod + def create(cluster, conf, *args, **kwargs): + fake = conf.get_boolean('fake') + enable_s3_cpp = conf.get('boto').lower() in ('cpp', 'c++') + enable_boto = (not enable_s3_cpp) and conf.get_boolean('boto') + anonymous_access = (conf.get('access_key', None) is None) and ( + conf.get('secret_key', None) is None) + + if fake: + from petrel_client.fake_client import FakeClient + name = f'S3: {cluster}' + return FakeClient(client_type='s3', conf=conf, name=name, **kwargs) + elif enable_s3_cpp: + from petrel_client.ceph.s3cpp.s3_cpp_client import S3CppClient + return S3CppClient(cluster, conf, anonymous_access, *args, **kwargs) + elif enable_boto: + from petrel_client.ceph.s3.s3_client import S3Client + return S3Client(cluster, conf, anonymous_access, *args, **kwargs) + else: + from petrel_client.ceph.librgw.rgw_client import RGWClient + return RGWClient(cluster, conf, *args, **kwargs) + + def __init__(self, cluster, conf, *args, **kwargs): + super(Ceph, self).__init__(*args, name=cluster, conf=conf, **kwargs) + self.__enable_cache = conf.get_boolean( + 'enable_mc', False) or conf.get_boolean('enable_cache', False) + + def enable_cache(self): + # the __ prefix makes __enable_cache a private variable + # https://docs.python.org/3/tutorial/classes.html#private-variables + # enable_cache is defined as a method so that whether to cache can later be computed dynamically + return self.__enable_cache diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/__init__.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rados.cpython-36m-x86_64-linux-gnu.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rados.cpython-36m-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4724b53841375da816a84a69ba1700e5e8377770 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rados.cpython-36m-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2982b57dc9c62e95900e13745c8722985f6837ffc8f1ade5a134d523e2949860 +size 1085000 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rados.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rados.so new file mode 100644 index 0000000000000000000000000000000000000000..5e0052836ebcdaf715acccf8ef1c53b77ac5d9f2 Binary files /dev/null and b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rados.so differ diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rgw.cpython-36m-x86_64-linux-gnu.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rgw.cpython-36m-x86_64-linux-gnu.so new file mode 100644 index 
0000000000000000000000000000000000000000..a61bd342f352c06e95a4b36e4fec302e0b8aff75 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rgw.cpython-36m-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:460acd1fe6301bb517a98224b69330e885b4f6ccb351963f3121ad82c9e9561c +size 1427450 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rgw.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rgw.so new file mode 100644 index 0000000000000000000000000000000000000000..c4e774b3306ae39e44898194c9e282b9f6ddeff2 Binary files /dev/null and b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rgw.so differ diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rgw_client.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rgw_client.py new file mode 100644 index 0000000000000000000000000000000000000000..c9f2df36f9ab1b62aac833efa3d629bf2b8f5fa2 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/librgw/rgw_client.py @@ -0,0 +1,74 @@ +import logging + +from petrel_client.ceph.ceph import Ceph +from petrel_client.common.io_profile import profile +from petrel_client.common.exception import ObjectNotFoundError +from petrel_client.ceph.librgw import rgw + + +LOG = logging.getLogger(__name__) + + +class RGWClient(Ceph): + + kb = 1024 + mb = 1024 * kb + + def __init__(self, cluster, conf, *args, **kwargs): + LOG.debug('init RGWClient(%s)', cluster) + super(RGWClient, self).__init__(cluster, conf, *args, **kwargs) + conn_args = { + 'conf': conf['conf'], + 'keyring': conf['keyring'], + 'name': conf['name'], + 'cluster': conf['cluster'], + } + uid = conf.get('uid', 'user_id') + self.bucket_fs = {} + self._init_librgw(uid, conf['access_key'], + conf['secret_key'], conn_args) + + def _init_librgw(self, uid=None, key=None, secret=None, connection_kwargs=None): + try: + self.client = rgw.LibRGWFS(uid, key, secret, **connection_kwargs) + self.root_fs = self.client.mount() + LOG.debug("The connection was built successfully.") + except Exception as e: + LOG.error("The input parameters are invalid. 
%s", e) + raise Exception("The input parameters are invalid.", e) + + @profile('get') + def get(self, cluster, bucket, key, file_size=4 * mb, **kwargs): + try: + # destination_base_uri = S3Uri(filename) + # bucket = destination_base_uri.bucket() + # key = destination_base_uri.object() + bucket_fs = self.bucket_fs.get(bucket) + if not bucket_fs: + bucket_fs = self.client.opendir(self.root_fs, bucket) + self.bucket_fs[bucket] = bucket_fs + file_fs = self.client.open(bucket_fs, key) + value = self.client.read(file_fs, 0, file_size) + self.client.close(file_fs) + self.client.close(bucket_fs) + # log.debug('filename is: {}'.format(key)) + # log.debug('value size is: {} kB'.format(len(value) / self.kb)) + return value + except rgw.ObjectNotFound as err: + raise ObjectNotFoundError(err) + + def put(self, cluster, bucket, key, body, **kwargs): + # destination_base_uri = S3Uri(filename) + # bucket = destination_base_uri.bucket() + # key = destination_base_uri.object() + bucket_fs = self.client.opendir(self.root_fs, bucket) + try: + file_fs = self.client.create(bucket_fs, key) + except rgw.ObjectExists: + file_fs = self.client.open(bucket_fs, key) + self.client.write(file_fs, 0, body) + self.client.close(file_fs) + self.client.close(bucket_fs) + # log.debug('filename is: {}'.format(key)) + # log.debug('value size is: {} kB'.format(len(body) / self.kb)) + return True diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3/__init__.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3/generator.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3/generator.py new file mode 100644 index 0000000000000000000000000000000000000000..77bd964423569a1a60437e0d127ae18cac40540d --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3/generator.py @@ -0,0 +1,99 @@ +import re + + +_S3_ACCESSPOINT_TO_BUCKET_KEY_REGEX = re.compile( + r'^(?P<bucket>arn:(aws).*:s3:[a-z\-0-9]+:[0-9]{12}:accesspoint[:/][^/]+)/?' + r'(?P<key>.*)$' +) + + +def find_bucket_key(s3_path): + """ + This is a helper function that, given an s3 path of the form bucket/key, + returns the bucket and the key represented by the s3 path. + """ + match = _S3_ACCESSPOINT_TO_BUCKET_KEY_REGEX.match(s3_path) + if match: + return match.group('bucket'), match.group('key') + s3_components = s3_path.split('/', 1) + bucket = s3_components[0] + s3_key = '' + if len(s3_components) > 1: + s3_key = s3_components[1] + return bucket, s3_key 
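+# Illustrative examples with made-up inputs: +#   find_bucket_key('mybucket/path/to/key') -> ('mybucket', 'path/to/key') +#   find_bucket_key('mybucket')             -> ('mybucket', '')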
+ +class BucketLister(object): + """List keys in a bucket.""" + + def __init__(self, client): + self._client = client + + def list_objects(self, bucket, prefix=None, page_size=1000, max_items=None): + kwargs = {'Bucket': bucket, + 'PaginationConfig': {'PageSize': page_size, + 'MaxItems': max_items}} + if prefix is not None: + kwargs['Prefix'] = prefix + +# paginator = self._client.get_paginator('list_objects_v2') + paginator = self._client.get_paginator('list_objects') + pages = paginator.paginate(**kwargs) + for page in pages: + contents = page.get('Contents', []) + for content in contents: + source_path = bucket + '/' + content['Key'] + yield source_path, content + + +class FileGenerator(object): + """ + This is a class that creates a generator to yield files based on information + returned from the ``FileFormat`` class. It is universal in the sense that + it will handle s3 files, local files, local directories, and s3 objects + under the same common prefix. The generator yields corresponding + ``FileInfo`` objects to send to a ``Comparator`` or ``S3Handler``. + """ + + def __init__(self, client, page_size=None): + self._client = client + self.page_size = page_size + self.request_parameters = {} + + def __call__(self, path): + + file_iterator = self.list_objects(path) + yield from file_iterator + + def list_objects(self, s3_path): + """ + This function yields the appropriate object or objects under a + common prefix depending if the operation is on objects under a + common prefix. It yields the file's source path, size, and last + update. + """ + if s3_path.startswith('s3://'): + s3_path = s3_path[5:] + bucket, prefix = find_bucket_key(s3_path) + lister = BucketLister(self._client) + for key in lister.list_objects(bucket=bucket, prefix=prefix, + page_size=self.page_size): + source_path, response_data = key + if response_data['Size'] == 0 and source_path.endswith('/'): + pass + else: + yield source_path, response_data + + +class FileIterator(object): + + def __init__(self, client, path, page_size=None): + self._client = client + self.path = path + self.page_size = page_size + self.request_parameters = {} + + def __iter__(self): + generator = FileGenerator(self._client, self.page_size) + return generator(self.path) diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3/s3_client.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3/s3_client.py new file mode 100644 index 0000000000000000000000000000000000000000..f0aa1ea1eb725fb65d61c382c940ce52a6fbb2b3 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3/s3_client.py @@ -0,0 +1,273 @@ +# -*- coding: utf-8 -*- + +from io import BytesIO +import os +import logging +import hashlib + +import boto3 +from botocore.exceptions import ClientError as BotoClientError +from botocore.client import Config +from botocore import UNSIGNED + +from petrel_client.ceph.ceph import Ceph +from petrel_client.common.io_profile import profile +from petrel_client.common.exception import NoSuchBucketError, NoSuchKeyError, S3ClientError, AccessDeniedError, RangeError, MultipartError +from .generator import FileIterator + +LOG = logging.getLogger(__name__) + + +class S3Client(Ceph): + + def __init__(self, cluster, conf, anonymous_access, *args, **kwargs): + if anonymous_access: + s3_args = { + 'config': Config(signature_version=UNSIGNED) + } + else: + s3_args = { + 'aws_access_key_id': conf['access_key'], + 'aws_secret_access_key': conf['secret_key'] + } + + s3_args['endpoint_url'] = conf['endpoint_url'] + s3_args['verify'] = conf.get_boolean('verify_ssl', False) + + + + super(S3Client, self).__init__(cluster, conf, *args, **kwargs) + self._cluster = cluster + self._conf = conf + self._session = boto3.session.Session() + self._s3_resource = self._session.resource( + 's3', + **s3_args + ) + self._multipart_config = boto3.s3.transfer.TransferConfig(conf.get_int('multipart_threshold'), conf.get_int('max_concurrency'), conf.get_int('multipart_chunksize'), conf.get_boolean('use_threads')) + + # Return value: (1) when multipart is not enabled: the fetched content; (2) when multipart is enabled: the name of the file written + @profile('get') + def get_with_info(self, cluster, bucket, key, **kwargs): + enable_etag = kwargs.get('enable_etag', False) + enable_stream = kwargs.get('enable_stream', False) + info = {} + get_kwargs = {} + assert self._cluster == cluster 
range = kwargs.get('range', None) + if range: + get_kwargs['Range'] = f'bytes={range}' + try: + if kwargs.get("multipart"): + if kwargs.get("filename") is None: + raise MultipartError("need filename") + + # MB = 1024**2 + # config = boto3.s3.transfer.TransferConfig(multipart_threshold=100 * MB, max_concurrency=8, multipart_chunksize=2 * MB, use_threads=True) + self._s3_resource.meta.client.download_file(bucket, key, kwargs["filename"], Config=self._multipart_config) + return kwargs["filename"], info + else: + obj = self._s3_resource.Object(bucket, key).get(**get_kwargs) + if enable_etag: + # the ETag can only be read once the object has actually been fetched + info['etag'] = obj['ETag'].strip('"') + content = obj['Body'] + if not enable_stream: + content = content.read() + return content, info + except BotoClientError as err: + if type(err).__name__ == 'NoSuchKey': + # The type of err here is botocore.errorfactory.NoSuchKey or NoSuchBucket, + # but that type is constructed at runtime via + # type(exception_name, (ClientError,), {}) // botocore.errorfactory.py:83 + # so currently it can only be caught through its base class ClientError + raise NoSuchKeyError(cluster, bucket, key) + elif type(err).__name__ == 'NoSuchBucket': + raise NoSuchBucketError(cluster, bucket) + elif err.response['ResponseMetadata']['HTTPStatusCode'] == 403: + raise AccessDeniedError(err) + elif err.response['ResponseMetadata']['HTTPStatusCode'] == 416: + raise RangeError(range) + else: + raise S3ClientError(err) + + def create_bucket(self, bucket): + return self._s3_resource.create_bucket(Bucket=bucket) + + def isdir(self, bucket, key): + itr = self.list(bucket, key) + try: + next(itr) + return True + except StopIteration: + return False + + def list(self, bucket, key, page_size=None): + if key is None: + key = '' + elif key and not key.endswith('/'): + key = key + '/' + + client = self._s3_resource.meta.client + paginator = client.get_paginator('list_objects') + paging_args = { + 'Bucket': bucket, 'Prefix': key, 'Delimiter': '/', + 'PaginationConfig': {'PageSize': page_size} + } + itr = paginator.paginate(**paging_args) + + for response_data in itr: + common_prefixes = response_data.get('CommonPrefixes', []) + contents = response_data.get('Contents', []) + + for common_prefix in common_prefixes: + prefix_components = common_prefix['Prefix'].split('/') + prefix = prefix_components[-2] + yield prefix + '/' + + for content in contents: + filename_components = content['Key'].split('/') + filename = filename_components[-1] + yield filename + + def get_file_iterator(self, bucket, key): + client = self._s3_resource.meta.client + path = 's3://{0}'.format(bucket) + if key: + path = path + '/' + key + file_iterator = FileIterator(client, path) + return file_iterator + + @profile('put') + def put_with_info(self, cluster, bucket, key, body, **kwargs): + if isinstance(body, (bytes, bytearray)): + if len(body) <= 1024*1024*1024*2: + result, info = self.put_bytes( + cluster, bucket, key, body, **kwargs) + else: + # bytes objects have no .name attribute, so stream anything above 2 GiB through multipart_upload + result, info = self.multipart_upload(cluster, bucket, key, BytesIO(body), **kwargs) + elif hasattr(body, 'read'): + result, info = self.multipart_upload_boto3( + cluster, bucket, key, body.name, **kwargs) + else: + raise TypeError( + f'{type(self)} does not support content type {type(body)}') + + if kwargs.get('enable_etag', False): + info['etag'] = result.e_tag.strip('"') + + return result, info + + + def multipart_upload_boto3(self, cluster, bucket, key, filename, **kwargs): + self._s3_resource.meta.client.upload_file(filename, bucket, os.path.basename(filename), Config=self._multipart_config) + return True, {} + + def put_bytes(self, cluster, bucket, key, body, **kwargs): + assert 
self._cluster == cluster + enable_md5 = kwargs.get('enable_md5', False) + info = {} + try: + obj = self._s3_resource.Object(bucket, key) + obj.put(Body=body) + if enable_md5: + info['md5'] = hashlib.md5(body).hexdigest() + return obj, info + except BotoClientError as err: + if err.response['ResponseMetadata']['HTTPStatusCode'] == 403: + raise AccessDeniedError(err) + else: + raise S3ClientError(err) + + def multipart_upload(self, cluster, bucket, key, stream, chunk_size=1024 * 1024 * 1024 * 2, **kwargs): + assert self._cluster == cluster + info = {} + obj = self._s3_resource.Object(bucket, key) + multipart = obj.initiate_multipart_upload() + part_id = 0 + parts = [] + total_size = 0 + + enable_md5 = kwargs.get('enable_md5', False) + if enable_md5: + md5 = hashlib.md5() + + while True: + chunk = stream.read(chunk_size) + actual_size = len(chunk) + if actual_size == 0: + break + part_id += 1 + total_size += actual_size + part = multipart.Part(part_id) + response = part.upload(Body=chunk) + parts.append({ + "PartNumber": part_id, + "ETag": response["ETag"] + }) + if enable_md5: + md5.update(chunk) + + part_info = { + 'Parts': parts + } + result = multipart.complete(MultipartUpload=part_info) + if enable_md5: + info['md5'] = md5.hexdigest() + return result, info + + def size(self, cluster, bucket, key): + assert self._cluster == cluster + try: + obj = self._s3_resource.Object(bucket, key) + obj.load() + return obj.content_length + except BotoClientError as err: + if err.response['ResponseMetadata']['HTTPStatusCode'] == 404: + raise NoSuchKeyError(cluster, bucket, key, err) from None + elif err.response['ResponseMetadata']['HTTPStatusCode'] == 403: + raise AccessDeniedError(err) from None + else: + raise S3ClientError(err) from None + + def contains(self, cluster, bucket, key): + assert self._cluster == cluster + try: + self._s3_resource.Object(bucket, key).load() + return True + except BotoClientError as err: + if err.response['ResponseMetadata']['HTTPStatusCode'] == 404: + return False + elif err.response['ResponseMetadata']['HTTPStatusCode'] == 403: + raise AccessDeniedError(err) + else: + raise S3ClientError(err) + + def delete(self, cluster, bucket, key, **kwargs): + assert self._cluster == cluster + try: + return self._s3_resource.Object(bucket, key).delete() + except BotoClientError as err: + if type(err).__name__ == 'NoSuchKey': + raise NoSuchKeyError(cluster, bucket, key) + elif type(err).__name__ == 'NoSuchBucket': + raise NoSuchBucketError(cluster, bucket) + elif err.response['ResponseMetadata']['HTTPStatusCode'] == 403: + raise AccessDeniedError(err) + else: + raise S3ClientError(err) + + def generate_presigned_url(self, cluster, bucket, key, client_method, expires_in): + assert self._cluster == cluster + # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.generate_presigned_url + return self._s3_resource.meta.client.generate_presigned_url( + client_method, + {'Bucket': bucket, 'Key': key}, + expires_in + ) + + def generate_presigned_post(self, cluster, bucket, key, fields=None, conditions=None, expires_in=3600): + assert self._cluster == cluster + # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.generate_presigned_post + return self._s3_resource.meta.client.generate_presigned_post( + bucket, key, fields, conditions, expires_in + ) diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/__init__.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/__init__.py new file 
mode 100644 index 0000000000000000000000000000000000000000..1f53a4ba1af8369fc771656bfe19af8963578b6d --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/__init__.py @@ -0,0 +1,17 @@ +from os import path +from ctypes import cdll + +root_path = path.dirname(__file__) +libs_path = path.join(root_path, 'libs') + +libs = [ + 'libaws-c-common.so', + 'libaws-checksums.so', + 'libaws-c-event-stream.so', + 'libaws-cpp-sdk-core.so', + 'libaws-cpp-sdk-s3.so' +] + +for lib in libs: + lib_path = path.join(libs_path, lib) + cdll.LoadLibrary(lib_path) diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-c-common.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-c-common.so new file mode 100644 index 0000000000000000000000000000000000000000..1a9e3ccf47c2584f8e6ee1add3c417c4427145b6 Binary files /dev/null and b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-c-common.so differ diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-c-event-stream.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-c-event-stream.so new file mode 100644 index 0000000000000000000000000000000000000000..396973c70dc292729f6e826750b416eb070a7e88 Binary files /dev/null and b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-c-event-stream.so differ diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-checksums.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-checksums.so new file mode 100644 index 0000000000000000000000000000000000000000..093a15984ab778d321c481c18dbfebb7b720dfea Binary files /dev/null and b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-checksums.so differ diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-core.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-core.so new file mode 100644 index 0000000000000000000000000000000000000000..7a5247f68c865548f366e169e954d84bf7ae7f14 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-core.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:03ec1ddbd00f3c98810215283235f540e299ab4ae4c0abd1047c0a5c6f1a2600 +size 1465968 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-s3.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-s3.so new file mode 100644 index 0000000000000000000000000000000000000000..02978c8e352e0075594c40f8112e34752af10286 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-s3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bde1e1ba070640c53b51d885ca4ffd12fcedb031ca3a6231507f499c385d39ef +size 4416312 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-transfer.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-transfer.so new file mode 100644 index 0000000000000000000000000000000000000000..877c8a09a6762c174b9ed99f84b35a3d48f69183 Binary files /dev/null and b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-transfer.so differ diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-310-x86_64-linux-gnu.so 
b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..785ba2e9e5a0685d2fff8e8e580bd61ed041054d Binary files /dev/null and b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-310-x86_64-linux-gnu.so differ diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-36-x86_64-linux-gnu.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-36-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..0d34fa9ba0e7e5048923b7d305acdb30af31d098 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-36-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8484af53fea398ba08cd78502a0400b61aeae73315ffed5c2900d17abbc067d +size 1109320 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-36m-x86_64-linux-gnu.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-36m-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..c2f6faa7dc72f0672f2c679f78dd5d353c627258 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-36m-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c2975b68d56f94898af6e5df7cc8949556db2a29ebf2ffe8663891911e1246fe +size 3408968 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-37-x86_64-linux-gnu.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-37-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..a85ca22aaec0cb380858f2cb25e81a9a40ed7302 Binary files /dev/null and b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-37-x86_64-linux-gnu.so differ diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-37m-x86_64-linux-gnu.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-37m-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..996ef66646e2f0c1dc5c267267fc65eb7ff62efb --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-37m-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11304c2c97b5847b7d91174a1a0868e0b76b8a493196acca02ed58ff7c60e405 +size 3409712 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-38-x86_64-linux-gnu.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-38-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..bfebf846b1a7720208f3c45902cd99ec5090fe87 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-38-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:629201bfbedb0b1cddadbe6ec2f982fab941ee1cb04697d2407e596b2fda3cc5 +size 1145912 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-39-x86_64-linux-gnu.so b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-39-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..1ea6d982101c6e2fc91f521c7360bdab120388f2 Binary files /dev/null and 
b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/pys3client.cpython-39-x86_64-linux-gnu.so differ diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/s3_cpp_client.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/s3_cpp_client.py new file mode 100644 index 0000000000000000000000000000000000000000..bf001038d0d44f082e6321714eee985a8ca53dd4 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/ceph/s3cpp/s3_cpp_client.py @@ -0,0 +1,176 @@ +import functools +from urllib.parse import urlparse +import logging +import hashlib +import io + +from petrel_client.common.io_profile import profile +from petrel_client.ceph.s3cpp.pys3client import PyS3Client, S3Error, init_api, shutdown_api +from petrel_client.ceph.ceph import Ceph +from petrel_client.common import exception + +LOG = logging.getLogger(__name__) + +EXCEPTION_MAP = { + 'ACCESS_DENIED': exception.AccessDeniedError, + 'NO_SUCH_BUCKET': exception.NoSuchBucketError, + 'NO_SUCH_KEY': exception.NoSuchKeyError, + 'RESOURCE_NOT_FOUND': exception.ResourceNotFoundError, + 'SIGNATURE_DOES_NOT_MATCH': exception.SignatureNotMatchError, + 'INVALID_ACCESS_KEY_ID': exception.InvalidAccessKeyError, + 'NETWORK_CONNECTION': exception.NetworkConnectionError, +} + +S3_CPP_ENV = None + + +class S3CppEnv(object): + def __init__(self, log_level): + LOG.debug('S3CppEnv init') + init_api(log_level) + + def __del__(self): + # LOG.debug('S3CppEnv del')  # logging during __del__ raises an exception + shutdown_api() + + +def get_s3_cpp_env(log_level): + global S3_CPP_ENV + if S3_CPP_ENV is None: + S3_CPP_ENV = S3CppEnv(log_level) + return S3_CPP_ENV + + +def wrap_error(fn): + @functools.wraps(fn) + def new_fn(self, cluster, bucket, key, *args, **kwargs): + try: + return fn(self, cluster, bucket, key, *args, **kwargs) + except S3Error as err: + err_type = EXCEPTION_MAP.get(err.error_name, None) + if err_type: + new_err = err_type(cluster, bucket, key) + elif err.error_message: + new_err = exception.S3ClientError( + err.error_name, err.error_message) + else: + new_err = exception.S3ClientError(err.error_name) + + new_err.__traceback__ = err.__traceback__ + raise new_err from None + + return new_fn + + +class S3CppClient(Ceph): + def __init__(self, cluster, conf, anonymous_access, *args, **kwargs): + # If initialization raises, __del__ is still invoked; assign these first so __del__ does not hit missing attributes + self._client = None + self._env = None + + endpoint_url = conf['endpoint_url'] + if '://' not in endpoint_url: + endpoint_url = 'http://' + endpoint_url + parse_result = urlparse(endpoint_url) + s3_args = { + # in the AWS CPP SDK, empty ak and sk mean anonymous access + 'ak': b'' if anonymous_access else conf['access_key'].encode('utf-8'), + 'sk': b'' if anonymous_access else conf['secret_key'].encode('utf-8'), + + 'endpoint': parse_result.netloc.encode('utf-8'), + 'enable_https': parse_result.scheme == 'https', + 'verify_ssl': conf.get_boolean('verify_ssl', False), + 'use_dual_stack': False, + 'threads_num': conf.get_int('cpp_multipart_threads', 8) + } + + super(S3CppClient, self).__init__(cluster, conf, *args, **kwargs) + self._cluster = cluster + self._conf = conf + + s3_cpp_log_level = conf.get('s3_cpp_log_level') + self._env = get_s3_cpp_env(s3_cpp_log_level) + self._client = PyS3Client(**s3_args) + + def __del__(self): + del self._client + del self._env + + @profile('get') + @wrap_error + def get_with_info(self, cluster, bucket, key, **kwargs): + info = {} + enable_md5 = kwargs.get('enable_md5', False) + + unsupported_ops = [k for k, v in kwargs.items() if k in 
('enable_stream', 'enable_etag') and v] + if unsupported_ops: + raise NotImplementedError(unsupported_ops) + + if isinstance(bucket, str): + bucket = bucket.encode('utf-8') + if isinstance(key, str): + key = key.encode('utf-8') + + if kwargs.get("multipart"): + return self._client.multipart_download_concurrency(bucket, key, kwargs["filename"].encode('utf-8')), info + + range_str = "" + if kwargs.get("range") is not None: + range_str = kwargs["range"] + data = self._client.get_object(bucket, key, range_str.encode('utf-8')) + + if enable_md5: + # the checksum can only be computed once the data has actually been fetched + info['md5'] = hashlib.md5(data).hexdigest() + + return data, info + + @profile('put') + @wrap_error + def put_with_info(self, cluster, bucket, key, body, **kwargs): + info = {} # todo + if isinstance(bucket, str): + bucket = bucket.encode('utf-8') + if isinstance(key, str): + key = key.encode('utf-8') + + if isinstance(body, (bytes, bytearray)): + if len(body) <= 1024*1024*1024*2: + result = self._client.put_object(bucket, key, body) + else: + raise NotImplementedError('put of a bytes object larger than 2 GiB is not supported') + elif isinstance(body, io.TextIOBase): + result = self._client.multipart_upload_concurrency(bucket, key, bytes(body.name, encoding="utf8")) + elif isinstance(body, io.BufferedIOBase): + result = self._client.multipart_upload_concurrency(bucket, key, bytes(body.name, encoding="utf8")) + else: + raise TypeError( + f'{type(self)} does not support content type {type(body)}') + + return result, info + + def list(self, bucket, key, page_size=None): + if isinstance(bucket, str): + bucket = bucket.encode('utf-8') + + if key is None: + key = '' + elif key and not key.endswith('/'): + key = key + '/' + + if isinstance(key, str): + key = key.encode('utf-8') + res = self._client.list(bucket, key) + for item in res: + yield item.decode('utf-8') + + def delete(self, cluster, bucket, key, **kwargs): + if isinstance(bucket, str): + bucket = bucket.encode('utf-8') + if isinstance(key, str): + key = key.encode('utf-8') + return self._client.delete(bucket, key) + + def contains(self, cluster, bucket, key, **kwargs): + if isinstance(bucket, str): + bucket = bucket.encode('utf-8') + if isinstance(key, str): + key = key.encode('utf-8') + return self._client.contains(bucket, key) == 1 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/client.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/client.py new file mode 100644 index 0000000000000000000000000000000000000000..9c23201933c3a23069cbd8c938ed2d0fb1cfa9ce --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/client.py @@ -0,0 +1,99 @@ +# -*- coding: utf-8 -*- + +import threading +import logging +import functools +import os + +from petrel_client.mixed_client import MixedClient + +LOG = logging.getLogger(__name__) +thread_local_client = threading.local() + +DEFAULT_CONF_PATH = '~/petreloss.conf' + + +class Client(object): + + def __init__(self, conf_path=None, *args, **kwargs): + self._conf_path = conf_path or DEFAULT_CONF_PATH + self.kwargs = kwargs + + # Calling Client() instantiates the underlying client right away, + # so that a failed instantiation raises here + # rather than surfacing only at put/get time. + # Also, if the user uses logging, multiprocessing-logging must be initialized in the process that creates the Client + self._get_local_client() + + def _get_local_client(self): + current_pid = os.getpid() + client, client_pid = getattr( + thread_local_client, + self._conf_path, + (None, None) + ) + if current_pid != client_pid: + client = MixedClient(self._conf_path, **self.kwargs) + setattr( + thread_local_client, + self._conf_path, + (client, current_pid) + ) + return client + + def get_with_info(self, uri, **kwargs): + return self._get_local_client().get_with_info(uri, **kwargs) 
diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/client.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/client.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c23201933c3a23069cbd8c938ed2d0fb1cfa9ce
--- /dev/null
+++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/client.py
@@ -0,0 +1,99 @@
+# -*- coding: utf-8 -*-
+
+import threading
+import logging
+import functools
+import os
+
+from petrel_client.mixed_client import MixedClient
+
+LOG = logging.getLogger(__name__)
+thread_local_client = threading.local()
+
+DEFAULT_CONF_PATH = '~/petreloss.conf'
+
+
+class Client(object):
+
+    def __init__(self, conf_path=None, *args, **kwargs):
+        self._conf_path = conf_path or DEFAULT_CONF_PATH
+        self.kwargs = kwargs
+
+        # Instantiate the underlying MixedClient as soon as Client() is
+        # constructed: if that fails we raise here rather than on the first
+        # put/get. In addition, when logging is used,
+        # multiprocessing-logging must be initialized in the process that
+        # creates the Client.
+        self._get_local_client()
+
+    def _get_local_client(self):
+        current_pid = os.getpid()
+        client, client_pid = getattr(
+            thread_local_client,
+            self._conf_path,
+            (None, None)
+        )
+        if current_pid != client_pid:
+            client = MixedClient(self._conf_path, **self.kwargs)
+            setattr(
+                thread_local_client,
+                self._conf_path,
+                (client, current_pid)
+            )
+        return client
+
+    def get_with_info(self, uri, **kwargs):
+        return self._get_local_client().get_with_info(uri, **kwargs)
+
+    def get(self, *args, **kwargs):
+        data, _ = self.get_with_info(*args, **kwargs)
+        return data
+
+    def list(self, *args, **kwargs):
+        client = self._get_local_client()
+        return client.list(*args, **kwargs)
+
+    def isdir(self, uri):
+        client = self._get_local_client()
+        return client.isdir(uri)
+
+    def get_file_iterator(self, uri):
+        try:
+            client = self._get_local_client()
+            file_iterator = client.get_file_iterator(uri)
+            return file_iterator
+        except Exception as e:
+            LOG.error('get_file_iterator error: {0}'.format(e))
+            raise
+
+    def put_with_info(self, uri, content, **kwargs):
+        return self._get_local_client().put_with_info(uri, content, **kwargs)
+
+    def put(self, *args, **kwargs):
+        result, _ = self.put_with_info(*args, **kwargs)
+        return result
+
+    def size(self, *args, **kwargs):
+        return self._get_local_client().size(*args, **kwargs)
+
+    def contains(self, *args, **kwargs):
+        return self._get_local_client().contains(*args, **kwargs)
+
+    def delete(self, *args, **kwargs):
+        return self._get_local_client().delete(*args, **kwargs)
+
+    def generate_presigned_url(self, *args, **kwargs):
+        return self._get_local_client().generate_presigned_url(*args, **kwargs)
+
+    def generate_presigned_post(self, *args, **kwargs):
+        return self._get_local_client().generate_presigned_post(*args, **kwargs)
+
+    def create_bucket(self, *args, **kwargs):
+        return self._get_local_client().create_bucket(*args, **kwargs)
+
+    Get = get
+
+    GetAndUpdate = get_and_update = functools.partialmethod(
+        get, update_cache=True)
+
+    def set_count_disp(self, count_disp):
+        self._get_local_client().set_count_disp(count_disp)
diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/client_base.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/client_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..10e66bbe7cfc4ce67765dd337ecf5e035166c3a9
--- /dev/null
+++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/client_base.py
@@ -0,0 +1,20 @@
+import logging
+
+from petrel_client.common.io_profile import ClientStat
+
+LOG = logging.getLogger(__name__)
+
+
+class ClientBase(object):
+
+    def __init__(self, *args, **kwargs):
+        cls_name = type(self).__name__
+        name = kwargs.get('name', None)
+        name = '{}({}id: {})'.format(
+            cls_name,
+            '{}, '.format(name) if name else '',
+            id(self))
+
+        self.client_stat = ClientStat(id(self), name)
+
+        LOG.debug('create %s', name)
diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/__init__.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/config.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..2f818c76aad6adda75ddad37eb273015bbc0b265
--- /dev/null
+++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/config.py
@@ -0,0 +1,156 @@
+# -*- coding: utf-8 -*-
+
+import configparser
+import logging
+
+from petrel_client.common import exception
+
+DEFAULT_SECTION_NAME = 'DEFAULT'
+
+
+# If the config file contains a [DEFAULT] section, its values override these
+CONFIG_DEFAULT = {
+    'endpoint_url': '%(host_base)s',
+    'enable_mc': 'False',  # must be of type str
+    'debug_mc': 'True',
+    'file_log_level': 'DEBUG',
+    'file_log_max_bytes': 1024 * 1024 * 1024,  # 1GB
+    'file_log_backup_count': 1,
+    'console_log_level': 'WARNING',
+    'boto': 'True',
+    'mc_server_list_path': '/mnt/lustre/share/memcached_client/server_list.conf',
+    'mc_client_config_path': 
'/mnt/lustre/share/memcached_client/client.conf', + 'count_disp': '5000', + 'enable_mem_trace': 'False', + 'fake': 'False', + 'mc_key_cb': 'identity', + 'get_retry_max': '10', + 's3_cpp_log_level': 'off', + # 'host_bucket': '%(host_base)s/%(bucket)s', + # 'user_https': 'False', + # 'ca_certs_file': '', + # 'check_ssl_certificate': 'True', + + 'multipart': 'False', + 'multipart_threshold': 100 * 1024 * 1024, + 'max_concurrency': 8, + 'multipart_chunksize': 2 * 1024 * 1024, # 2MB + 'use_threads': 'True', +} + +_UNSET = object() + + +def _value_to_str(d): + if isinstance(d, (int, bool)): + return str(d) + if isinstance(d, (dict,)): + return { + k: _value_to_str(v) for k, v in d.items() + } + return d + + +class GetterMixin(object): + + _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, + '0': False, 'no': False, 'false': False, 'off': False} + + def get(self, key, default=_UNSET): + try: + return self[key] + except exception.ConfigItemNotFoundError: + if default is _UNSET: + raise + else: + return default + + def has_option(self, key): + try: + self[key] + return True + except exception.ConfigItemNotFoundError: + return False + + def get_boolean(self, key, default=_UNSET): + v = str(self.get(key, default)).lower() + + if v not in self._boolean_states: + raise exception.ConfigKeyTypeError('Not a boolean: ' + key) + return self._boolean_states[v] + + def get_int(self, key, default=_UNSET): + try: + return int(self.get(key, default)) + except ValueError: + raise exception.ConfigKeyTypeError('Not a integer: ' + key) + + def get_log_level(self, key, default=_UNSET): + v = str(self.get(key, default)).upper() + if v not in logging._nameToLevel: + raise exception.ConfigKeyTypeError('Not a log level: ' + key) + return logging._nameToLevel[v] + + +class _my_dict(configparser._default_dict): + pass + + +class Config(GetterMixin): + def __init__(self, conf_path, *args, **kwargs): + parser = configparser.ConfigParser(CONFIG_DEFAULT) + r = parser.read(conf_path, encoding='utf-8') + if len(r) == 0: + raise exception.ConfigFileNotFoundError(conf_path) + if len(parser.sections()) == 0: + raise exception.ConfigSectionNotFoundError() + + defaults = parser._defaults + all_sections = parser._sections.items() + deleteList = [] + for section, options in all_sections: + if section.lower() != "default": + continue + for name, val in options.items(): + defaults[name] = val + deleteList.append(section) + for deleteSection in deleteList: + parser.remove_section(deleteSection) + + self._parser = parser + self._default = parser.items(DEFAULT_SECTION_NAME, raw=True) + + def __getitem__(self, key): + try: + return Section(self._parser[key]) + except KeyError as err: + raise exception.ConfigSectionNotFoundError(*err.args) + + def update(self, other: dict): + for k, v in other.items(): + self._parser[k].update(_value_to_str(v)) + + def default(self): + return Section(dict(self._default)) + + def items(self): + sections = self._parser.sections() + if len(sections) == 0: + raise exception.ConfigSectionNotFoundError() + return [(section, self[section]) for section in sections] + + +class Section(GetterMixin): + + def __init__(self, conf: dict): + # 注意 conf 中 value 取值类型均为 str + self._conf = conf + + def __getitem__(self, key): + try: + return self._conf[key] + except KeyError as err: + raise exception.ConfigKeyNotFoundError(*err.args) + + def update(self, other): + self._conf.update(_value_to_str(other)) diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/exception.py 
b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/exception.py new file mode 100644 index 0000000000000000000000000000000000000000..5984b2bc2e7358a5f4945cd2bc7b8ce3a003477e --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/exception.py @@ -0,0 +1,186 @@ +# -*- coding: utf-8 -*- + + +class Error(Exception): + '''Base class for exceptions in petrel_oss module.''' + + def __str__(self): + cls_name = type(self).__name__ + msg = super(Error, self).__str__() + return '{}({})'.format(cls_name, msg) + + +class RetriableError(Error): + pass + + +# Config Error + + +class ConfigError(Error): + pass + + +class InvalidConfigError(ConfigError): + pass + + +class ConfigFileNotFoundError(ConfigError): + pass + + +class ConfigItemNotFoundError(ConfigError): + pass + + +class ConfigKeyNotFoundError(ConfigItemNotFoundError): + pass + + +class ConfigSectionNotFoundError(ConfigItemNotFoundError): + pass + + +class ConfigKeyTypeError(ConfigError): + pass + + +class ConfigKeyValueError(ConfigError): + pass + + +# Client Error + + +class ClientError(Error): + pass + + +class ContentTypeError(ClientError): + pass + + +class S3ClientError(ClientError): + pass + + +class InvalidAccessKeyError(S3ClientError): + pass + + +class SignatureNotMatchError(S3ClientError): + pass + + +class NetworkConnectionError(S3ClientError): + pass + + +class ResourceNotFoundError(S3ClientError): + pass + + +class AccessDeniedError(ClientError): + pass + + +class RangeError(ClientError): + pass + + +class MultipartError(ClientError): + pass + + +class ObjectNotFoundError(ClientError): + pass + + +class S3ObjectNotFoundError(ObjectNotFoundError): + pass + + +class NoSuchBucketError(S3ObjectNotFoundError): + pass + + +class NoSuchKeyError(S3ObjectNotFoundError): + pass + + +# Cache Error + + +class CacheError(ClientError): + pass + + +class McClientError(CacheError): + pass + + +class McObjectNotFoundError(ObjectNotFoundError, McClientError): + pass + + +class McTimeoutOccur(McClientError, RetriableError): + pass + + +class McConnFailed(McClientError, RetriableError): + pass + + +class McServerFailed(McClientError, RetriableError): + pass + + +class McServerDisable(McClientError): + pass + + +class McServerDead(McClientError): + pass + + +class McBadKeyProvided(McClientError): + pass + + +class McKeySizeExceed(McClientError): + pass + + +class McObjectSizeExceed(McClientError): + pass + + +# URI Error + + +class InvalidUriError(Error): + pass + + +class InvalidS3UriError(InvalidUriError): + pass + + +class InvalidBucketUriError(InvalidS3UriError): + pass + + +class InvalidDfsUriError(InvalidUriError): + pass + + +class InvalidMcUriError(InvalidUriError): + pass + + +class InvalidClusterNameError(InvalidUriError): + pass + + +class NoDefaultClusterNameError(InvalidUriError): + pass diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/hash.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/hash.py new file mode 100644 index 0000000000000000000000000000000000000000..485870e450bf2ee981998de67b18579458a53de2 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/hash.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +import hashlib + +from petrel_client.common.exception import ConfigKeyValueError + +_SUPPORTED_TYPES = ('blake2b', 'blake2s', 'md5', 'pbkdf2_hmac', 'sha1', 'sha224', 'sha256', + 'sha384', 'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512', 'sha512', 'shake_128', 'shake_256') + + +def get_hash_fn(hash_type): + if hash_type in _SUPPORTED_TYPES: + return getattr(hashlib, 
hash_type) + else: + raise ConfigKeyValueError(f"'{hash_type}' is not a valid hash type.") + + +def to_bytes(key): + if isinstance(key, str): + key = key.encode('utf-8') + else: + assert isinstance(key, bytes) + return key + + +def hexdigest(key, hash_fn): + key = to_bytes(key) + m = hash_fn() + m.update(key) + return m.hexdigest() diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/io_profile.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/io_profile.py new file mode 100644 index 0000000000000000000000000000000000000000..5b75f5fddc781e9ee5345ebe0c6c9912afc583b5 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/io_profile.py @@ -0,0 +1,267 @@ +# -*- coding: utf-8 -*- + +import functools +import logging +import threading +import weakref +import environs +from time import time +from collections import defaultdict +import io + +from petrel_client.common import mem_trace +from petrel_client.common.exception import ObjectNotFoundError + +LOG = logging.getLogger(__name__) +ENV = environs.Env() + + +class StatItem(object): + __slots__ = ['op_name', 'total_io', 'total_hit', + 'total_time', 'total_error', 'total_miss', + 'error_count', 'total_byte' + ] + + def __init__(self, op_name): + self.op_name = op_name + self.reset() + + def reset(self): + self.total_io = 0 + self.total_hit = 0 + self.total_time = 0.0 + self.total_error = 0 + self.total_miss = 0 + self.total_byte = 0 + self.error_count = defaultdict(lambda: 0) + + @property + def time_avg(self): + return self.total_time / self.total_io if self.total_io else .0 + + @property + def hit_ratio(self): + return 1.0 * self.total_hit / self.total_io if self.total_io else .0 + + @property + def speed(self): + return 1.0 * self.total_byte / self.total_time if self.total_time else .0 + + def stat_io(self, callback=None): + stat_info = f'{self.op_name} [total: {self.total_io}' \ + f', hit: {self.total_hit}' \ + f', miss: {self.total_miss}' \ + f', error: {self.total_error}' \ + f', time: {self.total_time:.6} s' \ + f', time_avg: {self.time_avg:.6} s' \ + f', hit ratio: {self.hit_ratio:.2%}' \ + f', bytes: {_sizeof_fmt(self.total_byte)}' \ + f', speed: {_sizeof_fmt(self.speed,suffix="B/s")}' \ + f']' + + if self.error_count: + items = ["{}: {}".format(k, v) + for (k, v) in self.error_count.items()] + stat_info = f'{stat_info}, error_count: [{", ".join(items)}]' + + if callback: + callback(stat_info) + else: + LOG.info(stat_info) + self.reset() + + +class StatItemDict(dict): + def __missing__(self, key): + item = self[key] = StatItem(key) + return item + + def stat_io(self, callback=None): + for item in self.values(): + item.stat_io(callback) + + +class ClientStat(object): + def __init__(self, client_id, name): + self.client_id = client_id + self.name = name + self.stat_item_dict = StatItemDict() + profiler = Profiler.get() + self.profiler = profiler + profiler.register(self) + + def __getitem__(self, op_name): + return self.stat_item_dict[op_name] + + # 若使用 multiprocessing-logging,进程退出时候调用 __del__ 还存在问题 + # def __del__(self): + # # 这里有可能是再 python 将要退出的时候触发,此时file log已经不存在,会发生异常 + # try: + # self.profiler.unregister(self) + # if self.total_io: + # self.stat_io() + # except Exception: + # pass + + @property + def total_io(self): + return sum([item.total_io for item in self.stat_item_dict.values()]) + + @property + def get_hit(self): + return sum([item.total_hit for item in self.stat_item_dict.values() if item.op_name == 'get']) + + def stat_io(self, callback=None): + stat_item_info_list = [] + + def 
cb(info): + stat_item_info_list.append(info) + + for stat_item in self.stat_item_dict.values(): + stat_item.stat_io(cb) + + if stat_item_info_list: + stat_itme_info = ', '.join(stat_item_info_list) + else: + stat_itme_info = 'No IO operations' + + stat_info = '{}: {}'.format(self.name, stat_itme_info) + if callback: + callback(stat_info) + else: + LOG.info(stat_info) + + +def profile(op_name): + assert isinstance(op_name, str) + + def wrap(fn): + @functools.wraps(fn) + def new_fn(self, *args, **kwargs): + return _profile(op_name, fn, self, *args, **kwargs) + return new_fn + + return wrap + + +def _profile(op_name, fn, client, *args, **kwargs): + stat: StatItem = client.client_stat[op_name] + start = time() + try: + ret = fn(client, *args, **kwargs) + if isinstance(ret, (tuple, list)): + content = ret[0] + else: + content = ret + + if isinstance(content, bytes): + stat.total_byte += len(content) + elif isinstance(content, int): + stat.total_byte += content + elif hasattr(content, 'content_length'): + stat.total_byte += content.content_length + elif op_name == 'get' and content is None: + raise ObjectNotFoundError() + + stat.total_hit += 1 + return ret + + except ObjectNotFoundError: + stat.total_miss += 1 + raise + + except Exception as e: + stat.total_error += 1 + err_name = e.__class__.__name__ + stat.error_count[err_name] += 1 + raise + + finally: + end = time() + stat.total_time += (end - start) + stat.total_io += 1 + client.client_stat.profiler.inc_op_count() + + +class Profiler(object): + thread_local = threading.local() + default_conf = None + + @staticmethod + def set_default_conf(conf): + Profiler.default_conf = conf + + @staticmethod + def get(): + profiler = getattr(Profiler.thread_local, 'profiler', None) + if not profiler: + profiler = Profiler(Profiler.default_conf) + setattr(Profiler.thread_local, 'profiler', profiler) + return profiler + + def __init__(self, conf, *args, **kwargs): + assert conf is not None + self.stat_dict = weakref.WeakValueDictionary() + self.op_count = 0 + self.count_disp = ENV.int( + 'count_disp', None) or conf.get_int('count_disp') + + self.enable_mem_trace = conf.get_boolean('enable_mem_trace') + if self.enable_mem_trace: + mem_trace.start() + + def register(self, client_stat: ClientStat): + client_id = client_stat.client_id + self.stat_dict[client_id] = client_stat + + def unregister(self, client_stat: ClientStat): + client_id = client_stat.client_id + del self.stat_dict[client_id] + + def inc_op_count(self): + self.op_count += 1 + if self.count_disp: + if self.op_count >= self.count_disp: + self.stat_io() + self.op_count = 0 + + @staticmethod + def set_count_disp(count_disp): + if count_disp < 0: + LOG.error('count_disp must be a nonnegative integer, actual value: %s', + count_disp) + return + + profiler = Profiler.get() + profiler.count_disp = count_disp + + def stat_io(self): + if LOG.isEnabledFor(logging.INFO): + io_dict = { + client_stat.name: client_stat.get_hit for client_stat in self.stat_dict.values()} + total_io = sum(io_dict.values()) or 1 + percentage = [f'{client_name}: {1.0 * count / total_io :.2%}' for client_name, + count in io_dict.items()] + + for client_stat in self.stat_dict.values(): + client_stat.stat_io() + LOG.info('IO Percentage: %s', ', '.join(percentage)) + if self.enable_mem_trace: + snapshot = mem_trace.take_snapshot() + buffer = io.StringIO() + snapshot.display_top(buffer=buffer) + LOG.info('Memory trace: \n%s', buffer.getvalue()) + + def enable(self): + raise NotImplementedError() + + def disable(self): + raise 
NotImplementedError() + + +def _sizeof_fmt(num, suffix='B'): + for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: + if abs(num) < 1024.0: + return "%3.1f%s%s" % (num, unit, suffix) + num /= 1024.0 + return "%.1f%s%s" % (num, 'Yi', suffix) diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/io_retry.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/io_retry.py new file mode 100644 index 0000000000000000000000000000000000000000..5a1302fd0cd90bf18ecc22998eabc57475046eb1 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/io_retry.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +import functools +import logging +from humanize import ordinal + +LOG = logging.getLogger(__name__) + + +def retry(op_name, exceptions=Exception, raises=(), tries=1): + assert isinstance(op_name, str) + + def wrap(fn): + @functools.wraps(fn) + def new_fn(self, *args, **kwargs): + return _retry(op_name, exceptions, raises, tries, fn, self, *args, **kwargs) + return new_fn + + return wrap + + +def _retry(op_name, exceptions, raises, tries, fn, client, *args, **kwargs): + uri, retry_max = args[0], tries + for count in range(1, retry_max + 1): + try: + return fn(client, *args, **kwargs) + except raises: + raise + except exceptions as err: + if count < retry_max: + LOG.debug('Exception occurred in the %s retry of %s operation on (%s): %s', + ordinal(count), op_name, uri, err) + continue + if retry_max > 1: + LOG.error('%s operation (%s) has tried %s times and failed: %s', + op_name.capitalize(), uri, retry_max, err) + raise diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/log.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/log.py new file mode 100644 index 0000000000000000000000000000000000000000..d539835bf05f5bcd0767812123760a105cf5041a --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/log.py @@ -0,0 +1,88 @@ + +import threading +import logging +from logging.handlers import RotatingFileHandler +import coloredlogs +import os +import socket + +from petrel_client.version import version + +# Level Numeric value +# +# CRITICAL 50 +# ERROR 40 +# WARNING 30 +# INFO 20 +# DEBUG 10 +# NOTSET 0 + +# https://docs.python.org/2.7/library/logging.html#logrecord-attributes +BASE_FORMAT = '%(asctime)s %(levelname).3s [%(processName)-11s] [%(threadName)-10s] - %(message)s [P:%(process)d T:%(thread)d F:%(filename)s:%(lineno)d]' +base_formatter = logging.Formatter(BASE_FORMAT) + +log_config = {} +LOG = logging.getLogger('petrel_client') + +LOG.propagate = False +LOG.setLevel(logging.DEBUG) + +coloredlogs.install(level='DEBUG', logger=LOG, + milliseconds=True, fmt=BASE_FORMAT) +console_handler = LOG.handlers[0] + +lock = threading.RLock() +log_config = { + 'have_initiated': False +} + + +def get_log_file_name(): + slurm_procid = os.environ.get('SLURM_PROCID', None) + if slurm_procid is not None: + file_name = f'slurm_procid_{slurm_procid}.log' + else: + hostname = socket.gethostname() + pid = os.getpid() + file_name = f'{hostname}_pid_{pid}.log' + + return file_name + + +def init_log(conf): + with lock: + if log_config['have_initiated']: + LOG.debug('log initiated, skip') + return + else: + log_config['have_initiated'] = True + + log_file_path = conf.get('log_file_path', None) + if log_file_path: + if not os.path.exists(log_file_path): + # exist_ok = True : avoid FileExistsError when multiple + # processes are trying to create the same log_file_path + os.makedirs(log_file_path, exist_ok=True) + + file_log_level = 
conf.get_log_level('file_log_level') + file_log_max_bytes = conf.get_int('file_log_max_bytes') + file_log_backup_count = conf.get_int('file_log_backup_count') + + file_handler = RotatingFileHandler( + filename=os.path.join(log_file_path, get_log_file_name()), + maxBytes=file_log_max_bytes, + backupCount=file_log_backup_count) + file_handler.setLevel(file_log_level) + file_handler.setFormatter(base_formatter) + LOG.addHandler(file_handler) + + if conf.has_option('console_log_level'): + console_log_level = conf.get_log_level('console_log_level') + console_handler.setLevel(console_log_level) + + if log_file_path: + from multiprocessing_logging import install_mp_handler + # install_mp_handler should be invoked after log configuration + install_mp_handler(LOG) + + LOG.debug('init log, SDK version %s', version) diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/mem_trace.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/mem_trace.py new file mode 100644 index 0000000000000000000000000000000000000000..cb578c6c0923487f2f1df3435d6b7c91d532e08f --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/mem_trace.py @@ -0,0 +1,60 @@ +import linecache +import tracemalloc + +from tracemalloc import Snapshot + + +def format_size(size): + return tracemalloc._format_size(size, False) + + +def display_top(snapshot: Snapshot, limit=10, buffer=None, key_type='lineno'): + if buffer: + def write(msg): + buffer.write(msg) + buffer.write('\n') + else: + def write(msg): + print(msg) + + stats = snapshot.statistics(key_type) + + for index, stat in enumerate(stats[:limit], 1): + frame = stat.traceback[0] + line = linecache.getline(frame.filename, frame.lineno).strip() + msg = f'#{index}:\t{frame.filename}:{frame.lineno}: {stat.count} blocks, {format_size(stat.size)}\n\t{line}' + write(msg) + + other = stats[limit:] + if other: + other_size = sum(stat.size for stat in other) + other_blocks = sum(stat.count for stat in other) + write( + f'Other:\t{len(other)} items, {other_blocks} blocks, {format_size(other_size)}') + + total_size = sum(stat.size for stat in stats) + total_blocks = sum(stat.count for stat in stats) + write( + f'Total:\t{len(stats)} items, {total_blocks} blocks, {format_size(total_size)}') + + +def start(): + tracemalloc.start() + + +def stop(): + tracemalloc.stop() + + +def take_snapshot(): + return tracemalloc.take_snapshot() + + +def filter_traces(snapshot, pattern): + return snapshot.filter_traces(( + tracemalloc.Filter(True, pattern), + )) + + +Snapshot.display_top = display_top +Snapshot.filter_traces = filter_traces diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/uri_parser.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/uri_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..f3022355c4de6b1537dbb5b72e29534952ed47da --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/common/uri_parser.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- + +import re +from petrel_client.common.exception import InvalidS3UriError +# (?:...) +# A non-capturing version of regular parentheses. Matches whatever regular expression is inside the parentheses, but the substring matched by the group cannot be retrieved after performing a match or referenced later in the pattern. + +# *?, +?, ?? +# The '*', '+', and '?' qualifiers are all greedy; they match as much text as possible. Sometimes this behaviour isn’t desired; if the RE <.*> is matched against b , it will match the entire string, and not just . Adding ? 
after the qualifier makes it perform the match in non-greedy or minimal fashion; as few characters as possible will be matched. Using the RE <.*?> will match only . + +# re.I +# re.IGNORECASE +# Perform case-insensitive matching; expressions like [A-Z] will match lowercase letters, too. This is not affected by the current locale. To get this effect on non-ASCII Unicode characters such as ü and Ü, add the UNICODE flag. +PATTERN = re.compile(r'^(?:([^:]+):)?s3://([^/]+)/(.+?)/?$', re.I) + + +def parse_s3_uri(uri): + m = PATTERN.match(uri) + if m: + return (m.group(1), m.group(2), m.group(3)) + else: + raise InvalidS3UriError(uri) diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/dfs/__init__.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/dfs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/dfs/dfs.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/dfs/dfs.py new file mode 100644 index 0000000000000000000000000000000000000000..3ec2f794f34ff28157b532d3eb36a210b6bee6e6 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/dfs/dfs.py @@ -0,0 +1,54 @@ +import logging +import re +import socket + +from petrel_client.client_base import ClientBase +from petrel_client.common.io_profile import profile +from petrel_client.common import exception + +LOG = logging.getLogger(__name__) + + +class DFS(ClientBase): + + @staticmethod + def parse_uri(uri): + # todo check if it is a valid path + return re.sub('^file://', '/', uri) + + @staticmethod + def create(conf, *args, **kwargs): + fake = conf.get_boolean('fake') + if fake: + from petrel_client.fake_client import FakeClient + name = f'DFS: {socket.gethostname()}' + return FakeClient(client_type='dfs', conf=conf, name=name, **kwargs) + else: + return DFS(conf, *args, **kwargs) + + def __init__(self, conf, *args, **kwargs): + hostname = socket.gethostname() + + super(DFS, self).__init__(*args, name=hostname, conf=conf, **kwargs) + self._enable_cache = conf.get_boolean( + 'enable_mc', False) or conf.get_boolean('enable_cache', False) + + @profile('get') + def get(self, file_path, **kwargs): + try: + with open(file_path, 'rb') as f: + return f.read() + except FileNotFoundError as err: + raise exception.ObjectNotFoundError(err) + except Exception as err: + raise exception.ClientError(err) + + def put(self, file_path, content, **kwargs): + try: + with open(file_path, 'wb') as f: + return f.write(content) + except Exception as err: + raise exception.ClientError(err) + + def enable_cache(self): + return self._enable_cache diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/fake_client.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/fake_client.py new file mode 100644 index 0000000000000000000000000000000000000000..789686ca8725b6b46a1c7bd6def4475259759f65 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/fake_client.py @@ -0,0 +1,45 @@ +from petrel_client.client_base import ClientBase +from petrel_client.common.io_profile import profile + + +class FakeClient(ClientBase): + customized_get = None + customized_put = None + + def __init__(self, client_type, conf, **kwargs): + super(FakeClient, self).__init__(conf=conf, **kwargs) + self.conf = conf + self.type = client_type + self.__enable_cache = conf.get_boolean( + 'enable_mc', False) or conf.get_boolean('enable_cache', False) + + @profile('get') + def get(self, *args, **kwargs): + if self.customized_get: + return 
self.customized_get(*args, **kwargs) + else: + return b'data from FakeClient.' + + def get_with_info(self, *args, **kwargs): + info = {} + data = self.get(*args, **kwargs) + return data, info + + @profile('put') + def put(self, *args, **kwargs): + if self.customized_put: + return self.customized_put(*args, **kwargs) + else: + if self.type == 's3': + body = args[3] + else: + body = args[1] + return len(body) + + def put_with_info(self, *args, **kwargs): + info = {} + result = self.put(*args, **kwargs) + return result, info + + def enable_cache(self): + return self.__enable_cache diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/mixed_client.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/mixed_client.py new file mode 100644 index 0000000000000000000000000000000000000000..5b3259e0a7c86e3943b923e14da1c3429aae67d8 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/mixed_client.py @@ -0,0 +1,279 @@ +# -*- coding: utf-8 -*- + +import logging +from os.path import expanduser, abspath + +from petrel_client.ceph.ceph import Ceph +from petrel_client.cache.cache import Cache +from petrel_client.dfs.dfs import DFS +from petrel_client.common.config import Config +from petrel_client.common.log import init_log +from petrel_client.common import exception +from petrel_client.common.io_profile import Profiler +from petrel_client.common.io_retry import retry + +if issubclass(str, bytes): + def str_to_bytes(s): + return s +else: + import builtins + + def str_to_bytes(s): + return builtins.bytes(s, 'utf-8') + +LOG = logging.getLogger(__name__) + + +class MixedClient(object): + def __init__(self, conf_path, **kwargs): + conf_path = abspath(expanduser(conf_path)) + config = Config(conf_path) + self._default_config = config.default() + + init_log(self._default_config) + + LOG.debug('init MixedClient, conf_path %s', conf_path) + Profiler.set_default_conf(self._default_config) + + if any(conf.get_boolean('enable_mc') for _, conf in config.items()): + cache_conf = config.get('cache', None) or config.get( + 'mc', None) or self._default_config + self._cache = Cache.create(cache_conf, **kwargs) + else: + self._cache = None + + self._ceph_dict = { + cluster: Ceph.create(cluster, conf) + for cluster, conf in config.items() if cluster.lower() not in ('dfs', 'cache', 'mc') + } + + dfs_conf = config.get('dfs', self._default_config) + self._dfs = DFS.create(dfs_conf) + + self._default_cluster = self._default_config.get( + 'default_cluster', None) + self._count_disp = self._default_config.get_int('count_disp') + self._get_retry_max = self._default_config.get_int('get_retry_max') + + def ceph_parse_uri(self, uri, content): + cluster, bucket, key, enable_cache = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + + def io_fn(**kwargs): + client = self._ceph_dict[cluster] + if content is not None: + return client.put_with_info(cluster, bucket, key, content, **kwargs) + else: + return client.get_with_info(cluster, bucket, key, **kwargs) + + return enable_cache, io_fn + + def cache_parse_uri(self, uri, content): + key = Cache.parse_uri(uri) + + def io_fn(**kwargs): + if content is not None: # todo add info + return self._cache.put(key, content), None + else: + return self._cache.get(key), None + return False, io_fn + + def dfs_parse_uri(self, uri, content): + file_path = DFS.parse_uri(uri) + + def io_fn(**kwargs): # todo add info + if content is not None: + return self._dfs.put(file_path, content, **kwargs), None + else: + return self._dfs.get(file_path, **kwargs), None + + return 
self._dfs.enable_cache(), io_fn
+
+    def prepare_io_fn(self, uri, content=None):
+        try:
+            return self.ceph_parse_uri(uri, content)
+        except exception.InvalidS3UriError:
+            pass
+
+        try:
+            return self.cache_parse_uri(uri, content)
+        except exception.InvalidMcUriError:
+            pass
+
+        try:
+            return self.dfs_parse_uri(uri, content)
+        except exception.InvalidDfsUriError:
+            pass
+
+        raise exception.InvalidUriError(uri)
+
+    def _get_with_info(self, uri, **kwargs):  # returns (data, info)
+        no_cache = kwargs.get('no_cache', False)
+        update_cache = kwargs.get('update_cache', False)
+
+        if no_cache and update_cache:
+            raise ValueError(
+                'arguments "update_cache" and "no_cache" conflict with each other')
+
+        enable_cache, get_fn = self.prepare_io_fn(uri)
+        enable_cache = self._cache and enable_cache and (not no_cache)
+        cache_retry_times = 3
+        cache_value = None
+
+        # Range requests are cached under a range-specific key: caching a
+        # partial body under the plain uri would poison later full reads.
+        byte_range = kwargs.get('range', None)
+        if byte_range:
+            cache_uri = uri + '?range=' + byte_range
+        else:
+            cache_uri = uri
+
+        if enable_cache and (not update_cache):
+            for _ in range(cache_retry_times):
+                cache_should_retry = False
+                try:
+                    cache_value = self._cache.get(cache_uri, **kwargs)
+                except exception.ObjectNotFoundError:
+                    pass
+                except exception.CacheError as err:
+                    self._cache.log.debug(err)
+                    if isinstance(err, exception.RetriableError):
+                        cache_should_retry = True
+                except Exception as err:
+                    LOG.error(err)
+                finally:
+                    if not cache_should_retry:
+                        break
+
+        if cache_value is not None:
+            return cache_value, None
+
+        content, info = get_fn(**kwargs)
+
+        if enable_cache:
+            for _ in range(cache_retry_times):
+                cache_should_retry = False
+                try:
+                    self._cache.put(cache_uri, content)  # todo: should errors here be ignored?
+                except exception.CacheError as err:
+                    self._cache.log.debug(err)
+                    if isinstance(err, exception.RetriableError):
+                        cache_should_retry = True
+                except Exception as err:
+                    LOG.error(err)
+                finally:
+                    if not cache_should_retry:
+                        break
+
+        return content, info
+
+    # All exceptions are handled here
+    def get_with_info(self, uri, **kwargs):
+        @retry('get', exceptions=(Exception,), raises=(exception.ResourceNotFoundError, exception.RangeError, NotImplementedError), tries=self._get_retry_max)
+        def do_get_with_info(self, uri, **kwargs):
+            try:
+                return self._get_with_info(uri, **kwargs)
+            except exception.NoSuchBucketError as err:
+                LOG.warning(err)
+            except exception.ObjectNotFoundError as err:
+                LOG.debug(err)
+            except exception.AccessDeniedError as err:
+                LOG.warning((err, uri))
+
+            return None, None
+
+        return do_get_with_info(self, uri, **kwargs)
+
+    def create_bucket(self, uri, **kwargs):
+        cluster, bucket, key, _ = Ceph.parse_uri(
+            uri, self._ceph_dict, self._default_cluster)
+        if key is not None:
+            raise exception.InvalidBucketUriError(uri)
+        return self._ceph_dict[cluster].create_bucket(bucket)
+
+    def isdir(self, uri, **kwarg):
+        try:
+            cluster, bucket, key, enable_cache = Ceph.parse_uri(
+                uri, self._ceph_dict, self._default_cluster)
+            client = self._ceph_dict[cluster]
+            isdir_fn = getattr(client, 'isdir')
+            return isdir_fn(bucket, key)
+        except exception.InvalidS3UriError:
+            LOG.error(f'Invalid S3 URI: {uri}')
+            raise
+        except AttributeError:
+            LOG.warning('please set boto = True to use this feature')
+            raise
+
+    def list(self, uri, **kwarg):
+        try:
+            cluster, bucket, key, enable_cache = Ceph.parse_uri(
+                uri, self._ceph_dict, self._default_cluster)
+            client = self._ceph_dict[cluster]
+            list_fn = getattr(client, 'list')
+            return list_fn(bucket, key, **kwarg)
+        except exception.InvalidS3UriError:
+            LOG.error(f'Invalid S3 URI: {uri}')
+            raise
+        except 
AttributeError: + LOG.warning('please set boto = True to use this feature') + raise + + def get_file_iterator(self, uri): + try: + cluster, bucket, key, enable_cache = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + client = self._ceph_dict[cluster] + file_iterator = getattr(client, 'get_file_iterator') + return file_iterator(bucket, key) + except exception.InvalidS3UriError: + LOG.error('only support ceph') + raise + except AttributeError: + LOG.warning('please set boto = True to use this feature') + raise + + def put_with_info(self, uri, content, **kwargs): + if isinstance(content, str): + content = str_to_bytes(content) + + _enable_cache, put_fn = self.prepare_io_fn(uri, content) + + update_cache = self._cache and kwargs.get('update_cache', False) + + result, info = put_fn(**kwargs) + + if update_cache: + self._cache.put(uri, content) + + return result, info + + def size(self, uri): + cluster, bucket, key, _ = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + client = self._ceph_dict[cluster] + return client.size(cluster, bucket, key) + + def contains(self, uri): + cluster, bucket, key, _ = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + client = self._ceph_dict[cluster] + return client.contains(cluster, bucket, key) + + def delete(self, uri): + cluster, bucket, key, _ = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + client = self._ceph_dict[cluster] + return client.delete(cluster, bucket, key) + + def generate_presigned_url(self, uri, client_method='get_object', expires_in=3600): + cluster, bucket, key, _ = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + client = self._ceph_dict[cluster] + return client.generate_presigned_url(cluster, bucket, key, client_method, expires_in) + + def generate_presigned_post(self, uri, fields=None, conditions=None, expires_in=3600): + cluster, bucket, key, _ = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + client = self._ceph_dict[cluster] + return client.generate_presigned_post(cluster, bucket, key, fields, conditions, expires_in) + + def set_count_disp(self, count_disp): + Profiler.set_count_disp(count_disp) diff --git a/petrel-sdk/petrel-oss-python-sdk/petrel_client/version.py b/petrel-sdk/petrel-oss-python-sdk/petrel_client/version.py new file mode 100644 index 0000000000000000000000000000000000000000..3a9a1055701bd71fdba3cf3736d4a383377105ab --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/petrel_client/version.py @@ -0,0 +1 @@ +version = 'v2.2.1-2-g1505ef3-master' diff --git a/petrel-sdk/petrel-oss-python-sdk/scripts/README.md b/petrel-sdk/petrel-oss-python-sdk/scripts/README.md new file mode 100644 index 0000000000000000000000000000000000000000..0faa403d8bc663d6bc1cbbe8f8c351f099d2c2b1 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/scripts/README.md @@ -0,0 +1,2 @@ +get_serverlist.py: generate the above configure file; run on the master node of a cluster; + diff --git a/petrel-sdk/petrel-oss-python-sdk/scripts/get_serverlist.py b/petrel-sdk/petrel-oss-python-sdk/scripts/get_serverlist.py new file mode 100644 index 0000000000000000000000000000000000000000..056ab0b8271a868c7cf47ff1881c1ecdeca249ea --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/scripts/get_serverlist.py @@ -0,0 +1,27 @@ +import commands +import numpy as np + +masters = ['10.10.11.22', '10.10.12.145', '10.10.20.21', '10.1.72.101'] + +def parse(string): + global masters + result = [] + for line in string.split('\n'): + fields = line.split('-') + ip = '.'.join(fields[2:]) 
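+        # node names are assumed to look like '<prefix>-<idc>-10-5-30-31':
+        # joining fields[2:] with '.' recovers the node IP; master nodes
+        # are skipped since they do not serve memcached (assumption)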
+ if ip not in masters: + result.append(ip) + return result + + +if __name__ == '__main__': + output = commands.getoutput("scontrol show node | grep NodeName | awk \'{print$1}\' | awk -F = \'{print$2}\'") + print 'the set of machines in this cluster is:' + print output + serverlist = parse(output) + print 'transfer into IP address:' + print serverlist + with open('server_list.conf', 'w') as wf: + np.savetxt(wf, serverlist, delimiter='\n', fmt='%s') + wf.close() + print 'IP addresses have been written into server_list.conf' diff --git a/petrel-sdk/petrel-oss-python-sdk/scripts/server_list.conf b/petrel-sdk/petrel-oss-python-sdk/scripts/server_list.conf new file mode 100644 index 0000000000000000000000000000000000000000..5ecc7872cbefcdc6e1a8138d75061410a5750c38 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/scripts/server_list.conf @@ -0,0 +1,260 @@ +10.5.30.31 +10.5.30.32 +10.5.30.33 +10.5.30.34 +10.5.30.35 +10.5.30.36 +10.5.30.37 +10.5.30.38 +10.5.30.39 +10.5.30.40 +10.5.30.41 +10.5.30.42 +10.5.30.43 +10.5.30.44 +10.5.30.45 +10.5.30.46 +10.5.30.47 +10.5.30.48 +10.5.30.49 +10.5.30.50 +10.5.30.51 +10.5.30.52 +10.5.30.53 +10.5.30.54 +10.5.30.55 +10.5.30.56 +10.5.30.57 +10.5.30.58 +10.5.30.59 +10.5.30.60 +10.5.30.61 +10.5.30.62 +10.5.30.63 +10.5.30.64 +10.5.30.65 +10.5.30.66 +10.5.30.67 +10.5.30.68 +10.5.30.69 +10.5.30.70 +10.5.30.71 +10.5.30.72 +10.5.30.73 +10.5.30.74 +10.5.30.75 +10.5.30.76 +10.5.30.77 +10.5.30.78 +10.5.30.79 +10.5.30.80 +10.5.30.81 +10.5.30.82 +10.5.30.83 +10.5.30.84 +10.5.30.85 +10.5.30.86 +10.5.30.87 +10.5.30.88 +10.5.30.89 +10.5.30.90 +10.5.30.91 +10.5.30.92 +10.5.30.93 +10.5.30.94 +10.5.30.95 +10.5.30.96 +10.5.30.97 +10.5.30.98 +10.5.30.99 +10.5.30.100 +10.5.30.101 +10.5.30.102 +10.5.30.103 +10.5.30.104 +10.5.30.105 +10.5.30.106 +10.5.30.107 +10.5.30.108 +10.5.30.109 +10.5.30.110 +10.5.30.111 +10.5.30.112 +10.5.30.113 +10.5.30.114 +10.5.30.115 +10.5.30.116 +10.5.30.117 +10.5.30.118 +10.5.30.119 +10.5.30.120 +10.5.30.121 +10.5.30.122 +10.5.30.123 +10.5.30.124 +10.5.30.125 +10.5.30.126 +10.5.30.127 +10.5.30.128 +10.5.30.129 +10.5.30.130 +10.5.30.131 +10.5.30.132 +10.5.30.133 +10.5.30.134 +10.5.30.135 +10.5.30.136 +10.5.30.137 +10.5.30.138 +10.5.30.139 +10.5.30.140 +10.5.30.141 +10.5.30.142 +10.5.30.143 +10.5.30.144 +10.5.30.145 +10.5.30.146 +10.5.30.147 +10.5.30.148 +10.5.30.149 +10.5.30.150 +10.5.30.151 +10.5.30.152 +10.5.30.153 +10.5.30.154 +10.5.30.155 +10.5.30.156 +10.5.30.157 +10.5.30.158 +10.5.30.159 +10.5.30.160 +10.5.30.161 +10.5.30.162 +10.5.30.163 +10.5.30.164 +10.5.30.165 +10.5.30.166 +10.5.30.167 +10.5.30.168 +10.5.30.169 +10.5.30.170 +10.5.30.171 +10.5.30.172 +10.5.30.173 +10.5.30.174 +10.5.30.175 +10.5.30.176 +10.5.30.177 +10.5.30.178 +10.5.30.179 +10.5.30.180 +10.5.30.181 +10.5.30.182 +10.5.30.183 +10.5.30.184 +10.5.30.185 +10.5.30.186 +10.5.30.187 +10.5.30.188 +10.5.30.189 +10.5.30.190 +10.5.30.191 +10.5.30.192 +10.5.30.193 +10.5.30.194 +10.5.30.195 +10.5.30.196 +10.5.30.197 +10.5.30.198 +10.5.30.199 +10.5.30.200 +10.5.30.201 +10.5.30.202 +10.5.30.203 +10.5.30.204 +10.5.30.205 +10.5.30.206 +10.5.30.207 +10.5.30.208 +10.5.30.209 +10.5.30.210 +10.5.30.211 +10.5.30.212 +10.5.30.213 +10.5.30.214 +10.5.30.215 +10.5.30.216 +10.5.30.217 +10.5.30.218 +10.5.30.219 +10.5.30.220 +10.5.30.221 +10.5.30.222 +10.5.30.223 +10.5.30.224 +10.5.30.225 +10.5.30.226 +10.5.30.227 +10.5.30.228 +10.5.30.229 +10.5.31.1 +10.5.31.2 +10.5.31.3 +10.5.31.4 +10.5.31.5 +10.5.31.6 +10.5.31.7 +10.5.31.8 +10.5.31.9 +10.5.31.10 +10.5.31.11 +10.5.31.12 +10.5.31.13 +10.5.31.14 +10.5.31.15 
+10.5.31.16 +10.5.31.17 +10.5.31.18 +10.5.31.19 +10.5.31.20 +10.5.31.21 +10.5.31.22 +10.5.31.23 +10.5.31.24 +10.5.31.25 +10.5.31.26 +10.5.31.27 +10.5.31.28 +10.5.31.29 +10.5.31.30 +10.5.31.31 +10.5.31.32 +10.5.31.33 +10.5.31.34 +10.5.31.35 +10.5.31.36 +10.5.31.37 +10.5.31.38 +10.5.31.39 +10.5.31.40 +10.5.31.41 +10.5.31.42 +10.5.31.43 +10.5.31.44 +10.5.31.45 +10.5.31.46 +10.5.31.47 +10.5.31.48 +10.5.31.49 +10.5.31.50 +10.5.31.51 +10.5.31.52 +10.5.31.53 +10.5.31.54 +10.5.31.55 +10.5.31.56 +10.5.31.84 +10.5.31.85 +10.5.31.86 +10.5.31.87 +10.5.31.88 diff --git a/petrel-sdk/petrel-oss-python-sdk/setup.py b/petrel-sdk/petrel-oss-python-sdk/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..2eff7368e77f4e95ce05bbe902a42cb6d27ea02e --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/setup.py @@ -0,0 +1,37 @@ +import setuptools +import subprocess +import os +import shutil + +try: + git_describe = subprocess.check_output( + ['git', 'describe', '--tags', '--long']).decode('utf-8').strip() + git_branch = subprocess.check_output( + ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode('utf-8').strip() + version = f'{git_describe}-{git_branch}' + + with open('petrel_client/version.py', 'w') as f: + f.write(f"version = '{version}'\n") + f.truncate() +except Exception: + from importlib.machinery import SourceFileLoader + version_module = SourceFileLoader( + 'version_module', 'petrel_client/version.py').load_module() + version = version_module.version + +dist_path = 'dist' +if os.path.exists(dist_path): + shutil.rmtree(dist_path) + +setuptools.setup( + name='petrel-oss-sdk', + version=version, + description='Ceph S3 storage API for Pytorch, Parrots', + url="http://gitlab.bj.sensetime.com/platform/StorageSystem/petrel-oss-python-sdk", + packages=setuptools.find_packages(), + package_data={'': ['**/*.so']}, + install_requires=['boto3', 'environs', 'coloredlogs', + 'humanize', 'multiprocessing-logging'], + python_requires='>=3.6', + zip_safe=False, +) diff --git a/petrel-sdk/petrel-oss-python-sdk/setup.py.bak b/petrel-sdk/petrel-oss-python-sdk/setup.py.bak new file mode 100644 index 0000000000000000000000000000000000000000..2eff7368e77f4e95ce05bbe902a42cb6d27ea02e --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/setup.py.bak @@ -0,0 +1,37 @@ +import setuptools +import subprocess +import os +import shutil + +try: + git_describe = subprocess.check_output( + ['git', 'describe', '--tags', '--long']).decode('utf-8').strip() + git_branch = subprocess.check_output( + ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode('utf-8').strip() + version = f'{git_describe}-{git_branch}' + + with open('petrel_client/version.py', 'w') as f: + f.write(f"version = '{version}'\n") + f.truncate() +except Exception: + from importlib.machinery import SourceFileLoader + version_module = SourceFileLoader( + 'version_module', 'petrel_client/version.py').load_module() + version = version_module.version + +dist_path = 'dist' +if os.path.exists(dist_path): + shutil.rmtree(dist_path) + +setuptools.setup( + name='petrel-oss-sdk', + version=version, + description='Ceph S3 storage API for Pytorch, Parrots', + url="http://gitlab.bj.sensetime.com/platform/StorageSystem/petrel-oss-python-sdk", + packages=setuptools.find_packages(), + package_data={'': ['**/*.so']}, + install_requires=['boto3', 'environs', 'coloredlogs', + 'humanize', 'multiprocessing-logging'], + python_requires='>=3.6', + zip_safe=False, +) diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/common_util.py 
b/petrel-sdk/petrel-oss-python-sdk/tests/common_util.py new file mode 100644 index 0000000000000000000000000000000000000000..940733e0193ad8ba48b90799a1e1d48270d15815 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/tests/common_util.py @@ -0,0 +1,63 @@ +import sys +import argparse +import os +import unittest +import subprocess + +test_dir = os.path.dirname(os.path.realpath(__file__)) + +parser = argparse.ArgumentParser(add_help=False) +parser.add_argument('-s', '--save-result', nargs='?', type=str, default=None) +args, remaining = parser.parse_known_args() + +UNITTEST_ARGS = [sys.argv[0]] + remaining + + +def wait_for_process(p): + try: + return p.wait() + except KeyboardInterrupt: + # Give `p` a chance to handle KeyboardInterrupt. Without this, + # `pytest` can't print errors it collected so far upon KeyboardInterrupt. + exit_status = p.wait(timeout=5) + if exit_status is not None: + return exit_status + else: + p.kill() + raise + except: # noqa E722, copied from python core library + p.kill() + raise + finally: + # Always call p.wait() to ensure exit + p.wait() + + +def shell(command, cwd=None, env=None): + sys.stdout.flush() + sys.stderr.flush() + # The following cool snippet is copied from Py3 core library subprocess.call + # only the with + # 1. `except KeyboardInterrupt` block added for SIGINT handling. + # 2. In Py2, subprocess.Popen doesn't return a context manager, so we do + # `p.wait()` in a `final` block for the code to be portable. + # + # https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323 + # assert not isinstance(command, torch._six.string_classes), "Command to shell should be a list or tuple of tokens" + p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env) + return wait_for_process(p) + + +def run_test(argv=UNITTEST_ARGS): + if args.save_result is not None: + test_report_path = test_dir + "/" + args.save_result + with open(test_report_path, "a") as report_file: + runner = unittest.TextTestRunner(stream=report_file, verbosity=2) + unittest.main(argv=argv, testRunner=runner) + else: + runner = unittest.TextTestRunner(verbosity=2) + unittest.main(argv=argv, testRunner=runner) + + +if __name__ == "__main__": + run_test() diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/conf/petreloss.conf b/petrel-sdk/petrel-oss-python-sdk/tests/conf/petreloss.conf new file mode 100644 index 0000000000000000000000000000000000000000..d3734b87c2af0df16d13b831addf673a0d495f2a --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/tests/conf/petreloss.conf @@ -0,0 +1,87 @@ +# 注释以 ’#‘ 或 ‘;’ 开头,单独占一行,不能和配置内容在同一行 + +[DEFAULT] + +# 启用 Memcached, 默认 False +# enable_mc = True + +# Memcached 相关配置,默认情况下无需设置 +# mc_server_list_path = /mnt/lustre/share/memcached_client/server_list.conf +# mc_client_config_path = /mnt/lustre/share/memcached_client/client.conf + +# console log 级别,默认 WARNING, 选项有 CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET +# 若需要在 console 输出 IO 统计信息,需要设置级别为 INFO +# console_log_level = WARNING + +# file log 级别,默认 DEBUG, 选项有 CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET +# file_log_level = DEBUG + +# log 文件路径,默认 无 ,即不输出 log 文件 +# log_file_path = /tmp/petrel.log + +# log 文件最大长度,默认 1GB +# file_log_max_bytes = 1073741824 + +# log 文件备份数目,默认 1 +# file_log_backup_count = 1 + +# 每隔 count_disp 次 get 操作后,日志记录 IO 统计信息。默认值 5000 +# 如果 IO 统计信息输出过于频繁,可将该数值增大 +# 如果需要关闭 IO 统计信,可将该数值设置为 0 +# count_disp = 5000 + +# 内存统计,默认关闭 +# enable_mem_trace = False + +# get 操作失败后,允许重试的次数,默认 10 +# get_retry_max = 10 + +# 默认 cluster,即当访问 Ceph 没有指定 
cluster 时,从 default_cluster 获取数据 +default_cluster = cluster1 + +[mc] +# 若访问的路径过长(超过250个字节),mc 将出现 McKeySizeExceed 错误。 +# 配置 mc_key_cb 可将传给 mc 的路径进行转换,可选的参数有: +# blake2b, blake2s, md5, pbkdf2_hmac, sha1, sha224, +# sha256, sha384, sha3_224, sha3_256, sha3_384, +# sha3_512, sha512, shake_128, shake_256 + +# mc_key_cb = sha256 + + +# 是否输出 mc debug log,默认 True +# 注意最终是否输出到 console 和 file 分别还需要由 console_log_level 和 file_log_level 决定 +# debug_mc = True + + +[dfs] +enable_mc = True + +# 至少需要配置一个 cluster ,否则将出现 ConfigSectionNotFoundError +[cluster1] +# 对于每个集群的具体配置,如果没有指定,则以[DEFAULT]作为取值 +# 例如在此处设置 ‘enable_mc = False’ 将覆盖默认配置 +enable_mc = True + +# 启用 S3 boto,默认 True +# boto = c++ 将启用 c++ 版本实现的 S3 +boto = True + +# 若不设置 access_key 和 secret_key,将以 anonymous 账户访问数据 +access_key = lili1 +secret_key = lili1 + +host_base = http://127.0.0.1:7480 + +# 若 boto = False ,则需要增加以下配置 +# conf = conf/ceph.conf +# keyring = conf/keyring +# name = client.rgw.train +# cluster = ceph + +[cluster2] + +access_key = lili1 +secret_key = lili1 + +host_base = http://127.0.0.1:7480 \ No newline at end of file diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/conf/test_empty.conf b/petrel-sdk/petrel-oss-python-sdk/tests/conf/test_empty.conf new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/conf/test_petreloss.conf b/petrel-sdk/petrel-oss-python-sdk/tests/conf/test_petreloss.conf new file mode 100644 index 0000000000000000000000000000000000000000..43411fda698e69526d29ed49a503b7b0d1179486 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/tests/conf/test_petreloss.conf @@ -0,0 +1,86 @@ +# 注释以 ’#‘ 或 ‘;’ 开头,单独占一行,不能和配置内容在同一行 + +[default] + +# 启用 Memcached, 默认 False +# enable_mc = True + +# Memcached 相关配置,默认情况下无需设置 +# mc_server_list_path = /mnt/lustre/share/memcached_client/server_list.conf +# mc_client_config_path = /mnt/lustre/share/memcached_client/client.conf + +# console log 级别,默认 WARNING, 选项有 CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET +# 若需要在 console 输出 IO 统计信息,需要设置级别为 INFO +# console_log_level = WARNING + +# file log 级别,默认 DEBUG, 选项有 CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET +# file_log_level = DEBUG + +# log 文件路径,默认 无 ,即不输出 log 文件 +# log_file_path = /tmp/petrel.log + +# log 文件最大长度,默认 1GB +# file_log_max_bytes = 1073741824 + +# log 文件备份数目,默认 1 +# file_log_backup_count = 1 + +# 每隔 count_disp 次 get 操作后,日志记录 IO 统计信息。默认值 5000 +# 如果 IO 统计信息输出过于频繁,可将该数值增大 +# 如果需要关闭 IO 统计信,可将该数值设置为 0 +# count_disp = 5000 + +# 内存统计,默认关闭 +# enable_mem_trace = False + +# get 操作失败后,允许重试的次数,默认 10 +# get_retry_max = 10 + +# 默认 cluster,即当访问 Ceph 没有指定 cluster 时,从 default_cluster 获取数据 +default_cluster = cluster1 + +[mc] +# 若访问的路径过长(超过250个字节),mc 将出现 McKeySizeExceed 错误。 +# 配置 mc_key_cb 可将传给 mc 的路径进行转换,可选的参数有: +# blake2b, blake2s, md5, pbkdf2_hmac, sha1, sha224, +# sha256, sha384, sha3_224, sha3_256, sha3_384, +# sha3_512, sha512, shake_128, shake_256 + +# mc_key_cb = sha256 + + +# 是否输出 mc debug log,默认 True +# 注意最终是否输出到 console 和 file 分别还需要由 console_log_level 和 file_log_level 决定 +# debug_mc = True + + +[dfs] +enable_mc = True + +# 至少需要配置一个 cluster ,否则将出现 ConfigSectionNotFoundError +[cluster1] +# 对于每个集群的具体配置,如果没有指定,则以[DEFAULT]作为取值 +# 例如在此处设置 ‘enable_mc = False’ 将覆盖默认配置 +enable_mc = True + +# 启用 S3 boto,默认 True +# boto = c++ 将启用 c++ 版本实现的 S3 +boto = True + +# 若不设置 access_key 和 secret_key,将以 anonymous 账户访问数据 +access_key = lili1 +secret_key = lili1 + +host_base = http://127.0.0.1:7480 +# 若 boto = False ,则需要增加以下配置 +# conf = conf/ceph.conf +# 
keyring = conf/keyring +# name = client.rgw.train +# cluster = ceph + +[cluster2] + +access_key = lili1 +secret_key = lili1 + +host_base = http://127.0.0.1:7480 diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/dataloader_test.py b/petrel-sdk/petrel-oss-python-sdk/tests/dataloader_test.py new file mode 100644 index 0000000000000000000000000000000000000000..46c88fc599ea6b89651ef57f5cef75b11f409053 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/tests/dataloader_test.py @@ -0,0 +1,116 @@ +import torch +from torch.utils.data import get_worker_info +from torch.utils.data import DataLoader + +import random +import time + +from functools import partial + +from itertools import chain + + +from petrel_client.utils.data import DataLoader as MyDataLoader +MyDataLoader = partial(MyDataLoader, prefetch_factor=4, persistent_workers=True) + + +def assert_equal(lhs, rhs): + if isinstance(lhs, dict): + assert lhs.keys() == rhs.keys() + for k in lhs.keys(): + assert_equal(lhs[k], rhs[k]) + elif isinstance(lhs, list): + assert len(lhs) == len(rhs) + for i in range(len(lhs)): + assert_equal(lhs[i], rhs[i]) + elif isinstance(lhs, torch.Tensor): + assert torch.equal(lhs, rhs) + else: + assert False + + +def wait(dt): + time.sleep(dt) + + +class Dataset(list): + def __init__(self, *args, **kwargs): + super(Dataset, self).__init__(*args, **kwargs) + self._seed_inited = False + + def __getitem__(self, *args, **kwargs): + worker_info = get_worker_info() + if not self._seed_inited: + if worker_info is None: + random.seed(0) + else: + random.seed(worker_info.id) + self._seed_inited = True + rand_int = random.randint(1, 4) + time_to_sleep = rand_int * 0.05 + if worker_info is not None and worker_info.id == 0: + time_to_sleep *= 2 + wait(time_to_sleep) + val = super(Dataset, self).__getitem__(*args, **kwargs) + return {'val': val} + + +def test(dataloader, result): + print('\ntest') + random.seed(0) + data_time = 0 + tstart = t1 = time.time() + for i, data in enumerate(chain(dataloader, dataloader), 1): + t2 = time.time() + d = t2 - t1 + print('{0:>5}' .format(int((t2 - t1)*1000)), end='') + if i % 10: + print('\t', end='') + else: + print('') + + result.append(data) + + data_time += d + + rand_int = random.randrange(1, 4) + wait(0.05 * rand_int) + + t1 = time.time() + tend = time.time() + print('\ntotal time: %.3f' % (tend - tstart)) + print('total data time: %.3f' % data_time) + print(type(dataloader)) + + +def worker_init_fn(worker_id): + print('start worker:', worker_id) + wait(3) + + +dataloader_args = { + 'dataset': Dataset(range(1024)), + 'drop_last': False, + 'shuffle': False, + 'batch_size': 32, + 'num_workers': 8, + 'worker_init_fn': worker_init_fn, +} + + +torch.manual_seed(0) +l2 = MyDataLoader(**dataloader_args) +r2 = [] +test(l2, r2) + +torch.manual_seed(0) +l1 = DataLoader(**dataloader_args) +r1 = [] +test(l1, r1) + + +print('len l1:', len(l1)) +print('len l2:', len(l2)) + +assert_equal(r1, r2) +print(torch) diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/etag_test.py b/petrel-sdk/petrel-oss-python-sdk/tests/etag_test.py new file mode 100644 index 0000000000000000000000000000000000000000..4ad6c27a81e0654aaa5b67290923f9916e109018 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/tests/etag_test.py @@ -0,0 +1,18 @@ +from petrel_client.client import Client +import hashlib + + +client = Client('conf/s3config.ini') +filename = 'cluster1:s3://my-bucket/object.111' +content = b'a' * 1024 +print('put: key len:', len(filename), 'content len:', len(content)) + +client.put(filename, content, 
update_cache=True) +content, info = client.get_and_update(filename, enable_etag=True) + +print('size:', len(content)) + +digest = hashlib.md5(content).hexdigest() +print('digest:', digest) + +print('etag:', info['etag']) diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/fake_client_test.py b/petrel-sdk/petrel-oss-python-sdk/tests/fake_client_test.py new file mode 100644 index 0000000000000000000000000000000000000000..59e9202beacf71f45130c83eff649c6d22e709d4 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/tests/fake_client_test.py @@ -0,0 +1,51 @@ +# -*- coding: utf-8 -*- + +from random import random + +from petrel_client.client import Client +from petrel_client.fake_client import FakeClient + + +def customized_get(self, *args, **kwargs): + # type 有 s3、dfs、mc + if self.type == 's3': + # s3 的参数有 cluster, bucket, key + cluster, bucket, key = args + else: + # 剩余类型的参数只包含 key + cluster, bucket, key = 'unknow', 'unknow', args[0] + + if self.type == 'mc' and random() < 0.5: + # 模拟缓存失效 + return None + else: + return b'x' * 1024 + + +def customized_put(self, *args, **kwargs): + if self.type == 's3': + # s3 的参数有 cluster, bucket, key, body + cluster, bucket, key, body = args + else: + # 剩余类型的参数只包含 key, body + cluster, bucket, (key, body) = 'unknow', 'unknow', args + + return len(body) + + +FakeClient.customized_get = customized_get +FakeClient.customized_put = customized_put + +client = Client("~/fake_client.conf") + +urls = [ + 'cluster1:s3://my-bucket/object.1', # 从 cluster1 中读取 + 'cluster2:s3://my-bucket/object.2', # 从 cluster2 中读取 + 's3://my-bucket/object.3', # 若不指定 cluster,则从配置文件中指定的 default_cluster 中读取 + 'file://tmp/xxx', # 从 DFS 中读取 + '/tmp/xxx', # 若不包含 's3:' 或 'file:',从 DFS 中读取 +] + +for _ in range(1000): + for url in urls: + client.get(url) diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/file_iterator_test.py b/petrel-sdk/petrel-oss-python-sdk/tests/file_iterator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9c90e1b0baf4ed3ee6d5751d2430f37b5d79205f --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/tests/file_iterator_test.py @@ -0,0 +1,40 @@ +from petrel_client.client import Client + +c = Client(conf_path='../conf/petreloss.conf') + + +data = c.get('cluster1:s3://lili1.test2/sometest') +print (data) + + +c.put('s3://lili1.test2/sometest', 'sometest') + +files = c.get_file_iterator('s3://lili1.test2/test3') +files1 = c.get_file_iterator('s3://lili1.test2') +files2 = c.get_file_iterator('cluster1:s3://lili1.test2/') + +cluster = 'cluster1' + +for path, key in files: + k = '{0}:s3://{1}'.format(cluster, path) + print (k) + #c.get(k) + +print ('='*20) + +for path, key in files: + k = '{0}:s3://{1}'.format(cluster, path) + print (k) + #c.get(k) + +print ('='*20) + +for path, key in files: + k = '{0}:s3://{1}'.format(cluster, path) + print (k) + #c.get(k) +for path, key in files1: + print (path, key) + +for path, key in files2: + print (path, key) diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/io_profile_test.py b/petrel-sdk/petrel-oss-python-sdk/tests/io_profile_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ea8e1ede9affb227c57a4373ee560dc4299d5ecc --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/tests/io_profile_test.py @@ -0,0 +1,63 @@ +import logging +import time +import random +from os.path import expanduser, abspath +from petrel_client.client_base import ClientBase +from petrel_client.common.io_profile import profile +from petrel_client.common.exception import ObjectNotFoundError +from 
petrel_client.common.config import Config +from petrel_client.common.log import init_log +from petrel_client.common.io_profile import Profiler + + +LOG = logging.getLogger(__name__) + + +class Client(ClientBase): + def __init__(self, conf_path, name, count_disp): + conf_path = abspath(expanduser(conf_path)) + config = Config(conf_path) + self._default_config = config.default() + + init_log(self._default_config) + LOG.info('init io_profile_test.Client, conf_path %s', conf_path) + + Profiler.set_default_conf(self._default_config) + super(Client, self).__init__(name=name, count_disp=count_disp) + + @profile('get') + def get(self, key): + + def not_found(): + raise ObjectNotFoundError(key) + + def error(): + raise Exception(key) + + def found(): + return 'content' + + action = random.choice([found, not_found, error]) + time.sleep(0.001) + return action() + + @profile('put') + def put(self, key, content): + def normal(): + return len(content) + + def error(): + raise Exception(key) + + action = random.choice([normal, error]) + return action() + + +c = Client(conf_path='~/petreloss.conf', name='cluster1', count_disp=50) + + +for _ in range(100): + try: + c.get('key') + except Exception: + pass diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/io_retry_test.py b/petrel-sdk/petrel-oss-python-sdk/tests/io_retry_test.py new file mode 100644 index 0000000000000000000000000000000000000000..cf2d1c29d41e5b5d84a50ac2147aa2f296076ed7 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/tests/io_retry_test.py @@ -0,0 +1,105 @@ +import logging +import time +import random +from os.path import expanduser, abspath + +from petrel_client.client_base import ClientBase +from petrel_client.common.io_profile import profile +from petrel_client.common.io_retry import retry +from petrel_client.common import exception +from petrel_client.common.exception import ObjectNotFoundError, ResourceNotFoundError +from petrel_client.common.config import Config +from petrel_client.common.log import init_log +from petrel_client.common.io_profile import Profiler + + +LOG = logging.getLogger(__name__) + + +class Client(ClientBase): + def __init__(self, conf_path, name, count_disp): + conf_path = abspath(expanduser(conf_path)) + config = Config(conf_path) + self._default_config = config.default() + + init_log(self._default_config) + LOG.info('init io_retry_test.Client, conf_path %s', conf_path) + + Profiler.set_default_conf(self._default_config) + super(Client, self).__init__(name=name, count_disp=count_disp) + + @profile('get') + def get(self, key): + + def not_found(): + raise ObjectNotFoundError(key) + + def resource_not_found(): + raise ResourceNotFoundError() + + def error(): + raise Exception(key) + + def found(): + return 'content' + + action = random.choice([found, not_found, resource_not_found, error]) + time.sleep(0.001) + return action() + + @profile('put') + def put(self, key, content): + def normal(): + return len(content) + + def error(): + raise Exception(key) + + action = random.choice([normal, error]) + return action() + + +class FakeMixedClient(object): + def __init__(self, client): + self.client = client + + def get(self, uri, **kwargs): + @retry('get', exceptions=(Exception,), raises=(exception.ResourceNotFoundError,), tries=3) + def do_get(self, uri, **kwargs): + try: + self.client.get(uri) + except exception.ObjectNotFoundError as err: + LOG.debug(err) + return None + except exception.ResourceNotFoundError as err: + raise + except Exception as err: + raise + + return do_get(self, uri, **kwargs) + + def 
put(self, uri, content, **kwargs):
+        @retry('put', exceptions=(Exception,), tries=3)
+        def do_put(self, uri, content, **kwargs):
+            try:
+                self.client.put(uri, content)
+            except Exception as err:
+                raise
+
+        return do_put(self, uri, content, **kwargs)
+
+
+c = Client(conf_path='~/petreloss.conf', name='cluster1', count_disp=50)
+mc = FakeMixedClient(c)
+
+for _ in range(50):
+    try:
+        mc.get('key')
+    except Exception:
+        pass
+
+for _ in range(50):
+    try:
+        mc.put('key', '!@#$%'*10)
+    except Exception:
+        pass
diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/multi_cluster_test.py b/petrel-sdk/petrel-oss-python-sdk/tests/multi_cluster_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8d7fa7708c0ee5e550a1e1b4c60a401f4cd1b31
--- /dev/null
+++ b/petrel-sdk/petrel-oss-python-sdk/tests/multi_cluster_test.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+from petrel_client.client import Client
+from multiprocessing import Process
+import logging
+import random
+
+LOG = logging.getLogger('petrel_client.test')
+
+
+def f(conf_path, repeat):
+    client = Client(conf_path)
+    total_bytes = 0
+
+    for itr in range(repeat):
+        urls = [
+            'cluster1:s3://my-bucket/object.1',  # read from cluster1
+            'cluster2:s3://my-bucket/object.2',  # read from cluster2
+            's3://my-bucket/object.3',  # no cluster given: read from the default_cluster set in the config file
+            'file://tmp/xxx',  # read from DFS
+            '/tmp/xxx',  # read from DFS when the path contains neither 's3:' nor 'file:'
+        ]
+        url = random.choice(urls)
+        body = client.get(url)
+
+        if not body:
+            LOG.warning('cannot get content from %s', url)
+        else:
+            total_bytes += len(body)
+
+    LOG.debug('total_bytes: %s', total_bytes)
+
+
+conf_path = '~/petreloss.conf'
+repeat = 5000
+parallelism = 4
+
+process_list = [Process(target=f, args=(conf_path, repeat))
+                for _ in range(parallelism)]
+
+
+[p.start() for p in process_list]
+[p.join() for p in process_list]
diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/multipart_test.py b/petrel-sdk/petrel-oss-python-sdk/tests/multipart_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ad54a59892f01b0cf34ff480df23be3390f34b1
--- /dev/null
+++ b/petrel-sdk/petrel-oss-python-sdk/tests/multipart_test.py
@@ -0,0 +1,29 @@
+# If the object being uploaded is very large, a multipart upload is required.
+# Any argument passed to put that implements the read interface is uploaded in parts;
+# for example, the return value of open and io.BytesIO both implement read.
+
+# The imports and sample values below make this snippet runnable; the values are illustrative.
+import io
+
+import torch
+
+from petrel_client.client import Client
+
+client = Client()  # reads ~/petreloss.conf by default
+url = 's3://bucket1/large_object'
+data = torch.zeros(1024)  # sample tensor
+large_bytes = b'x' * (64 * 1024 * 1024)  # sample large object
+
+# upload a large file
+with open("large_file", "rb") as f:
+    client.put(url, f)
+
+# upload a tensor
+with io.BytesIO() as f:
+    torch.save(data, f)
+    f.seek(0)
+    client.put(url, f)
+
+# upload a large object
+with io.BytesIO(large_bytes) as f:
+    client.put(url, f)
diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/multipart_upload_test.py b/petrel-sdk/petrel-oss-python-sdk/tests/multipart_upload_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba4d7168e8d1509239a1c0a49230a2dcb39a3f25
--- /dev/null
+++ b/petrel-sdk/petrel-oss-python-sdk/tests/multipart_upload_test.py
@@ -0,0 +1,14 @@
+from petrel_client.client import Client
+
+url = 's3://mybucket/100M'
+client = Client()
+
+content, get_info = client.get_with_info(
+    url, enable_stream=True, enable_etag=True)
+print('etag', get_info['etag'])
+
+put_result, put_info = client.put_with_info(
+    url + '.put', content, enable_md5=True, enable_etag=True)
+print('put_result:', put_result)
+print('md5:', put_info['md5'])
+print('etag:', put_info['etag'])
diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/pillow_image_test.py b/petrel-sdk/petrel-oss-python-sdk/tests/pillow_image_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..949d2935399b3103c668aca96f7c2e8e43341014
--- /dev/null
+++ b/petrel-sdk/petrel-oss-python-sdk/tests/pillow_image_test.py @@ -0,0 +1,17 @@ +from PIL import Image +import io +from petrel_client.client import Client + + +client = Client('./conf/petreloss.conf') + +# read image file from ceph +img_url = 's3://bucket1/image.png' +image_data = client.get(img_url) +image = Image.open(io.BytesIO(image_data)) + +# write image file to ceph +new_img_url = 's3://bucket1/new_image.png' +new_image_data = io.BytesIO() +image.save(new_image_data, format='PNG') +client.put(new_img_url, new_image_data.getvalue()) \ No newline at end of file diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/profile_test.py b/petrel-sdk/petrel-oss-python-sdk/tests/profile_test.py new file mode 100644 index 0000000000000000000000000000000000000000..5cf79893965bef0357bd8028f0017811b6c046ed --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/tests/profile_test.py @@ -0,0 +1,14 @@ +from petrel_client.utils.profile.profile import profileit + + +@profileit(name='xxx',count= 500) +def test(): + import time + time.sleep(0.001) + return 1 + + +l = [test() for _ in range(2000)] +print('sum:', sum(l)) + + diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/run_test.py b/petrel-sdk/petrel-oss-python-sdk/tests/run_test.py new file mode 100644 index 0000000000000000000000000000000000000000..2a5af91c51e939e79659508857f523f5edf53a21 --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/tests/run_test.py @@ -0,0 +1,198 @@ +#coding:utf-8 +import os +import sys +import argparse +import copy +from datetime import datetime +import common_util +import signal + +test_dir = os.path.dirname(os.path.realpath(__file__)) + +TESTS = ['test_config', 'test_read'] + +# Tests need to be run with pytest. +USE_PYTEST_LIST = [] + +CUSTOM_HANDLERS = {} + + +def print_to_stderr(message): + print(message, file=sys.stderr) + + +def parse_test_module(test): + return test.split('.')[0] + + +class TestChoices(list): + def __init__(self, *args, **kwargs): + super(TestChoices, self).__init__(args[0]) + + def __contains__(self, item): + return list.__contains__(self, parse_test_module(item)) + + +def parse_args(): + parser = argparse.ArgumentParser( + description='Run the Petrel unit test suite', + epilog='where TESTS is any of: {}'.format(', '.join(TESTS))) + + parser.add_argument( + '-pt', + '--pytest', + action='store_true', + help='If true, use `pytest` to execute the tests. E.g., this runs ' + 'python run_test.py -pt') + + parser.add_argument( + '-i', + '--include', + nargs='+', + choices=TestChoices(TESTS), + default=TESTS, + metavar='TESTS', + help='select a set of tests to include (defaults to ALL tests).' 
+ ' tests are specified with module name') + + parser.add_argument('-x', + '--exclude', + nargs='+', + choices=TESTS, + metavar='TESTS', + default=[], + help='select a set of tests to exclude') + + parser.add_argument( + '--continue-through-error', + action='store_true', + help='Runs the full test suite despite one of the tests failing') + + parser.add_argument( + 'additional_unittest_args', + nargs='*', + help='additional arguments passed through to unittest, e.g., ' + 'python run_test.py -i test_config -- -s test_report.log' + 'to save test report in test_report.log') + + return parser.parse_args() + + +def exclude_tests(exclude_list, selected_tests, exclude_message=None): + for exclude_test in exclude_list: + tests_copy = selected_tests[:] + for test in tests_copy: + if test.startswith(exclude_test): + if exclude_message is not None: + print_to_stderr('Excluding {} {}'.format( + test, exclude_message)) + selected_tests.remove(test) + return selected_tests + + +def get_selected_tests(options): + selected_tests = options.include + selected_tests = exclude_tests(options.exclude, selected_tests) + return selected_tests + + +def get_executable_command(options, allow_pytest): + executable = [sys.executable] + + if options.pytest: + if allow_pytest: + executable += ['-m', 'pytest'] + else: + print_to_stderr( + 'Pytest cannot be used for this test. Falling back to unittest.' + ) + return executable + + +def run_test(test_module, + test_directory, + options, + launcher_cmd=None, + extra_unittest_args=None): + unittest_args = options.additional_unittest_args.copy() + + if extra_unittest_args: + assert isinstance(extra_unittest_args, list) + unittest_args.extend(extra_unittest_args) + + # If using pytest, replace -f with equivalent -x + if options.pytest: + unittest_args = [arg if arg != '-f' else '-x' for arg in unittest_args] + + # Can't call `python -m unittest test_*` here because it doesn't run code + # in `if __name__ == '__main__': `. So call `python test_*.py` instead. + print(test_module) + argv = [test_module + '.py'] + unittest_args + + # Extra arguments are not supported with pytest + executable = get_executable_command(options, + allow_pytest=not extra_unittest_args) + + command = (launcher_cmd or []) + executable + argv + print_to_stderr('Executing {} ... [{}]'.format(command, datetime.now())) + return common_util.shell(command, test_directory) + + +# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python +SIGNALS_TO_NAMES_DICT = { + getattr(signal, n): n + for n in dir(signal) if n.startswith('SIG') and '_' not in n +} + + +def run_test_module(test, test_directory, options): + test_module = parse_test_module(test) + + # Printing the date here can help diagnose which tests are slow + print_to_stderr('Running {} ... [{}]'.format(test, datetime.now())) + handler = CUSTOM_HANDLERS.get(test, run_test) + return_code = handler(test_module, test_directory, options) + assert isinstance(return_code, int) and not isinstance( + return_code, bool), 'Return code should be an integer' + + if return_code == 0: + return None + + message = '{test} failed!' + if return_code < 0: + # subprocess.Popen returns the child process' exit signal as + # return code -N, where N is the signal number. 
+ signal_name = SIGNALS_TO_NAMES_DICT[-return_code] + print(signal_name) + message += ' Received signal: ' + signal_name + return message + + +def main(): + options = parse_args() + test_directory = os.path.dirname(os.path.abspath(__file__)) + selected_tests = get_selected_tests(options) + + failure_messages = [] + has_failed = False + for test in selected_tests: + options_clone = copy.deepcopy(options) + if test in USE_PYTEST_LIST: + options_clone.pytest = True + err_message = run_test_module(test, test_directory, options_clone) + if err_message is None: + continue + has_failed = True + failure_messages.append(err_message) + if not options_clone.continue_through_error: + raise RuntimeError(err_message) + print_to_stderr(err_message) + + if options.continue_through_error and has_failed: + for err in failure_messages: + print_to_stderr(err) + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/tensor_json_test.py b/petrel-sdk/petrel-oss-python-sdk/tests/tensor_json_test.py new file mode 100644 index 0000000000000000000000000000000000000000..7eda31bbc54730ff4ec51e6f2b95a64b4b5e4ffc --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/tests/tensor_json_test.py @@ -0,0 +1,32 @@ +import torch +import io +import json +from petrel_client.client import Client + +client = Client('./conf/petreloss.conf') + +# Pytorch save & load + +data = torch.tensor([0, 1, 2, 3]) +tensor_url = 's3://bucket1/tensor_data' + +with io.BytesIO() as f: + torch.save(data, f) + client.put(tensor_url, f.getvalue()) + +with io.BytesIO(client.get(tensor_url)) as f: + data2 = torch.load(f) + +assert torch.equal(data, data2) + + +# Json dumps & loads + +data = [0, 1, 2, 3] +json_data_url = 's3://bucket1/json_data' + +client.put(json_data_url, json.dumps(data).encode('utf-8')) + +data2 = json.loads(client.get(json_data_url)) + +assert data == data2 diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/test_config.py b/petrel-sdk/petrel-oss-python-sdk/tests/test_config.py new file mode 100644 index 0000000000000000000000000000000000000000..b113c2a965d573bc4a1146ba7bc8b97711b7285e --- /dev/null +++ b/petrel-sdk/petrel-oss-python-sdk/tests/test_config.py @@ -0,0 +1,144 @@ +import logging +import os +from petrel_client.common.config import Config, CONFIG_DEFAULT, Section, _value_to_str +from petrel_client.common import exception + +import unittest +import common_util +from unittest import mock +from petrel_client.client import Client +test_dir = os.path.dirname(os.path.realpath(__file__)) + + +class TestSection(unittest.TestCase): + def setUp(self): + pass + + def tearDown(self): + pass + + def test_valuetoStr(self): + self.assertEqual(_value_to_str(100), "100") + self.assertEqual(_value_to_str(False), "False") + expect = {'a': '2', 'b': '2', 'c': '3', 'd': '4'} + input = dict(a=2, b=2, c=3, d=4) + self.assertEqual(expect, _value_to_str(input)) + + def test_init(self): + session = Section(CONFIG_DEFAULT) + self.assertEqual(session._conf, CONFIG_DEFAULT) + self.assertTrue(isinstance(session, Section)) + + def test_key(self): + session = Section(CONFIG_DEFAULT) + self.assertEqual(session['enable_mc'], 'False') + + def test_ConfigKeyNotFoundError(self): + session = Section(CONFIG_DEFAULT) + with self.assertRaises(exception.ConfigKeyNotFoundError): + _ = session['empty'] + + def test_update(self): + session = Section(CONFIG_DEFAULT) + toUpdate = dict(enable_mc='True', file_log_backup_count=3) + session.update(toUpdate) + self.assertEqual(session['enable_mc'], 'True') + 
self.assertEqual(session['file_log_backup_count'], '3')
+
+    # def testGetitem(self):
+    #     expected = CONFIG_DEFAULT
+
+    def test_get(self):
+        session = Section(CONFIG_DEFAULT)
+        self.assertEqual(session.get('enable_mc'), 'False')
+        with self.assertRaises(exception.ConfigItemNotFoundError):
+            _ = session.get('enable_mc1')
+        self.assertIsNone(session.get('enable_mc1', default=None))
+
+    def test_has_option(self):
+        session = Section(CONFIG_DEFAULT)
+        self.assertTrue(session.has_option('enable_mc'))
+        self.assertFalse(session.has_option('enable_mc1'))
+
+    def test_get_boolean(self):
+        session = Section(CONFIG_DEFAULT)
+        self.assertFalse(session.get_boolean('enable_mc'))
+        with self.assertRaises(exception.ConfigKeyTypeError):
+            _ = session.get_boolean('endpoint_url')
+
+    def test_get_int(self):
+        session = Section(CONFIG_DEFAULT)
+        self.assertEqual(session.get_int('file_log_backup_count'), 1)
+        with self.assertRaises(exception.ConfigKeyTypeError):
+            _ = session.get_int('enable_mc')
+
+    def test_get_log_level(self):
+        session = Section(CONFIG_DEFAULT)
+        self.assertEqual(session.get_log_level('file_log_level'),
+                         logging.DEBUG)
+        with self.assertRaises(exception.ConfigKeyTypeError):
+            _ = session.get_log_level('enable_mc')
+
+
+class TestConfig(unittest.TestCase):
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_init(self):
+        with self.assertRaises(exception.ConfigFileNotFoundError):
+            conf_path = test_dir + '/tests/conf/petreloss.conf1'
+            self.config = Config(conf_path)
+
+        with self.assertRaises(exception.ConfigSectionNotFoundError):
+            conf_path = test_dir + '/conf/test_empty.conf'
+            self.config = Config(conf_path)
+
+        expect_session = Section(CONFIG_DEFAULT)
+        toUpdate = dict(default_cluster='cluster1')
+        expect_session.update(toUpdate)
+
+        conf_path = test_dir + '/conf/petreloss.conf'
+        config = Config(conf_path)
+        default_session = config.default()
+        self.assertTrue(default_session._conf == expect_session._conf)
+
+        small_case_conf_path = test_dir + '/conf/test_petreloss.conf'
+        small_case_config = Config(small_case_conf_path)
+        small_case_default_session = small_case_config.default()
+        self.assertTrue(
+            small_case_default_session._conf == expect_session._conf)
+
+    def test_get(self):
+        conf_path = test_dir + '/conf/petreloss.conf'
+        config = Config(conf_path)
+        cluster1_session = config['cluster1']
+        self.assertTrue(cluster1_session.get_boolean("enable_mc"))
+        self.assertEqual(cluster1_session.get("access_key"), 'lili1')
+
+        small_case_conf_path = test_dir + '/conf/test_petreloss.conf'
+        small_case_config = Config(small_case_conf_path)
+        cluster1_session = small_case_config['cluster1']
+
+        self.assertEqual(cluster1_session.get("default_cluster"), 'cluster1')
+
+        with self.assertRaises(exception.ConfigSectionNotFoundError):
+            config["noncluster1"]
+
+    def test_update(self):
+        conf_path = test_dir + '/conf/petreloss.conf'
+        config = Config(conf_path)
+        toUpdate = dict(cluster1=dict(default_cluster='cluster3'))
+        config.update(toUpdate)
+        self.assertEqual(config["cluster1"].get("default_cluster"), 'cluster3')
+
+    def test_items(self):
+        conf_path = test_dir + '/conf/petreloss.conf'
+        config = Config(conf_path)
+        sections = config.items()
+        self.assertEqual(len(sections), 4)
+
+if __name__ == '__main__':
+    common_util.run_test()
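The test_read.py diff below patches `S3Client.get_with_info` in three styles (an explicit patcher in `setUp`/`tearDown`, a decorator, and a context manager). As a quick, self-contained reference for the first two styles, using only the standard library (the `Target` class here is hypothetical, not part of the SDK):

```python
import unittest
from unittest import mock


class Target:
    def fetch(self):
        return 'real'


class PatchStyles(unittest.TestCase):
    def test_patcher_object(self):
        # Style 1: create a patcher, start it, and register the stop.
        patcher = mock.patch.object(Target, 'fetch', return_value='fake')
        patcher.start()
        self.addCleanup(patcher.stop)
        self.assertEqual(Target().fetch(), 'fake')

    @mock.patch.object(Target, 'fetch', return_value='fake')
    def test_decorator(self, mock_fetch):
        # Style 2: the decorator injects the mock as an extra argument.
        self.assertEqual(Target().fetch(), 'fake')
        mock_fetch.assert_called_once()


if __name__ == '__main__':
    unittest.main()
```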
diff --git a/petrel-sdk/petrel-oss-python-sdk/tests/test_read.py b/petrel-sdk/petrel-oss-python-sdk/tests/test_read.py
new file mode 100644
index 0000000000000000000000000000000000000000..836c2731746930adbd9a47e6cd1ecfc6a61ee9f9
--- /dev/null
+++ b/petrel-sdk/petrel-oss-python-sdk/tests/test_read.py
@@ -0,0 +1,60 @@
+import os
+import unittest
+import common_util
+from unittest import mock
+from petrel_client.client import Client
+test_dir = os.path.dirname(os.path.realpath(__file__))
+
+
+class TestRead(unittest.TestCase):
+    def setUp(self):
+        # Create a Mock object used to replace the petrel_client.ceph.s3.s3_client.S3Client.get_with_info function
+        self._mock_get_with_info = mock.Mock()
+        self._mock_get_with_info.return_value = "23", {}
+        # self._mock_get_with_info.return_value = None, {}
+        # Replace the petrel_client.ceph.s3.s3_client.S3Client.get_with_info function
+        self._patcher = mock.patch(
+            'petrel_client.ceph.s3.s3_client.S3Client.get_with_info',
+            self._mock_get_with_info)
+        self._patcher.start()
+        pass
+
+    def tearDown(self):
+        self._patcher.stop()
+        pass
+
+    # 1. Inject the mock in setUp to replace petrel_client.ceph.s3.s3_client.S3Client.get_with_info, and stop it in tearDown
+    def test_read1(self):
+        _conf_path = test_dir + '/conf/petreloss.conf'
+        c = Client(conf_path=_conf_path)
+        # self._mock_get_with_info.return_value = "23", {}
+        # data = c.get('cluster1:s3://lili1.test2/sometest')
+        data = c.get("s3://hptestbucket/a/b/c.go")
+        print("data is ", data)
+        self.assertEqual("23", data)
+
+    # 2. Use @mock.patch to name the method being replaced; by default mocks are injected in parameter-list order
+    # @mock.patch("petrel_client.ceph.s3.s3_client.S3Client.get_with_info")
+    # #@mock.patch("...")
+    # def test_read2(self, mock_get_with_info):
+    #     mock_get_with_info.return_value = "15", {}
+    #     _conf_path = test_dir + '/conf/petreloss.conf'
+    #     c = Client(conf_path=_conf_path)
+    #     data = c.get('cluster1:s3://lili1.test2/sometest')
+    #     self.assertEqual("15", data)
+
+    # # 3. Use "with mock.patch" to replace the target only within a block
+    # def test_read3(self):
+    #     mock_get_with_info = mock.Mock()
+    #     mock_get_with_info.return_value = "15", {}
+    #     _conf_path = test_dir + '/conf/petreloss.conf'
+    #     c = Client(conf_path=_conf_path)
+    #     with mock.patch(
+    #         'petrel_client.ceph.s3.s3_client.S3Client.get_with_info',
+    #         mock_get_with_info):
+    #         data = c.get('cluster1:s3://lili1.test2/sometest')
+    #         self.assertEqual("15", data)
+
+
+if __name__ == '__main__':
+    common_util.run_test()
\ No newline at end of file
diff --git a/petrel-sdk/recompile_cpp_sdk.sh b/petrel-sdk/recompile_cpp_sdk.sh
new file mode 100644
index 0000000000000000000000000000000000000000..acc482807843e87d5b01eb45f56d90d7821c439e
--- /dev/null
+++ b/petrel-sdk/recompile_cpp_sdk.sh
@@ -0,0 +1,6 @@
+cd petrel-oss-cpp-sdk
+python setup.py build_ext --inplace
+cd ../petrel-oss-python-sdk
+python linkcpp.py
+python setup.py sdist
+pip install --user dist/*
diff --git a/petrel_client/__init__.py b/petrel_client/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/petrel_client/cache/__init__.py b/petrel_client/cache/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/petrel_client/cache/cache.py b/petrel_client/cache/cache.py
new file mode 100644
index 0000000000000000000000000000000000000000..558fc6547319e7a82009cdeb7beedc169d1c3592
--- /dev/null
+++ b/petrel_client/cache/cache.py
@@ -0,0 +1,53 @@
+import logging
+import socket
+import re
+import sys
+from petrel_client.client_base import ClientBase
+from petrel_client.common.exception import InvalidMcUriError
+
+LOG = logging.getLogger(__name__)
+
+_MC_URI_PATTERN = re.compile(r'^mc://(.+)')
+
+import_map = {'memcached': 'petrel_client.cache.mc.mc.MC'}
+
+
+class Cache(ClientBase):
+    @staticmethod
+    def parse_uri(uri):
+        m = _MC_URI_PATTERN.match(uri)
+        if m:
+            return
m.group(1) + else: + raise InvalidMcUriError(uri) + + @staticmethod + def get_engine_cls(engine_type): + import_name = import_map.get(engine_type) + module_name, callable_name = import_name.rsplit('.', 1) + __import__(module_name) + module = sys.modules[module_name] + return getattr(module, callable_name) + + @staticmethod + def create(conf, *args, **kwargs): + fake = conf.get_boolean('fake') + if fake: + from petrel_client.fake_client import FakeClient + name = f'MC: {socket.gethostname()}' + return FakeClient(client_type='mc', conf=conf, name=name, **kwargs) + + engine_type = conf.get('cache_engine', 'memcached') + try: + engine_cls = Cache.get_engine_cls(engine_type) + instance = engine_cls(conf, *args, **kwargs) + if not hasattr(instance, 'log'): + setattr(instance, 'log', LOG) + return instance + except Exception as err: + LOG.warn('can not init cache client') + LOG.exception(err) + return None + + def __init__(self, *args, **kwargs): + super(Cache, self).__init__(*args, **kwargs) diff --git a/petrel_client/cache/mc/__init__.py b/petrel_client/cache/mc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/petrel_client/cache/mc/mc.py b/petrel_client/cache/mc/mc.py new file mode 100644 index 0000000000000000000000000000000000000000..47638cca8bc2552557bd10bd4c7a5efa5652f769 --- /dev/null +++ b/petrel_client/cache/mc/mc.py @@ -0,0 +1,116 @@ +import functools +import logging +from functools import partial +from collections import defaultdict + +from petrel_client.cache.cache import Cache +from petrel_client.common.io_profile import profile +from petrel_client.common import exception +from petrel_client.cache.mc.petrel_pymc import McClient +from petrel_client.common import hash + +LOG = logging.getLogger(__name__) + +_STATUS_SUCCESS = 'SUCCESS' +_STATUS_NOT_FOUND = 'NOT FOUND' + +_MAX_KEY_SIZE = 250 +_ITEM_SIZE_RESERVED = 128 + +_EXCEPTION_MAP = defaultdict(lambda: exception.McClientError, { + 'A TIMEOUT OCCURRED': exception.McTimeoutOccur, + 'CONNECTION FAILURE': exception.McConnFailed, + 'FAILURE': exception.McServerDisable, + 'CLIENT ERROR': exception.McServerDisable, + 'SERVER ERROR': exception.McServerDisable, + 'ERROR was returned by server': exception.McServerDisable, + 'SYSTEM ERROR': exception.McServerFailed, + 'A KEY LENGTH OF ZERO WAS PROVIDED': exception.McBadKeyProvided, + 'A BAD KEY WAS PROVIDED/CHARACTERS OUT OF RANGE': exception.McBadKeyProvided, + 'SERVER IS MARKED DEAD': exception.McServerDead, + 'ITEM TOO BIG': exception.McObjectSizeExceed, + 'SERVER HAS FAILED AND IS DISABLED UNTIL TIMED RETRY': exception.McServerFailed, +}) + + +def wrap_io(fn): + @functools.wraps(fn) + def new_fn(self, key, *args, **kwargs): + if self.mc_key_cb: + key = self.mc_key_cb(key) + self.check_key_size(key) + + value, status = fn(self, key, *args, **kwargs) + + if status == _STATUS_SUCCESS: + return value + elif status == _STATUS_NOT_FOUND: + raise exception.McObjectNotFoundError(key) + else: + server, _ = self._mc.get_server(key) + raise _EXCEPTION_MAP[status](key, status, server) + + return new_fn + + +class MC(Cache): + def __init__(self, conf, *args, **kwargs): + mc_server_list_path = conf['mc_server_list_path'] + mc_client_config_path = conf['mc_client_config_path'] + debug_mc = conf.get_boolean('debug_mc') + if debug_mc: + LOG.setLevel(logging.DEBUG) + else: + LOG.setLevel(logging.WARNING) + + self.log = LOG + LOG.debug('init MC, server list path: %s, client config path: %s', + mc_server_list_path, 
mc_client_config_path) + super(MC, self).__init__(*args, conf=conf, **kwargs) + + self._mc = McClient.GetInstance( + mc_server_list_path, mc_client_config_path) + self._max_item_size = self._mc.max_item_size() - _MAX_KEY_SIZE - \ + _ITEM_SIZE_RESERVED + self._max_key_size = _MAX_KEY_SIZE + + mc_key_cb = kwargs.get('mc_key_cb', None) or conf.get('mc_key_cb') + if mc_key_cb == 'identity': + self.mc_key_cb = None + elif isinstance(mc_key_cb, str): + hash_fn = hash.get_hash_fn(mc_key_cb) + self.mc_key_cb = partial(hash.hexdigest, hash_fn=hash_fn) + LOG.debug('mc: using mc_key_cb %s', mc_key_cb) + elif not callable(mc_key_cb): + raise Exception("argument 'mc_key_cb' should be callable.") + else: + self.mc_key_cb = mc_key_cb + LOG.debug('mc: using user defined mc_key_cb') + + def check_key_size(self, key): + if isinstance(key, str): + key_len = len(key.encode('utf-8')) + elif isinstance(key, bytes): + key_len = len(key) + else: + raise Exception( + 'mc key type is not supported: {}, value: {}'.format(type(key), key)) + + if key_len > self._max_key_size: + raise exception.McKeySizeExceed( + 'size of key must <= {}'.format(self._max_key_size), key) + + @profile('get') + @wrap_io + def get(self, key, **kwargs): + return self._mc.get(key) + + @profile('put') + @wrap_io + def put(self, key, content, **kwargs): + size = len(content) + if size > self._max_item_size: + raise exception.McObjectSizeExceed( + key, 'size of object must <= {}, actual size: {}'.format(self._max_item_size, size)) + status = self._mc.set(key, content) + return size, status diff --git a/petrel_client/cache/mc/petrel_pymc.so b/petrel_client/cache/mc/petrel_pymc.so new file mode 100644 index 0000000000000000000000000000000000000000..18b04af64be592c2087d47db273efe0c098a8487 --- /dev/null +++ b/petrel_client/cache/mc/petrel_pymc.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a7064faa9f52205d9263333016c32f44bfbc5c43db5a5e32776d0f6a6659ec1 +size 1412352 diff --git a/petrel_client/ceph/__init__.py b/petrel_client/ceph/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/petrel_client/ceph/ceph.py b/petrel_client/ceph/ceph.py new file mode 100644 index 0000000000000000000000000000000000000000..6a6bec260d990fb301b53889625d249b7cf91d98 --- /dev/null +++ b/petrel_client/ceph/ceph.py @@ -0,0 +1,76 @@ +# -*- coding: utf-8 -*- + + +import re +from petrel_client.common.exception import InvalidClusterNameError, InvalidS3UriError, NoDefaultClusterNameError +from petrel_client.client_base import ClientBase + + +# (?:...) +# A non-capturing version of regular parentheses. Matches whatever regular expression is inside the parentheses, but the substring matched by the group cannot be retrieved after performing a match or referenced later in the pattern. + +# *?, +?, ?? +# The '*', '+', and '?' qualifiers are all greedy; they match as much text as possible. Sometimes this behaviour isn’t desired; if the RE <.*> is matched against b , it will match the entire string, and not just . Adding ? after the qualifier makes it perform the match in non-greedy or minimal fashion; as few characters as possible will be matched. Using the RE <.*?> will match only . + +# re.I +# re.IGNORECASE +# Perform case-insensitive matching; expressions like [A-Z] will match lowercase letters, too. This is not affected by the current locale. To get this effect on non-ASCII Unicode characters such as ü and Ü, add the UNICODE flag. 
+# _S3_URI_PATTERN = re.compile(r'^(?:([^:]+):)?s3://([^/]+)/(.+?)/?$', re.I)
+
+_S3_URI_PATTERN = re.compile(
+    r'^(?:(?P<cluster>[^:]+):)?s3://(?P<bucket>[^/]+)/?(?P<key>(?:.+?)/?$)?', re.I)
+
+
+class Ceph(ClientBase):
+
+    @staticmethod
+    def parse_uri(uri, ceph_dict, default_cluster=None):
+        m = _S3_URI_PATTERN.match(uri)
+        if m:
+            cluster, bucket, key = m.group(
+                'cluster'), m.group('bucket'), m.group('key')
+            cluster = cluster or default_cluster
+            if not cluster:
+                raise NoDefaultClusterNameError(uri)
+
+            try:
+                client = ceph_dict[cluster]
+                enable_cache = client.enable_cache()
+                return cluster, bucket, key, enable_cache
+            except KeyError:
+                raise InvalidClusterNameError(cluster)
+        else:
+            raise InvalidS3UriError(uri)
+
+    @staticmethod
+    def create(cluster, conf, *args, **kwargs):
+        fake = conf.get_boolean('fake')
+        enable_s3_cpp = conf.get('boto').lower() in ('cpp', 'c++')
+        enable_boto = (not enable_s3_cpp) and conf.get_boolean('boto')
+        anonymous_access = (conf.get('access_key', None) is None) and (
+            conf.get('secret_key', None) is None)
+
+        if fake:
+            from petrel_client.fake_client import FakeClient
+            name = f'S3: {cluster}'
+            return FakeClient(client_type='s3', conf=conf, name=name, **kwargs)
+        elif enable_s3_cpp:
+            from petrel_client.ceph.s3cpp.s3_cpp_client import S3CppClient
+            return S3CppClient(cluster, conf, anonymous_access, *args, **kwargs)
+        elif enable_boto:
+            from petrel_client.ceph.s3.s3_client import S3Client
+            return S3Client(cluster, conf, anonymous_access, *args, **kwargs)
+        else:
+            from petrel_client.ceph.librgw.rgw_client import RGWClient
+            return RGWClient(cluster, conf, *args, **kwargs)
+
+    def __init__(self, cluster, conf, *args, **kwargs):
+        super(Ceph, self).__init__(*args, name=cluster, conf=conf, **kwargs)
+        self.__enable_cache = conf.get_boolean(
+            'enable_mc', False) or conf.get_boolean('enable_cache', False)
+
+    def enable_cache(self):
+        # The __ prefix makes __enable_cache a private variable:
+        # https://docs.python.org/3/tutorial/classes.html#private-variables
+        # enable_cache is defined as a method so that whether to cache can later be computed dynamically.
+        return self.__enable_cache
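The named groups in `_S3_URI_PATTERN` above were lost in the original text and are reconstructed here from the `m.group(...)` calls in `parse_uri`. A minimal sanity check of how the reconstructed pattern splits URIs (bucket and key values are illustrative):

```python
import re

_S3_URI_PATTERN = re.compile(
    r'^(?:(?P<cluster>[^:]+):)?s3://(?P<bucket>[^/]+)/?(?P<key>(?:.+?)/?$)?', re.I)

m = _S3_URI_PATTERN.match('cluster1:s3://my-bucket/dir/obj.bin')
assert m.group('cluster') == 'cluster1'
assert m.group('bucket') == 'my-bucket'
assert m.group('key') == 'dir/obj.bin'

# The cluster prefix is optional; parse_uri then falls back to default_cluster.
m = _S3_URI_PATTERN.match('s3://my-bucket/obj')
assert m.group('cluster') is None and m.group('key') == 'obj'
```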
diff --git a/petrel_client/ceph/librgw/__init__.py b/petrel_client/ceph/librgw/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/petrel_client/ceph/librgw/rados.cpython-36m-x86_64-linux-gnu.so b/petrel_client/ceph/librgw/rados.cpython-36m-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..4724b53841375da816a84a69ba1700e5e8377770
--- /dev/null
+++ b/petrel_client/ceph/librgw/rados.cpython-36m-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2982b57dc9c62e95900e13745c8722985f6837ffc8f1ade5a134d523e2949860
+size 1085000
diff --git a/petrel_client/ceph/librgw/rados.so b/petrel_client/ceph/librgw/rados.so
new file mode 100644
index 0000000000000000000000000000000000000000..5e0052836ebcdaf715acccf8ef1c53b77ac5d9f2
Binary files /dev/null and b/petrel_client/ceph/librgw/rados.so differ
diff --git a/petrel_client/ceph/librgw/rgw.cpython-36m-x86_64-linux-gnu.so b/petrel_client/ceph/librgw/rgw.cpython-36m-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..a61bd342f352c06e95a4b36e4fec302e0b8aff75
--- /dev/null
+++ b/petrel_client/ceph/librgw/rgw.cpython-36m-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:460acd1fe6301bb517a98224b69330e885b4f6ccb351963f3121ad82c9e9561c
+size 1427450
diff --git a/petrel_client/ceph/librgw/rgw.so b/petrel_client/ceph/librgw/rgw.so
new file mode 100644
index 0000000000000000000000000000000000000000..c4e774b3306ae39e44898194c9e282b9f6ddeff2
Binary files /dev/null and b/petrel_client/ceph/librgw/rgw.so differ
diff --git a/petrel_client/ceph/librgw/rgw_client.py b/petrel_client/ceph/librgw/rgw_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9f2df36f9ab1b62aac833efa3d629bf2b8f5fa2
--- /dev/null
+++ b/petrel_client/ceph/librgw/rgw_client.py
@@ -0,0 +1,74 @@
+import logging
+
+from petrel_client.ceph.ceph import Ceph
+from petrel_client.common.io_profile import profile
+from petrel_client.common.exception import ObjectNotFoundError
+from petrel_client.ceph.librgw import rgw
+
+
+LOG = logging.getLogger(__name__)
+
+
+class RGWClient(Ceph):
+
+    kb = 1024
+    mb = 1024 * kb
+
+    def __init__(self, cluster, conf, *args, **kwargs):
+        LOG.debug('init RGWClient(%s)', cluster)
+        super(RGWClient, self).__init__(cluster, conf, *args, **kwargs)
+        conn_args = {
+            'conf': conf['conf'],
+            'keyring': conf['keyring'],
+            'name': conf['name'],
+            'cluster': conf['cluster'],
+        }
+        uid = conf.get('uid', 'user_id')
+        self.bucket_fs = {}
+        self._init_librgw(uid, conf['access_key'],
+                          conf['secret_key'], conn_args)
+
+    def _init_librgw(self, uid=None, key=None, secret=None, connection_kwargs=None):
+        try:
+            self.client = rgw.LibRGWFS(uid, key, secret, **connection_kwargs)
+            self.root_fs = self.client.mount()
+            LOG.debug("The connection was built successfully.")
+        except Exception as e:
+            LOG.error("The input parameters are invalid. %s", e)
+            raise Exception("The input parameters are invalid.", e)
+
+    @profile('get')
+    def get(self, cluster, bucket, key, file_size=4 * mb, **kwargs):
+        try:
+            # destination_base_uri = S3Uri(filename)
+            # bucket = destination_base_uri.bucket()
+            # key = destination_base_uri.object()
+            bucket_fs = self.bucket_fs.get(bucket)
+            if not bucket_fs:
+                bucket_fs = self.client.opendir(self.root_fs, bucket)
+                self.bucket_fs[bucket] = bucket_fs
+            file_fs = self.client.open(bucket_fs, key)
+            value = self.client.read(file_fs, 0, file_size)
+            self.client.close(file_fs)
+            self.client.close(bucket_fs)
+            # log.debug('filename is: {}'.format(key))
+            # log.debug('value size is: {} kB'.format(len(value) / self.kb))
+            return value
+        except rgw.ObjectNotFound as err:
+            raise ObjectNotFoundError(err)
+
+    def put(self, cluster, bucket, key, body, **kwargs):
+        # destination_base_uri = S3Uri(filename)
+        # bucket = destination_base_uri.bucket()
+        # key = destination_base_uri.object()
+        bucket_fs = self.client.opendir(self.root_fs, bucket)
+        try:
+            file_fs = self.client.create(bucket_fs, key)
+        except rgw.ObjectExists:
+            file_fs = self.client.open(bucket_fs, key)
+        self.client.write(file_fs, 0, body)
+        self.client.close(file_fs)
+        self.client.close(bucket_fs)
+        # log.debug('filename is: {}'.format(key))
+        # log.debug('value size is: {} kB'.format(len(body) / self.kb))
+        return True
diff --git a/petrel_client/ceph/s3/__init__.py b/petrel_client/ceph/s3/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
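One caveat in the `RGWClient.get` above: the librgw read is issued as a single `read(file_fs, 0, file_size)` call, so at most `file_size` bytes (4 MB by default) come back. A hedged caller-side sketch, assuming an already-initialized `RGWClient` instance is passed in as `rgw`:

```python
def read_large_object(rgw, cluster, bucket, key, max_bytes=64 * 1024 * 1024):
    # `rgw` is assumed to be an initialized RGWClient (see the class above).
    # Pass an upper bound at least as large as the object; otherwise the
    # result is silently truncated to the default 4 MB.
    return rgw.get(cluster, bucket, key, file_size=max_bytes)
```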
diff --git a/petrel_client/ceph/s3/generator.py b/petrel_client/ceph/s3/generator.py
new file mode 100644
index 0000000000000000000000000000000000000000..77bd964423569a1a60437e0d127ae18cac40540d
--- /dev/null
+++ b/petrel_client/ceph/s3/generator.py
@@ -0,0 +1,99 @@
+import re
+
+
+_S3_ACCESSPOINT_TO_BUCKET_KEY_REGEX = re.compile(
+    r'^(?P<bucket>arn:(aws).*:s3:[a-z\-0-9]+:[0-9]{12}:accesspoint[:/][^/]+)/?'
+    r'(?P<key>.*)$'
+)
+
+
+def find_bucket_key(s3_path):
+    """
+    This is a helper function that, given an s3 path of the
+    form: bucket/key,
+    will return the bucket and the key represented by the s3 path.
+    """
+    match = _S3_ACCESSPOINT_TO_BUCKET_KEY_REGEX.match(s3_path)
+    if match:
+        return match.group('bucket'), match.group('key')
+    s3_components = s3_path.split('/', 1)
+    bucket = s3_components[0]
+    s3_key = ''
+    if len(s3_components) > 1:
+        s3_key = s3_components[1]
+    return bucket, s3_key
+
+
+class BucketLister(object):
+    """List keys in a bucket."""
+
+    def __init__(self, client):
+        self._client = client
+
+    def list_objects(self, bucket, prefix=None, page_size=1000, max_items=None):
+        kwargs = {'Bucket': bucket,
+                  'PaginationConfig': {'PageSize': page_size,
+                                       'MaxItems': max_items}}
+        if prefix is not None:
+            kwargs['Prefix'] = prefix
+
+#       paginator = self._client.get_paginator('list_objects_v2')
+        paginator = self._client.get_paginator('list_objects')
+        pages = paginator.paginate(**kwargs)
+        for page in pages:
+            contents = page.get('Contents', [])
+            for content in contents:
+                source_path = bucket + '/' + content['Key']
+                yield source_path, content
+
+
+class FileGenerator(object):
+    """
+    This is a class that creates a generator to yield files based on information
+    returned from the ``FileFormat`` class. It is universal in the sense that
+    it will handle s3 files, local files, local directories, and s3 objects
+    under the same common prefix. The generator yields corresponding
+    ``FileInfo`` objects to send to a ``Comparator`` or ``S3Handler``.
+    """
+
+    def __init__(self, client, page_size=None):
+        self._client = client
+        self.page_size = page_size
+        self.request_parameters = {}
+
+    def __call__(self, path):
+
+        file_iterator = self.list_objects(path)
+        yield from file_iterator
+
+    def list_objects(self, s3_path):
+        """
+        This function yields the appropriate object or objects under a
+        common prefix, depending on whether the operation is on objects under
+        a common prefix. It yields the file's source path, size, and last
+        update.
+        """
+        if s3_path.startswith('s3://'):
+            s3_path = s3_path[5:]
+        bucket, prefix = find_bucket_key(s3_path)
+        lister = BucketLister(self._client)
+        for key in lister.list_objects(bucket=bucket, prefix=prefix,
+                                       page_size=self.page_size):
+            source_path, response_data = key
+            if response_data['Size'] == 0 and source_path.endswith('/'):
+                pass
+            else:
+                yield source_path, response_data
+
+
+class FileIterator(object):
+
+    def __init__(self, client, path, page_size=None):
+        self._client = client
+        self.path = path
+        self.page_size = page_size
+        self.request_parameters = {}
+
+    def __iter__(self):
+        generator = FileGenerator(self._client, self.page_size)
+        return generator(self.path)
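A quick check of `find_bucket_key` behavior (the paths are illustrative; the access-point branch relies on the `bucket`/`key` named groups reconstructed above):

```python
from petrel_client.ceph.s3.generator import find_bucket_key

# Plain paths split on the first '/':
assert find_bucket_key('my-bucket/dir/obj.bin') == ('my-bucket', 'dir/obj.bin')
# A bare bucket yields an empty key:
assert find_bucket_key('my-bucket') == ('my-bucket', '')
# Access-point ARNs keep the whole ARN as the "bucket":
arn = 'arn:aws:s3:us-west-2:123456789012:accesspoint/ap/dir/obj.bin'
assert find_bucket_key(arn) == (
    'arn:aws:s3:us-west-2:123456789012:accesspoint/ap', 'dir/obj.bin')
```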
diff --git a/petrel_client/ceph/s3/s3_client.py b/petrel_client/ceph/s3/s3_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..4742bcecf341642ddb95771c585901cbfdf5e048
--- /dev/null
+++ b/petrel_client/ceph/s3/s3_client.py
@@ -0,0 +1,229 @@
+# -*- coding: utf-8 -*-
+
+import logging
+import hashlib
+
+import boto3
+from botocore.exceptions import ClientError as BotoClientError
+from botocore.client import Config
+from botocore import UNSIGNED
+
+from petrel_client.ceph.ceph import Ceph
+from petrel_client.common.io_profile import profile
+from petrel_client.common.exception import NoSuchBucketError, NoSuchKeyError, S3ClientError, AccessDeniedError
+from .generator import FileIterator
+
+LOG = logging.getLogger(__name__)
+
+
+class S3Client(Ceph):
+
+    def __init__(self, cluster, conf, anonymous_access, *args, **kwargs):
+        if anonymous_access:
+            s3_args = {
+                'config': Config(signature_version=UNSIGNED)
+            }
+        else:
+            s3_args = {
+                'aws_access_key_id': conf['access_key'],
+                'aws_secret_access_key': conf['secret_key']
+            }
+
+        s3_args['endpoint_url'] = conf['endpoint_url']
+        s3_args['verify'] = conf.get_boolean('verify_ssl', False)
+
+        super(S3Client, self).__init__(cluster, conf, *args, **kwargs)
+        self._cluster = cluster
+        self._conf = conf
+        self._session = boto3.session.Session()
+        self._s3_resource = self._session.resource(
+            's3',
+            **s3_args
+        )
+
+    @profile('get')
+    def get_with_info(self, cluster, bucket, key, **kwargs):
+        enable_etag = kwargs.get('enable_etag', False)
+        enable_stream = kwargs.get('enable_stream', False)
+        info = {}
+        assert self._cluster == cluster
+        try:
+            obj = self._s3_resource.Object(bucket, key).get()
+            content = obj['Body']
+            if not enable_stream:
+                content = content.read()
+            if enable_etag:
+                info['etag'] = obj['ETag'].strip('"')
+            return content, info
+        except BotoClientError as err:
+            if type(err).__name__ == 'NoSuchKey':
+                # The type of err here is botocore.errorfactory.NoSuchKey or NoSuchBucket,
+                # but those types are constructed at runtime via
+                # type(exception_name, (ClientError,), {})  # botocore.errorfactory.py:83
+                # so for now they can only be caught via their base class ClientError.
+                raise NoSuchKeyError(cluster, bucket, key)
+            elif type(err).__name__ == 'NoSuchBucket':
+                raise NoSuchBucketError(cluster, bucket)
+            elif err.response['ResponseMetadata']['HTTPStatusCode'] == 403:
+                raise AccessDeniedError(err)
+            else:
+                raise S3ClientError(err)
+
+    def create_bucket(self, bucket):
+        return self._s3_resource.create_bucket(Bucket=bucket)
+
+    def isdir(self, bucket, key):
+        itr = self.list(bucket, key)
+        try:
+            next(itr)
+            return True
+        except StopIteration:
+            return False
+
+    def list(self, bucket, key, page_size=None):
+        if key is None:
+            key = ''
+        elif key and not key.endswith('/'):
+            key = key + '/'
+
+        client = self._s3_resource.meta.client
+        paginator = client.get_paginator('list_objects')
+        paging_args = {
+            'Bucket': bucket,
'Prefix': key, 'Delimiter': '/', + 'PaginationConfig': {'PageSize': page_size} + } + itr = paginator.paginate(**paging_args) + + for response_data in itr: + common_prefixes = response_data.get('CommonPrefixes', []) + contents = response_data.get('Contents', []) + + for common_prefix in common_prefixes: + prefix_components = common_prefix['Prefix'].split('/') + prefix = prefix_components[-2] + yield prefix + '/' + + for content in contents: + filename_components = content['Key'].split('/') + filename = filename_components[-1] + yield filename + + def get_file_iterator(self, bucket, key): + client = self._s3_resource.meta.client + path = 's3://{0}'.format(bucket) + if key: + path = path + '/' + key + file_iterator = FileIterator(client, path) + return file_iterator + + @profile('put') + def put_with_info(self, cluster, bucket, key, body, **kwargs): + if isinstance(body, (bytes, bytearray)): + result, info = self.put_bytes(cluster, bucket, key, body, **kwargs) + elif hasattr(body, 'read'): + result, info = self.multipart_upload( + cluster, bucket, key, body, **kwargs) + else: + raise TypeError( + f'{type(self)} does not support content type {type(body)}') + + if kwargs.get('enable_etag', False): + info['etag'] = result.e_tag.strip('"') + + return result, info + + def put_bytes(self, cluster, bucket, key, body, **kwargs): + assert self._cluster == cluster + enable_md5 = kwargs.get('enable_md5', False) + info = {} + try: + obj = self._s3_resource.Object(bucket, key) + obj.put(Body=body) + if enable_md5: + info['md5'] = hashlib.md5(body).hexdigest() + return obj, info + except BotoClientError as err: + if err.response['ResponseMetadata']['HTTPStatusCode'] == 403: + raise AccessDeniedError(err) + else: + raise S3ClientError(err) + + def multipart_upload(self, cluster, bucket, key, stream, chunk_size=1024 * 1024 * 1024 * 2, **kwargs): + assert self._cluster == cluster + info = {} + obj = self._s3_resource.Object(bucket, key) + multipart = obj.initiate_multipart_upload() + part_id = 0 + parts = [] + total_size = 0 + + enable_md5 = kwargs.get('enable_md5', False) + if enable_md5: + md5 = hashlib.md5() + + while True: + chunk = stream.read(chunk_size) + actual_size = len(chunk) + if actual_size == 0: + break + part_id += 1 + total_size += actual_size + part = multipart.Part(part_id) + response = part.upload(Body=chunk) + parts.append({ + "PartNumber": part_id, + "ETag": response["ETag"] + }) + if enable_md5: + md5.update(chunk) + + part_info = { + 'Parts': parts + } + result = multipart.complete(MultipartUpload=part_info) + if enable_md5: + info['md5'] = md5.hexdigest() + return result, info + + def contains(self, cluster, bucket, key): + assert self._cluster == cluster + try: + self._s3_resource.Object(bucket, key).load() + return True + except BotoClientError as err: + if err.response['ResponseMetadata']['HTTPStatusCode'] == 404: + return False + elif err.response['ResponseMetadata']['HTTPStatusCode'] == 403: + raise AccessDeniedError(err) + else: + raise S3ClientError(err) + + def delete(self, cluster, bucket, key, **kwargs): + assert self._cluster == cluster + try: + return self._s3_resource.Object(bucket, key).delete() + except BotoClientError as err: + if type(err).__name__ == 'NoSuchKey': + raise NoSuchKeyError(cluster, bucket, key) + elif type(err).__name__ == 'NoSuchBucket': + raise NoSuchBucketError(cluster, bucket) + elif err.response['ResponseMetadata']['HTTPStatusCode'] == 403: + raise AccessDeniedError(err) + else: + raise S3ClientError(err) + + def generate_presigned_url(self, cluster, 
bucket, key, client_method, expires_in): + assert self._cluster == cluster + # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.generate_presigned_url + return self._s3_resource.meta.client.generate_presigned_url( + client_method, + {'Bucket': bucket, 'Key': key}, + expires_in + ) + + def generate_presigned_post(self, cluster, bucket, key, fields=None, conditions=None, expires_in=3600): + assert self._cluster == cluster + # https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.generate_presigned_post + return self._s3_resource.meta.client.generate_presigned_post( + bucket, key, fields, conditions, expires_in + ) diff --git a/petrel_client/ceph/s3cpp/__init__.py b/petrel_client/ceph/s3cpp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1f53a4ba1af8369fc771656bfe19af8963578b6d --- /dev/null +++ b/petrel_client/ceph/s3cpp/__init__.py @@ -0,0 +1,17 @@ +from os import path +from ctypes import cdll + +root_path = path.dirname(__file__) +libs_path = path.join(root_path, 'libs') + +libs = [ + 'libaws-c-common.so', + 'libaws-checksums.so', + 'libaws-c-event-stream.so', + 'libaws-cpp-sdk-core.so', + 'libaws-cpp-sdk-s3.so' +] + +for lib in libs: + lib_path = path.join(libs_path, lib) + cdll.LoadLibrary(lib_path) diff --git a/petrel_client/ceph/s3cpp/libs/libaws-c-common.so b/petrel_client/ceph/s3cpp/libs/libaws-c-common.so new file mode 100644 index 0000000000000000000000000000000000000000..d812f170ab4d58a00c625dc24c14f8dbd5cce8c9 Binary files /dev/null and b/petrel_client/ceph/s3cpp/libs/libaws-c-common.so differ diff --git a/petrel_client/ceph/s3cpp/libs/libaws-c-event-stream.so b/petrel_client/ceph/s3cpp/libs/libaws-c-event-stream.so new file mode 100644 index 0000000000000000000000000000000000000000..df63d1b5b83288d3ff2582dc290e7f78b6a9b7e2 Binary files /dev/null and b/petrel_client/ceph/s3cpp/libs/libaws-c-event-stream.so differ diff --git a/petrel_client/ceph/s3cpp/libs/libaws-checksums.so b/petrel_client/ceph/s3cpp/libs/libaws-checksums.so new file mode 100644 index 0000000000000000000000000000000000000000..5c193d836ed2b0e89d0d30cdd7dab3caf19f57e5 Binary files /dev/null and b/petrel_client/ceph/s3cpp/libs/libaws-checksums.so differ diff --git a/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-core.so b/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-core.so new file mode 100644 index 0000000000000000000000000000000000000000..044df492d8ba4a0c1c9c5754a6ea6eac6250af58 --- /dev/null +++ b/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-core.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb6e4abdb6ac735fb844dc477008f70853feed6d3017fbaf4041f165e28bc0c1 +size 1455760 diff --git a/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-s3.so b/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-s3.so new file mode 100644 index 0000000000000000000000000000000000000000..a01263906bab86f61d7110b4846682e823e39c80 --- /dev/null +++ b/petrel_client/ceph/s3cpp/libs/libaws-cpp-sdk-s3.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ea28764432d32c25fba6111bc529184ff9187ae7a42f9baab56e4d39f414ca5 +size 4017984 diff --git a/petrel_client/ceph/s3cpp/pys3client.cpython-36m-x86_64-linux-gnu.so b/petrel_client/ceph/s3cpp/pys3client.cpython-36m-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..2869d9fabdd34f815e43ce0cd78bae99a4f4e234 --- /dev/null +++ b/petrel_client/ceph/s3cpp/pys3client.cpython-36m-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ 
+version https://git-lfs.github.com/spec/v1
+oid sha256:a7feaca324b70bc453eddc65f9299949a71efde4d6e8de4f369dbe6465a5a84c
+size 1838544
diff --git a/petrel_client/ceph/s3cpp/pys3client.cpython-37m-x86_64-linux-gnu.so b/petrel_client/ceph/s3cpp/pys3client.cpython-37m-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..a0dd691be4b0f0f205b7ca4b799b26826602218a
--- /dev/null
+++ b/petrel_client/ceph/s3cpp/pys3client.cpython-37m-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:db05941e6dd6385fec3f7d8d8d501ded63e255bc5864898ba6dc8f9457655941
+size 2001408
diff --git a/petrel_client/ceph/s3cpp/pys3client.cpython-38-x86_64-linux-gnu.so b/petrel_client/ceph/s3cpp/pys3client.cpython-38-x86_64-linux-gnu.so
new file mode 100644
index 0000000000000000000000000000000000000000..eb709af4f6d812220056affed507f855dea227d0
--- /dev/null
+++ b/petrel_client/ceph/s3cpp/pys3client.cpython-38-x86_64-linux-gnu.so
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2b9c18aa97e7eebf05eee0055e96b786a1e3c74b3d10e2912270e09f611777ae
+size 1898064
diff --git a/petrel_client/ceph/s3cpp/s3_cpp_client.py b/petrel_client/ceph/s3cpp/s3_cpp_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..f25b3861decb8da233d32db1c75da8cee79124c2
--- /dev/null
+++ b/petrel_client/ceph/s3cpp/s3_cpp_client.py
@@ -0,0 +1,129 @@
+import functools
+from urllib.parse import urlparse
+import logging
+import hashlib
+
+from petrel_client.common.io_profile import profile
+from petrel_client.ceph.s3cpp.pys3client import PyS3Client, S3Error, init_api, shutdown_api
+from petrel_client.ceph.ceph import Ceph
+from petrel_client.common import exception
+
+LOG = logging.getLogger(__name__)
+
+EXCEPTION_MAP = {
+    'ACCESS_DENIED': exception.AccessDeniedError,
+    'NO_SUCH_BUCKET': exception.NoSuchBucketError,
+    'NO_SUCH_KEY': exception.NoSuchKeyError,
+    'RESOURCE_NOT_FOUND': exception.ResourceNotFoundError,
+    'SIGNATURE_DOES_NOT_MATCH': exception.SignatureNotMatchError,
+    'INVALID_ACCESS_KEY_ID': exception.InvalidAccessKeyError,
+    'NETWORK_CONNECTION': exception.NetworkConnectionError,
+}
+
+S3_CPP_ENV = None
+
+
+class S3CppEnv(object):
+    def __init__(self, log_level):
+        LOG.debug('S3CppEnv init')
+        init_api(log_level)
+
+    def __del__(self):
+        # LOG.debug('S3CppEnv del')  # logging during __del__ raises an exception
+        shutdown_api()
+
+
+def get_s3_cpp_env(log_level):
+    global S3_CPP_ENV
+    if S3_CPP_ENV is None:
+        S3_CPP_ENV = S3CppEnv(log_level)
+    return S3_CPP_ENV
+
+
+def wrap_error(fn):
+    @functools.wraps(fn)
+    def new_fn(self, cluster, bucket, key, *args, **kwargs):
+        try:
+            return fn(self, cluster, bucket, key, *args, **kwargs)
+        except S3Error as err:
+            err_type = EXCEPTION_MAP.get(err.error_name, None)
+            if err_type:
+                new_err = err_type(cluster, bucket, key)
+            elif err.error_message:
+                new_err = exception.S3ClientError(
+                    err.error_name, err.error_message)
+            else:
+                new_err = exception.S3ClientError(err.error_name)
+
+            new_err.__traceback__ = err.__traceback__
+            raise new_err from None
+
+    return new_fn
+
+
+class S3CppClient(Ceph):
+    def __init__(self, cluster, conf, anonymous_access, *args, **kwargs):
+        # If initialization raises, __del__ will still be called; assign these first so __del__ hits no logic errors.
+        self._client = None
+        self._env = None
+
+        endpoint_url = conf['endpoint_url']
+        if '://' not in endpoint_url:
+            endpoint_url = 'http://' + endpoint_url
+        parse_result = urlparse(endpoint_url)
+        s3_args = {
+            # In the AWS CPP SDK, empty ak and sk mean anonymous access.
+            'ak': b'' if anonymous_access else
conf['access_key'].encode('utf-8'),
+            'sk': b'' if anonymous_access else conf['secret_key'].encode('utf-8'),
+
+            'endpoint': parse_result.netloc.encode('utf-8'),
+            'enable_https': parse_result.scheme == 'https',
+            'verify_ssl': conf.get_boolean('verify_ssl', False),
+            'use_dual_stack': False,
+        }
+
+        super(S3CppClient, self).__init__(cluster, conf, *args, **kwargs)
+        self._cluster = cluster
+        self._conf = conf
+
+        s3_cpp_log_level = conf.get('s3_cpp_log_level')
+        self._env = get_s3_cpp_env(s3_cpp_log_level)
+        self._client = PyS3Client(**s3_args)
+
+    def __del__(self):
+        del self._client
+        del self._env
+
+    @profile('get')
+    @wrap_error
+    def get_with_info(self, cluster, bucket, key, **kwargs):
+        info = {}
+
+        unsupported_ops = [k for k, v in kwargs.items() if k in (
+            'enable_stream', 'enable_etag') and v]
+        if unsupported_ops:
+            raise NotImplementedError(unsupported_ops)
+
+        if isinstance(bucket, str):
+            bucket = bucket.encode('utf-8')
+        if isinstance(key, str):
+            key = key.encode('utf-8')
+        data = self._client.get_object(bucket, key)
+
+        enable_md5 = kwargs.get('enable_md5', False)
+        if enable_md5:
+            info['md5'] = hashlib.md5(data).hexdigest()
+
+        return data, info
+
+    @profile('put')
+    @wrap_error
+    def put_with_info(self, cluster, bucket, key, body, **kwargs):
+        info = {}  # todo
+        if not isinstance(body, (bytes, bytearray)):
+            raise NotImplementedError(f'unsupported type {type(body)}')
+        if isinstance(bucket, str):
+            bucket = bucket.encode('utf-8')
+        if isinstance(key, str):
+            key = key.encode('utf-8')
+        return self._client.put_object(bucket, key, body), info
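`wrap_error` above maps AWS C++ SDK error names onto the typed exceptions in `petrel_client.common.exception`, mirroring what `S3Client` does for botocore errors. A hedged sketch of caller-side handling built on that mapping; `s3_client` is assumed to be an already-initialized `S3Client` or `S3CppClient`, and treating misses as `None` is a policy choice, not SDK behavior:

```python
from petrel_client.common.exception import (
    AccessDeniedError, NoSuchBucketError, NoSuchKeyError, S3ClientError)


def safe_get(s3_client, cluster, bucket, key):
    # s3_client: an initialized S3Client or S3CppClient (assumption).
    try:
        data, _info = s3_client.get_with_info(cluster, bucket, key)
        return data
    except (NoSuchKeyError, NoSuchBucketError):
        return None  # treat a missing bucket/object like a cache miss
    except AccessDeniedError:
        raise  # credential problems should surface, not be retried
    except S3ClientError:
        raise  # any other mapped SDK error
```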
+ + def put(self, *args, **kwargs): + result, _ = self.put_with_info(*args, **kwargs) + return result + + def contains(self, *args, **kwargs): + return self._get_local_client().contains(*args, **kwargs) + + def delete(self, *args, **kwargs): + self._get_local_client().delete(*args, **kwargs) + + def generate_presigned_url(self, *args, **kwargs): + return self._get_local_client().generate_presigned_url(*args, **kwargs) + + def generate_presigned_post(self, *args, **kwargs): + return self._get_local_client().generate_presigned_post(*args, **kwargs) + + def create_bucket(self, *args, **kwargs): + return self._get_local_client().create_bucket(*args, **kwargs) + + Get = get + + GetAndUpdate = get_and_update = functools.partialmethod( + get, update_cache=True) + + def set_count_disp(self, count_disp): + self._get_local_client().set_count_disp(count_disp) diff --git a/petrel_client/client_base.py b/petrel_client/client_base.py new file mode 100644 index 0000000000000000000000000000000000000000..10e66bbe7cfc4ce67765dd337ecf5e035166c3a9 --- /dev/null +++ b/petrel_client/client_base.py @@ -0,0 +1,20 @@ +import logging + +from petrel_client.common.io_profile import ClientStat + +LOG = logging.getLogger(__name__) + + +class ClientBase(object): + + def __init__(self, *args, **kwargs): + cls_name = type(self).__name__ + name = kwargs.get('name', None) + name = '{}({}id: {})'.format( + cls_name, + '{}, '.format(name) if name else '', + id(self)) + + self.client_stat = ClientStat(id(self), name) + + LOG.debug('create %s', name) diff --git a/petrel_client/common/__init__.py b/petrel_client/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/petrel_client/common/config.py b/petrel_client/common/config.py new file mode 100644 index 0000000000000000000000000000000000000000..af0704d2a33a56bf446e564e9f77101b8dec6db9 --- /dev/null +++ b/petrel_client/common/config.py @@ -0,0 +1,150 @@ +# -*- coding: utf-8 -*- + +import configparser +import logging + +from petrel_client.common import exception + +DEFAULT_SECTION_NAME = 'DEFAULT' + + +# if the config file contains a [DEFAULT] section, its contents override the values below +CONFIG_DEFAULT = { + 'endpoint_url': '%(host_base)s', + 'enable_mc': 'False', # must be of type str + 'debug_mc': 'True', + 'file_log_level': 'DEBUG', + 'file_log_max_bytes': 1024 * 1024 * 1024, # 1GB + 'file_log_backup_count': 1, + 'console_log_level': 'WARNING', + 'boto': 'True', + 'mc_server_list_path': '/mnt/lustre/share/memcached_client/server_list.conf', + 'mc_client_config_path': '/mnt/lustre/share/memcached_client/client.conf', + 'count_disp': '5000', + 'enable_mem_trace': 'False', + 'fake': 'False', + 'mc_key_cb': 'identity', + 'get_retry_max': '10', + 's3_cpp_log_level': 'off', + # 'host_bucket': '%(host_base)s/%(bucket)s', + # 'use_https': 'False', + # 'ca_certs_file': '', + # 'check_ssl_certificate': 'True', +} + +_UNSET = object() + + +def _value_to_str(d): + if isinstance(d, (int, bool)): + return str(d) + if isinstance(d, (dict,)): + return { + k: _value_to_str(v) for k, v in d.items() + } + return d + + +class GetterMixin(object): + + _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True, + '0': False, 'no': False, 'false': False, 'off': False} + + def get(self, key, default=_UNSET): + try: + return self[key] + except exception.ConfigItemNotFoundError: + if default is _UNSET: + raise + else: + return default + + def has_option(self, key): + try: + self[key] + return True + except exception.ConfigItemNotFoundError: + return False
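For illustration, per-cluster sections fall back to `CONFIG_DEFAULT` for any option they do not set, and the typed getters defined next coerce the raw string values. A sketch (the `[cluster1]` section contents are made up):

```python
from os.path import expanduser
from petrel_client.common.config import Config

# assume ~/petreloss.conf contains:
#   [cluster1]
#   enable_mc = on
conf = Config(expanduser('~/petreloss.conf'))
section = conf['cluster1']           # raises ConfigSectionNotFoundError if absent
section.get_boolean('enable_mc')     # 'on'/'1'/'yes'/'true' -> True, see _boolean_states
section.get_int('get_retry_max')     # not set in the section -> CONFIG_DEFAULT gives 10
```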
+ + def get_boolean(self, key, default=_UNSET): + v = str(self.get(key, default)).lower() + + if v not in self._boolean_states: + raise exception.ConfigKeyTypeError('Not a boolean: ' + key) + return self._boolean_states[v] + + def get_int(self, key, default=_UNSET): + try: + return int(self.get(key, default)) + except ValueError: + raise exception.ConfigKeyTypeError('Not an integer: ' + key) + + def get_log_level(self, key, default=_UNSET): + v = str(self.get(key, default)).upper() + if v not in logging._nameToLevel: + raise exception.ConfigKeyTypeError('Not a log level: ' + key) + return logging._nameToLevel[v] + + +class _my_dict(configparser._default_dict): + pass + + +class Config(GetterMixin): + def __init__(self, conf_path, *args, **kwargs): + parser = configparser.ConfigParser(CONFIG_DEFAULT) + r = parser.read(conf_path, encoding='utf-8') + if len(r) == 0: + raise exception.ConfigFileNotFoundError(conf_path) + if len(parser.sections()) == 0: + raise exception.ConfigSectionNotFoundError() + + defaults = parser._defaults + all_sections = parser._sections.items() + deleteList = [] + for section, options in all_sections: + if section.lower() != "default": + continue + for name, val in options.items(): + defaults[name] = val + deleteList.append(section) + for deleteSection in deleteList: + parser.remove_section(deleteSection) + + self._parser = parser + self._default = parser.items(DEFAULT_SECTION_NAME, raw=True) + + def __getitem__(self, key): + try: + return Section(self._parser[key]) + except KeyError as err: + raise exception.ConfigSectionNotFoundError(*err.args) + + def update(self, other: dict): + for k, v in other.items(): + self._parser[k].update(_value_to_str(v)) + + def default(self): + return Section(dict(self._default)) + + def items(self): + sections = self._parser.sections() + if len(sections) == 0: + raise exception.ConfigSectionNotFoundError() + return [(section, self[section]) for section in sections] + + +class Section(GetterMixin): + + def __init__(self, conf: dict): + # note: every value in conf is of type str + self._conf = conf + + def __getitem__(self, key): + try: + return self._conf[key] + except KeyError as err: + raise exception.ConfigKeyNotFoundError(*err.args) + + def update(self, other): + self._conf.update(_value_to_str(other)) diff --git a/petrel_client/common/exception.py b/petrel_client/common/exception.py new file mode 100644 index 0000000000000000000000000000000000000000..d4dcd7122cee5d64bec74cd6b4155d39997f360d --- /dev/null +++ b/petrel_client/common/exception.py @@ -0,0 +1,178 @@ +# -*- coding: utf-8 -*- + + +class Error(Exception): + '''Base class for exceptions in petrel_oss module.''' + + def __str__(self): + cls_name = type(self).__name__ + msg = super(Error, self).__str__() + return '{}({})'.format(cls_name, msg) + + +class RetriableError(Error): + pass + + +# Config Error + + +class ConfigError(Error): + pass + + +class InvalidConfigError(ConfigError): + pass + + +class ConfigFileNotFoundError(ConfigError): + pass + + +class ConfigItemNotFoundError(ConfigError): + pass + + +class ConfigKeyNotFoundError(ConfigItemNotFoundError): + pass + + +class ConfigSectionNotFoundError(ConfigItemNotFoundError): + pass + + +class ConfigKeyTypeError(ConfigError): + pass + + +class ConfigKeyValueError(ConfigError): + pass + + +# Client Error + + +class ClientError(Error): + pass + + +class ContentTypeError(ClientError): + pass + + +class S3ClientError(ClientError): + pass + + +class InvalidAccessKeyError(S3ClientError): + pass + + +class SignatureNotMatchError(S3ClientError): + pass
+ + +class NetworkConnectionError(S3ClientError): + pass + + +class ResourceNotFoundError(S3ClientError): + pass + + +class AccessDeniedError(ClientError): + pass + + +class ObjectNotFoundError(ClientError): + pass + + +class S3ObjectNotFoundError(ObjectNotFoundError): + pass + + +class NoSuchBucketError(S3ObjectNotFoundError): + pass + + +class NoSuchKeyError(S3ObjectNotFoundError): + pass + + +# Cache Error + + +class CacheError(ClientError): + pass + + +class McClientError(CacheError): + pass + + +class McObjectNotFoundError(ObjectNotFoundError, McClientError): + pass + + +class McTimeoutOccur(McClientError, RetriableError): + pass + + +class McConnFailed(McClientError, RetriableError): + pass + + +class McServerFailed(McClientError, RetriableError): + pass + + +class McServerDisable(McClientError): + pass + + +class McServerDead(McClientError): + pass + + +class McBadKeyProvided(McClientError): + pass + + +class McKeySizeExceed(McClientError): + pass + + +class McObjectSizeExceed(McClientError): + pass + + +# URI Error + + +class InvalidUriError(Error): + pass + + +class InvalidS3UriError(InvalidUriError): + pass + + +class InvalidBucketUriError(InvalidS3UriError): + pass + + +class InvalidDfsUriError(InvalidUriError): + pass + + +class InvalidMcUriError(InvalidUriError): + pass + + +class InvalidClusterNameError(InvalidUriError): + pass + + +class NoDefaultClusterNameError(InvalidUriError): + pass diff --git a/petrel_client/common/hash.py b/petrel_client/common/hash.py new file mode 100644 index 0000000000000000000000000000000000000000..485870e450bf2ee981998de67b18579458a53de2 --- /dev/null +++ b/petrel_client/common/hash.py @@ -0,0 +1,29 @@ +# -*- coding: utf-8 -*- +import hashlib + +from petrel_client.common.exception import ConfigKeyValueError + +_SUPPORTED_TYPES = ('blake2b', 'blake2s', 'md5', 'pbkdf2_hmac', 'sha1', 'sha224', 'sha256', + 'sha384', 'sha3_224', 'sha3_256', 'sha3_384', 'sha3_512', 'sha512', 'shake_128', 'shake_256') + + +def get_hash_fn(hash_type): + if hash_type in _SUPPORTED_TYPES: + return getattr(hashlib, hash_type) + else: + raise ConfigKeyValueError(f"'{hash_type}' is not a valid hash type.") + + +def to_bytes(key): + if isinstance(key, str): + key = key.encode('utf-8') + else: + assert isinstance(key, bytes) + return key + + +def hexdigest(key, hash_fn): + key = to_bytes(key) + m = hash_fn() + m.update(key) + return m.hexdigest() diff --git a/petrel_client/common/io_profile.py b/petrel_client/common/io_profile.py new file mode 100644 index 0000000000000000000000000000000000000000..5b75f5fddc781e9ee5345ebe0c6c9912afc583b5 --- /dev/null +++ b/petrel_client/common/io_profile.py @@ -0,0 +1,267 @@ +# -*- coding: utf-8 -*- + +import functools +import logging +import threading +import weakref +import environs +from time import time +from collections import defaultdict +import io + +from petrel_client.common import mem_trace +from petrel_client.common.exception import ObjectNotFoundError + +LOG = logging.getLogger(__name__) +ENV = environs.Env() + + +class StatItem(object): + __slots__ = ['op_name', 'total_io', 'total_hit', + 'total_time', 'total_error', 'total_miss', + 'error_count', 'total_byte' + ] + + def __init__(self, op_name): + self.op_name = op_name + self.reset() + + def reset(self): + self.total_io = 0 + self.total_hit = 0 + self.total_time = 0.0 + self.total_error = 0 + self.total_miss = 0 + self.total_byte = 0 + self.error_count = defaultdict(lambda: 0) + + @property + def time_avg(self): + return self.total_time / self.total_io if self.total_io else .0 + 
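`StatItem` (whose remaining properties and `stat_io` follow just below) accumulates per-operation counters and renders a one-line summary before resetting itself. A quick sketch of that output path, with invented numbers:

```python
from petrel_client.common.io_profile import StatItem

item = StatItem('get')
item.total_io, item.total_hit, item.total_miss = 100, 97, 3
item.total_time, item.total_byte = 2.5, 10 * 1024 * 1024
# prints something like "get [total: 100, hit: 97, ...]" and resets the counters
item.stat_io(callback=print)
```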
+ @property + def hit_ratio(self): + return 1.0 * self.total_hit / self.total_io if self.total_io else .0 + + @property + def speed(self): + return 1.0 * self.total_byte / self.total_time if self.total_time else .0 + + def stat_io(self, callback=None): + stat_info = f'{self.op_name} [total: {self.total_io}' \ + f', hit: {self.total_hit}' \ + f', miss: {self.total_miss}' \ + f', error: {self.total_error}' \ + f', time: {self.total_time:.6} s' \ + f', time_avg: {self.time_avg:.6} s' \ + f', hit ratio: {self.hit_ratio:.2%}' \ + f', bytes: {_sizeof_fmt(self.total_byte)}' \ + f', speed: {_sizeof_fmt(self.speed,suffix="B/s")}' \ + f']' + + if self.error_count: + items = ["{}: {}".format(k, v) + for (k, v) in self.error_count.items()] + stat_info = f'{stat_info}, error_count: [{", ".join(items)}]' + + if callback: + callback(stat_info) + else: + LOG.info(stat_info) + self.reset() + + +class StatItemDict(dict): + def __missing__(self, key): + item = self[key] = StatItem(key) + return item + + def stat_io(self, callback=None): + for item in self.values(): + item.stat_io(callback) + + +class ClientStat(object): + def __init__(self, client_id, name): + self.client_id = client_id + self.name = name + self.stat_item_dict = StatItemDict() + profiler = Profiler.get() + self.profiler = profiler + profiler.register(self) + + def __getitem__(self, op_name): + return self.stat_item_dict[op_name] + + # with multiprocessing-logging, calling __del__ at process exit is still problematic + # def __del__(self): + # # this may be triggered while Python is about to exit, when the file log no longer exists and an exception would be raised + # try: + # self.profiler.unregister(self) + # if self.total_io: + # self.stat_io() + # except Exception: + # pass + + @property + def total_io(self): + return sum([item.total_io for item in self.stat_item_dict.values()]) + + @property + def get_hit(self): + return sum([item.total_hit for item in self.stat_item_dict.values() if item.op_name == 'get']) + + def stat_io(self, callback=None): + stat_item_info_list = [] + + def cb(info): + stat_item_info_list.append(info) + + for stat_item in self.stat_item_dict.values(): + stat_item.stat_io(cb) + + if stat_item_info_list: + stat_item_info = ', '.join(stat_item_info_list) + else: + stat_item_info = 'No IO operations' + + stat_info = '{}: {}'.format(self.name, stat_item_info) + if callback: + callback(stat_info) + else: + LOG.info(stat_info) + + +def profile(op_name): + assert isinstance(op_name, str) + + def wrap(fn): + @functools.wraps(fn) + def new_fn(self, *args, **kwargs): + return _profile(op_name, fn, self, *args, **kwargs) + return new_fn + + return wrap + + +def _profile(op_name, fn, client, *args, **kwargs): + stat: StatItem = client.client_stat[op_name] + start = time() + try: + ret = fn(client, *args, **kwargs) + if isinstance(ret, (tuple, list)): + content = ret[0] + else: + content = ret + + if isinstance(content, bytes): + stat.total_byte += len(content) + elif isinstance(content, int): + stat.total_byte += content + elif hasattr(content, 'content_length'): + stat.total_byte += content.content_length + elif op_name == 'get' and content is None: + raise ObjectNotFoundError() + + stat.total_hit += 1 + return ret + + except ObjectNotFoundError: + stat.total_miss += 1 + raise + + except Exception as e: + stat.total_error += 1 + err_name = e.__class__.__name__ + stat.error_count[err_name] += 1 + raise + + finally: + end = time() + stat.total_time += (end - start) + stat.total_io += 1 + client.client_stat.profiler.inc_op_count() + + +class Profiler(object): + thread_local = threading.local() + default_conf = None + + @staticmethod +
def set_default_conf(conf): + Profiler.default_conf = conf + + @staticmethod + def get(): + profiler = getattr(Profiler.thread_local, 'profiler', None) + if not profiler: + profiler = Profiler(Profiler.default_conf) + setattr(Profiler.thread_local, 'profiler', profiler) + return profiler + + def __init__(self, conf, *args, **kwargs): + assert conf is not None + self.stat_dict = weakref.WeakValueDictionary() + self.op_count = 0 + self.count_disp = ENV.int( + 'count_disp', None) or conf.get_int('count_disp') + + self.enable_mem_trace = conf.get_boolean('enable_mem_trace') + if self.enable_mem_trace: + mem_trace.start() + + def register(self, client_stat: ClientStat): + client_id = client_stat.client_id + self.stat_dict[client_id] = client_stat + + def unregister(self, client_stat: ClientStat): + client_id = client_stat.client_id + del self.stat_dict[client_id] + + def inc_op_count(self): + self.op_count += 1 + if self.count_disp: + if self.op_count >= self.count_disp: + self.stat_io() + self.op_count = 0 + + @staticmethod + def set_count_disp(count_disp): + if count_disp < 0: + LOG.error('count_disp must be a nonnegative integer, actual value: %s', + count_disp) + return + + profiler = Profiler.get() + profiler.count_disp = count_disp + + def stat_io(self): + if LOG.isEnabledFor(logging.INFO): + io_dict = { + client_stat.name: client_stat.get_hit for client_stat in self.stat_dict.values()} + total_io = sum(io_dict.values()) or 1 + percentage = [f'{client_name}: {1.0 * count / total_io :.2%}' for client_name, + count in io_dict.items()] + + for client_stat in self.stat_dict.values(): + client_stat.stat_io() + LOG.info('IO Percentage: %s', ', '.join(percentage)) + if self.enable_mem_trace: + snapshot = mem_trace.take_snapshot() + buffer = io.StringIO() + snapshot.display_top(buffer=buffer) + LOG.info('Memory trace: \n%s', buffer.getvalue()) + + def enable(self): + raise NotImplementedError() + + def disable(self): + raise NotImplementedError() + + +def _sizeof_fmt(num, suffix='B'): + for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: + if abs(num) < 1024.0: + return "%3.1f%s%s" % (num, unit, suffix) + num /= 1024.0 + return "%.1f%s%s" % (num, 'Yi', suffix) diff --git a/petrel_client/common/io_retry.py b/petrel_client/common/io_retry.py new file mode 100644 index 0000000000000000000000000000000000000000..5a1302fd0cd90bf18ecc22998eabc57475046eb1 --- /dev/null +++ b/petrel_client/common/io_retry.py @@ -0,0 +1,37 @@ +# -*- coding: utf-8 -*- + +import functools +import logging +from humanize import ordinal + +LOG = logging.getLogger(__name__) + + +def retry(op_name, exceptions=Exception, raises=(), tries=1): + assert isinstance(op_name, str) + + def wrap(fn): + @functools.wraps(fn) + def new_fn(self, *args, **kwargs): + return _retry(op_name, exceptions, raises, tries, fn, self, *args, **kwargs) + return new_fn + + return wrap + + +def _retry(op_name, exceptions, raises, tries, fn, client, *args, **kwargs): + uri, retry_max = args[0], tries + for count in range(1, retry_max + 1): + try: + return fn(client, *args, **kwargs) + except raises: + raise + except exceptions as err: + if count < retry_max: + LOG.debug('Exception occurred in the %s retry of %s operation on (%s): %s', + ordinal(count), op_name, uri, err) + continue + if retry_max > 1: + LOG.error('%s operation (%s) has tried %s times and failed: %s', + op_name.capitalize(), uri, retry_max, err) + raise diff --git a/petrel_client/common/log.py b/petrel_client/common/log.py new file mode 100644 index 
0000000000000000000000000000000000000000..d539835bf05f5bcd0767812123760a105cf5041a --- /dev/null +++ b/petrel_client/common/log.py @@ -0,0 +1,88 @@ + +import threading +import logging +from logging.handlers import RotatingFileHandler +import coloredlogs +import os +import socket + +from petrel_client.version import version + +# Level Numeric value +# +# CRITICAL 50 +# ERROR 40 +# WARNING 30 +# INFO 20 +# DEBUG 10 +# NOTSET 0 + +# https://docs.python.org/2.7/library/logging.html#logrecord-attributes +BASE_FORMAT = '%(asctime)s %(levelname).3s [%(processName)-11s] [%(threadName)-10s] - %(message)s [P:%(process)d T:%(thread)d F:%(filename)s:%(lineno)d]' +base_formatter = logging.Formatter(BASE_FORMAT) + +log_config = {} +LOG = logging.getLogger('petrel_client') + +LOG.propagate = False +LOG.setLevel(logging.DEBUG) + +coloredlogs.install(level='DEBUG', logger=LOG, + milliseconds=True, fmt=BASE_FORMAT) +console_handler = LOG.handlers[0] + +lock = threading.RLock() +log_config = { + 'have_initiated': False +} + + +def get_log_file_name(): + slurm_procid = os.environ.get('SLURM_PROCID', None) + if slurm_procid is not None: + file_name = f'slurm_procid_{slurm_procid}.log' + else: + hostname = socket.gethostname() + pid = os.getpid() + file_name = f'{hostname}_pid_{pid}.log' + + return file_name + + +def init_log(conf): + with lock: + if log_config['have_initiated']: + LOG.debug('log initiated, skip') + return + else: + log_config['have_initiated'] = True + + log_file_path = conf.get('log_file_path', None) + if log_file_path: + if not os.path.exists(log_file_path): + # exist_ok = True : avoid FileExistsError when multiple + # processes are trying to create the same log_file_path + os.makedirs(log_file_path, exist_ok=True) + + file_log_level = conf.get_log_level('file_log_level') + file_log_max_bytes = conf.get_int('file_log_max_bytes') + file_log_backup_count = conf.get_int('file_log_backup_count') + + file_handler = RotatingFileHandler( + filename=os.path.join(log_file_path, get_log_file_name()), + maxBytes=file_log_max_bytes, + backupCount=file_log_backup_count) + file_handler.setLevel(file_log_level) + file_handler.setFormatter(base_formatter) + LOG.addHandler(file_handler) + + if conf.has_option('console_log_level'): + console_log_level = conf.get_log_level('console_log_level') + console_handler.setLevel(console_log_level) + + if log_file_path: + from multiprocessing_logging import install_mp_handler + # install_mp_handler should be invoked after log configuration + install_mp_handler(LOG) + + LOG.debug('init log, SDK version %s', version) diff --git a/petrel_client/common/mem_trace.py b/petrel_client/common/mem_trace.py new file mode 100644 index 0000000000000000000000000000000000000000..cb578c6c0923487f2f1df3435d6b7c91d532e08f --- /dev/null +++ b/petrel_client/common/mem_trace.py @@ -0,0 +1,60 @@ +import linecache +import tracemalloc + +from tracemalloc import Snapshot + + +def format_size(size): + return tracemalloc._format_size(size, False) + + +def display_top(snapshot: Snapshot, limit=10, buffer=None, key_type='lineno'): + if buffer: + def write(msg): + buffer.write(msg) + buffer.write('\n') + else: + def write(msg): + print(msg) + + stats = snapshot.statistics(key_type) + + for index, stat in enumerate(stats[:limit], 1): + frame = stat.traceback[0] + line = linecache.getline(frame.filename, frame.lineno).strip() + msg = f'#{index}:\t{frame.filename}:{frame.lineno}: {stat.count} blocks, {format_size(stat.size)}\n\t{line}' + write(msg) + + other = stats[limit:] + if other: + other_size = 
sum(stat.size for stat in other) + other_blocks = sum(stat.count for stat in other) + write( + f'Other:\t{len(other)} items, {other_blocks} blocks, {format_size(other_size)}') + + total_size = sum(stat.size for stat in stats) + total_blocks = sum(stat.count for stat in stats) + write( + f'Total:\t{len(stats)} items, {total_blocks} blocks, {format_size(total_size)}') + + +def start(): + tracemalloc.start() + + +def stop(): + tracemalloc.stop() + + +def take_snapshot(): + return tracemalloc.take_snapshot() + + +def filter_traces(snapshot, pattern): + return snapshot.filter_traces(( + tracemalloc.Filter(True, pattern), + )) + + +Snapshot.display_top = display_top +Snapshot.filter_traces = filter_traces diff --git a/petrel_client/common/uri_parser.py b/petrel_client/common/uri_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..f3022355c4de6b1537dbb5b72e29534952ed47da --- /dev/null +++ b/petrel_client/common/uri_parser.py @@ -0,0 +1,22 @@ +# -*- coding: utf-8 -*- + +import re +from petrel_client.common.exception import InvalidS3UriError +# (?:...) +# A non-capturing version of regular parentheses. Matches whatever regular expression is inside the parentheses, but the substring matched by the group cannot be retrieved after performing a match or referenced later in the pattern. + +# *?, +?, ?? +# The '*', '+', and '?' qualifiers are all greedy; they match as much text as possible. Sometimes this behaviour isn’t desired; if the RE <.*> is matched against '<a> b <c>', it will match the entire string, and not just '<a>'. Adding ? after the qualifier makes it perform the match in non-greedy or minimal fashion; as few characters as possible will be matched. Using the RE <.*?> will match only '<a>'. + +# re.I +# re.IGNORECASE +# Perform case-insensitive matching; expressions like [A-Z] will match lowercase letters, too. This is not affected by the current locale. To get this effect on non-ASCII Unicode characters such as ü and Ü, add the UNICODE flag.
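The pattern defined next accepts an optional cluster prefix before the `s3://` scheme and strips one trailing slash from the key. Illustrative expectations (bucket and key names are made up):

```python
from petrel_client.common.uri_parser import parse_s3_uri

parse_s3_uri('cluster1:s3://bucket1/a/b.jpg')  # ('cluster1', 'bucket1', 'a/b.jpg')
parse_s3_uri('s3://bucket1/a/b/')              # (None, 'bucket1', 'a/b')
parse_s3_uri('bucket1/a/b')                    # raises InvalidS3UriError
```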
+PATTERN = re.compile(r'^(?:([^:]+):)?s3://([^/]+)/(.+?)/?$', re.I) + + +def parse_s3_uri(uri): + m = PATTERN.match(uri) + if m: + return (m.group(1), m.group(2), m.group(3)) + else: + raise InvalidS3UriError(uri) diff --git a/petrel_client/dfs/__init__.py b/petrel_client/dfs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/petrel_client/dfs/dfs.py b/petrel_client/dfs/dfs.py new file mode 100644 index 0000000000000000000000000000000000000000..3ec2f794f34ff28157b532d3eb36a210b6bee6e6 --- /dev/null +++ b/petrel_client/dfs/dfs.py @@ -0,0 +1,54 @@ +import logging +import re +import socket + +from petrel_client.client_base import ClientBase +from petrel_client.common.io_profile import profile +from petrel_client.common import exception + +LOG = logging.getLogger(__name__) + + +class DFS(ClientBase): + + @staticmethod + def parse_uri(uri): + # todo check if it is a valid path + return re.sub('^file://', '/', uri) + + @staticmethod + def create(conf, *args, **kwargs): + fake = conf.get_boolean('fake') + if fake: + from petrel_client.fake_client import FakeClient + name = f'DFS: {socket.gethostname()}' + return FakeClient(client_type='dfs', conf=conf, name=name, **kwargs) + else: + return DFS(conf, *args, **kwargs) + + def __init__(self, conf, *args, **kwargs): + hostname = socket.gethostname() + + super(DFS, self).__init__(*args, name=hostname, conf=conf, **kwargs) + self._enable_cache = conf.get_boolean( + 'enable_mc', False) or conf.get_boolean('enable_cache', False) + + @profile('get') + def get(self, file_path, **kwargs): + try: + with open(file_path, 'rb') as f: + return f.read() + except FileNotFoundError as err: + raise exception.ObjectNotFoundError(err) + except Exception as err: + raise exception.ClientError(err) + + def put(self, file_path, content, **kwargs): + try: + with open(file_path, 'wb') as f: + return f.write(content) + except Exception as err: + raise exception.ClientError(err) + + def enable_cache(self): + return self._enable_cache diff --git a/petrel_client/fake_client.py b/petrel_client/fake_client.py new file mode 100644 index 0000000000000000000000000000000000000000..789686ca8725b6b46a1c7bd6def4475259759f65 --- /dev/null +++ b/petrel_client/fake_client.py @@ -0,0 +1,45 @@ +from petrel_client.client_base import ClientBase +from petrel_client.common.io_profile import profile + + +class FakeClient(ClientBase): + customized_get = None + customized_put = None + + def __init__(self, client_type, conf, **kwargs): + super(FakeClient, self).__init__(conf=conf, **kwargs) + self.conf = conf + self.type = client_type + self.__enable_cache = conf.get_boolean( + 'enable_mc', False) or conf.get_boolean('enable_cache', False) + + @profile('get') + def get(self, *args, **kwargs): + if self.customized_get: + return self.customized_get(*args, **kwargs) + else: + return b'data from FakeClient.' 
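With `fake = True` in the configuration, the factory methods (such as `DFS.create` above) return a `FakeClient` instead of a real backend, so IO paths can be exercised without touching storage; `get_with_info` and `put_with_info` below delegate to the same stubs. A sketch of the intended dry-run behavior (the config contents and URIs are illustrative):

```python
# petreloss.conf (illustrative):
#   [DEFAULT]
#   fake = True
from petrel_client.client import Client

client = Client('~/petreloss.conf')
client.get('s3://bucket1/any_key')              # returns b'data from FakeClient.'
client.put('s3://bucket1/any_key', b'payload')  # returns len(b'payload'), nothing is stored
```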
+ + def get_with_info(self, *args, **kwargs): + info = {} + data = self.get(*args, **kwargs) + return data, info + + @profile('put') + def put(self, *args, **kwargs): + if self.customized_put: + return self.customized_put(*args, **kwargs) + else: + if self.type == 's3': + body = args[3] + else: + body = args[1] + return len(body) + + def put_with_info(self, *args, **kwargs): + info = {} + result = self.put(*args, **kwargs) + return result, info + + def enable_cache(self): + return self.__enable_cache diff --git a/petrel_client/mixed_client.py b/petrel_client/mixed_client.py new file mode 100644 index 0000000000000000000000000000000000000000..233deccca85fbc24e27e20100e79f0456091337d --- /dev/null +++ b/petrel_client/mixed_client.py @@ -0,0 +1,268 @@ +# -*- coding: utf-8 -*- + +import logging +from os.path import expanduser, abspath + +from petrel_client.ceph.ceph import Ceph +from petrel_client.cache.cache import Cache +from petrel_client.dfs.dfs import DFS +from petrel_client.common.config import Config +from petrel_client.common.log import init_log +from petrel_client.common import exception +from petrel_client.common.io_profile import Profiler +from petrel_client.common.io_retry import retry + +if issubclass(str, bytes): + def str_to_bytes(s): + return s +else: + import builtins + + def str_to_bytes(s): + return builtins.bytes(s, 'utf-8') + +LOG = logging.getLogger(__name__) + + +class MixedClient(object): + def __init__(self, conf_path, **kwargs): + conf_path = abspath(expanduser(conf_path)) + config = Config(conf_path) + self._default_config = config.default() + + init_log(self._default_config) + + LOG.debug('init MixedClient, conf_path %s', conf_path) + Profiler.set_default_conf(self._default_config) + + if any(conf.get_boolean('enable_mc') for _, conf in config.items()): + cache_conf = config.get('cache', None) or config.get( + 'mc', None) or self._default_config + self._cache = Cache.create(cache_conf, **kwargs) + else: + self._cache = None + + self._ceph_dict = { + cluster: Ceph.create(cluster, conf) + for cluster, conf in config.items() if cluster.lower() not in ('dfs', 'cache', 'mc') + } + + dfs_conf = config.get('dfs', self._default_config) + self._dfs = DFS.create(dfs_conf) + + self._default_cluster = self._default_config.get( + 'default_cluster', None) + self._count_disp = self._default_config.get_int('count_disp') + self._get_retry_max = self._default_config.get_int('get_retry_max') + + def ceph_parse_uri(self, uri, content): + cluster, bucket, key, enable_cache = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + + def io_fn(**kwargs): + client = self._ceph_dict[cluster] + if content is not None: + return client.put_with_info(cluster, bucket, key, content, **kwargs) + else: + return client.get_with_info(cluster, bucket, key, **kwargs) + + return enable_cache, io_fn + + def cache_parse_uri(self, uri, content): + key = Cache.parse_uri(uri) + + def io_fn(**kwargs): + if content is not None: # todo add info + return self._cache.put(key, content), None + else: + return self._cache.get(key), None + return False, io_fn + + def dfs_parse_uri(self, uri, content): + file_path = DFS.parse_uri(uri) + + def io_fn(**kwargs): # todo add info + if content is not None: + return self._dfs.put(file_path, content, **kwargs), None + else: + return self._dfs.get(file_path, **kwargs), None + + return self._dfs.enable_cache(), io_fn + + def prepare_io_fn(self, uri, content=None): + try: + return self.ceph_parse_uri(uri, content) + except exception.InvalidS3UriError: + pass + + try: + 
return self.cache_parse_uri(uri, content) + except exception.InvalidMcUriError: + pass + + try: + return self.dfs_parse_uri(uri, content) + except exception.InvalidDfsUriError: + pass + + raise exception.InvalidUriError(uri) + + def _get_with_info(self, uri, **kwargs): # returns (data, info) + no_cache = kwargs.get('no_cache', False) + update_cache = kwargs.get('update_cache', False) + + if no_cache and update_cache: + raise ValueError( + 'arguments "update_cache" and "no_cache" conflict with each other') + + enable_cache, get_fn = self.prepare_io_fn(uri) + enable_cache = self._cache and enable_cache and (not no_cache) + cache_retry_times = 3 + cache_value = None + + if enable_cache and (not update_cache): + for _ in range(cache_retry_times): + cache_should_retry = False + try: + cache_value = self._cache.get(uri, **kwargs) + except exception.ObjectNotFoundError: + pass + except exception.CacheError as err: + self._cache.log.debug(err) + if isinstance(err, exception.RetriableError): + cache_should_retry = True + except Exception as err: + LOG.error(err) + finally: + if not cache_should_retry: + break + + if cache_value is not None: + return cache_value, None + + content, info = get_fn(**kwargs) + + if enable_cache: + for _ in range(cache_retry_times): + cache_should_retry = False + try: + self._cache.put(uri, content) # todo: if this raises, should the error be ignored? + except exception.CacheError as err: + self._cache.log.debug(err) + if isinstance(err, exception.RetriableError): + cache_should_retry = True + except Exception as err: + LOG.error(err) + finally: + if not cache_should_retry: + break + + return content, info + + # all exceptions are handled here + def get_with_info(self, uri, **kwargs): + @retry('get', exceptions=(Exception,), raises=(exception.ResourceNotFoundError, NotImplementedError), tries=self._get_retry_max) + def do_get_with_info(self, uri, **kwargs): + try: + return self._get_with_info(uri, **kwargs) + except exception.NoSuchBucketError as err: + LOG.warning(err) + except exception.ObjectNotFoundError as err: + LOG.debug(err) + except exception.AccessDeniedError as err: + LOG.warning((err, uri)) + + return None, None + + return do_get_with_info(self, uri, **kwargs) + + def create_bucket(self, uri, **kwargs): + cluster, bucket, key, _ = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + if key is not None: + raise exception.InvalidBucketUriError(uri) + return self._ceph_dict[cluster].create_bucket(bucket) + + def isdir(self, uri, **kwarg): + try: + cluster, bucket, key, enable_cache = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + client = self._ceph_dict[cluster] + isdir_fn = getattr(client, 'isdir') + return isdir_fn(bucket, key) + except exception.InvalidS3UriError: + LOG.error(f'Invalid S3 URI: {uri}') + raise + except AttributeError: + LOG.warning('please set boto = True to use this feature') + raise + + def list(self, uri, **kwarg): + try: + cluster, bucket, key, enable_cache = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + client = self._ceph_dict[cluster] + list_fn = getattr(client, 'list') + return list_fn(bucket, key, **kwarg) + except exception.InvalidS3UriError: + LOG.error(f'Invalid S3 URI: {uri}') + raise + except AttributeError: + LOG.warning('please set boto = True to use this feature') + raise + + def get_file_iterator(self, uri): + try: + cluster, bucket, key, enable_cache = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + client = self._ceph_dict[cluster] + file_iterator = getattr(client, 'get_file_iterator') + return
file_iterator(bucket, key) + except exception.InvalidS3UriError: + LOG.error('only support ceph') + raise + except AttributeError: + LOG.warning('please set boto = True to use this feature') + raise + + def put_with_info(self, uri, content, **kwargs): + if isinstance(content, str): + content = str_to_bytes(content) + + _enable_cache, put_fn = self.prepare_io_fn(uri, content) + + update_cache = self._cache and kwargs.get('update_cache', False) + + result, info = put_fn(**kwargs) + + if update_cache: + self._cache.put(uri, content) + + return result, info + + def contains(self, uri): + cluster, bucket, key, _ = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + client = self._ceph_dict[cluster] + return client.contains(cluster, bucket, key) + + def delete(self, uri): + cluster, bucket, key, _ = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + client = self._ceph_dict[cluster] + return client.delete(cluster, bucket, key) + + def generate_presigned_url(self, uri, client_method='get_object', expires_in=3600): + cluster, bucket, key, _ = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + client = self._ceph_dict[cluster] + return client.generate_presigned_url(cluster, bucket, key, client_method, expires_in) + + def generate_presigned_post(self, uri, fields=None, conditions=None, expires_in=3600): + cluster, bucket, key, _ = Ceph.parse_uri( + uri, self._ceph_dict, self._default_cluster) + client = self._ceph_dict[cluster] + return client.generate_presigned_post(cluster, bucket, key, fields, conditions, expires_in) + + def set_count_disp(self, count_disp): + Profiler.set_count_disp(count_disp) diff --git a/petrel_client/utils/__init__.py b/petrel_client/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/petrel_client/utils/data/__init__.py b/petrel_client/utils/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b77e9c1d281273cf8c7999e04f43dcc5506dfa1f --- /dev/null +++ b/petrel_client/utils/data/__init__.py @@ -0,0 +1 @@ +from .dataloader import DataLoader # noqa: F401 diff --git a/petrel_client/utils/data/_utils/__init__.py b/petrel_client/utils/data/_utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fc8819ca2ac8ed9e3a352b7d2e6e954b08ad272a --- /dev/null +++ b/petrel_client/utils/data/_utils/__init__.py @@ -0,0 +1 @@ +from . import worker # noqa: F401 diff --git a/petrel_client/utils/data/_utils/pin_memory.py b/petrel_client/utils/data/_utils/pin_memory.py new file mode 100644 index 0000000000000000000000000000000000000000..1647ac81f83b3d308298120176ee4865bb3c839e --- /dev/null +++ b/petrel_client/utils/data/_utils/pin_memory.py @@ -0,0 +1,54 @@ +from torch.utils.data._utils.pin_memory import ( + torch, queue, pin_memory, MP_STATUS_CHECK_INTERVAL, ExceptionWrapper) + +from .worker import _ResumeIteration + +from petrel_client.utils.profile import profileit, wrap_with_stat_qsize + + +def _pin_memory_loop(in_queue, out_queue, device_id, done_event): + # This setting is thread local, and prevents the copy in pin_memory from + # consuming all CPU cores. 
+ torch.set_num_threads(1) + + torch.cuda.set_device(device_id) + + in_queue_get = wrap_with_stat_qsize( + in_queue, in_queue.get, '_pin_memory_loop.in_queue.qsize:') + out_queue_put = wrap_with_stat_qsize( + out_queue, out_queue.put, '_pin_memory_loop.out_queue.qsize:') + + in_queue.get = in_queue_get + out_queue.put = out_queue_put + + cnt = 1 + brk = 0 + + def loop(): + try: + r = in_queue.get(timeout=MP_STATUS_CHECK_INTERVAL) + except queue.Empty: + return cnt + if not isinstance(r, _ResumeIteration): + idx, data = r + if not done_event.is_set() and not isinstance(data, ExceptionWrapper): + try: + data = pin_memory(data) + except Exception: + data = ExceptionWrapper( + where="in pin memory thread for device {}".format(device_id)) + r = (idx, data) + while not done_event.is_set(): + try: + out_queue.put(r, timeout=MP_STATUS_CHECK_INTERVAL) + break + except queue.Full: + continue + del r # save memory + + loop = profileit(loop, name='_pin_memory_loop.loop') + # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the + # logic of this function. + while not done_event.is_set(): + if loop() == brk: + break diff --git a/petrel_client/utils/data/_utils/worker.py b/petrel_client/utils/data/_utils/worker.py new file mode 100644 index 0000000000000000000000000000000000000000..34637d43d1060bfa6c85719d402d9e4687a441bc --- /dev/null +++ b/petrel_client/utils/data/_utils/worker.py @@ -0,0 +1,130 @@ +from petrel_client.utils.profile import profileit, wrap_with_stat_qsize, WORKER_LOOP_PROFILE_COUNT +from torch.utils.data._utils.worker import ( + torch, random, queue, ExceptionWrapper, ManagerWatchdog, WorkerInfo, + _IterableDatasetStopIteration, signal_handling, MP_STATUS_CHECK_INTERVAL) + + +class _ResumeIteration(object): + r"""Dummy class used to resume the fetching when worker reuse is enabled""" + pass + + +def _worker_loop(dataset_kind, dataset, index_queue, data_queue, done_event, + auto_collation, collate_fn, drop_last, seed, init_fn, worker_id, + num_workers, persistent_workers): + # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on the + # logic of this function. + index_queue_get = wrap_with_stat_qsize( + index_queue, index_queue.get, '_worker_loop.index_queue.qsize', count=WORKER_LOOP_PROFILE_COUNT) + data_queue_put = wrap_with_stat_qsize( + data_queue, data_queue.put, '_worker_loop.data_queue.qsize', count=WORKER_LOOP_PROFILE_COUNT) + index_queue.get = index_queue_get + data_queue.put = data_queue_put + + try: + # Initialize C side signal handlers for SIGBUS and SIGSEGV. Python signal + # module's handlers are executed after Python returns from C low-level + # handlers, likely when the same fatal signal had already happened + # again. 
+ # https://docs.python.org/3/library/signal.html#execution-of-python-signal-handlers + signal_handling._set_worker_signal_handlers() + + torch.set_num_threads(1) + random.seed(seed) + torch.manual_seed(seed) + + _worker_info = WorkerInfo(id=worker_id, num_workers=num_workers, + seed=seed, dataset=dataset) + + from torch.utils.data._utils import worker as pt_worker + pt_worker._worker_info = _worker_info + + from torch.utils.data import _DatasetKind + + init_exception = None + + try: + if init_fn is not None: + init_fn(worker_id) + + fetcher = _DatasetKind.create_fetcher( + dataset_kind, dataset, auto_collation, collate_fn, drop_last) + except Exception: + init_exception = ExceptionWrapper( + where="in DataLoader worker process {}".format(worker_id)) + + # When using Iterable mode, some worker can exit earlier than others due + # to the IterableDataset behaving differently for different workers. + # When such things happen, an `_IterableDatasetStopIteration` object is + # sent over to the main process with the ID of this worker, so that the + # main process won't send more tasks to this worker, and will send + # `None` to this worker to properly exit it. + # + # Note that we cannot set `done_event` from a worker as it is shared + # among all processes. Instead, we set the `iteration_end` flag to + # signify that the iterator is exhausted. When either `done_event` or + # `iteration_end` is set, we skip all processing step and just wait for + # `None`. + iteration_end = False + + watchdog = ManagerWatchdog() + cnt = 1 + brk = 0 + + def loop(): + nonlocal iteration_end, init_exception, fetcher + try: + r = index_queue.get(timeout=MP_STATUS_CHECK_INTERVAL) + except queue.Empty: + return cnt + if isinstance(r, _ResumeIteration): + # Acknowledge the main process + data_queue.put(r) + iteration_end = False + # Recreate the fetcher for worker-reuse policy + fetcher = _DatasetKind.create_fetcher( + dataset_kind, dataset, auto_collation, collate_fn, drop_last) + return cnt + elif r is None: + # Received the final signal + assert done_event.is_set() or iteration_end + return brk + elif done_event.is_set() or iteration_end: + # `done_event` is set. But I haven't received the final signal + # (None) yet. I will keep continuing until get it, and skip the + # processing steps. + return cnt + idx, index = r + if init_exception is not None: + data = init_exception + init_exception = None + else: + try: + data = fetcher.fetch(index) + except Exception as e: + if isinstance(e, StopIteration) and dataset_kind == _DatasetKind.Iterable: + data = _IterableDatasetStopIteration(worker_id) + # Set `iteration_end` + # (1) to save future `next(...)` calls, and + # (2) to avoid sending multiple `_IterableDatasetStopIteration`s. + iteration_end = True + else: + # It is important that we don't store exc_info in a variable. + # `ExceptionWrapper` does the correct thing. + # See NOTE [ Python Traceback Reference Cycle Problem ] + data = ExceptionWrapper( + where="in DataLoader worker process {}".format(worker_id)) + data_queue.put((idx, data)) + del data, idx, index, r # save memory + + loop = profileit(loop, name='_worker_loop.loop', + count=WORKER_LOOP_PROFILE_COUNT) + while watchdog.is_alive(): + if loop() == brk: + break + except KeyboardInterrupt: + # Main process will raise KeyboardInterrupt anyways. 
+ pass + if done_event.is_set(): + data_queue.cancel_join_thread() + data_queue.close() diff --git a/petrel_client/utils/data/dataloader.py b/petrel_client/utils/data/dataloader.py new file mode 100644 index 0000000000000000000000000000000000000000..920dd2c1b8fb1a776b942149f6354d608619cd4c --- /dev/null +++ b/petrel_client/utils/data/dataloader.py @@ -0,0 +1,624 @@ +from torch.utils.data.dataloader import ( + torch, python_multiprocessing, multiprocessing, ExceptionWrapper, threading, + itertools, queue, string_classes, _utils, _DatasetKind, _InfiniteConstantSampler, + IterableDataset, SequentialSampler, RandomSampler, BatchSampler) + +from ._utils.pin_memory import _pin_memory_loop +from ._utils.worker import _worker_loop, _ResumeIteration + +from petrel_client.utils.profile import profileit + + +class DataLoader(object): + + __initialized = False + + def __init__(self, dataset, batch_size=1, shuffle=False, sampler=None, + batch_sampler=None, num_workers=0, collate_fn=None, + pin_memory=False, drop_last=False, timeout=0, + worker_init_fn=None, multiprocessing_context=None, + *, prefetch_factor=2, persistent_workers=False): + torch._C._log_api_usage_once("python.data_loader") + + if num_workers < 0: + raise ValueError('num_workers option should be non-negative; ' + 'use num_workers=0 to disable multiprocessing.') + + if timeout < 0: + raise ValueError('timeout option should be non-negative') + + if num_workers == 0 and prefetch_factor != 2: + raise ValueError('prefetch_factor option could only be specified in multiprocessing.' + 'let num_workers > 0 to enable multiprocessing.') + assert prefetch_factor > 0 + + if persistent_workers and num_workers == 0: + raise ValueError('persistent_workers option needs num_workers > 0') + + self.dataset = dataset + self.num_workers = num_workers + self.prefetch_factor = prefetch_factor + self.pin_memory = pin_memory + self.timeout = timeout + self.worker_init_fn = worker_init_fn + self.multiprocessing_context = multiprocessing_context + + # Arg-check dataset related before checking samplers because we want to + # tell users that iterable-style datasets are incompatible with custom + # samplers first, so that they don't learn that this combo doesn't work + # after spending time fixing the custom sampler errors. + if isinstance(dataset, IterableDataset): + self._dataset_kind = _DatasetKind.Iterable + # NOTE [ Custom Samplers and `IterableDataset` ] + # + # `IterableDataset` does not support custom `batch_sampler` or + # `sampler` since the key is irrelevant (unless we support + # generator-style dataset one day...). + # + # For `sampler`, we always create a dummy sampler. This is an + # infinite sampler even when the dataset may have an implemented + # finite `__len__` because in multi-process data loading, naive + # settings will return duplicated data (which may be desired), and + # thus using a sampler with length matching that of dataset will + # cause data lost (you may have duplicates of the first couple + # batches, but never see anything afterwards). Therefore, + # `Iterabledataset` always uses an infinite sampler, an instance of + # `_InfiniteConstantSampler` defined above. + # + # A custom `batch_sampler` essentially only controls the batch size. + # However, it is unclear how useful it would be since an iterable-style + # dataset can handle that within itself. 
Moreover, it is pointless + # in multi-process data loading as the assignment order of batches + # to workers is an implementation detail so users can not control + # how to batchify each worker's iterable. Thus, we disable this + # option. If this turns out to be useful in future, we can re-enable + # this, and support custom samplers that specify the assignments to + # specific workers. + if shuffle is not False: + raise ValueError( + "DataLoader with IterableDataset: expected unspecified " + "shuffle option, but got shuffle={}".format(shuffle)) + elif sampler is not None: + # See NOTE [ Custom Samplers and IterableDataset ] + raise ValueError( + "DataLoader with IterableDataset: expected unspecified " + "sampler option, but got sampler={}".format(sampler)) + elif batch_sampler is not None: + # See NOTE [ Custom Samplers and IterableDataset ] + raise ValueError( + "DataLoader with IterableDataset: expected unspecified " + "batch_sampler option, but got batch_sampler={}".format(batch_sampler)) + else: + self._dataset_kind = _DatasetKind.Map + + if sampler is not None and shuffle: + raise ValueError('sampler option is mutually exclusive with ' + 'shuffle') + + if batch_sampler is not None: + # auto_collation with custom batch_sampler + if batch_size != 1 or shuffle or sampler is not None or drop_last: + raise ValueError('batch_sampler option is mutually exclusive ' + 'with batch_size, shuffle, sampler, and ' + 'drop_last') + batch_size = None + drop_last = False + elif batch_size is None: + # no auto_collation + if shuffle or drop_last: + raise ValueError('batch_size=None option disables auto-batching ' + 'and is mutually exclusive with ' + 'shuffle, and drop_last') + + if sampler is None: # give default samplers + if self._dataset_kind == _DatasetKind.Iterable: + # See NOTE [ Custom Samplers and IterableDataset ] + sampler = _InfiniteConstantSampler() + else: # map-style + if shuffle: + sampler = RandomSampler(dataset) + else: + sampler = SequentialSampler(dataset) + + if batch_size is not None and batch_sampler is None: + # auto_collation without custom batch_sampler + batch_sampler = BatchSampler(sampler, batch_size, drop_last) + + self.batch_size = batch_size + self.drop_last = drop_last + self.sampler = sampler + self.batch_sampler = batch_sampler + + if collate_fn is None: + if self._auto_collation: + collate_fn = _utils.collate.default_collate + else: + collate_fn = _utils.collate.default_convert + + self.collate_fn = collate_fn + self.persistent_workers = persistent_workers + + self.__initialized = True + self._iterator = None + + def _get_iterator(self): + if self.num_workers == 0: + return _SingleProcessDataLoaderIter(self) + else: + return _MultiProcessingDataLoaderIter(self) + + @property + def multiprocessing_context(self): + return self.__multiprocessing_context + + @multiprocessing_context.setter + def multiprocessing_context(self, multiprocessing_context): + if multiprocessing_context is not None: + if self.num_workers > 0: + if not multiprocessing._supports_context: + raise ValueError('multiprocessing_context relies on Python >= 3.4, with ' + 'support for different start methods') + + if isinstance(multiprocessing_context, string_classes): + valid_start_methods = multiprocessing.get_all_start_methods() + if multiprocessing_context not in valid_start_methods: + raise ValueError( + ('multiprocessing_context option ' + 'should specify a valid start method in {}, but got ' + 'multiprocessing_context={}').format(valid_start_methods, multiprocessing_context)) + 
multiprocessing_context = multiprocessing.get_context( + multiprocessing_context) + + if not isinstance(multiprocessing_context, python_multiprocessing.context.BaseContext): + raise ValueError(('multiprocessing_context option should be a valid context ' + 'object or a string specifying the start method, but got ' + 'multiprocessing_context={}').format(multiprocessing_context)) + else: + raise ValueError(('multiprocessing_context can only be used with ' + 'multi-process loading (num_workers > 0), but got ' + 'num_workers={}').format(self.num_workers)) + + self.__multiprocessing_context = multiprocessing_context + + def __setattr__(self, attr, val): + if self.__initialized and attr in ( + 'batch_size', 'batch_sampler', 'sampler', 'drop_last', 'dataset', 'persistent_workers'): + raise ValueError('{} attribute should not be set after {} is ' + 'initialized'.format(attr, self.__class__.__name__)) + + super(DataLoader, self).__setattr__(attr, val) + + def __iter__(self): + # When using a single worker the returned iterator should be + # created every time to avoid resetting its state + # However, in the case of a multiple workers iterator + # the iterator is only created once in the lifetime of the + # DataLoader object so that workers can be reused + if self.persistent_workers and self.num_workers > 0: + if self._iterator is None: + self._iterator = self._get_iterator() + else: + self._iterator._reset(self) + return self._iterator + else: + return self._get_iterator() + + @property + def _auto_collation(self): + return self.batch_sampler is not None + + @property + def _index_sampler(self): + # The actual sampler used for generating indices for `_DatasetFetcher` + # (see _utils/fetch.py) to read data at each time. This would be + # `.batch_sampler` if in auto-collation mode, and `.sampler` otherwise. + # We can't change `.sampler` and `.batch_sampler` attributes for BC + # reasons. + if self._auto_collation: + return self.batch_sampler + else: + return self.sampler + + def __len__(self): + # with iterable-style dataset, this will error + return len(self._index_sampler) + + +class _BaseDataLoaderIter(object): + def __init__(self, loader): + self._dataset = loader.dataset + self._dataset_kind = loader._dataset_kind + self._auto_collation = loader._auto_collation + self._drop_last = loader.drop_last + self._index_sampler = loader._index_sampler + self._num_workers = loader.num_workers + self._prefetch_factor = loader.prefetch_factor + self._pin_memory = loader.pin_memory and torch.cuda.is_available() + self._timeout = loader.timeout + self._collate_fn = loader.collate_fn + self._sampler_iter = iter(self._index_sampler) + self._base_seed = torch.empty((), dtype=torch.int64).random_().item() + self._persistent_workers = loader.persistent_workers + + def __iter__(self): + return self + + def _reset(self, loader, first_iter=False): + self._sampler_iter = iter(self._index_sampler) + + def _next_index(self): + return next(self._sampler_iter) # may raise StopIteration + + def __next__(self): + raise NotImplementedError + + def __len__(self): + return len(self._index_sampler) + + def __getstate__(self): + # TODO: add limited pickling support for sharing an iterator + # across multiple threads for HOGWILD.
+ # Probably the best way to do this is by moving the sample pushing + # to a separate thread and then just sharing the data queue + # but signalling the end is tricky without a non-blocking API + raise NotImplementedError( + "{} cannot be pickled", self.__class__.__name__) + + +class _SingleProcessDataLoaderIter(_BaseDataLoaderIter): + def __init__(self, loader): + super(_SingleProcessDataLoaderIter, self).__init__(loader) + assert self._timeout == 0 + assert self._num_workers == 0 + + self._dataset_fetcher = _DatasetKind.create_fetcher( + self._dataset_kind, self._dataset, self._auto_collation, self._collate_fn, self._drop_last) + + def __next__(self): + index = self._next_index() # may raise StopIteration + data = self._dataset_fetcher.fetch(index) # may raise StopIteration + if self._pin_memory: + data = _utils.pin_memory.pin_memory(data) + return data + + next = __next__ # Python 2 compatibility + + +class _MultiProcessingDataLoaderIter(_BaseDataLoaderIter): + + def __init__(self, loader): + super(_MultiProcessingDataLoaderIter, self).__init__(loader) + + assert self._num_workers > 0 + assert self._prefetch_factor > 0 + + if loader.multiprocessing_context is None: + multiprocessing_context = multiprocessing + else: + multiprocessing_context = loader.multiprocessing_context + + self._worker_init_fn = loader.worker_init_fn + self._worker_queue_idx_cycle = itertools.cycle( + range(self._num_workers)) + self._worker_result_queue = multiprocessing_context.Queue() + self._worker_pids_set = False + self._shutdown = False + self._workers_done_event = multiprocessing_context.Event() + + self._index_queues = [] + self._workers = [] + for i in range(self._num_workers): + index_queue = multiprocessing_context.Queue() + index_queue.cancel_join_thread() + w = multiprocessing_context.Process( + target=_worker_loop, + args=(self._dataset_kind, self._dataset, index_queue, + self._worker_result_queue, self._workers_done_event, + self._auto_collation, self._collate_fn, self._drop_last, + self._base_seed + i, self._worker_init_fn, i, self._num_workers, + self._persistent_workers)) + w.daemon = True + # NB: Process.start() actually take some time as it needs to + # start a process and pass the arguments over via a pipe. + # Therefore, we only add a worker to self._workers list after + # it started, so that we do not call .join() if program dies + # before it starts, and __del__ tries to join but will get: + # AssertionError: can only join a started process. + w.start() + self._index_queues.append(index_queue) + self._workers.append(w) + + if self._pin_memory: + self._pin_memory_thread_done_event = threading.Event() + self._data_queue = queue.Queue() + pin_memory_thread = threading.Thread( + target=_pin_memory_loop, + args=(self._worker_result_queue, self._data_queue, + torch.cuda.current_device(), + self._pin_memory_thread_done_event)) + pin_memory_thread.daemon = True + pin_memory_thread.start() + # Similar to workers (see comment above), we only register + # pin_memory_thread once it is started. 
+ self._pin_memory_thread = pin_memory_thread + else: + self._data_queue = self._worker_result_queue + + _utils.signal_handling._set_worker_pids( + id(self), tuple(w.pid for w in self._workers)) + _utils.signal_handling._set_SIGCHLD_handler() + self._worker_pids_set = True + self._reset(loader, first_iter=True) + + def _reset(self, loader, first_iter=False): + super()._reset(loader, first_iter) + self._send_idx = 0 # idx of the next task to be sent to workers + self._rcvd_idx = 0 # idx of the next task to be returned in __next__ + # information about data not yet yielded, i.e., tasks w/ indices in range [rcvd_idx, send_idx). + # map: task idx => - (worker_id,) if data isn't fetched (outstanding) + # \ (worker_id, data) if data is already fetched (out-of-order) + self._task_info = {} + # always equal to count(v for v in task_info.values() if len(v) == 1) + self._tasks_outstanding = 0 + # A list of booleans representing whether each worker still has work to + # do, i.e., not having exhausted its iterable dataset object. It always + # contains all `True`s if not using an iterable-style dataset + # (i.e., if kind != Iterable). + # Note that this indicates that a worker still has work to do *for this epoch*. + # It does not mean that a worker is dead. In case of `_persistent_workers`, + # the worker will be reset to available in the next epoch. + self._workers_status = [True for i in range(self._num_workers)] + # We resume the prefetching in case it was enabled + if not first_iter: + for idx in range(self._num_workers): + self._index_queues[idx].put(_ResumeIteration()) + resume_iteration_cnt = self._num_workers + while resume_iteration_cnt > 0: + data = self._get_data() + if isinstance(data, _ResumeIteration): + resume_iteration_cnt -= 1 + # prime the prefetch loop + for _ in range(self._prefetch_factor * self._num_workers): + self._try_put_index() + + def _try_get_data(self, timeout=_utils.MP_STATUS_CHECK_INTERVAL): + # Tries to fetch data from `self._data_queue` once for a given timeout. + # This can also be used as inner loop of fetching without timeout, with + # the sender status as the loop condition. + # + # This raises a `RuntimeError` if any worker died unexpectedly. This error + # can come from either the SIGCHLD handler in `_utils/signal_handling.py` + # (only for non-Windows platforms), or the manual check below on errors + # and timeouts. + # + # Returns a 2-tuple: + # (bool: whether successfully get data, any: data if successful else None) + try: + data = self._data_queue.get(timeout=timeout) + return (True, data) + except Exception as e: + # At timeout and error, we manually check whether any worker has + # failed. Note that this is the only mechanism for Windows to detect + # worker failures. + failed_workers = [] + for worker_id, w in enumerate(self._workers): + if self._workers_status[worker_id] and not w.is_alive(): + failed_workers.append(w) + self._mark_worker_as_unavailable(worker_id) + if len(failed_workers) > 0: + pids_str = ', '.join(str(w.pid) for w in failed_workers) + raise RuntimeError( + 'DataLoader worker (pid(s) {}) exited unexpectedly'.format(pids_str)) from e + if isinstance(e, queue.Empty): + return (False, None) + raise + + def _get_data(self): + # Fetches data from `self._data_queue`. + # + # We check workers' status every `MP_STATUS_CHECK_INTERVAL` seconds, + # which we achieve by running `self._try_get_data(timeout=MP_STATUS_CHECK_INTERVAL)` + # in a loop. This is the only mechanism to detect worker failures for + # Windows.
+        #
+        # If `pin_memory=True`, we also need to check whether `pin_memory_thread`
+        # has died at timeouts.
+        if self._timeout > 0:
+            success, data = self._try_get_data(self._timeout)
+            if success:
+                return data
+            else:
+                raise RuntimeError(
+                    'DataLoader timed out after {} seconds'.format(self._timeout))
+        elif self._pin_memory:
+            while self._pin_memory_thread.is_alive():
+                success, data = self._try_get_data()
+                if success:
+                    return data
+            else:
+                # while condition is false, i.e., pin_memory_thread died.
+                raise RuntimeError('Pin memory thread exited unexpectedly')
+            # In this case, `self._data_queue` is a `queue.Queue`. But we don't
+            # need to call `.task_done()` because we don't use `.join()`.
+        else:
+            while True:
+                success, data = self._try_get_data()
+                if success:
+                    return data
+
+    @profileit
+    def __next__(self):
+        while True:
+            # If the worker responsible for `self._rcvd_idx` has already ended
+            # and was unable to fulfill this task (due to exhausting an `IterableDataset`),
+            # we try to advance `self._rcvd_idx` to find the next valid index.
+            #
+            # This part needs to run in the loop because both the `self._get_data()`
+            # call and `_IterableDatasetStopIteration` check below can mark
+            # extra worker(s) as dead.
+            while self._rcvd_idx < self._send_idx:
+                info = self._task_info[self._rcvd_idx]
+                worker_id = info[0]
+                # has data or is still active
+                if len(info) == 2 or self._workers_status[worker_id]:
+                    break
+                del self._task_info[self._rcvd_idx]
+                self._rcvd_idx += 1
+            else:
+                # no valid `self._rcvd_idx` is found (i.e., didn't break)
+                if not self._persistent_workers:
+                    self._shutdown_workers()
+                raise StopIteration
+
+            # Now `self._rcvd_idx` is the batch index we want to fetch
+
+            # Check if the next sample has already been generated
+            if len(self._task_info[self._rcvd_idx]) == 2:
+                data = self._task_info.pop(self._rcvd_idx)[1]
+                return self._process_data(data)
+
+            assert not self._shutdown and self._tasks_outstanding > 0
+            idx, data = self._get_data()
+            self._tasks_outstanding -= 1
+
+            if self._dataset_kind == _DatasetKind.Iterable:
+                # Check for _IterableDatasetStopIteration
+                if isinstance(data, _utils.worker._IterableDatasetStopIteration):
+                    if self._persistent_workers:
+                        self._workers_status[data.worker_id] = False
+                    else:
+                        self._mark_worker_as_unavailable(data.worker_id)
+                    self._try_put_index()
+                    continue
+
+            if idx != self._rcvd_idx:
+                # store out-of-order samples
+                self._task_info[idx] += (data,)
+            else:
+                del self._task_info[idx]
+                return self._process_data(data)
+
+    next = __next__  # Python 2 compatibility
+
+    def _try_put_index(self):
+        assert self._tasks_outstanding < self._prefetch_factor * self._num_workers
+
+        try:
+            index = self._next_index()
+        except StopIteration:
+            return
+        for _ in range(self._num_workers):  # find the next active worker, if any
+            worker_queue_idx = next(self._worker_queue_idx_cycle)
+            if self._workers_status[worker_queue_idx]:
+                break
+        else:
+            # not found (i.e., didn't break)
+            return
+
+        self._index_queues[worker_queue_idx].put((self._send_idx, index))
+        self._task_info[self._send_idx] = (worker_queue_idx,)
+        self._tasks_outstanding += 1
+        self._send_idx += 1
+
+    def _process_data(self, data):
+        self._rcvd_idx += 1
+        self._try_put_index()
+        if isinstance(data, ExceptionWrapper):
+            data.reraise()
+        return data
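+
+    # Note: `_mark_worker_as_unavailable` below only *signals* a worker to stop
+    # (by putting a `None` sentinel on its index queue); the actual join is
+    # deferred to `_shutdown_workers`.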
+    def _mark_worker_as_unavailable(self, worker_id, shutdown=False):
+        # Mark a worker as having finished its work e.g., due to
+        # exhausting an `IterableDataset`. This should be used only when this
+        # `_MultiProcessingDataLoaderIter` is going to continue running.
+
+        assert self._workers_status[worker_id] or (
+            self._persistent_workers and shutdown)
+
+        # Signal termination to that specific worker.
+        q = self._index_queues[worker_id]
+        # Indicate that no more data will be put on this queue by the current
+        # process.
+        q.put(None)
+
+        # Note that we don't actually join the worker here, nor do we remove the
+        # worker's pid from C side struct because (1) joining may be slow, and
+        # (2) since we don't join, the worker may still raise error, and we
+        # prefer capturing those, rather than ignoring them, even though they
+        # are raised after the worker has finished its job.
+        # Joining is deferred to `_shutdown_workers`, which is called when
+        # all workers finish their jobs (e.g., `IterableDataset` replicas) or
+        # when this iterator is garbage collected.
+
+        self._workers_status[worker_id] = False
+
+        assert self._workers_done_event.is_set() == shutdown
+
+    def _shutdown_workers(self):
+        # Called when shutting down this `_MultiProcessingDataLoaderIter`.
+        # See NOTE [ Data Loader Multiprocessing Shutdown Logic ] for details on
+        # the logic of this function.
+        python_exit_status = _utils.python_exit_status
+        if python_exit_status is True or python_exit_status is None:
+            # See (2) of the note. If Python is shutting down, do no-op.
+            return
+        # Normal exit when last reference is gone / iterator is depleted.
+        # See (1) and the second half of the note.
+        if not self._shutdown:
+            self._shutdown = True
+            try:
+                # Exit `pin_memory_thread` first because exiting workers may leave
+                # corrupted data in `worker_result_queue` which `pin_memory_thread`
+                # reads from.
+                if hasattr(self, '_pin_memory_thread'):
+                    # Use hasattr in case error happens before we set the attribute.
+                    self._pin_memory_thread_done_event.set()
+                    # Send something to pin_memory_thread in case it is waiting
+                    # so that it can wake up and check `pin_memory_thread_done_event`
+                    self._worker_result_queue.put((None, None))
+                    self._pin_memory_thread.join()
+                    self._worker_result_queue.cancel_join_thread()
+                    self._worker_result_queue.close()
+
+                # Exit workers now.
+                self._workers_done_event.set()
+                for worker_id in range(len(self._workers)):
+                    # Get number of workers from `len(self._workers)` instead of
+                    # `self._num_workers` in case we error before starting all
+                    # workers.
+                    # If we are using workers_status with persistent_workers
+                    # we have to shut it down because the worker is paused
+                    if self._persistent_workers or self._workers_status[worker_id]:
+                        self._mark_worker_as_unavailable(
+                            worker_id, shutdown=True)
+                for w in self._workers:
+                    w.join()
+                for q in self._index_queues:
+                    q.cancel_join_thread()
+                    q.close()
+            finally:
+                # Even though all this function does is putting into queues that
+                # we have called `cancel_join_thread` on, weird things can
+                # happen when a worker is killed by a signal, e.g., hanging in
+                # `Event.set()`. So we need to guard this with SIGCHLD handler,
+                # and remove pids from the C side data structure only at the
+                # end.
+                #
+                # FIXME: Unfortunately, for Windows, we are missing a worker
+                #        error detection mechanism here in this function, as it
+                #        doesn't provide a SIGCHLD handler.
+                if self._worker_pids_set:
+                    _utils.signal_handling._remove_worker_pids(id(self))
+                    self._worker_pids_set = False
+
+                for w in self._workers:
+                    if w.is_alive():
+                        # Existing mechanisms try to make the workers exit
+                        # peacefully, but in case that we unfortunately reach
+                        # here, which we shouldn't, (e.g., pytorch/pytorch#39570),
+                        # we kill the worker.
+                        w.terminate()
+
+    def __del__(self):
+        self._shutdown_workers()
diff --git a/petrel_client/utils/profile/__init__.py b/petrel_client/utils/profile/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f0179fe2775390c294fbfb56625acd36a1a2bc9
--- /dev/null
+++ b/petrel_client/utils/profile/__init__.py
@@ -0,0 +1 @@
+from .profile import profileit, wrap_with_stat_qsize, WORKER_LOOP_PROFILE_COUNT  # noqa: F401
diff --git a/petrel_client/utils/profile/profile.py b/petrel_client/utils/profile/profile.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f7cd6c2a964ac7f89d49efc3a989f47f370ef55
--- /dev/null
+++ b/petrel_client/utils/profile/profile.py
@@ -0,0 +1,105 @@
+import cProfile
+import pstats
+import io
+import functools
+import os
+from collections import defaultdict
+from distutils.util import strtobool
+
+PETREL_PROFILE_ENV = os.getenv('PETREL_PROFILE', 'False')
+try:
+    ENABLE_PROFILE = strtobool(PETREL_PROFILE_ENV)
+except ValueError:
+    raise ValueError(
+        f'invalid value of environment variable PETREL_PROFILE: {PETREL_PROFILE_ENV}')
+
+PROFILE_COUNT_ENV = os.getenv('PETREL_PROFILE_COUNT', '1000')
+try:
+    PROFILE_COUNT = int(PROFILE_COUNT_ENV)
+except ValueError:
+    raise ValueError(
+        f'invalid value of environment variable PETREL_PROFILE_COUNT: {PROFILE_COUNT_ENV}')
+
+WORKER_LOOP_PROFILE_COUNT_ENV = os.getenv(
+    'PETREL_WORKER_LOOP_PROFILE_COUNT', '250')
+try:
+    WORKER_LOOP_PROFILE_COUNT = int(WORKER_LOOP_PROFILE_COUNT_ENV)
+except ValueError:
+    raise ValueError(
+        f'invalid value of environment variable PETREL_WORKER_LOOP_PROFILE_COUNT: {WORKER_LOOP_PROFILE_COUNT_ENV}')
+
+
+def print_stats(prof, name, sortby='cumulative'):
+    s = io.StringIO()
+    if name:
+        s.write(f'\nProfile of function {name}:\n')
+    s.write(f'pid: {os.getpid()}\n')
+    ps = pstats.Stats(prof, stream=s).sort_stats(sortby)
+    ps.print_stats()
+    print(s.getvalue())
+
+
+def profile_helper(func, name, count):
+    if not ENABLE_PROFILE:
+        return func
+
+    prof = cProfile.Profile()
+    call_count = 0
+
+    if not name:
+        try:
+            name = func.__name__
+        except AttributeError:
+            pass
+
+    @functools.wraps(func)
+    def wrapper(*args, **kwargs):
+        nonlocal prof
+        nonlocal call_count
+        try:
+            return prof.runcall(func, *args, **kwargs)
+        finally:
+            call_count += 1
+            if call_count == count:
+                print_stats(prof, name)
+                call_count = 0
+                prof = cProfile.Profile()
+
+    return wrapper
+
+
+def profileit(*args, name=None, count=PROFILE_COUNT):
+    if args:
+        assert len(args) == 1 and callable(args[0])
+        return profile_helper(args[0], name, count)
+    else:
+        return functools.partial(profileit, name=name, count=count)
+
+
+def wrap_with_stat_qsize(queue, cb, name, count=PROFILE_COUNT):
+    if not ENABLE_PROFILE:
+        return cb
+
+    cb_count = 0
+    qsize_dict = defaultdict(int)
+    qsize_list = []
+
+    @functools.wraps(cb)
+    def wrapper(*args, **kwargs):
+        nonlocal cb_count
+        cb_count += 1
+        qsize = queue.qsize()
+        qsize_dict[qsize] += 1
+        qsize_list.append(qsize)
+        try:
+            return cb(*args, **kwargs)
+        finally:
+            if cb_count == count:
+                print('pid', os.getpid(), name, qsize_dict, '\n', qsize_list)
+                cb_count = 0
+                qsize_dict.clear()
+                qsize_list.clear()
+
+    return wrapper
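+
+# Usage sketch (illustrative): with PETREL_PROFILE=True in the environment,
+# a decorated function prints cProfile statistics every `count` calls.
+# Both decorator forms are supported:
+#
+#     @profileit
+#     def fetch(key): ...
+#
+#     @profileit(name='fetch', count=100)
+#     def fetch(key): ...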
diff --git a/scripts/README.md b/scripts/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..0faa403d8bc663d6bc1cbbe8f8c351f099d2c2b1
--- /dev/null
+++ b/scripts/README.md
@@ -0,0 +1,2 @@
+get_serverlist.py: generates the server_list.conf configuration file; run it on the master node of a cluster.
+
diff --git a/scripts/get_serverlist.py b/scripts/get_serverlist.py
new file mode 100644
index 0000000000000000000000000000000000000000..056ab0b8271a868c7cf47ff1881c1ecdeca249ea
--- /dev/null
+++ b/scripts/get_serverlist.py
@@ -0,0 +1,27 @@
+import subprocess
+
+import numpy as np
+
+masters = ['10.10.11.22', '10.10.12.145', '10.10.20.21', '10.1.72.101']
+
+
+def parse(string):
+    result = []
+    for line in string.split('\n'):
+        fields = line.split('-')
+        ip = '.'.join(fields[2:])
+        if ip not in masters:
+            result.append(ip)
+    return result
+
+
+if __name__ == '__main__':
+    # `commands.getoutput` only exists in Python 2; use the Python 3 equivalent.
+    output = subprocess.getoutput(
+        "scontrol show node | grep NodeName | awk '{print$1}' | awk -F = '{print$2}'")
+    print('the set of machines in this cluster is:')
+    print(output)
+    serverlist = parse(output)
+    print('transfer into IP addresses:')
+    print(serverlist)
+    with open('server_list.conf', 'w') as wf:
+        np.savetxt(wf, serverlist, delimiter='\n', fmt='%s')
+    print('IP addresses have been written into server_list.conf')
diff --git a/scripts/server_list.conf b/scripts/server_list.conf
new file mode 100644
index 0000000000000000000000000000000000000000..5ecc7872cbefcdc6e1a8138d75061410a5750c38
--- /dev/null
+++ b/scripts/server_list.conf
@@ -0,0 +1,260 @@
+10.5.30.31
+10.5.30.32
+10.5.30.33
+10.5.30.34
+10.5.30.35
+10.5.30.36
+10.5.30.37
+10.5.30.38
+10.5.30.39
+10.5.30.40
+10.5.30.41
+10.5.30.42
+10.5.30.43
+10.5.30.44
+10.5.30.45
+10.5.30.46
+10.5.30.47
+10.5.30.48
+10.5.30.49
+10.5.30.50
+10.5.30.51
+10.5.30.52
+10.5.30.53
+10.5.30.54
+10.5.30.55
+10.5.30.56
+10.5.30.57
+10.5.30.58
+10.5.30.59
+10.5.30.60
+10.5.30.61
+10.5.30.62
+10.5.30.63
+10.5.30.64
+10.5.30.65
+10.5.30.66
+10.5.30.67
+10.5.30.68
+10.5.30.69
+10.5.30.70
+10.5.30.71
+10.5.30.72
+10.5.30.73
+10.5.30.74
+10.5.30.75
+10.5.30.76
+10.5.30.77
+10.5.30.78
+10.5.30.79
+10.5.30.80
+10.5.30.81
+10.5.30.82
+10.5.30.83
+10.5.30.84
+10.5.30.85
+10.5.30.86
+10.5.30.87
+10.5.30.88
+10.5.30.89
+10.5.30.90
+10.5.30.91
+10.5.30.92
+10.5.30.93
+10.5.30.94
+10.5.30.95
+10.5.30.96
+10.5.30.97
+10.5.30.98
+10.5.30.99
+10.5.30.100
+10.5.30.101
+10.5.30.102
+10.5.30.103
+10.5.30.104
+10.5.30.105
+10.5.30.106
+10.5.30.107
+10.5.30.108
+10.5.30.109
+10.5.30.110
+10.5.30.111
+10.5.30.112
+10.5.30.113
+10.5.30.114
+10.5.30.115
+10.5.30.116
+10.5.30.117
+10.5.30.118
+10.5.30.119
+10.5.30.120
+10.5.30.121
+10.5.30.122
+10.5.30.123
+10.5.30.124
+10.5.30.125
+10.5.30.126
+10.5.30.127
+10.5.30.128
+10.5.30.129
+10.5.30.130
+10.5.30.131
+10.5.30.132
+10.5.30.133
+10.5.30.134
+10.5.30.135
+10.5.30.136
+10.5.30.137
+10.5.30.138
+10.5.30.139
+10.5.30.140
+10.5.30.141
+10.5.30.142
+10.5.30.143
+10.5.30.144
+10.5.30.145
+10.5.30.146
+10.5.30.147
+10.5.30.148
+10.5.30.149
+10.5.30.150
+10.5.30.151
+10.5.30.152
+10.5.30.153
+10.5.30.154
+10.5.30.155
+10.5.30.156
+10.5.30.157
+10.5.30.158
+10.5.30.159
+10.5.30.160
+10.5.30.161
+10.5.30.162
+10.5.30.163
+10.5.30.164
+10.5.30.165
+10.5.30.166
+10.5.30.167
+10.5.30.168
+10.5.30.169
+10.5.30.170
+10.5.30.171
+10.5.30.172
+10.5.30.173
+10.5.30.174
+10.5.30.175
+10.5.30.176
+10.5.30.177
+10.5.30.178
+10.5.30.179
+10.5.30.180
+10.5.30.181
+10.5.30.182
+10.5.30.183
+10.5.30.184
+10.5.30.185
+10.5.30.186
+10.5.30.187 +10.5.30.188 +10.5.30.189 +10.5.30.190 +10.5.30.191 +10.5.30.192 +10.5.30.193 +10.5.30.194 +10.5.30.195 +10.5.30.196 +10.5.30.197 +10.5.30.198 +10.5.30.199 +10.5.30.200 +10.5.30.201 +10.5.30.202 +10.5.30.203 +10.5.30.204 +10.5.30.205 +10.5.30.206 +10.5.30.207 +10.5.30.208 +10.5.30.209 +10.5.30.210 +10.5.30.211 +10.5.30.212 +10.5.30.213 +10.5.30.214 +10.5.30.215 +10.5.30.216 +10.5.30.217 +10.5.30.218 +10.5.30.219 +10.5.30.220 +10.5.30.221 +10.5.30.222 +10.5.30.223 +10.5.30.224 +10.5.30.225 +10.5.30.226 +10.5.30.227 +10.5.30.228 +10.5.30.229 +10.5.31.1 +10.5.31.2 +10.5.31.3 +10.5.31.4 +10.5.31.5 +10.5.31.6 +10.5.31.7 +10.5.31.8 +10.5.31.9 +10.5.31.10 +10.5.31.11 +10.5.31.12 +10.5.31.13 +10.5.31.14 +10.5.31.15 +10.5.31.16 +10.5.31.17 +10.5.31.18 +10.5.31.19 +10.5.31.20 +10.5.31.21 +10.5.31.22 +10.5.31.23 +10.5.31.24 +10.5.31.25 +10.5.31.26 +10.5.31.27 +10.5.31.28 +10.5.31.29 +10.5.31.30 +10.5.31.31 +10.5.31.32 +10.5.31.33 +10.5.31.34 +10.5.31.35 +10.5.31.36 +10.5.31.37 +10.5.31.38 +10.5.31.39 +10.5.31.40 +10.5.31.41 +10.5.31.42 +10.5.31.43 +10.5.31.44 +10.5.31.45 +10.5.31.46 +10.5.31.47 +10.5.31.48 +10.5.31.49 +10.5.31.50 +10.5.31.51 +10.5.31.52 +10.5.31.53 +10.5.31.54 +10.5.31.55 +10.5.31.56 +10.5.31.84 +10.5.31.85 +10.5.31.86 +10.5.31.87 +10.5.31.88 diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000000000000000000000000000000000..2eff7368e77f4e95ce05bbe902a42cb6d27ea02e --- /dev/null +++ b/setup.py @@ -0,0 +1,37 @@ +import setuptools +import subprocess +import os +import shutil + +try: + git_describe = subprocess.check_output( + ['git', 'describe', '--tags', '--long']).decode('utf-8').strip() + git_branch = subprocess.check_output( + ['git', 'rev-parse', '--abbrev-ref', 'HEAD']).decode('utf-8').strip() + version = f'{git_describe}-{git_branch}' + + with open('petrel_client/version.py', 'w') as f: + f.write(f"version = '{version}'\n") + f.truncate() +except Exception: + from importlib.machinery import SourceFileLoader + version_module = SourceFileLoader( + 'version_module', 'petrel_client/version.py').load_module() + version = version_module.version + +dist_path = 'dist' +if os.path.exists(dist_path): + shutil.rmtree(dist_path) + +setuptools.setup( + name='petrel-oss-sdk', + version=version, + description='Ceph S3 storage API for Pytorch, Parrots', + url="http://gitlab.bj.sensetime.com/platform/StorageSystem/petrel-oss-python-sdk", + packages=setuptools.find_packages(), + package_data={'': ['**/*.so']}, + install_requires=['boto3', 'environs', 'coloredlogs', + 'humanize', 'multiprocessing-logging'], + python_requires='>=3.6', + zip_safe=False, +) diff --git a/tests/common_util.py b/tests/common_util.py new file mode 100644 index 0000000000000000000000000000000000000000..940733e0193ad8ba48b90799a1e1d48270d15815 --- /dev/null +++ b/tests/common_util.py @@ -0,0 +1,63 @@ +import sys +import argparse +import os +import unittest +import subprocess + +test_dir = os.path.dirname(os.path.realpath(__file__)) + +parser = argparse.ArgumentParser(add_help=False) +parser.add_argument('-s', '--save-result', nargs='?', type=str, default=None) +args, remaining = parser.parse_known_args() + +UNITTEST_ARGS = [sys.argv[0]] + remaining + + +def wait_for_process(p): + try: + return p.wait() + except KeyboardInterrupt: + # Give `p` a chance to handle KeyboardInterrupt. Without this, + # `pytest` can't print errors it collected so far upon KeyboardInterrupt. 
+        exit_status = None
+        try:
+            exit_status = p.wait(timeout=5)
+        except subprocess.TimeoutExpired:
+            # `p` did not exit within the grace period.
+            pass
+        if exit_status is not None:
+            return exit_status
+        else:
+            p.kill()
+            raise
+    except:  # noqa E722, copied from python core library
+        p.kill()
+        raise
+    finally:
+        # Always call p.wait() to ensure exit
+        p.wait()
+
+
+def shell(command, cwd=None, env=None):
+    sys.stdout.flush()
+    sys.stderr.flush()
+    # The following snippet is copied from the Py3 core library's
+    # subprocess.call, with only these changes:
+    # 1. `except KeyboardInterrupt` block added for SIGINT handling.
+    # 2. In Py2, subprocess.Popen doesn't return a context manager, so we do
+    #    `p.wait()` in a `finally` block for the code to be portable.
+    #
+    # https://github.com/python/cpython/blob/71b6c1af727fbe13525fb734568057d78cea33f3/Lib/subprocess.py#L309-L323
+    # assert not isinstance(command, torch._six.string_classes), "Command to shell should be a list or tuple of tokens"
+    p = subprocess.Popen(command, universal_newlines=True, cwd=cwd, env=env)
+    return wait_for_process(p)
+
+
+def run_test(argv=UNITTEST_ARGS):
+    if args.save_result is not None:
+        test_report_path = test_dir + "/" + args.save_result
+        with open(test_report_path, "a") as report_file:
+            runner = unittest.TextTestRunner(stream=report_file, verbosity=2)
+            unittest.main(argv=argv, testRunner=runner)
+    else:
+        runner = unittest.TextTestRunner(verbosity=2)
+        unittest.main(argv=argv, testRunner=runner)
+
+
+if __name__ == "__main__":
+    run_test()
diff --git a/tests/conf/petreloss.conf b/tests/conf/petreloss.conf
new file mode 100644
index 0000000000000000000000000000000000000000..d3734b87c2af0df16d13b831addf673a0d495f2a
--- /dev/null
+++ b/tests/conf/petreloss.conf
@@ -0,0 +1,87 @@
+# Comments start with '#' or ';', must occupy their own line,
+# and must not share a line with configuration content
+
+[DEFAULT]
+
+# Enable Memcached, default False
+# enable_mc = True
+
+# Memcached related settings, usually no need to change
+# mc_server_list_path = /mnt/lustre/share/memcached_client/server_list.conf
+# mc_client_config_path = /mnt/lustre/share/memcached_client/client.conf
+
+# Console log level, default WARNING; options are CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
+# To print IO statistics to the console, set the level to INFO
+# console_log_level = WARNING
+
+# File log level, default DEBUG; options are CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
+# file_log_level = DEBUG
+
+# Log file path, default none, i.e. no log file is written
+# log_file_path = /tmp/petrel.log
+
+# Maximum size of the log file, default 1GB
+# file_log_max_bytes = 1073741824
+
+# Number of log file backups, default 1
+# file_log_backup_count = 1
+
+# IO statistics are logged after every count_disp get operations. Default 5000
+# If the IO statistics are logged too frequently, increase this value
+# To disable IO statistics, set this value to 0
+# count_disp = 5000
+
+# Memory tracing, default off
+# enable_mem_trace = False
+
+# Number of retries allowed after a failed get operation, default 10
+# get_retry_max = 10
+
+# Default cluster: when a Ceph access does not specify a cluster,
+# data is read from default_cluster
+default_cluster = cluster1
+
+[mc]
+# If the accessed path is too long (more than 250 bytes), mc raises a
+# McKeySizeExceed error. Setting mc_key_cb transforms the path passed to mc;
+# available options are:
+# blake2b, blake2s, md5, pbkdf2_hmac, sha1, sha224,
+# sha256, sha384, sha3_224, sha3_256, sha3_384,
+# sha3_512, sha512, shake_128, shake_256
+
+# mc_key_cb = sha256
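+
+# Illustrative example: with mc_key_cb = sha256, a long key such as
+# cluster1:s3://bucket/some/very/long/object/path is replaced by its
+# 64-character SHA-256 hex digest before being passed to mc, which keeps
+# it safely under the 250-byte limit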
+
+
+# Whether to output mc debug logs, default True
+# Note that whether they finally reach the console or the log file is still
+# determined by console_log_level and file_log_level respectively
+# debug_mc = True
+
+
+[dfs]
+enable_mc = True
+
+# At least one cluster must be configured, otherwise ConfigSectionNotFoundError is raised
+[cluster1]
+# For each cluster, any setting not specified here falls back to [DEFAULT]
+# For example, setting 'enable_mc = False' here overrides the default
+enable_mc = True
+
+# Enable S3 boto, default True
+# boto = c++ enables the C++ implementation of S3
+boto = True
+
+# If access_key and secret_key are not set, data is accessed anonymously
+access_key = lili1
+secret_key = lili1
+
+host_base = http://127.0.0.1:7480
+
+# If boto = False, the following settings are also required
+# conf = conf/ceph.conf
+# keyring = conf/keyring
+# name = client.rgw.train
+# cluster = ceph
+
+[cluster2]
+
+access_key = lili1
+secret_key = lili1
+
+host_base = http://127.0.0.1:7480
\ No newline at end of file
diff --git a/tests/conf/test_empty.conf b/tests/conf/test_empty.conf
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/conf/test_petreloss.conf b/tests/conf/test_petreloss.conf
new file mode 100644
index 0000000000000000000000000000000000000000..43411fda698e69526d29ed49a503b7b0d1179486
--- /dev/null
+++ b/tests/conf/test_petreloss.conf
@@ -0,0 +1,86 @@
+# Comments start with '#' or ';', must occupy their own line,
+# and must not share a line with configuration content
+
+# Note: the lowercase section name below is intentional; test_config.py uses
+# this file as the "small case" configuration
+[default]
+
+# Enable Memcached, default False
+# enable_mc = True
+
+# Memcached related settings, usually no need to change
+# mc_server_list_path = /mnt/lustre/share/memcached_client/server_list.conf
+# mc_client_config_path = /mnt/lustre/share/memcached_client/client.conf
+
+# Console log level, default WARNING; options are CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
+# To print IO statistics to the console, set the level to INFO
+# console_log_level = WARNING
+
+# File log level, default DEBUG; options are CRITICAL, ERROR, WARNING, INFO, DEBUG, NOTSET
+# file_log_level = DEBUG
+
+# Log file path, default none, i.e. no log file is written
+# log_file_path = /tmp/petrel.log
+
+# Maximum size of the log file, default 1GB
+# file_log_max_bytes = 1073741824
+
+# Number of log file backups, default 1
+# file_log_backup_count = 1
+
+# IO statistics are logged after every count_disp get operations. Default 5000
+# If the IO statistics are logged too frequently, increase this value
+# To disable IO statistics, set this value to 0
+# count_disp = 5000
+
+# Memory tracing, default off
+# enable_mem_trace = False
+
+# Number of retries allowed after a failed get operation, default 10
+# get_retry_max = 10
+
+# Default cluster: when a Ceph access does not specify a cluster,
+# data is read from default_cluster
+default_cluster = cluster1
+
+[mc]
+# If the accessed path is too long (more than 250 bytes), mc raises a
+# McKeySizeExceed error. Setting mc_key_cb transforms the path passed to mc;
+# available options are:
+# blake2b, blake2s, md5, pbkdf2_hmac, sha1, sha224,
+# sha256, sha384, sha3_224, sha3_256, sha3_384,
+# sha3_512, sha512, shake_128, shake_256
+
+# mc_key_cb = sha256
+
+
+# Whether to output mc debug logs, default True
+# Note that whether they finally reach the console or the log file is still
+# determined by console_log_level and file_log_level respectively
+# debug_mc = True
+
+
+[dfs]
+enable_mc = True
+
+# At least one cluster must be configured, otherwise ConfigSectionNotFoundError is raised
+[cluster1]
+# For each cluster, any setting not specified here falls back to [DEFAULT]
+# For example, setting 'enable_mc = False' here overrides the default
+enable_mc = True
+
+# Enable S3 boto, default True
+# boto = c++ enables the C++ implementation of S3
+boto = True
+
+# If access_key and secret_key are not set, data is accessed anonymously
+access_key = lili1
+secret_key = lili1
+
+host_base = http://127.0.0.1:7480
+# If boto = False, the following settings are also required
+# conf = conf/ceph.conf
+# keyring = conf/keyring
+# name = client.rgw.train
+# cluster = ceph
+
+[cluster2]
+
+access_key = lili1
+secret_key = lili1
+
+host_base = http://127.0.0.1:7480
diff --git a/tests/dataloader_test.py b/tests/dataloader_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..46c88fc599ea6b89651ef57f5cef75b11f409053
--- /dev/null
+++ b/tests/dataloader_test.py
@@ -0,0 +1,116 @@
+import torch
+from torch.utils.data import get_worker_info
+from torch.utils.data import DataLoader
+
+import random
+import time
+
+from functools import partial
+
+from itertools import chain
+
+
+from petrel_client.utils.data import DataLoader as MyDataLoader
+MyDataLoader = partial(MyDataLoader, prefetch_factor=4, persistent_workers=True)
+
+
+def assert_equal(lhs, rhs):
+    if isinstance(lhs, dict):
+        assert lhs.keys() == rhs.keys()
+        for k in lhs.keys():
+            assert_equal(lhs[k], rhs[k])
+    elif isinstance(lhs, list):
+        assert len(lhs) == len(rhs)
+        for i in range(len(lhs)):
+            assert_equal(lhs[i], rhs[i])
+    elif isinstance(lhs, torch.Tensor):
+        assert torch.equal(lhs, rhs)
+    else:
+        assert False
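+
+
+# The test below drives each DataLoader through two epochs (via `chain`) and
+# checks that petrel's DataLoader yields exactly the same batches as the
+# stock torch DataLoader.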
+
+
+def wait(dt):
+    time.sleep(dt)
+
+
+class Dataset(list):
+    def __init__(self, *args, **kwargs):
+        super(Dataset, self).__init__(*args, **kwargs)
+        self._seed_inited = False
+
+    def __getitem__(self, *args, **kwargs):
+        worker_info = get_worker_info()
+        if not self._seed_inited:
+            if worker_info is None:
+                random.seed(0)
+            else:
+                random.seed(worker_info.id)
+            self._seed_inited = True
+        rand_int = random.randint(1, 4)
+        time_to_sleep = rand_int * 0.05
+        if worker_info is not None and worker_info.id == 0:
+            time_to_sleep *= 2
+        wait(time_to_sleep)
+        val = super(Dataset, self).__getitem__(*args, **kwargs)
+        return {'val': val}
+
+
+def test(dataloader, result):
+    print('\ntest')
+    random.seed(0)
+    data_time = 0
+    tstart = t1 = time.time()
+    for i, data in enumerate(chain(dataloader, dataloader), 1):
+        t2 = time.time()
+        d = t2 - t1
+        print('{0:>5}'.format(int((t2 - t1) * 1000)), end='')
+        if i % 10:
+            print('\t', end='')
+        else:
+            print('')
+
+        result.append(data)
+
+        data_time += d
+
+        rand_int = random.randrange(1, 4)
+        wait(0.05 * rand_int)
+
+        t1 = time.time()
+    tend = time.time()
+    print('\ntotal time: %.3f' % (tend - tstart))
+    print('total data time: %.3f' % data_time)
+    print(type(dataloader))
+
+
+def worker_init_fn(worker_id):
+    print('start worker:', worker_id)
+    wait(3)
+
+
+dataloader_args = {
+    'dataset': Dataset(range(1024)),
+    'drop_last': False,
+    'shuffle': False,
+    'batch_size': 32,
+    'num_workers': 8,
+    'worker_init_fn': worker_init_fn,
+}
+
+
+torch.manual_seed(0)
+l2 = MyDataLoader(**dataloader_args)
+r2 = []
+test(l2, r2)
+
+torch.manual_seed(0)
+l1 = DataLoader(**dataloader_args)
+r1 = []
+test(l1, r1)
+
+
+print('len l1:', len(l1))
+print('len l2:', len(l2))
+
+assert_equal(r1, r2)
+print(torch)
diff --git a/tests/etag_test.py b/tests/etag_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ad6c27a81e0654aaa5b67290923f9916e109018
--- /dev/null
+++ b/tests/etag_test.py
@@ -0,0 +1,18 @@
+from petrel_client.client import Client
+import hashlib
+
+
+client = Client('conf/s3config.ini')
+filename = 'cluster1:s3://my-bucket/object.111'
+content = b'a' * 1024
+print('put: key len:', len(filename), 'content len:', len(content))
+
+client.put(filename, content, update_cache=True)
+content, info = client.get_and_update(filename, enable_etag=True)
+
+print('size:', len(content))
+
+digest = hashlib.md5(content).hexdigest()
+print('digest:', digest)
+
+print('etag:', info['etag'])
diff --git a/tests/fake_client_test.py b/tests/fake_client_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..59e9202beacf71f45130c83eff649c6d22e709d4
--- /dev/null
+++ b/tests/fake_client_test.py
@@ -0,0 +1,51 @@
+# -*- coding: utf-8 -*-
+
+from random import random
+
+from petrel_client.client import Client
+from petrel_client.fake_client import FakeClient
+
+
+def customized_get(self, *args, **kwargs):
+    # type is one of s3, dfs, mc
+    if self.type == 's3':
+        # for s3, the arguments are cluster, bucket, key
+        cluster, bucket, key = args
+    else:
+        # for the remaining types, the arguments only contain key
+        cluster, bucket, key = 'unknown', 'unknown', args[0]
+
+    if self.type == 'mc' and random() < 0.5:
+        # simulate a cache miss
+        return None
+    else:
+        return b'x' * 1024
+
+
+def customized_put(self, *args, **kwargs):
+    if self.type == 's3':
+        # for s3, the arguments are cluster, bucket, key, body
+        cluster, bucket, key, body = args
+    else:
+        # for the remaining types, the arguments only contain key and body
+        cluster, bucket, (key, body) = 'unknown', 'unknown', args
+
+    return len(body)
+
+
+FakeClient.customized_get = customized_get
+FakeClient.customized_put = customized_put
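+
+# FakeClient lets a test replace customized_get/customized_put to simulate
+# backend behaviour (here, a roughly 50% Memcached miss rate) without
+# touching a real cluster.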
Client("~/fake_client.conf") + +urls = [ + 'cluster1:s3://my-bucket/object.1', # 从 cluster1 中读取 + 'cluster2:s3://my-bucket/object.2', # 从 cluster2 中读取 + 's3://my-bucket/object.3', # 若不指定 cluster,则从配置文件中指定的 default_cluster 中读取 + 'file://tmp/xxx', # 从 DFS 中读取 + '/tmp/xxx', # 若不包含 's3:' 或 'file:',从 DFS 中读取 +] + +for _ in range(1000): + for url in urls: + client.get(url) diff --git a/tests/file_iterator_test.py b/tests/file_iterator_test.py new file mode 100644 index 0000000000000000000000000000000000000000..9c90e1b0baf4ed3ee6d5751d2430f37b5d79205f --- /dev/null +++ b/tests/file_iterator_test.py @@ -0,0 +1,40 @@ +from petrel_client.client import Client + +c = Client(conf_path='../conf/petreloss.conf') + + +data = c.get('cluster1:s3://lili1.test2/sometest') +print (data) + + +c.put('s3://lili1.test2/sometest', 'sometest') + +files = c.get_file_iterator('s3://lili1.test2/test3') +files1 = c.get_file_iterator('s3://lili1.test2') +files2 = c.get_file_iterator('cluster1:s3://lili1.test2/') + +cluster = 'cluster1' + +for path, key in files: + k = '{0}:s3://{1}'.format(cluster, path) + print (k) + #c.get(k) + +print ('='*20) + +for path, key in files: + k = '{0}:s3://{1}'.format(cluster, path) + print (k) + #c.get(k) + +print ('='*20) + +for path, key in files: + k = '{0}:s3://{1}'.format(cluster, path) + print (k) + #c.get(k) +for path, key in files1: + print (path, key) + +for path, key in files2: + print (path, key) diff --git a/tests/io_profile_test.py b/tests/io_profile_test.py new file mode 100644 index 0000000000000000000000000000000000000000..ea8e1ede9affb227c57a4373ee560dc4299d5ecc --- /dev/null +++ b/tests/io_profile_test.py @@ -0,0 +1,63 @@ +import logging +import time +import random +from os.path import expanduser, abspath +from petrel_client.client_base import ClientBase +from petrel_client.common.io_profile import profile +from petrel_client.common.exception import ObjectNotFoundError +from petrel_client.common.config import Config +from petrel_client.common.log import init_log +from petrel_client.common.io_profile import Profiler + + +LOG = logging.getLogger(__name__) + + +class Client(ClientBase): + def __init__(self, conf_path, name, count_disp): + conf_path = abspath(expanduser(conf_path)) + config = Config(conf_path) + self._default_config = config.default() + + init_log(self._default_config) + LOG.info('init io_profile_test.Client, conf_path %s', conf_path) + + Profiler.set_default_conf(self._default_config) + super(Client, self).__init__(name=name, count_disp=count_disp) + + @profile('get') + def get(self, key): + + def not_found(): + raise ObjectNotFoundError(key) + + def error(): + raise Exception(key) + + def found(): + return 'content' + + action = random.choice([found, not_found, error]) + time.sleep(0.001) + return action() + + @profile('put') + def put(self, key, content): + def normal(): + return len(content) + + def error(): + raise Exception(key) + + action = random.choice([normal, error]) + return action() + + +c = Client(conf_path='~/petreloss.conf', name='cluster1', count_disp=50) + + +for _ in range(100): + try: + c.get('key') + except Exception: + pass diff --git a/tests/io_retry_test.py b/tests/io_retry_test.py new file mode 100644 index 0000000000000000000000000000000000000000..cf2d1c29d41e5b5d84a50ac2147aa2f296076ed7 --- /dev/null +++ b/tests/io_retry_test.py @@ -0,0 +1,105 @@ +import logging +import time +import random +from os.path import expanduser, abspath + +from petrel_client.client_base import ClientBase +from petrel_client.common.io_profile import 
diff --git a/tests/io_retry_test.py b/tests/io_retry_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf2d1c29d41e5b5d84a50ac2147aa2f296076ed7
--- /dev/null
+++ b/tests/io_retry_test.py
@@ -0,0 +1,105 @@
+import logging
+import time
+import random
+from os.path import expanduser, abspath
+
+from petrel_client.client_base import ClientBase
+from petrel_client.common.io_profile import profile
+from petrel_client.common.io_retry import retry
+from petrel_client.common import exception
+from petrel_client.common.exception import ObjectNotFoundError, ResourceNotFoundError
+from petrel_client.common.config import Config
+from petrel_client.common.log import init_log
+from petrel_client.common.io_profile import Profiler
+
+
+LOG = logging.getLogger(__name__)
+
+
+class Client(ClientBase):
+    def __init__(self, conf_path, name, count_disp):
+        conf_path = abspath(expanduser(conf_path))
+        config = Config(conf_path)
+        self._default_config = config.default()
+
+        init_log(self._default_config)
+        LOG.info('init io_retry_test.Client, conf_path %s', conf_path)
+
+        Profiler.set_default_conf(self._default_config)
+        super(Client, self).__init__(name=name, count_disp=count_disp)
+
+    @profile('get')
+    def get(self, key):
+
+        def not_found():
+            raise ObjectNotFoundError(key)
+
+        def resource_not_found():
+            raise ResourceNotFoundError()
+
+        def error():
+            raise Exception(key)
+
+        def found():
+            return 'content'
+
+        action = random.choice([found, not_found, resource_not_found, error])
+        time.sleep(0.001)
+        return action()
+
+    @profile('put')
+    def put(self, key, content):
+        def normal():
+            return len(content)
+
+        def error():
+            raise Exception(key)
+
+        action = random.choice([normal, error])
+        return action()
+
+
+class FakeMixedClient(object):
+    def __init__(self, client):
+        self.client = client
+
+    def get(self, uri, **kwargs):
+        @retry('get', exceptions=(Exception,), raises=(exception.ResourceNotFoundError,), tries=3)
+        def do_get(self, uri, **kwargs):
+            try:
+                return self.client.get(uri)
+            except exception.ObjectNotFoundError as err:
+                LOG.debug(err)
+                return None
+            except exception.ResourceNotFoundError:
+                raise
+            except Exception:
+                raise
+
+        return do_get(self, uri, **kwargs)
+
+    def put(self, uri, content, **kwargs):
+        @retry('put', exceptions=(Exception,), tries=3)
+        def do_put(self, uri, content, **kwargs):
+            try:
+                return self.client.put(uri, content)
+            except Exception:
+                raise
+
+        return do_put(self, uri, content, **kwargs)
+
+
+c = Client(conf_path='~/petreloss.conf', name='cluster1', count_disp=50)
+mc = FakeMixedClient(c)
+
+for _ in range(50):
+    try:
+        mc.get('key')
+    except Exception:
+        pass
+
+for _ in range(50):
+    try:
+        mc.put('key', '!@#$%' * 10)
+    except Exception:
+        pass
diff --git a/tests/multi_cluster_test.py b/tests/multi_cluster_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8d7fa7708c0ee5e550a1e1b4c60a401f4cd1b31
--- /dev/null
+++ b/tests/multi_cluster_test.py
@@ -0,0 +1,43 @@
+# -*- coding: utf-8 -*-
+
+from petrel_client.client import Client
+from multiprocessing import Process
+import logging
+import random
+
+LOG = logging.getLogger('petrel_client.test')
+
+
+def f(conf_path, repeat):
+    client = Client(conf_path)
+    total_bytes = 0
+
+    for itr in range(repeat):
+        urls = [
+            'cluster1:s3://my-bucket/object.1',  # read from cluster1
+            'cluster2:s3://my-bucket/object.2',  # read from cluster2
+            's3://my-bucket/object.3',  # no cluster specified: read from the conf file's default_cluster
+            'file://tmp/xxx',  # read from DFS
+            '/tmp/xxx',  # the path contains neither 's3:' nor 'file:': read from DFS
+        ]
+        url = random.choice(urls)
+        body = client.get(url)
+
+        if not body:
+            LOG.warning('can not get content from %s', url)
+        else:
+            total_bytes += len(body)
+
+    LOG.debug('total_bytes: %s', total_bytes)
+
+
+conf_path = '~/petreloss.conf'
+repeat = 5000
+parallelism = 4
+
+process_list = [Process(target=f, args=(conf_path, repeat))
+                for _ in range(parallelism)]
+
+
+[p.start() for p in process_list]
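+# wait for all reader processes to finish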
+[p.join() for p in process_list]
diff --git a/tests/multipart_test.py b/tests/multipart_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ad54a59892f01b0cf34ff480df23be3390f34b1
--- /dev/null
+++ b/tests/multipart_test.py
@@ -0,0 +1,17 @@
+# If the uploaded file is too large, it must be uploaded in parts.
+# Any argument passed to put that implements the read interface is uploaded
+# in parts; for example, the return values of open and io.BytesIO both
+# implement read.
+
+import io
+
+import torch
+
+from petrel_client.client import Client
+
+client = Client()  # reads ~/petreloss.conf by default
+url = 's3://bucket1/large_object'            # illustrative URL
+data = torch.tensor([0, 1, 2, 3])            # illustrative tensor
+large_bytes = b'x' * (100 * 1024 * 1024)     # illustrative large object
+
+# upload a large file
+with open("large_file", "rb") as f:
+    client.put(url, f)
+
+# upload a Tensor
+with io.BytesIO() as f:
+    torch.save(data, f)
+    f.seek(0)
+    client.put(url, f)
+
+# upload a large object
+with io.BytesIO(large_bytes) as f:
+    client.put(url, f)
diff --git a/tests/multipart_upload_test.py b/tests/multipart_upload_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..ba4d7168e8d1509239a1c0a49230a2dcb39a3f25
--- /dev/null
+++ b/tests/multipart_upload_test.py
@@ -0,0 +1,14 @@
+from petrel_client.client import Client
+
+url = 's3://mybucket/100M'
+client = Client()
+
+content, get_info = client.get_with_info(
+    url, enable_stream=True, enable_etag=True)
+print('etag', get_info['etag'])
+
+put_result, put_info = client.put_with_info(
+    url + '.put', content, enable_md5=True, enable_etag=True)
+print('put_result:', put_result)
+print('md5:', put_info['md5'])
+print('etag:', put_info['etag'])
diff --git a/tests/profile_test.py b/tests/profile_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..5cf79893965bef0357bd8028f0017811b6c046ed
--- /dev/null
+++ b/tests/profile_test.py
@@ -0,0 +1,14 @@
+import time
+
+from petrel_client.utils.profile.profile import profileit
+
+
+@profileit(name='xxx', count=500)
+def test():
+    time.sleep(0.001)
+    return 1
+
+
+results = [test() for _ in range(2000)]
+print('sum:', sum(results))
diff --git a/tests/run_test.py b/tests/run_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a5af91c51e939e79659508857f523f5edf53a21
--- /dev/null
+++ b/tests/run_test.py
@@ -0,0 +1,198 @@
+# coding: utf-8
+import os
+import sys
+import argparse
+import copy
+from datetime import datetime
+import common_util
+import signal
+
+test_dir = os.path.dirname(os.path.realpath(__file__))
+
+TESTS = ['test_config', 'test_read']
+
+# Tests that need to be run with pytest.
+USE_PYTEST_LIST = []
+
+CUSTOM_HANDLERS = {}
+
+
+def print_to_stderr(message):
+    print(message, file=sys.stderr)
+
+
+def parse_test_module(test):
+    return test.split('.')[0]
+
+
+class TestChoices(list):
+    def __init__(self, *args, **kwargs):
+        super(TestChoices, self).__init__(args[0])
+
+    def __contains__(self, item):
+        return list.__contains__(self, parse_test_module(item))
+
+
+def parse_args():
+    parser = argparse.ArgumentParser(
+        description='Run the Petrel unit test suite',
+        epilog='where TESTS is any of: {}'.format(', '.join(TESTS)))
+
+    parser.add_argument(
+        '-pt',
+        '--pytest',
+        action='store_true',
+        help='If true, use `pytest` to execute the tests. E.g., this runs '
+             'python run_test.py -pt')
+
+    parser.add_argument(
+        '-i',
+        '--include',
+        nargs='+',
+        choices=TestChoices(TESTS),
+        default=TESTS,
+        metavar='TESTS',
+        help='select a set of tests to include (defaults to ALL tests).'
+             ' Tests are specified with module name')
+
+    parser.add_argument('-x',
+                        '--exclude',
+                        nargs='+',
+                        choices=TESTS,
+                        metavar='TESTS',
+                        default=[],
+                        help='select a set of tests to exclude')
+
+    parser.add_argument(
+        '--continue-through-error',
+        action='store_true',
+        help='Runs the full test suite despite one of the tests failing')
+
+    parser.add_argument(
+        'additional_unittest_args',
+        nargs='*',
+        help='additional arguments passed through to unittest, e.g., '
+             'python run_test.py -i test_config -- -s test_report.log '
+             'to save the test report in test_report.log')
+
+    return parser.parse_args()
+
+
+def exclude_tests(exclude_list, selected_tests, exclude_message=None):
+    for exclude_test in exclude_list:
+        tests_copy = selected_tests[:]
+        for test in tests_copy:
+            if test.startswith(exclude_test):
+                if exclude_message is not None:
+                    print_to_stderr('Excluding {} {}'.format(
+                        test, exclude_message))
+                selected_tests.remove(test)
+    return selected_tests
+
+
+def get_selected_tests(options):
+    selected_tests = options.include
+    selected_tests = exclude_tests(options.exclude, selected_tests)
+    return selected_tests
+
+
+def get_executable_command(options, allow_pytest):
+    executable = [sys.executable]
+
+    if options.pytest:
+        if allow_pytest:
+            executable += ['-m', 'pytest']
+        else:
+            print_to_stderr(
+                'Pytest cannot be used for this test. Falling back to unittest.'
+            )
+    return executable
+
+
+def run_test(test_module,
+             test_directory,
+             options,
+             launcher_cmd=None,
+             extra_unittest_args=None):
+    unittest_args = options.additional_unittest_args.copy()
+
+    if extra_unittest_args:
+        assert isinstance(extra_unittest_args, list)
+        unittest_args.extend(extra_unittest_args)
+
+    # If using pytest, replace -f with the equivalent -x
+    if options.pytest:
+        unittest_args = [arg if arg != '-f' else '-x' for arg in unittest_args]
+
+    # Can't call `python -m unittest test_*` here because it doesn't run code
+    # in `if __name__ == '__main__': `. So call `python test_*.py` instead.
+    print(test_module)
+    argv = [test_module + '.py'] + unittest_args
+
+    # Extra arguments are not supported with pytest
+    executable = get_executable_command(options,
+                                        allow_pytest=not extra_unittest_args)
+
+    command = (launcher_cmd or []) + executable + argv
+    print_to_stderr('Executing {} ... [{}]'.format(command, datetime.now()))
+    return common_util.shell(command, test_directory)
+
+
+# https://stackoverflow.com/questions/2549939/get-signal-names-from-numbers-in-python
+SIGNALS_TO_NAMES_DICT = {
+    getattr(signal, n): n
+    for n in dir(signal) if n.startswith('SIG') and '_' not in n
+}
+
+
+def run_test_module(test, test_directory, options):
+    test_module = parse_test_module(test)
+
+    # Printing the date here can help diagnose which tests are slow
+    print_to_stderr('Running {} ... [{}]'.format(test, datetime.now()))
+    handler = CUSTOM_HANDLERS.get(test, run_test)
+    return_code = handler(test_module, test_directory, options)
+    assert isinstance(return_code, int) and not isinstance(
+        return_code, bool), 'Return code should be an integer'
+
+    if return_code == 0:
+        return None
+
+    message = '{} failed!'.format(test)
+    if return_code < 0:
+        # subprocess.Popen returns the child process' exit signal as
+        # return code -N, where N is the signal number.
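+        # e.g., a worker killed by SIGSEGV exits with return code -11,
+        # which maps to 'SIGSEGV' here.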
+        signal_name = SIGNALS_TO_NAMES_DICT[-return_code]
+        print(signal_name)
+        message += ' Received signal: ' + signal_name
+    return message
+
+
+def main():
+    options = parse_args()
+    test_directory = os.path.dirname(os.path.abspath(__file__))
+    selected_tests = get_selected_tests(options)
+
+    failure_messages = []
+    has_failed = False
+    for test in selected_tests:
+        options_clone = copy.deepcopy(options)
+        if test in USE_PYTEST_LIST:
+            options_clone.pytest = True
+        err_message = run_test_module(test, test_directory, options_clone)
+        if err_message is None:
+            continue
+        has_failed = True
+        failure_messages.append(err_message)
+        if not options_clone.continue_through_error:
+            raise RuntimeError(err_message)
+        print_to_stderr(err_message)
+
+    if options.continue_through_error and has_failed:
+        for err in failure_messages:
+            print_to_stderr(err)
+        sys.exit(1)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/tests/tensor_json_test.py b/tests/tensor_json_test.py
new file mode 100644
index 0000000000000000000000000000000000000000..7eda31bbc54730ff4ec51e6f2b95a64b4b5e4ffc
--- /dev/null
+++ b/tests/tensor_json_test.py
@@ -0,0 +1,32 @@
+import torch
+import io
+import json
+from petrel_client.client import Client
+
+client = Client('./conf/petreloss.conf')
+
+# Pytorch save & load
+
+data = torch.tensor([0, 1, 2, 3])
+tensor_url = 's3://bucket1/tensor_data'
+
+with io.BytesIO() as f:
+    torch.save(data, f)
+    client.put(tensor_url, f.getvalue())
+
+with io.BytesIO(client.get(tensor_url)) as f:
+    data2 = torch.load(f)
+
+assert torch.equal(data, data2)
+
+
+# Json dumps & loads
+
+data = [0, 1, 2, 3]
+json_data_url = 's3://bucket1/json_data'
+
+client.put(json_data_url, json.dumps(data).encode('utf-8'))
+
+data2 = json.loads(client.get(json_data_url))
+
+assert data == data2
diff --git a/tests/test_config.py b/tests/test_config.py
new file mode 100644
index 0000000000000000000000000000000000000000..b113c2a965d573bc4a1146ba7bc8b97711b7285e
--- /dev/null
+++ b/tests/test_config.py
@@ -0,0 +1,144 @@
+import logging
+import os
+from petrel_client.common.config import Config, CONFIG_DEFAULT, Section, _value_to_str
+from petrel_client.common import exception
+
+import unittest
+import common_util
+from unittest import mock
+from petrel_client.client import Client
+test_dir = os.path.dirname(os.path.realpath(__file__))
+
+
+class TestSection(unittest.TestCase):
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_value_to_str(self):
+        self.assertEqual(_value_to_str(100), "100")
+        self.assertEqual(_value_to_str(False), "False")
+        expect = {'a': '2', 'b': '2', 'c': '3', 'd': '4'}
+        data = dict(a=2, b=2, c=3, d=4)
+        self.assertEqual(expect, _value_to_str(data))
+
+    def test_init(self):
+        section = Section(CONFIG_DEFAULT)
+        self.assertEqual(section._conf, CONFIG_DEFAULT)
+        self.assertTrue(isinstance(section, Section))
+
+    def test_key(self):
+        section = Section(CONFIG_DEFAULT)
+        self.assertEqual(section['enable_mc'], 'False')
+
+    def test_ConfigKeyNotFoundError(self):
+        section = Section(CONFIG_DEFAULT)
+        with self.assertRaises(exception.ConfigKeyNotFoundError):
+            _ = section['empty']
+
+    def test_update(self):
+        section = Section(CONFIG_DEFAULT)
+        to_update = dict(enable_mc='True', file_log_backup_count=3)
+        section.update(to_update)
+        self.assertEqual(section['enable_mc'], 'True')
+        self.assertEqual(section['file_log_backup_count'], '3')
+
+    # def testGetitem(self):
+    #     expected = CONFIG_DEFAULT
+
+    def test_get(self):
+        section = Section(CONFIG_DEFAULT)
+        self.assertEqual(section.get('enable_mc'), 'False')
+        with self.assertRaises(exception.ConfigItemNotFoundError):
+            _ = section.get('enable_mc1')
+        self.assertIsNone(section.get('enable_mc1', default=None))
+
+    def test_has_option(self):
+        section = Section(CONFIG_DEFAULT)
+        self.assertTrue(section.has_option('enable_mc'))
+        self.assertFalse(section.has_option('enable_mc1'))
+
+    def test_get_boolean(self):
+        section = Section(CONFIG_DEFAULT)
+        self.assertFalse(section.get_boolean('enable_mc'))
+        with self.assertRaises(exception.ConfigKeyTypeError):
+            _ = section.get_boolean('endpoint_url')
+
+    def test_get_int(self):
+        section = Section(CONFIG_DEFAULT)
+        self.assertEqual(section.get_int('file_log_backup_count'), 1)
+        with self.assertRaises(exception.ConfigKeyTypeError):
+            _ = section.get_int('enable_mc')
+
+    def test_get_log_level(self):
+        section = Section(CONFIG_DEFAULT)
+        self.assertEqual(section.get_log_level('file_log_level'),
+                         logging.DEBUG)
+        with self.assertRaises(exception.ConfigKeyTypeError):
+            _ = section.get_log_level('enable_mc')
+
+
+class TestConfig(unittest.TestCase):
+    def setUp(self):
+        pass
+
+    def tearDown(self):
+        pass
+
+    def test_init(self):
+        with self.assertRaises(exception.ConfigFileNotFoundError):
+            conf_path = test_dir + '/tests/conf/petreloss.conf1'
+            self.config = Config(conf_path)
+
+        with self.assertRaises(exception.ConfigSectionNotFoundError):
+            conf_path = test_dir + '/conf/test_empty.conf'
+            self.config = Config(conf_path)
+
+        expect_section = Section(CONFIG_DEFAULT)
+        to_update = dict(default_cluster='cluster1')
+        expect_section.update(to_update)
+
+        conf_path = test_dir + '/conf/petreloss.conf'
+        config = Config(conf_path)
+        default_section = config.default()
+        self.assertTrue(default_section._conf == expect_section._conf)
+
+        small_case_conf_path = test_dir + '/conf/test_petreloss.conf'
+        small_case_config = Config(small_case_conf_path)
+        small_case_default_section = small_case_config.default()
+        self.assertTrue(
+            small_case_default_section._conf == expect_section._conf)
+
+    def test_get(self):
+        conf_path = test_dir + '/conf/petreloss.conf'
+        config = Config(conf_path)
+        cluster1_section = config['cluster1']
+        self.assertTrue(cluster1_section.get_boolean("enable_mc"))
+        self.assertEqual(cluster1_section.get("access_key"), 'lili1')
+
+        small_case_conf_path = test_dir + '/conf/test_petreloss.conf'
+        small_case_config = Config(small_case_conf_path)
+        cluster1_section = small_case_config['cluster1']
+
+        self.assertEqual(cluster1_section.get("default_cluster"), 'cluster1')
+
+        with self.assertRaises(exception.ConfigSectionNotFoundError):
+            config["noncluster1"]
+
+    def test_update(self):
+        conf_path = test_dir + '/conf/petreloss.conf'
+        config = Config(conf_path)
+        to_update = dict(cluster1=dict(default_cluster='cluster3'))
+        config.update(to_update)
+        self.assertEqual(config["cluster1"].get("default_cluster"), 'cluster3')
+
+    def test_items(self):
+        conf_path = test_dir + '/conf/petreloss.conf'
+        config = Config(conf_path)
+        sections = config.items()
+        self.assertEqual(len(sections), 4)
+
+
+if __name__ == '__main__':
+    common_util.run_test()
diff --git a/tests/test_read.py b/tests/test_read.py
new file mode 100644
index 0000000000000000000000000000000000000000..2c7f8f6f2cd45f17f78f80e0207e7a54dc874b7b
--- /dev/null
+++ b/tests/test_read.py
@@ -0,0 +1,58 @@
+import os
+import unittest
+import common_util
+from unittest import mock
+from petrel_client.client import Client
+test_dir = os.path.dirname(os.path.realpath(__file__))
+
+
+class TestRead(unittest.TestCase):
+    def setUp(self):
+        # Create a Mock object used to replace the
+        # petrel_client.ceph.s3.s3_client.S3Client.get_with_info function
+        self._mock_get_with_info = mock.Mock()
+        self._mock_get_with_info.return_value = "23", {}
+        # Patch petrel_client.ceph.s3.s3_client.S3Client.get_with_info
+        self._patcher = mock.patch(
+            'petrel_client.ceph.s3.s3_client.S3Client.get_with_info',
+            self._mock_get_with_info)
+        self._patcher.start()
+
+    def tearDown(self):
+        self._patcher.stop()
+
+    # 1. Inject the mock in setUp to replace
+    #    petrel_client.ceph.s3.s3_client.S3Client.get_with_info,
+    #    and stop the mock in tearDown
+    def test_read1(self):
+        _conf_path = test_dir + '/conf/petreloss.conf'
+        c = Client(conf_path=_conf_path)
+        data = c.get('cluster1:s3://lili1.test2/sometest')
+        self.assertEqual("23", data)
+
+    # 2. Use @mock.patch to specify the method to replace; by default the
+    #    mocks are passed in the order of the decorator list
+    @mock.patch("petrel_client.ceph.s3.s3_client.S3Client.get_with_info")
+    def test_read2(self, mock_get_with_info):
+        mock_get_with_info.return_value = "15", {}
+        _conf_path = test_dir + '/conf/petreloss.conf'
+        c = Client(conf_path=_conf_path)
+        data = c.get('cluster1:s3://lili1.test2/sometest')
+        self.assertEqual("15", data)
+
+    # 3. Use `with mock.patch` to replace the target module only within the block
+    def test_read3(self):
+        mock_get_with_info = mock.Mock()
+        mock_get_with_info.return_value = "15", {}
+        _conf_path = test_dir + '/conf/petreloss.conf'
+        c = Client(conf_path=_conf_path)
+        with mock.patch(
+                'petrel_client.ceph.s3.s3_client.S3Client.get_with_info',
+                mock_get_with_info):
+            data = c.get('cluster1:s3://lili1.test2/sometest')
+            self.assertEqual("15", data)
+
+
+if __name__ == '__main__':
+    common_util.run_test()
\ No newline at end of file