# cloudops_tsf/cloudops_tsf.py
# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass, field
from typing import Iterator, Optional

import datasets
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from gluonts.dataset.field_names import FieldName

_CITATION = """\
@article{woo2023pushing,
title={Pushing the Limits of Pre-training for Time Series Forecasting in the CloudOps Domain},
author={Woo, Gerald and Liu, Chenghao and Kumar, Akshat and Sahoo, Doyen},
journal={arXiv preprint arXiv:2310.05063},
year={2023}
}
"""
_CONFIGS = {
"azure_vm_traces_2017": {
"optional_fields": (
FieldName.FEAT_STATIC_CAT,
FieldName.FEAT_STATIC_REAL,
FieldName.PAST_FEAT_DYNAMIC_REAL,
),
"prediction_length": 48,
"freq": "5T",
"stride": 48,
"univariate": True,
"multivariate": False,
"rolling_evaluations": 12,
"test_split_date": pd.Period(
year=2016, month=12, day=13, hour=15, minute=55, freq="5T"
),
"_feat_static_cat_cardinalities": {
"train_test": (
("vm_id", 17568),
("subscription_id", 2713),
("deployment_id", 3255),
("vm_category", 3),
),
"pretrain": (
("vm_id", 177040),
("subscription_id", 5514),
("deployment_id", 15208),
("vm_category", 3),
),
},
"target_dim": 1,
"feat_static_real_dim": 3,
"past_feat_dynamic_real_dim": 2,
},
"borg_cluster_data_2011": {
"optional_fields": (
FieldName.FEAT_STATIC_CAT,
FieldName.PAST_FEAT_DYNAMIC_REAL,
),
"prediction_length": 48,
"freq": "5T",
"stride": 48,
"univariate": False,
"multivariate": True,
"rolling_evaluations": 12,
"test_split_date": pd.Period(
year=2011, month=5, day=28, hour=18, minute=55, freq="5T"
),
"_feat_static_cat_cardinalities": {
"train_test": (
("job_id", 850),
("task_id", 11117),
("user", 282),
("scheduling_class", 4),
("logical_job_name", 718),
),
"pretrain": (
("job_id", 6072),
("task_id", 154503),
("user", 518),
("scheduling_class", 4),
("logical_job_name", 3899),
),
},
"target_dim": 2,
"past_feat_dynamic_real_dim": 5,
},
"alibaba_cluster_trace_2018": {
"optional_fields": (
FieldName.FEAT_STATIC_CAT,
FieldName.PAST_FEAT_DYNAMIC_REAL,
),
"prediction_length": 48,
"freq": "5T",
"stride": 48,
"univariate": False,
"multivariate": True,
"rolling_evaluations": 12,
"test_split_date": pd.Period(
year=2018, month=1, day=8, hour=11, minute=55, freq="5T"
),
"_feat_static_cat_cardinalities": {
"train_test": (
("container_id", 6048),
("app_du", 1292),
),
"pretrain": (
("container_id", 64457),
("app_du", 9484),
),
},
"target_dim": 2,
"past_feat_dynamic_real_dim": 6,
},
}
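
# A quick sanity check on the shared settings above: every config forecasts
# prediction_length = 48 steps at freq = "5T", i.e. 48 * 5 min = 4 hours per
# window, and rolling_evaluations = 12 windows at stride = 48 cover
# 12 * 4 h = 48 h (2 days) of data after test_split_date.
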
PRETRAIN = datasets.splits.NamedSplit("pretrain")
TRAIN_TEST = datasets.splits.NamedSplit("train_test")

Cardinalities = tuple[tuple[str, int], ...]


@dataclass
class CloudOpsTSFConfig(datasets.BuilderConfig):
"""BuilderConfig for CloudOpsTSF."""
# builder kwargs
    prediction_length: Optional[int] = None
    freq: Optional[str] = None
    stride: Optional[int] = None
    univariate: Optional[bool] = None
    multivariate: Optional[bool] = None
    optional_fields: Optional[tuple[str, ...]] = None
    rolling_evaluations: Optional[int] = None
    test_split_date: Optional[pd.Period] = None
_feat_static_cat_cardinalities: dict[str, Cardinalities] = field(
default_factory=dict
)
    target_dim: int = 1
    feat_static_real_dim: int = 0
    past_feat_dynamic_real_dim: int = 0

    def feat_static_cat_cardinalities(
        self, split: str = "train_test"
    ) -> Optional[list[int]]:
        """Cardinalities of the static categorical features for `split`,
        e.g. [17568, 2713, 3255, 3] for "azure_vm_traces_2017"/"train_test";
        None if the config has no static categorical features."""
        if FieldName.FEAT_STATIC_CAT not in self.optional_fields:
            return None
        return [c[1] for c in self._feat_static_cat_cardinalities[split]]


class CloudOpsTSF(datasets.ArrowBasedBuilder):
VERSION = datasets.Version("1.0.0")
BUILDER_CONFIGS = []
for dataset, config in _CONFIGS.items():
BUILDER_CONFIGS.append(
CloudOpsTSFConfig(name=dataset, version=VERSION, description="", **config)
        )

def _info(self) -> datasets.DatasetInfo:
def sequence_feature(dtype: str, univar: bool) -> datasets.Sequence:
if univar:
return datasets.Sequence(datasets.Value(dtype))
return datasets.Sequence(datasets.Sequence(datasets.Value(dtype)))
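
        # For example, sequence_feature("float32", univar=True) describes a
        # flat 1-D float sequence (univariate target or a static vector),
        # while univar=False yields a nested 2-D layout for multivariate
        # targets and dynamic features.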
features = {
FieldName.START: datasets.Value("timestamp[s]"),
FieldName.TARGET: sequence_feature("float32", self.config.univariate),
FieldName.ITEM_ID: datasets.Value("string"),
}
CAT_FEATS = (
FieldName.FEAT_STATIC_CAT,
FieldName.FEAT_DYNAMIC_CAT,
FieldName.PAST_FEAT_DYNAMIC,
)
REAL_FEATS = (
FieldName.FEAT_STATIC_REAL,
FieldName.FEAT_DYNAMIC_REAL,
FieldName.PAST_FEAT_DYNAMIC_REAL,
)
STATIC_FEATS = (FieldName.FEAT_STATIC_CAT, FieldName.FEAT_STATIC_REAL)
DYNAMIC_FEATS = (
FieldName.FEAT_DYNAMIC_CAT,
FieldName.FEAT_DYNAMIC_REAL,
FieldName.PAST_FEAT_DYNAMIC,
FieldName.PAST_FEAT_DYNAMIC_REAL,
)
for ts_field in self.config.optional_fields:
# Determine field dtype
if ts_field in CAT_FEATS:
dtype = "int32"
elif ts_field in REAL_FEATS:
dtype = "float32"
else:
raise ValueError(f"Invalid field: {ts_field}")
# Determine field shape
if ts_field in STATIC_FEATS:
univar = True
elif ts_field in DYNAMIC_FEATS:
univar = False
else:
raise ValueError(f"Invalid field: {ts_field}")
features[ts_field] = sequence_feature(dtype, univar)
features = datasets.Features(features)
return datasets.DatasetInfo(
features=features,
citation=_CITATION,
)
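
    # For "azure_vm_traces_2017" (univariate=True), _info() produces, e.g.:
    #   target:                 Sequence(Value("float32"))
    #   feat_static_cat:        Sequence(Value("int32"))
    #   feat_static_real:       Sequence(Value("float32"))
    #   past_feat_dynamic_real: Sequence(Sequence(Value("float32")))
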
def _split_generators(self, dl_manager) -> list[datasets.SplitGenerator]:
downloaded_files = dl_manager.download_and_extract(
[
f"{self.config.name}/train_test.zip",
f"{self.config.name}/pretrain.zip",
]
)
generators = [
datasets.SplitGenerator(
name=TRAIN_TEST,
gen_kwargs={"filepath": downloaded_files[0]},
),
datasets.SplitGenerator(
name=PRETRAIN,
gen_kwargs={"filepath": downloaded_files[1]},
),
]
return generators
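
    # Note: the relative paths above are resolved by dl_manager against the
    # hosting dataset repository, and downloaded_files preserves request
    # order, so index 0 is the train_test archive and index 1 is pretrain.
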
    def _generate_tables(
        self, filepath: str
    ) -> Iterator[tuple[list[str], pa.Table]]:
        table = pq.read_table(filepath)
        for batch in table.to_batches():
            # Key each yielded table by the item_ids of the rows it contains.
            yield batch[FieldName.ITEM_ID].to_pylist(), pa.Table.from_arrays(
                batch.columns, schema=batch.schema
            )
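

# Usage sketch, assuming this script is hosted in a `datasets`-compatible
# repository (the repo id below is illustrative, not guaranteed):
#
#     from datasets import load_dataset
#
#     ds = load_dataset("Salesforce/cloudops_tsf", "azure_vm_traces_2017")
#     print(ds)                        # DatasetDict: "train_test" / "pretrain"
#     print(ds["train_test"][0].keys())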