"""AIOpsTSF: HuggingFace `datasets` builder for the CloudOps time series
forecasting datasets (Azure VM traces 2017, Borg cluster data 2011, Alibaba
cluster trace 2018) from Woo et al. (2023), arXiv:2310.05063."""

from dataclasses import dataclass, field
from functools import cached_property
from typing import Iterator, Optional

import datasets
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from gluonts.dataset.field_names import FieldName


_CITATION = """\
@article{woo2023pushing,
    title={Pushing the Limits of Pre-training for Time Series Forecasting in the CloudOps Domain},
    author={Woo, Gerald and Liu, Chenghao and Kumar, Akshat and Sahoo, Doyen},
    journal={arXiv preprint arXiv:2310.05063},
    year={2023}
}
"""

# Relative paths to the parquet data, one file (partitioned on `split`) per
# dataset config; `_split_generators` appends the `split=...` path segment.
_URLS = {
    "azure_vm_traces_2017": "azure_vm_traces_2017.parquet",
    "borg_cluster_data_2011": "borg_cluster_data_2011.parquet",
    "alibaba_cluster_trace_2018": "alibaba_cluster_trace_2018.parquet",
}
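# e.g. the azure_vm_traces_2017 train/test split is read from
# "azure_vm_traces_2017.parquet/split=train_test".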


# Per-dataset configuration: the forecasting setup (horizon, frequency,
# rolling evaluation) plus the dimensions and cardinalities of the extra
# feature fields.
_CONFIGS = {
    "azure_vm_traces_2017": {
        "optional_fields": (
            FieldName.FEAT_STATIC_CAT,
            FieldName.FEAT_STATIC_REAL,
            FieldName.PAST_FEAT_DYNAMIC_REAL,
        ),
        "prediction_length": 48,
        "freq": "5T",
        "stride": 48,
        "univariate": True,
        "multivariate": False,
        "rolling_evaluations": 12,
        "test_split_date": pd.Period(
            year=2016, month=12, day=13, hour=15, minute=55, freq="5T"
        ),
        "_feat_static_cat_cardinalities": {
            "train_test": (
                ("vm_id", 17568),
                ("subscription_id", 2713),
                ("deployment_id", 3255),
                ("vm_category", 3),
            ),
            "pretrain": (
                ("vm_id", 177040),
                ("subscription_id", 5514),
                ("deployment_id", 15208),
                ("vm_category", 3),
            ),
        },
        "target_dim": 1,
        "feat_static_real_dim": 3,
        "past_feat_dynamic_real_dim": 2,
    },
    "borg_cluster_data_2011": {
        "optional_fields": (
            FieldName.FEAT_STATIC_CAT,
            FieldName.PAST_FEAT_DYNAMIC_REAL,
        ),
        "prediction_length": 48,
        "freq": "5T",
        "stride": 48,
        "univariate": False,
        "multivariate": True,
        "rolling_evaluations": 12,
        "test_split_date": pd.Period(
            year=2011, month=5, day=28, hour=18, minute=55, freq="5T"
        ),
        "_feat_static_cat_cardinalities": {
            "train_test": (
                ("job_id", 850),
                ("task_id", 11117),
                ("user", 282),
                ("scheduling_class", 4),
                ("logical_job_name", 718),
            ),
            "pretrain": (
                ("job_id", 6072),
                ("task_id", 154503),
                ("user", 518),
                ("scheduling_class", 4),
                ("logical_job_name", 3899),
            ),
        },
        "target_dim": 2,
        "past_feat_dynamic_real_dim": 5,
    },
    "alibaba_cluster_trace_2018": {
        "optional_fields": (
            FieldName.FEAT_STATIC_CAT,
            FieldName.PAST_FEAT_DYNAMIC_REAL,
        ),
        "prediction_length": 48,
        "freq": "5T",
        "stride": 48,
        "univariate": False,
        "multivariate": True,
        "rolling_evaluations": 12,
        "test_split_date": pd.Period(
            year=2018, month=1, day=8, hour=11, minute=55, freq="5T"
        ),
        "_feat_static_cat_cardinalities": {
            "train_test": (
                ("container_id", 6048),
                ("app_du", 1292),
            ),
            "pretrain": (
                ("container_id", 64457),
                ("app_du", 9484),
            ),
        },
        "target_dim": 2,
        "past_feat_dynamic_real_dim": 6,
    },
}
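
# Evaluation geometry implied above: with prediction_length=48, stride=48 and
# rolling_evaluations=12, the test region after `test_split_date` spans 12
# non-overlapping 48-step windows, i.e. 576 steps = 2 days at 5-minute freq.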


PRETRAIN = datasets.splits.NamedSplit("pretrain")
TRAIN_TEST = datasets.splits.NamedSplit("train_test")

# (feature name, cardinality) pairs, e.g. (("vm_id", 17568), ("vm_category", 3)).
Cardinalities = tuple[tuple[str, int], ...]


@dataclass
class AIOpsTSFConfig(datasets.BuilderConfig):
    """BuilderConfig for AIOpsTSF."""

    # Select which partition of the parquet data to load; at least one of
    # these should be True.
    train_test: bool = field(default=True, init=False)
    pretrain: bool = field(default=False, init=False)
    _include_metadata: tuple[str, ...] = field(default_factory=tuple, init=False)

    # Filled in from the matching _CONFIGS entry.
    prediction_length: int = field(default=None)
    freq: str = field(default=None)
    stride: int = field(default=None)
    univariate: bool = field(default=None)
    multivariate: bool = field(default=None)
    optional_fields: tuple[str, ...] = field(default=None)
    rolling_evaluations: int = field(default=None)
    test_split_date: pd.Period = field(default=None)
    _feat_static_cat_cardinalities: dict[str, Cardinalities] = field(
        default_factory=dict
    )
    target_dim: int = field(default=1)
    feat_static_real_dim: int = field(default=0)
    past_feat_dynamic_real_dim: int = field(default=0)

    # Metadata fields that can be appended to every example via
    # `include_metadata`.
    METADATA = [
        "freq",
        "prediction_length",
        "stride",
        "rolling_evaluations",
    ]

    @property
    def include_metadata(self) -> tuple[str, ...]:
        return self._include_metadata

    @include_metadata.setter
    def include_metadata(self, value: tuple[str, ...]):
        assert all(v in self.METADATA for v in value), (
            f"Metadata: {value} is not supported, each item should be one of"
            f" {self.METADATA}"
        )
        self._include_metadata = value

    @cached_property
    def feat_static_cat_cardinalities(self) -> Optional[list[int]]:
        if FieldName.FEAT_STATIC_CAT not in self.optional_fields:
            return None

        if self.pretrain:
            split = "pretrain"
        elif self.train_test:
            split = "train_test"
        else:
            raise ValueError(
                "At least one of `train_test` and `pretrain` should be True"
            )
        return [c[1] for c in self._feat_static_cat_cardinalities[split]]
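
# Illustrative sketch (mirrors how BUILDER_CONFIGS below instantiates configs):
#
#   cfg = AIOpsTSFConfig(name="azure_vm_traces_2017", **_CONFIGS["azure_vm_traces_2017"])
#   cfg.feat_static_cat_cardinalities  # -> [17568, 2713, 3255, 3] (train_test)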


class AIOpsTSF(datasets.ArrowBasedBuilder):
    VERSION = datasets.Version("1.0.0")

    # One BuilderConfig per dataset in _CONFIGS. (A plain loop is used here:
    # class-level names such as VERSION are not visible inside a list
    # comprehension's scope.)
    BUILDER_CONFIGS = []
    for dataset, config in _CONFIGS.items():
        BUILDER_CONFIGS.append(
            AIOpsTSFConfig(name=dataset, version=VERSION, description="", **config)
        )

    def _info(self) -> datasets.DatasetInfo:
        def sequence_feature(dtype: str, univar: bool) -> datasets.Sequence:
            if univar:
                return datasets.Sequence(datasets.Value(dtype))
            return datasets.Sequence(datasets.Sequence(datasets.Value(dtype)))

        features = {
            FieldName.START: datasets.Value("timestamp[s]"),
            FieldName.TARGET: sequence_feature("float32", self.config.univariate),
            FieldName.ITEM_ID: datasets.Value("string"),
        }

        CAT_FEATS = (
            FieldName.FEAT_STATIC_CAT,
            FieldName.FEAT_DYNAMIC_CAT,
            FieldName.PAST_FEAT_DYNAMIC_CAT,
        )
        REAL_FEATS = (
            FieldName.FEAT_STATIC_REAL,
            FieldName.FEAT_DYNAMIC_REAL,
            FieldName.PAST_FEAT_DYNAMIC_REAL,
        )
        STATIC_FEATS = (FieldName.FEAT_STATIC_CAT, FieldName.FEAT_STATIC_REAL)
        DYNAMIC_FEATS = (
            FieldName.FEAT_DYNAMIC_CAT,
            FieldName.FEAT_DYNAMIC_REAL,
            FieldName.PAST_FEAT_DYNAMIC_CAT,
            FieldName.PAST_FEAT_DYNAMIC_REAL,
        )

        for ts_field in self.config.optional_fields:
            # Categorical fields are stored as int32, real-valued as float32.
            if ts_field in CAT_FEATS:
                dtype = "int32"
            elif ts_field in REAL_FEATS:
                dtype = "float32"
            else:
                raise ValueError(f"Invalid field: {ts_field}")

            # Static fields are flat sequences; dynamic fields are nested,
            # one inner sequence per feature dimension.
            if ts_field in STATIC_FEATS:
                univar = True
            elif ts_field in DYNAMIC_FEATS:
                univar = False
            else:
                raise ValueError(f"Invalid field: {ts_field}")

            features[ts_field] = sequence_feature(dtype, univar)

        for metadata in self.config.include_metadata:
            if metadata == "freq":
                features[metadata] = datasets.Value("string")
            elif metadata in ("prediction_length", "stride", "rolling_evaluations"):
                features[metadata] = datasets.Value("int32")
            else:
                raise ValueError(f"Invalid metadata: {metadata}")

        features = datasets.Features(features)

        return datasets.DatasetInfo(
            features=features,
            citation=_CITATION,
        )
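
    # For the azure_vm_traces_2017 config, for example, this yields features:
    #   start: timestamp[s], target: Sequence(float32), item_id: string,
    #   feat_static_cat: Sequence(int32), feat_static_real: Sequence(float32),
    #   past_feat_dynamic_real: Sequence(Sequence(float32)).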

    def _split_generators(self, dl_manager) -> list[datasets.SplitGenerator]:
        # The parquet data is partitioned on `split`, so the requested split
        # is selected by appending a hive-style `split=...` path segment.
        split = "train_test" if self.config.train_test else "pretrain"
        url = _URLS[self.config.name] + f"/split={split}"
        downloaded_files = dl_manager.download(url)

        generators = [
            datasets.SplitGenerator(
                name=TRAIN_TEST if self.config.train_test else PRETRAIN,
                gen_kwargs={"filepath": downloaded_files},
            )
        ]
        return generators

    def _generate_tables(
        self, filepath: str
    ) -> Iterator[tuple[list[str], pa.Table]]:
        table = pq.read_table(filepath)

        for batch in table.to_batches():
            columns = list(batch.columns)
            schema = batch.schema
            # Broadcast each requested (constant) metadata value to one entry
            # per row and append it as an extra column, keeping the emitted
            # schema consistent with the features declared in `_info`.
            for metadata in self.config.include_metadata:
                if metadata == "freq":
                    arr = pa.array([self.config.freq] * len(batch), type=pa.string())
                else:  # "prediction_length", "stride" or "rolling_evaluations"
                    arr = pa.array(
                        [getattr(self.config, metadata)] * len(batch), type=pa.int32()
                    )
                columns.append(arr)
                schema = schema.append(pa.field(metadata, arr.type))
            yield (
                batch[FieldName.ITEM_ID].to_pylist(),
                pa.Table.from_arrays(columns, schema=schema),
            )
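

# Minimal usage sketch (illustrative; assumes this script is saved as, say,
# `aiops_tsf.py` alongside the parquet files listed in `_URLS`):
#
#   import datasets
#
#   ds = datasets.load_dataset("aiops_tsf.py", "azure_vm_traces_2017")
#   example = ds["train_test"][0]
#   example["target"]  # univariate target series (list of float32)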