# coding=utf-8

import mindspore.dataset as ds

def my_generator():
    """Yield five (dict, int) rows: a dict with the index and its square, plus the raw index."""
    for idx in range(5):
        yield {"number": idx, "square": idx ** 2}, idx

data = ds.GeneratorDataset(source=my_generator, column_names=["col1", "col2"])

# output_numpy=True keeps dict values as NumPy/Python objects instead of Tensors.
for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
    print(row)

import mindspore.dataset as ds

def my_generator():
    """Yield the integers 0 through 4, one per row."""
    yield from range(5)

def my_pyfunc(col1):
    """Wrap the incoming column value in a dict together with its square."""
    return {"original_col1": col1, "square": col1 ** 2}

data = ds.GeneratorDataset(source=my_generator, column_names=["col1"])
# my_pyfunc turns each scalar row into a dict, demonstrating dict output from map().
data = data.map(operations=my_pyfunc, input_columns=["col1"])

for row in data.create_dict_iterator(num_epochs=2, output_numpy=True):
    print(row)

# Handling dict objects with the batch operation
import numpy as np
import mindspore.dataset as ds

def my_generator():
    """Yield (nested-dict, int) rows; the dict holds i, i^2, i^3 as a NumPy array."""
    for idx in range(5):
        nested = {"nested_dict": {"powers": np.power(idx, [1, 2, 3])}}
        yield nested, idx

def my_pyfunc(col1):
    """Unwrap one nesting level: return the inner dict stored under "nested_dict"."""
    # The pipeline passes the dict through untouched, so it must still be a dict here.
    assert isinstance(col1, dict)
    return col1["nested_dict"]


data = ds.GeneratorDataset(source=my_generator, column_names=["col1", "col2"])
# Unwrap the nesting so each row's "col1" becomes a flat {"powers": ...} dict.
data = data.map(operations=my_pyfunc, input_columns=["col1"])

print("before batch")
for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
    print(row)

# batch() groups the dict values of all five rows together.
data = data.batch(batch_size=5)

print("after batch")
for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
    print(row)



import numpy as np
import mindspore.dataset as ds

def my_generator():
    """Yield the integers 0 through 8, one per row."""
    yield from range(9)

def my_per_batch_map(col1, batch_info):
    """Per-batch transform wrapping the batched column into two dict columns.

    Args:
        col1: the batch's elements for column "col1" (one entry per row).
        batch_info: batch metadata supplied by the pipeline (unused here).

    Returns:
        tuple: (dict with the original batch plus a per-row index array,
                dict with a copy of the original batch).
    """
    # Use the actual batch length instead of a hard-coded 3 so the function
    # also works with a different batch_size or a final partial batch.
    new_col1 = {"original_col1": col1, "index": np.arange(len(col1))}
    new_col2 = {"copied_col1": col1}
    return new_col1, new_col2

data = ds.GeneratorDataset(source=my_generator, column_names=["col1"])
# per_batch_map replaces the single input column with two dict-valued columns.
data = data.batch(batch_size=3, per_batch_map=my_per_batch_map, output_columns=["col1", "col2"])

for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
    print(row)

# Retrieving Python dicts from the data processing pipeline
import numpy as np
import mindspore.dataset as ds

def my_generator():
    """Yield (dict, int) rows; the dict wraps the index as a 0-d NumPy array."""
    for idx in range(5):
        yield {"my_data": np.array(idx)}, idx

data = ds.GeneratorDataset(source=my_generator, column_names=["col1", "col2"])

print("Iter dataset with converting all data to Tensor")
for row in data.create_dict_iterator(num_epochs=1):
    print(row)
# When iterating, the pipeline tries to convert every value inside a dict object
# to a Tensor (or to a NumPy type when output_numpy is set to True).
print("Iter dataset with converting all data to Numpy")
for row in data.create_dict_iterator(num_epochs=1, output_numpy=True):
    print(row)

# In data-sink mode, the sink channel currently cannot carry dict-typed data, so sending
# dict data into the sink channel raises an error. Either disable data sinking
# (dataset_sink_mode=False), or flatten the dict-typed data into a list or tuple at the
# last data processing node, for example:

import numpy as np
import mindspore.dataset as ds

def my_generator():
    """Yield five single-column dict rows holding i and i + 1 as 0-d NumPy arrays."""
    for idx in range(5):
        yield {'my_data': np.array(idx), 'my_data2': np.array(idx + 1)}

data = ds.GeneratorDataset(source=my_generator, column_names=['col1'])

print('get data in dict type')
for row in data:
    print(row)

def dict_to_tuple(d):
    """Flatten a dict row into a tuple of its values (in insertion order)."""
    # tuple() consumes the values view directly; the intermediate list
    # comprehension in the original version was redundant.
    return tuple(d.values())

# Flatten the dict object into a tuple before it is passed into the network,
# since the data-sink channel cannot carry dict-typed data.
data = data.map(dict_to_tuple, input_columns=['col1'], output_columns=['my_data', 'my_data2'])

print('get data in sequence type')
for row in data:
    print(row)
