// SPDX-FileCopyrightText: © 2023 Tenstorrent Inc.
//
// SPDX-License-Identifier: Apache-2.0

#pragma once

#include <cstddef>
#include <memory>
#include <optional>
#include <vector>

#include "ttnn/distributed/types.hpp"
#include "ttnn/tensor/tensor.hpp"

namespace ttnn::distributed {

// Opens a mesh device of the given shape.
// `l1_small_size` / `trace_region_size` / `worker_l1_size` size the corresponding per-device memory regions,
// and `num_command_queues` selects how many command queues each device is opened with.
// `offset`, if provided, positions the opened mesh within the system mesh; `physical_device_ids`, if
// non-empty, pins the mesh to specific physical devices (NOTE(review): selection semantics when empty are
// defined in the implementation — presumably automatic; confirm there).
std::shared_ptr<MeshDevice> open_mesh_device(
    const MeshShape& mesh_shape,
    size_t l1_small_size,
    size_t trace_region_size,
    size_t num_command_queues,
    const tt::tt_metal::DispatchCoreConfig& dispatch_core_config,
    const std::optional<MeshCoordinate>& offset = std::nullopt,
    const std::vector<int>& physical_device_ids = {},
    size_t worker_l1_size = DEFAULT_WORKER_L1_SIZE);

// Open a mesh device with optionally-provided mesh shape, defaults to system mesh global shape if not provided.
// Open a mesh device with optionally-provided mesh shape, defaults to system mesh global shape if not provided.
// All other parameters behave as in the explicit-shape overload above; note that in this overload
// `mesh_shape` follows the sizing/dispatch parameters and has no default, while `offset`,
// `physical_device_ids`, and `worker_l1_size` remain defaulted.
std::shared_ptr<MeshDevice> open_mesh_device(
    size_t l1_small_size,
    size_t trace_region_size,
    size_t num_command_queues,
    const tt::tt_metal::DispatchCoreConfig& dispatch_core_config,
    const std::optional<MeshShape>& mesh_shape,
    const std::optional<MeshCoordinate>& offset = std::nullopt,
    const std::vector<int>& physical_device_ids = {},
    size_t worker_l1_size = DEFAULT_WORKER_L1_SIZE);

// Closes a mesh device previously returned by `open_mesh_device`. The shared_ptr is taken by const
// reference (no ownership transfer); callers should not use the device after this call.
void close_mesh_device(const std::shared_ptr<MeshDevice>& mesh_device);

// Given a multi-device tensor, returns a list of individual per-device tensors.
// Inverse of `combine_device_tensors` below (NOTE(review): shard ordering across the mesh is defined by
// the implementation — confirm before relying on a specific order).
std::vector<Tensor> get_device_tensors(const Tensor& tensor);

// Given a list of host shards, returns a multi-device tensor.
// Tensor specs (including shapes) must match for all shards, and the number of shards must match the mesh size.
// `shard_dim` is the dimension that was sharded over, which is used to create the tensor topology (assumes sharded
// along dim 0 if not provided).
// The resulting tensor remains on host; `mesh_shape` describes the mesh the shards are laid out over.
Tensor from_host_shards(const std::vector<Tensor>& tensor_shards, const MeshShape& mesh_shape, int shard_dim = 0);

// Combines tensor shards allocated on individual devices into a single multi-device tensor.
// All tensors shards must be allocated on the same mesh buffer.
// `shard_dim` is the dimension that was sharded over, which is used to create the tensor topology (assumes sharded
// along dim 0 if not provided).
// Counterpart to `get_device_tensors` above for device-resident shards.
Tensor combine_device_tensors(const std::vector<Tensor>& tensor_shards, int shard_dim = 0);

}  // namespace ttnn::distributed
