// SPDX-FileCopyrightText: © 2025 Tenstorrent AI ULC
//
// SPDX-License-Identifier: Apache-2.0

#pragma once

#include <vector>
#include <string>
#include <fstream>
#include <unordered_map>
#include <yaml-cpp/yaml.h>
#include <variant>
#include <random>
#include <algorithm>
#include <numeric>

#include <tt_stl/assert.hpp>
#include <tt-logger/tt-logger.hpp>

#include "impl/context/metal_context.hpp"

#include "tests/tt_metal/test_utils/test_common.hpp"

#include <tt-metalium/experimental/fabric/fabric_edm_types.hpp>
#include <tt-metalium/experimental/fabric/mesh_graph.hpp>
#include <tt-metalium/device.hpp>
#include <tt-metalium/experimental/fabric/routing_table_generator.hpp>
#include <umd/device/types/cluster_descriptor_types.hpp>

#include "tt_fabric_test_interfaces.hpp"
#include "tt_fabric_test_common_types.hpp"
#include <tt-metalium/hal.hpp>
#include <llrt/tt_cluster.hpp>

namespace tt::tt_fabric {
namespace fabric_tests {

// Helper template for static_assert in visitor - must be defined before use
// Dependent-false idiom: always false, but evaluation is deferred until the
// template is instantiated, so `static_assert(always_false_v<T>, ...)` fires
// only for the specific T that reaches an unhandled `if constexpr` branch.
template <class>
inline constexpr bool always_false_v = false;

// Helper functions and mappings for converting between string representations in YAML
// and their corresponding enum types.
namespace detail {
// Bidirectional mapping between YAML string tokens and enum values of type T.
// Both lookup directions are populated once from a single initializer list.
template <typename T>
struct StringEnumMapper {
    std::unordered_map<std::string, T> to_enum;
    std::unordered_map<T, std::string> to_string_map;

    StringEnumMapper(const std::initializer_list<std::pair<const char*, T>>& mapping_data) {
        for (const auto& [token, enum_value] : mapping_data) {
            to_enum[token] = enum_value;
            to_string_map[enum_value] = token;
        }
    }

    // Converts an enum value back to its YAML token; throws for unmapped values.
    const std::string& to_string(T value, const std::string& type_name) const {
        auto entry = to_string_map.find(value);
        if (entry == to_string_map.end()) {
            TT_THROW("Unknown enum value for {}", type_name);
        }
        return entry->second;
    }

    // Converts a YAML token to its enum value; throws for unrecognized strings.
    T from_string(const std::string& s, const std::string& type_name) const {
        auto entry = to_enum.find(s);
        if (entry == to_enum.end()) {
            TT_THROW("Unsupported string value '{}' for {}", s, type_name);
        }
        return entry->second;
    }
};

// String <-> enum mappers used when (de)serializing YAML test configs.
// Declared `inline` (C++17 inline variables): a single shared instance exists
// across every translation unit that includes this header, instead of one
// internal-linkage copy per TU as `static` would produce.
inline const StringEnumMapper<ChipSendType> chip_send_type_mapper({
    {"mcast", ChipSendType::CHIP_MULTICAST},
    {"unicast", ChipSendType::CHIP_UNICAST},
});

inline const StringEnumMapper<NocSendType> noc_send_type_mapper({
    {"unicast_write", NocSendType::NOC_UNICAST_WRITE},
    {"atomic_inc", NocSendType::NOC_UNICAST_ATOMIC_INC},
    {"fused_atomic_inc", NocSendType::NOC_FUSED_UNICAST_ATOMIC_INC},
    {"unicast_scatter_write", NocSendType::NOC_UNICAST_SCATTER_WRITE},
});

inline const StringEnumMapper<RoutingDirection> routing_direction_mapper({
    {"N", RoutingDirection::N},
    {"S", RoutingDirection::S},
    {"E", RoutingDirection::E},
    {"W", RoutingDirection::W},
});

inline const StringEnumMapper<Topology> topology_mapper({
    {"Ring", Topology::Ring},
    {"Linear", Topology::Linear},
    {"Mesh", Topology::Mesh},
    {"Torus", Topology::Torus},
});

inline const StringEnumMapper<FabricTensixConfig> fabric_tensix_type_mapper({
    {"Default", FabricTensixConfig::DISABLED},
    {"Mux", FabricTensixConfig::MUX},
});

inline const StringEnumMapper<FabricReliabilityMode> fabric_reliability_mode_mapper({
    {"STRICT_SYSTEM_HEALTH_SETUP_MODE", FabricReliabilityMode::STRICT_SYSTEM_HEALTH_SETUP_MODE},
    {"RELAXED_SYSTEM_HEALTH_SETUP_MODE", FabricReliabilityMode::RELAXED_SYSTEM_HEALTH_SETUP_MODE},
    {"DYNAMIC_RECONFIGURATION_SETUP_MODE", FabricReliabilityMode::DYNAMIC_RECONFIGURATION_SETUP_MODE},
});

inline const StringEnumMapper<CoreAllocationPolicy> core_allocation_policy_mapper({
    {"RoundRobin", CoreAllocationPolicy::RoundRobin},
    {"ExhaustFirst", CoreAllocationPolicy::ExhaustFirst},
});

inline const StringEnumMapper<HighLevelTrafficPattern> high_level_traffic_pattern_mapper({
    {"all_to_all", HighLevelTrafficPattern::AllToAll},
    {"one_to_all", HighLevelTrafficPattern::OneToAll},
    {"all_to_one", HighLevelTrafficPattern::AllToOne},
    {"all_to_one_random", HighLevelTrafficPattern::AllToOneRandom},
    {"full_device_random_pairing", HighLevelTrafficPattern::FullDeviceRandomPairing},
    {"unidirectional_linear", HighLevelTrafficPattern::UnidirectionalLinear},
    {"full_ring", HighLevelTrafficPattern::FullRing},
    {"half_ring", HighLevelTrafficPattern::HalfRing},
    {"all_devices_uniform_pattern", HighLevelTrafficPattern::AllDevicesUniformPattern},
    {"neighbor_exchange", HighLevelTrafficPattern::NeighborExchange},
    {"sequential_all_to_all", HighLevelTrafficPattern::SequentialAllToAll},
});
// Optimized string concatenation utility to avoid multiple allocations.
// Appends `separator` followed by each argument (in order) onto `target`,
// reserving the estimated final size up front. Accepts any mix of arithmetic
// values and string-like arguments (std::string, const char*, std::string_view).
template <typename... Args>
void append_with_separator(std::string& target, std::string_view separator, const Args&... args) {
    // Calculate total size needed.
    size_t total_size = target.size();
    auto add_size = [&total_size, &separator](const auto& arg) {
        if constexpr (std::is_arithmetic_v<std::decay_t<decltype(arg)>>) {
            // For numeric types, estimate string length (conservative estimate);
            // 20 chars covers any 64-bit integer.
            total_size += separator.size() + 20;
        } else {
            // Measure string-like args through a non-owning view; the previous
            // implementation constructed a temporary std::string here just to
            // call .size(), allocating once per argument.
            total_size += separator.size() + std::string_view(arg).size();
        }
    };

    // fold expression: calls add_size for each argument to calculate total size
    // For args (a, b, c), this expands to: add_size(a), add_size(b), add_size(c)
    (add_size(args), ...);

    // Reserve space to avoid reallocations
    target.reserve(total_size);

    // Append each argument with separator
    auto append_arg = [&target, &separator](const auto& arg) {
        target += separator;
        if constexpr (std::is_arithmetic_v<std::decay_t<decltype(arg)>>) {
            target += std::to_string(arg);
        } else {
            // append() has a string_view-convertible overload (C++17), so no
            // temporary std::string is materialized for const char* arguments.
            target.append(std::string_view(arg));
        }
    };

    // fold expression: calls append_arg for each argument in sequence
    // For args (a, b, c), this expands to: append_arg(a), append_arg(b), append_arg(c)
    (append_arg(args), ...);
}

}  // namespace detail

// Helper function to resolve DeviceIdentifier to FabricNodeId
// Helper function to resolve DeviceIdentifier to FabricNodeId.
// DeviceIdentifier is a variant; each alternative is handled explicitly:
//   - FabricNodeId: already resolved, returned as-is.
//   - ChipId: translated via the device-info provider.
//   - pair<MeshId, ChipId>: combined directly into a FabricNodeId.
//   - pair<MeshId, MeshCoordinate>: translated via the device-info provider.
// Any variant alternative not listed fails at compile time.
inline FabricNodeId resolve_device_identifier(const DeviceIdentifier& device_id, const IDeviceInfoProvider& provider) {
    return std::visit(
        [&provider](const auto& id) -> FabricNodeId {
            using T = std::decay_t<decltype(id)>;
            if constexpr (std::is_same_v<T, FabricNodeId>) {
                return id;  // Already resolved
            } else if constexpr (std::is_same_v<T, ChipId>) {
                return provider.get_fabric_node_id(id);
            } else if constexpr (std::is_same_v<T, std::pair<MeshId, ChipId>>) {
                return FabricNodeId{id.first, id.second};
            } else if constexpr (std::is_same_v<T, std::pair<MeshId, MeshCoordinate>>) {
                return provider.get_fabric_node_id(id.first, id.second);
            } else {
                // Dependent-false static_assert: compile error for unhandled alternatives.
                static_assert(always_false_v<T>, "Unsupported DeviceIdentifier type");
            }
        },
        device_id);
}

// Aggregated result of parsing a test YAML file.
struct ParsedYamlConfig {
    // One entry per test defined in the YAML file.
    std::vector<ParsedTestConfig> test_configs;
    // Top-level allocator policy overrides, when present in the YAML.
    std::optional<AllocatorPolicies> allocation_policies;
    // Physical mesh description, when present in the YAML.
    std::optional<PhysicalMeshConfig> physical_mesh_config;
};

// Overlays `specific` on top of `base`: any optional field explicitly set in
// `specific` wins, otherwise the value from `base` is carried through. The
// nested destination config is merged field-by-field the same way.
template <typename TrafficPatternType>
inline TrafficPatternType merge_patterns(const TrafficPatternType& base, const TrafficPatternType& specific) {
    // Picks the overriding optional when it holds a value, else the fallback.
    auto pick = [](const auto& override_val, const auto& fallback_val) {
        return override_val.has_value() ? override_val : fallback_val;
    };

    TrafficPatternType merged;
    merged.ftype = pick(specific.ftype, base.ftype);
    merged.ntype = pick(specific.ntype, base.ntype);
    merged.size = pick(specific.size, base.size);
    merged.num_packets = pick(specific.num_packets, base.num_packets);
    merged.atomic_inc_val = pick(specific.atomic_inc_val, base.atomic_inc_val);
    merged.mcast_start_hops = pick(specific.mcast_start_hops, base.mcast_start_hops);

    // Special handling for the nested destination config.
    if (!specific.destination.has_value()) {
        // Specific has no destination; inherit the base one wholesale.
        merged.destination = base.destination;
    } else if (!base.destination.has_value()) {
        // Only specific has a destination; use it directly.
        merged.destination = specific.destination;
    } else {
        // Both sides define a destination: start from base and overlay each
        // field the specific destination sets.
        auto combined = base.destination.value();
        const auto& overlay = specific.destination.value();
        combined.device = pick(overlay.device, combined.device);
        combined.core = pick(overlay.core, combined.core);
        combined.hops = pick(overlay.hops, combined.hops);
        combined.target_address = pick(overlay.target_address, combined.target_address);
        combined.atomic_inc_address = pick(overlay.atomic_inc_address, combined.atomic_inc_address);
        merged.destination = combined;
    }

    return merged;
}

// Parses fabric-test YAML files into ParsedYamlConfig structures.
class YamlConfigParser {
public:
    YamlConfigParser() = default;

    // Loads and parses the YAML file at the given path.
    ParsedYamlConfig parse_file(const std::string& yaml_config_path);

private:
    // Section parsers, one per YAML sub-tree.
    DeviceIdentifier parse_device_identifier(const YAML::Node& node);
    ParsedDestinationConfig parse_destination_config(const YAML::Node& dest_yaml);
    ParsedTrafficPatternConfig parse_traffic_pattern_config(const YAML::Node& pattern_yaml);
    // `defaults` supplies fallback values for fields the sender omits.
    ParsedSenderConfig parse_sender_config(const YAML::Node& sender_yaml, const ParsedTrafficPatternConfig& defaults);
    TestFabricSetup parse_fabric_setup(const YAML::Node& fabric_setup_yaml);
    ParsedTestConfig parse_test_config(const YAML::Node& test_yaml);
    AllocatorPolicies parse_allocator_policies(const YAML::Node& policies_yaml);
    // `base_config` provides defaults that the YAML node selectively overrides.
    CoreAllocationConfig parse_core_allocation_config(const YAML::Node& config_yaml, CoreAllocationConfig base_config);
    PhysicalMeshConfig parse_physical_mesh_config(const YAML::Node& physical_mesh_yaml);

    // Low-level parsing helpers for scalars, sequences, pairs and 2D arrays.
    CoreCoord parse_core_coord(const YAML::Node& node);
    MeshCoordinate parse_mesh_coord(const YAML::Node& node);
    MeshId parse_mesh_id(const YAML::Node& yaml_node);
    template <typename T>
    T parse_scalar(const YAML::Node& yaml_node);
    template <typename T>
    std::vector<T> parse_scalar_sequence(const YAML::Node& yaml_node);
    template <typename T>
    std::vector<std::vector<T>> parse_2d_array(const YAML::Node& yaml_node);
    template <typename T1, typename T2>
    std::pair<T1, T2> parse_pair(const YAML::Node& yaml_sequence);
    template <typename T1, typename T2>
    std::vector<std::pair<T1, T2>> parse_pair_sequence(const YAML::Node& yaml_node);
    // Returns the inclusive range [start, end] as a vector.
    template <typename T>
    std::vector<T> get_elements_in_range(T start, T end);
    ParametrizationOptionsMap parse_parametrization_params(const YAML::Node& params_yaml);
    HighLevelPatternConfig parse_high_level_pattern_config(const YAML::Node& pattern_yaml);
};

// Parses command-line arguments for the fabric test runner: config path,
// filtering, seeding, dump options, and progress-monitoring settings.
class CmdlineParser {
public:
    CmdlineParser(const std::vector<std::string>& input_args);

    // Returns the YAML config path, if one was supplied on the command line.
    std::optional<std::string> get_yaml_config_path();
    // Returns true if the test passes the active filter criteria.
    // `fine_grained` selects between coarse and fine filtering stages.
    bool check_filter(ParsedTestConfig& test_config, bool fine_grained);
    // Applies command-line overrides onto already-parsed test configs.
    void apply_overrides(std::vector<ParsedTestConfig>& test_configs);
    // Master RNG seed, if supplied; otherwise callers pick their own.
    std::optional<uint32_t> get_master_seed();
    // Whether the fully built (expanded) tests should be dumped to a file.
    bool dump_built_tests();
    std::string get_built_tests_dump_file_name(const std::string& default_file_name);
    bool has_help_option();
    void print_help();

    // Progress monitoring options
    bool show_progress();
    uint32_t get_progress_interval();
    uint32_t get_hung_threshold();

private:
    // NOTE(review): reference member — the argument vector must outlive this
    // parser; confirm callers keep it alive for the parser's lifetime.
    const std::vector<std::string>& input_args_;
    std::optional<std::string> filter_type;
    std::optional<std::string> filter_value;
};

// Sentinel meaning "no default YAML config path" (empty string).
// `inline` (C++17) gives this header-defined constant a single shared
// definition across all translation units instead of one copy per TU.
inline const std::string no_default_test_yaml_config;

// Parses a single YAML scalar node into type T.
// MeshId is routed through parse_mesh_id (it is a strong-typed id, not a
// plain YAML-convertible scalar); every other type uses yaml-cpp's as<T>().
template <typename T>
inline T YamlConfigParser::parse_scalar(const YAML::Node& yaml_node) {
    TT_FATAL(yaml_node.IsScalar(), "Expected yaml node to be a scalar value");
    if constexpr (std::is_same_v<T, MeshId>) {
        return parse_mesh_id(yaml_node);
    } else {
        return yaml_node.as<T>();
    }
}

// Parses a YAML sequence node into a vector of scalars of type T.
template <typename T>
inline std::vector<T> YamlConfigParser::parse_scalar_sequence(const YAML::Node& yaml_node) {
    std::vector<T> parsed_values;
    // One element per YAML entry; reserve up front to avoid reallocations.
    parsed_values.reserve(yaml_node.size());
    for (const auto& element : yaml_node) {
        parsed_values.push_back(parse_scalar<T>(element));
    }
    return parsed_values;
}

// Parses a 2-element YAML sequence into a typed pair.
template <typename T1, typename T2>
inline std::pair<T1, T2> YamlConfigParser::parse_pair(const YAML::Node& yaml_sequence) {
    TT_FATAL(yaml_sequence.size() == 2, "Expected only 2 entries for the pair");
    auto first = parse_scalar<T1>(yaml_sequence[0]);
    auto second = parse_scalar<T2>(yaml_sequence[1]);
    return {std::move(first), std::move(second)};
}

// Parses a YAML sequence whose entries are themselves 2-element sequences
// into a vector of typed pairs.
template <typename T1, typename T2>
inline std::vector<std::pair<T1, T2>> YamlConfigParser::parse_pair_sequence(const YAML::Node& yaml_node) {
    std::vector<std::pair<T1, T2>> pairs;
    pairs.reserve(yaml_node.size());
    for (const auto& element : yaml_node) {
        TT_FATAL(element.IsSequence(), "Expected each entry to be sequence");
        pairs.push_back(parse_pair<T1, T2>(element));
    }
    return pairs;
}

// Parses a YAML sequence-of-sequences into a 2D vector.
// Currently only T = EthCoord is supported: each inner entry must be a
// 5-element sequence of unsigned integers. Any other T throws at runtime
// (the else branch reports the YAML node type, not the C++ type).
template <typename T>
inline std::vector<std::vector<T>> YamlConfigParser::parse_2d_array(const YAML::Node& yaml_node) {
    std::vector<std::vector<T>> array;
    TT_FATAL(yaml_node.IsSequence(), "Expected a sequence for 2D array");

    for (const auto& row : yaml_node) {
        TT_FATAL(row.IsSequence(), "Expected each row to be a sequence");
        std::vector<T> row_vector;
        row_vector.reserve(row.size());
        for (const auto& entry : row) {
            // only deals with ethernet core case
            if constexpr (std::is_same_v<T, EthCoord>) {
                TT_FATAL(entry.size() == 5, "Expected ethernet core coordinates to be a sequence of 5 elements");
                row_vector.push_back(EthCoord{
                    parse_scalar<uint32_t>(entry[0]),
                    parse_scalar<uint32_t>(entry[1]),
                    parse_scalar<uint32_t>(entry[2]),
                    parse_scalar<uint32_t>(entry[3]),
                    parse_scalar<uint32_t>(entry[4])});
            } else {
                TT_THROW("Unsupported entry type in 2D array for type: {}", entry.Type());
            }
        }
        array.push_back(std::move(row_vector));
    }

    return array;
}

// Returns the inclusive range [start, end] as a vector, e.g. (2, 5) -> {2,3,4,5}.
// Guards against a reversed range: for unsigned T, `end - start + 1` would
// otherwise wrap around and attempt an enormous allocation.
template <typename T>
inline std::vector<T> YamlConfigParser::get_elements_in_range(T start, T end) {
    TT_FATAL(start <= end, "Invalid range: start must be <= end");
    std::vector<T> range(end - start + 1);
    std::iota(range.begin(), range.end(), start);
    return range;
}

class TestConfigBuilder {
public:
    // Stores references to the device/routing providers and the shared RNG;
    // all three must outlive this builder.
    TestConfigBuilder(IDeviceInfoProvider& device_info_provider, IRouteManager& route_manager, std::mt19937& gen) :
        device_info_provider_(device_info_provider), route_manager_(route_manager), gen_(gen) {}

    std::vector<TestConfig> build_tests(
        const std::vector<ParsedTestConfig>& raw_configs, CmdlineParser& cmdline_parser) {
        std::vector<TestConfig> built_tests;

        for (const auto& raw_config : raw_configs) {
            std::vector<ParsedTestConfig> parametrized_configs = this->expand_parametrizations(raw_config);

            // For each newly generated parametrized config, expand its high-level patterns
            for (auto& p_config : parametrized_configs) {
                if (!cmdline_parser.check_filter(p_config, false)) {
                    log_info(LogTest, "Skipping part of test '{}' due to filter criteria.", p_config.name);
                    continue;
                }
                auto expanded_tests = this->expand_high_level_patterns(p_config);
                built_tests.insert(
                    built_tests.end(),
                    std::make_move_iterator(expanded_tests.begin()),
                    std::make_move_iterator(expanded_tests.end()));
            }
        }

        return built_tests;
    }
    // Helper function to check if a test should be skipped based on architecture or cluster type.
    bool should_skip_test_on_platform(const ParsedTestConfig& test_config) const {
        // Skip if the test declares platforms to skip and this platform matches
        if (test_config.skip.has_value()) {
            // Determine current platform identifiers
            auto arch_name = tt::tt_metal::hal::get_arch_name();
            auto cluster_type = tt::tt_metal::MetalContext::instance().get_cluster().get_cluster_type();
            std::string cluster_name = std::string(enchantum::to_string(cluster_type));
            for (const auto& token : test_config.skip.value()) {
                if (token == arch_name || token == cluster_name) {
                    log_info(LogTest, "Skipping test '{}' on architecture or platform '{}'", test_config.name, token);
                    return true;
                }
            }
        }
        return false;
    }

    // Helper function to check if a test should be skipped based on topology incompatibilities.
    // Only the Ring topology carries a minimum-device requirement here.
    bool should_skip_test_on_topology(const ParsedTestConfig& test_config) const {
        if (test_config.fabric_setup.topology != Topology::Ring) {
            return false;
        }
        const uint32_t num_devices = device_info_provider_.get_local_node_ids().size();
        if (num_devices >= MIN_RING_TOPOLOGY_DEVICES) {
            return false;
        }
        log_info(
            LogTest,
            "Skipping test '{}' - Ring topology requires at least {} devices, but only {} devices available",
            test_config.name,
            MIN_RING_TOPOLOGY_DEVICES,
            num_devices);
        return true;
    }

private:
    static constexpr uint32_t MIN_RING_TOPOLOGY_DEVICES = 4;

    // Convert ParsedTestConfig to TestConfig by resolving device identifiers.
    // Pass-through metadata is copied field-by-field; only 'defaults' and
    // 'senders' contain device identifiers that need resolution.
    TestConfig resolve_test_config(const ParsedTestConfig& parsed_test, uint32_t iteration_number) {
        TestConfig resolved_test;
        resolved_test.name = parsed_test.name;
        resolved_test.parametrized_name = parsed_test.parametrized_name;
        resolved_test.iteration_number = iteration_number;
        resolved_test.fabric_setup = parsed_test.fabric_setup;
        resolved_test.on_missing_param_policy = parsed_test.on_missing_param_policy;
        resolved_test.parametrization_params = parsed_test.parametrization_params;
        resolved_test.patterns = parsed_test.patterns;
        resolved_test.bw_calc_func = parsed_test.bw_calc_func;
        resolved_test.seed = parsed_test.seed;
        resolved_test.global_sync_configs = parsed_test.global_sync_configs;
        resolved_test.performance_test_mode = parsed_test.performance_test_mode;
        resolved_test.global_sync = parsed_test.global_sync;
        resolved_test.global_sync_val = parsed_test.global_sync_val;
        resolved_test.enable_flow_control = parsed_test.enable_flow_control;

        // Resolve defaults (shared traffic-pattern fallbacks), if present.
        if (parsed_test.defaults.has_value()) {
            resolved_test.defaults = resolve_traffic_pattern(parsed_test.defaults.value());
        }

        // Resolve senders: each sender's device identifier and patterns.
        resolved_test.senders.reserve(parsed_test.senders.size());
        for (const auto& parsed_sender : parsed_test.senders) {
            resolved_test.senders.push_back(resolve_sender_config(parsed_sender));
        }

        return resolved_test;
    }

    // Converts a parsed sender into its resolved form: the device identifier
    // becomes a FabricNodeId and each traffic pattern is resolved in turn.
    SenderConfig resolve_sender_config(const ParsedSenderConfig& parsed_sender) {
        SenderConfig sender;
        sender.device = resolve_device_identifier(parsed_sender.device, device_info_provider_);
        sender.core = parsed_sender.core;
        // Default to link 0 if not specified.
        sender.link_id = parsed_sender.link_id.value_or(0);

        sender.patterns.reserve(parsed_sender.patterns.size());
        for (const auto& pattern : parsed_sender.patterns) {
            sender.patterns.push_back(resolve_traffic_pattern(pattern));
        }

        return sender;
    }

    // Converts a parsed traffic pattern into its resolved form. The optional
    // scalar fields are copied through unchanged; only the nested destination
    // requires device-identifier resolution.
    TrafficPatternConfig resolve_traffic_pattern(const ParsedTrafficPatternConfig& parsed_pattern) {
        TrafficPatternConfig resolved_pattern;
        resolved_pattern.ftype = parsed_pattern.ftype;
        resolved_pattern.ntype = parsed_pattern.ntype;
        resolved_pattern.size = parsed_pattern.size;
        resolved_pattern.num_packets = parsed_pattern.num_packets;
        resolved_pattern.atomic_inc_val = parsed_pattern.atomic_inc_val;
        resolved_pattern.mcast_start_hops = parsed_pattern.mcast_start_hops;

        if (parsed_pattern.destination.has_value()) {
            resolved_pattern.destination = resolve_destination_config(parsed_pattern.destination.value());
        }

        // Credit info fields (will be populated by GlobalAllocator during resource allocation)
        resolved_pattern.sender_credit_info = std::nullopt;
        resolved_pattern.credit_return_batch_size = std::nullopt;

        return resolved_pattern;
    }

    // Copies a parsed destination into its resolved form, translating the
    // optional device identifier into a FabricNodeId when present.
    DestinationConfig resolve_destination_config(const ParsedDestinationConfig& parsed_dest) {
        DestinationConfig dest;
        if (const auto& dev = parsed_dest.device; dev.has_value()) {
            dest.device = resolve_device_identifier(dev.value(), device_info_provider_);
        }
        dest.core = parsed_dest.core;
        dest.hops = parsed_dest.hops;
        dest.target_address = parsed_dest.target_address;
        dest.atomic_inc_address = parsed_dest.atomic_inc_address;
        return dest;
    }

    // Expands one parametrized config into concrete TestConfigs, one per
    // iteration. The iteration count is the maximum over all high-level
    // patterns' explicit or auto-derived 'iterations'; each iteration gets a
    // fresh seed and, when there are multiple iterations, an "_iter_<i>"
    // suffix appended to its parametrized name.
    std::vector<TestConfig> expand_high_level_patterns(ParsedTestConfig& p_config) {
        std::vector<TestConfig> expanded_tests;

        p_config.parametrization_params.reset();  // Clear now-used params before final expansion

        // Determine how many iterations this test needs.
        uint32_t max_iterations = 1;
        if (p_config.patterns) {
            for (const auto& p : p_config.patterns.value()) {
                if (p.iterations.has_value()) {
                    max_iterations = std::max(max_iterations, p.iterations.value());
                    // Edge Case: If both iterations and all_to_one are supplied, iterations will override the number of
                    // iterations set by all_to_one
                    if (p.type == "all_to_one") {
                        log_warning(
                            tt::LogTest,
                            "'iterations' specified alongside 'all_to_one' test, `iterations` will be followed instead "
                            "of auto-generating iterations based on number of devices");
                    }
                } else if (p.type == "all_to_one") {
                    // Dynamically calculate iterations for all_to_one patterns based on number of devices
                    uint32_t num_devices = static_cast<uint32_t>(device_info_provider_.get_global_node_ids().size());
                    max_iterations = std::max(max_iterations, num_devices);
                    log_info(
                        LogTest,
                        "Auto-detected {} iterations for all_to_one pattern in test '{}'",
                        num_devices,
                        p_config.name);
                } else if (p.type == "sequential_all_to_all") {
                    // Dynamically calculate iterations for sequential_all_to_all patterns based on all device pairs
                    auto all_pairs = this->route_manager_.get_all_to_all_unicast_pairs();
                    uint32_t num_pairs = static_cast<uint32_t>(all_pairs.size());
                    max_iterations = std::max(max_iterations, num_pairs);
                    log_info(
                        LogTest,
                        "Auto-detected {} iterations for sequential_all_to_all pattern in test '{}'",
                        num_pairs,
                        p_config.name);
                }
            }
        }

        if (max_iterations > 1 && p_config.patterns.has_value() && p_config.patterns.value().size() > 1) {
            log_warning(
                LogTest,
                "Test '{}' has multiple high-level patterns and specifies iterations. All patterns will be "
                "expanded "
                "together in each iteration. This may lead to a very large number of connections.",
                p_config.name);
        }

        // Build one concrete TestConfig per iteration.
        expanded_tests.reserve(max_iterations);
        for (uint32_t i = 0; i < max_iterations; ++i) {
            ParsedTestConfig iteration_test = p_config;
            iteration_test.patterns.reset();  // Will be expanded into concrete senders.

            // Initialize parametrized_name with original name if empty
            if (iteration_test.parametrized_name.empty()) {
                iteration_test.parametrized_name = iteration_test.name;
            }
            if (max_iterations > 1) {
                // Use optimized string concatenation utility for parametrized name
                detail::append_with_separator(iteration_test.parametrized_name, "_", "iter", i);
            }

            // Each iteration draws its own sub-seed from the shared RNG.
            iteration_test.seed = std::uniform_int_distribution<uint32_t>()(this->gen_);

            // Add line sync pattern expansion if enabled
            if (iteration_test.global_sync) {
                expand_sync_patterns(iteration_test);
            }

            if (p_config.patterns.has_value()) {
                // High-level patterns and concrete senders are mutually exclusive.
                if (!p_config.senders.empty()) {
                    TT_FATAL(
                        false,
                        "Test '{}' has both concrete 'senders' and high-level 'patterns' specified. This is ambiguous. "
                        "Please specify one or the other.",
                        p_config.name);
                }
                expand_patterns_into_test(iteration_test, p_config.patterns.value(), i);
            } else if (p_config.defaults.has_value()) {
                // if we have concrete senders, we still need to apply the defaults to them
                for (auto& sender : iteration_test.senders) {
                    for (auto& pattern : sender.patterns) {
                        pattern = merge_patterns(p_config.defaults.value(), pattern);
                    }
                }
            }

            // After patterns are expanded, duplicate senders for different links if specified
            if (!expand_link_duplicates(iteration_test)) {
                // Test was skipped due to insufficient routing planes, continue to next iteration
                continue;
            }

            // After patterns are expanded, resolve any missing params based on policy
            resolve_missing_params(iteration_test);

            // After expansion and resolution, apply universal transformations like mcast splitting.
            split_all_unicast_or_multicast_patterns(iteration_test);

            // Convert to resolved TestConfig
            TestConfig resolved_test = resolve_test_config(iteration_test, i);

            validate_test(resolved_test);
            expanded_tests.push_back(resolved_test);
        }
        return expanded_tests;
    }

    // Fans a raw config out across its parametrization axes. For each
    // parameter (ftype/ntype for strings; size/num_packets/num_links for
    // integers) the current set of configs is multiplied by that parameter's
    // value list, and the result feeds the next parameter — yielding the full
    // cross product of all parameter values.
    std::vector<ParsedTestConfig> expand_parametrizations(const ParsedTestConfig& raw_config) {
        std::vector<ParsedTestConfig> parametrized_configs;
        parametrized_configs.push_back(raw_config);

        if (raw_config.parametrization_params.has_value()) {
            for (const auto& [param_name, values_variant] : raw_config.parametrization_params.value()) {
                std::vector<ParsedTestConfig> next_level_configs;

                // Pre-calculate total size to avoid reallocations
                size_t total_new_configs = 0;
                if (std::holds_alternative<std::vector<std::string>>(values_variant)) {
                    const auto& values = std::get<std::vector<std::string>>(values_variant);
                    total_new_configs = parametrized_configs.size() * values.size();
                } else if (std::holds_alternative<std::vector<uint32_t>>(values_variant)) {
                    const auto& values = std::get<std::vector<uint32_t>>(values_variant);
                    total_new_configs = parametrized_configs.size() * values.size();
                }
                next_level_configs.reserve(total_new_configs);

                // The string and integer branches below are intentionally
                // parallel: copy the config, extend its parametrized name,
                // then merge the parameter value into its defaults.
                for (const auto& current_config : parametrized_configs) {
                    // Handle string-based parameters
                    if (std::holds_alternative<std::vector<std::string>>(values_variant)) {
                        const auto& values = std::get<std::vector<std::string>>(values_variant);
                        for (const auto& value : values) {
                            next_level_configs.emplace_back(current_config);
                            auto& next_config = next_level_configs.back();
                            // Explicitly preserve performance_test_mode
                            next_config.performance_test_mode = current_config.performance_test_mode;

                            // Initialize parametrized_name with original name if empty
                            if (next_config.parametrized_name.empty()) {
                                next_config.parametrized_name = next_config.name;
                            }
                            // Update parametrized name to include parameter name and value
                            detail::append_with_separator(next_config.parametrized_name, "_", param_name, value);

                            ParsedTrafficPatternConfig param_default;
                            if (param_name == "ftype") {
                                param_default.ftype = detail::chip_send_type_mapper.from_string(value, "ftype");
                            } else if (param_name == "ntype") {
                                param_default.ntype = detail::noc_send_type_mapper.from_string(value, "ntype");
                            }
                            next_config.defaults = merge_patterns(
                                current_config.defaults.value_or(ParsedTrafficPatternConfig{}), param_default);
                        }
                    }
                    // Handle integer-based parameters
                    else if (std::holds_alternative<std::vector<uint32_t>>(values_variant)) {
                        const auto& values = std::get<std::vector<uint32_t>>(values_variant);
                        for (const auto& value : values) {
                            next_level_configs.emplace_back(current_config);
                            auto& next_config = next_level_configs.back();
                            // Explicitly preserve performance_test_mode
                            next_config.performance_test_mode = current_config.performance_test_mode;

                            // Initialize parametrized_name with original name if empty
                            if (next_config.parametrized_name.empty()) {
                                next_config.parametrized_name = next_config.name;
                            }
                            // Update parametrized name to include parameter name and value
                            detail::append_with_separator(
                                next_config.parametrized_name, "_", param_name, std::to_string(value));

                            if (param_name == "num_links") {
                                // num_links is part of fabric_setup, not traffic pattern defaults
                                next_config.fabric_setup.num_links = value;
                            } else {
                                ParsedTrafficPatternConfig param_default;
                                if (param_name == "size") {
                                    param_default.size = value;
                                } else if (param_name == "num_packets") {
                                    param_default.num_packets = value;
                                }
                                next_config.defaults = merge_patterns(
                                    current_config.defaults.value_or(ParsedTrafficPatternConfig{}), param_default);
                            }
                        }
                    }
                }
                // Move the newly generated configs to be the input for the next parameter loop.
                parametrized_configs = std::move(next_level_configs);
            }
        }
        return parametrized_configs;
    }

    // Sender-independent sanity checks for a single traffic pattern:
    // destination presence/ambiguity, atomic-field usage, and payload size.
    void validate_pattern(const TrafficPatternConfig& pattern, const TestConfig& test) const {
        // 1. Validate destination ambiguity: exactly one of 'device' or 'hops'.
        TT_FATAL(
            pattern.destination.has_value(),
            "Test '{}': Pattern is missing a destination. This should have been resolved by the builder.",
            test.name);
        const auto& dest = pattern.destination.value();
        TT_FATAL(
            !(dest.device.has_value() && dest.hops.has_value()),
            "Test '{}': A pattern's destination cannot have both 'device' and 'hops' specified.",
            test.name);
        TT_FATAL(
            dest.device.has_value() || dest.hops.has_value(),
            "Test '{}': A pattern's destination must specify either a 'device' or 'hops'.",
            test.name);

        // 2. Validate atomic-related fields: a plain unicast write must not
        // carry an atomic increment value.
        if (pattern.ntype.has_value() && pattern.ntype.value() == NocSendType::NOC_UNICAST_WRITE) {
            TT_FATAL(
                !pattern.atomic_inc_val.has_value(),
                "Test '{}': 'atomic_inc_val' should not be specified for 'unicast_write' ntype.",
                test.name);
        }

        // 3. Validate payload size against the device-reported maximum.
        if (pattern.size.has_value()) {
            const uint32_t max_payload_size = this->device_info_provider_.get_max_payload_size_bytes();
            TT_FATAL(
                pattern.size.value() <= max_payload_size,
                "Test '{}': Payload size {} exceeds the maximum of {} bytes",
                test.name,
                pattern.size.value(),
                max_payload_size);
        }
    }

    // Sender-aware checks for a CHIP_UNICAST pattern: destination must be
    // present, must not be the sender itself, and the multicast-only field
    // 'mcast_start_hops' must be absent.
    void validate_chip_unicast(
        const TrafficPatternConfig& pattern, const SenderConfig& sender, const TestConfig& test) const {
        TT_FATAL(
            pattern.destination.has_value() &&
                (pattern.destination->device.has_value() || pattern.destination->hops.has_value()),
            "Test '{}': Unicast pattern for sender on device {} is missing a destination device or hops.",
            test.name,
            sender.device);

        // Self-send is only detectable when the destination is an explicit device.
        if (pattern.destination->device.has_value()) {
            TT_FATAL(
                sender.device != pattern.destination->device.value(),
                "Test '{}': Sender on device {} cannot have itself as a destination.",
                test.name,
                sender.device);
        }

        TT_FATAL(
            !pattern.mcast_start_hops.has_value(),
            "Test '{}': 'mcast_start_hops' cannot be specified for a 'unicast' ftype pattern.",
            test.name);
    }

    // Sender-aware checks for a CHIP_MULTICAST pattern: the destination must be
    // expressed as directional 'hops' (an explicit device is not valid here).
    void validate_chip_multicast(
        const TrafficPatternConfig& pattern, const SenderConfig& sender, const TestConfig& test) const {
        TT_FATAL(
            pattern.destination.has_value() && pattern.destination->hops.has_value(),
            "Test '{}': Multicast pattern for sender on device {} must have a destination specified by 'hops'.",
            test.name,
            sender.device);
    }

    // Checks a line-sync pattern: it must be a zero-payload, single-packet
    // multicast atomic-increment whose destination is given as hops.
    void validate_sync_pattern(
        const TrafficPatternConfig& pattern, const SenderConfig& sender, const TestConfig& test) const {
        TT_FATAL(
            pattern.ftype.has_value() && pattern.ftype.value() == ChipSendType::CHIP_MULTICAST,
            "Test '{}': Line sync pattern for sender on device {} must use CHIP_MULTICAST.",
            test.name,
            sender.device);

        TT_FATAL(
            pattern.ntype.has_value() && pattern.ntype.value() == NocSendType::NOC_UNICAST_ATOMIC_INC,
            "Test '{}': Line sync pattern for sender on device {} must use NOC_UNICAST_ATOMIC_INC.",
            test.name,
            sender.device);

        TT_FATAL(
            pattern.destination.has_value() && pattern.destination->hops.has_value(),
            "Test '{}': Line sync pattern for sender on device {} must have destination specified by 'hops'.",
            test.name,
            sender.device);

        // Sync signals carry no payload...
        TT_FATAL(
            pattern.size.has_value() && pattern.size.value() == 0,
            "Test '{}': Line sync pattern for sender on device {} must have size 0 (no payload).",
            test.name,
            sender.device);

        // ...and exactly one packet per direction.
        TT_FATAL(
            pattern.num_packets.has_value() && pattern.num_packets.value() == 1,
            "Test '{}': Line sync pattern for sender on device {} must have num_packets 1.",
            test.name,
            sender.device);
    }

    // Validates a fully-resolved test: per-pattern invariants, ftype-specific
    // rules, optional global line-sync patterns, and topology constraints.
    void validate_test(const TestConfig& test) const {
        for (const auto& sender : test.senders) {
            for (const auto& pattern : sender.patterns) {
                validate_pattern(pattern, test);

                // Fail with a descriptive error instead of std::bad_optional_access
                // if the builder left ftype unresolved.
                TT_FATAL(
                    pattern.ftype.has_value(),
                    "Test '{}': Pattern is missing an ftype. This should have been resolved by the builder.",
                    test.name);
                if (pattern.ftype.value() == ChipSendType::CHIP_UNICAST) {
                    validate_chip_unicast(pattern, sender, test);
                } else if (pattern.ftype.value() == ChipSendType::CHIP_MULTICAST) {
                    validate_chip_multicast(pattern, sender, test);
                }
            }
        }

        // Validate line sync patterns if present
        if (test.global_sync) {
            for (const auto& sync_sender : test.global_sync_configs) {
                for (const auto& sync_pattern : sync_sender.patterns) {
                    validate_sync_pattern(sync_pattern, sync_sender, test);
                }
            }
        }

        // Linear topology requires every explicit (sender, destination) device
        // pair to share a row or column.
        if (test.fabric_setup.topology == tt::tt_fabric::Topology::Linear) {
            for (const auto& sender : test.senders) {
                for (const auto& pattern : sender.patterns) {
                    if (pattern.destination->device.has_value()) {
                        TT_FATAL(
                            this->route_manager_.are_devices_linear(
                                {sender.device, pattern.destination->device.value()}),
                            "For a 'Linear' topology, all specified devices must be in the same row or column. Test: "
                            "{}",
                            test.name);
                    }
                }
            }
        }
    }

    // Expands every high-level pattern entry into concrete sender configs on
    // 'test'. Entries that cap 'iterations' stop participating once exceeded.
    void expand_patterns_into_test(
        ParsedTestConfig& test, const std::vector<HighLevelPatternConfig>& patterns, uint32_t iteration_idx) {
        const auto& defaults = test.defaults.value_or(ParsedTrafficPatternConfig{});

        for (const auto& pattern : patterns) {
            if (pattern.iterations.has_value() && iteration_idx >= pattern.iterations.value()) {
                continue;
            }

            const std::string& type = pattern.type;
            if (type == "all_to_all" || type == "one_to_all") {
                // Unicast and multicast variants share the same pair of entry points.
                const auto hl_type = (type == "one_to_all") ? HighLevelTrafficPattern::OneToAll
                                                            : HighLevelTrafficPattern::AllToAll;
                if (defaults.ftype == ChipSendType::CHIP_UNICAST) {
                    expand_one_or_all_to_all_unicast(test, defaults, hl_type);
                } else {
                    expand_one_or_all_to_all_multicast(test, defaults, hl_type);
                }
            } else if (type == "all_to_one") {
                expand_all_to_one_unicast(test, defaults, iteration_idx);
            } else if (type == "all_to_one_random") {
                expand_all_to_one_random_unicast(test, defaults);
            } else if (type == "full_device_random_pairing") {
                expand_full_device_random_pairing(test, defaults);
            } else if (type == "unidirectional_linear") {
                expand_unidirectional_linear_unicast_or_multicast(test, defaults);
            } else if (type == "full_ring" || type == "half_ring") {
                expand_full_or_half_ring_unicast_or_multicast(
                    test,
                    defaults,
                    detail::high_level_traffic_pattern_mapper.from_string(type, "HighLevelTrafficPattern"));
            } else if (type == "all_devices_uniform_pattern") {
                expand_all_devices_uniform_pattern(test, defaults);
            } else if (type == "neighbor_exchange") {
                expand_neighbor_exchange(test, defaults);
            } else if (type == "sequential_all_to_all") {
                expand_sequential_all_to_all_unicast(test, defaults, iteration_idx);
            } else {
                TT_THROW("Unsupported pattern type: {}", pattern.type);
            }
        }
    }

    // Expands all_to_all (every device unicasts to every other device) or
    // one_to_all (only the first device sends) into sender configs.
    void expand_one_or_all_to_all_unicast(
        ParsedTestConfig& test, const ParsedTrafficPatternConfig& base_pattern, HighLevelTrafficPattern pattern_type) {
        const bool one_to_all = (pattern_type == HighLevelTrafficPattern::OneToAll);
        log_debug(
            LogTest,
            "Expanding {}_unicast pattern for test: {}",
            one_to_all ? "one_to_all" : "all_to_all",
            test.name);
        auto all_pairs = this->route_manager_.get_all_to_all_unicast_pairs();

        if (!one_to_all) {
            add_senders_from_pairs(test, all_pairs, base_pattern);
            return;
        }

        TT_FATAL(!all_pairs.empty(), "Cannot expand one_to_all_unicast because no device pairs were found.");

        // Restrict the pair list to those originating from the first device.
        FabricNodeId single_sender = all_pairs.front().first;
        std::vector<std::pair<FabricNodeId, FabricNodeId>> filtered_pairs;
        for (const auto& pair : all_pairs) {
            if (pair.first == single_sender) {
                filtered_pairs.push_back(pair);
            }
        }
        add_senders_from_pairs(test, filtered_pairs, base_pattern);
    }

    // Expands all_to_one: every device unicasts to one target device selected
    // by the iteration index.
    void expand_all_to_one_unicast(
        ParsedTestConfig& test, const ParsedTrafficPatternConfig& base_pattern, uint32_t iteration_idx) {
        log_debug(
            LogTest, "Expanding all_to_one_unicast pattern for test: {} (iteration {})", test.name, iteration_idx);
        const auto pairs = this->route_manager_.get_all_to_one_unicast_pairs(iteration_idx);
        if (pairs.empty()) {
            return;
        }
        add_senders_from_pairs(test, pairs, base_pattern);
    }

    // Expands all_to_one_random: every device unicasts to a single target
    // device chosen uniformly at random.
    void expand_all_to_one_random_unicast(ParsedTestConfig& test, const ParsedTrafficPatternConfig& base_pattern) {
        // Fixed: the message previously said "all_to_one_unicast" (copy-paste).
        log_debug(LogTest, "Expanding all_to_one_random_unicast pattern for test: {}", test.name);
        const auto num_devices = device_info_provider_.get_global_node_ids().size();
        // Guard against unsigned underflow in (num_devices - 1) below.
        TT_FATAL(num_devices > 0, "Cannot expand all_to_one_random because no devices were found.");
        uint32_t index = get_random_in_range(0, static_cast<uint32_t>(num_devices - 1));
        auto filtered_pairs = this->route_manager_.get_all_to_one_unicast_pairs(index);
        add_senders_from_pairs(test, filtered_pairs, base_pattern);
    }

    // Expands full_device_random_pairing: devices are randomly paired and each
    // pair exchanges unicast traffic.
    void expand_full_device_random_pairing(ParsedTestConfig& test, const ParsedTrafficPatternConfig& base_pattern) {
        log_debug(LogTest, "Expanding full_device_random_pairing pattern for test: {}", test.name);
        const auto pairs = this->route_manager_.get_full_device_random_pairs(this->gen_);
        add_senders_from_pairs(test, pairs, base_pattern);
    }

    // Expands sequential_all_to_all: exactly one (src, dst) pair per iteration,
    // walking through the full all-to-all pair list across iterations.
    void expand_sequential_all_to_all_unicast(
        ParsedTestConfig& test, const ParsedTrafficPatternConfig& base_pattern, uint32_t iteration_idx) {
        log_debug(
            LogTest,
            "Expanding sequential_all_to_all_unicast pattern for test: {} (iteration {})",
            test.name,
            iteration_idx);

        const auto all_pairs = this->route_manager_.get_all_to_all_unicast_pairs();
        if (all_pairs.empty()) {
            log_warning(LogTest, "No valid pairs found for sequential_all_to_all pattern");
            return;
        }

        if (iteration_idx >= all_pairs.size()) {
            TT_THROW(
                "Iteration index {} exceeds number of available device pairs {} for sequential_all_to_all pattern",
                iteration_idx,
                all_pairs.size());
        }

        // Only the pair selected by this iteration participates.
        const std::vector<std::pair<FabricNodeId, FabricNodeId>> selected = {all_pairs[iteration_idx]};
        add_senders_from_pairs(test, selected, base_pattern);
    }

    // Expands all_devices_uniform_pattern: every device gets one sender running
    // the same base pattern (taken from the test defaults).
    void expand_all_devices_uniform_pattern(ParsedTestConfig& test, const ParsedTrafficPatternConfig& base_pattern) {
        log_debug(LogTest, "Expanding all_devices_uniform_pattern for test: {}", test.name);
        const std::vector<FabricNodeId> devices = device_info_provider_.get_global_node_ids();
        TT_FATAL(!devices.empty(), "Cannot expand all_devices_uniform_pattern because no devices were found.");

        for (const auto& device : devices) {
            test.senders.emplace_back(ParsedSenderConfig{.device = device, .patterns = {base_pattern}});
        }
    }

    // Expands all_to_all or one_to_all as full-mesh multicasts: each selected
    // sender mcasts to every other device via its full-mcast hop map.
    void expand_one_or_all_to_all_multicast(
        ParsedTestConfig& test, const ParsedTrafficPatternConfig& base_pattern, HighLevelTrafficPattern pattern_type) {
        const bool one_to_all = (pattern_type == HighLevelTrafficPattern::OneToAll);
        const char* pattern_name = one_to_all ? "one_to_all" : "all_to_all";
        log_debug(LogTest, "Expanding {}_multicast pattern for test: {}", pattern_name, test.name);
        std::vector<FabricNodeId> devices = device_info_provider_.get_global_node_ids();
        TT_FATAL(!devices.empty(), "Cannot expand {}_multicast because no devices were found.", pattern_name);

        // one_to_all: only the first device sends; all_to_all: every device sends.
        std::vector<FabricNodeId> sender_devices =
            one_to_all ? std::vector<FabricNodeId>{devices.front()} : devices;

        for (const auto& src_node : sender_devices) {
            ParsedTrafficPatternConfig specific_pattern;
            specific_pattern.destination =
                ParsedDestinationConfig{.hops = this->route_manager_.get_full_mcast_hops(src_node)};
            specific_pattern.ftype = ChipSendType::CHIP_MULTICAST;

            auto merged_pattern = merge_patterns(base_pattern, specific_pattern);

            // Append to an existing sender entry for this device if one exists;
            // the device identifier is a variant, so compare through get_if.
            auto matches_src = [&](const ParsedSenderConfig& s) {
                const auto* node = std::get_if<FabricNodeId>(&s.device);
                return node != nullptr && *node == src_node;
            };
            auto it = std::find_if(test.senders.begin(), test.senders.end(), matches_src);
            if (it == test.senders.end()) {
                test.senders.emplace_back(ParsedSenderConfig{.device = src_node, .patterns = {merged_pattern}});
            } else {
                it->patterns.emplace_back(std::move(merged_pattern));
            }
        }
    }

    // Expands unidirectional_linear: every local device mcasts along each mesh
    // dimension, using one sender per dimension so a single sender does not
    // become the bottleneck.
    void expand_unidirectional_linear_unicast_or_multicast(
        ParsedTestConfig& test, const ParsedTrafficPatternConfig& base_pattern) {
        log_debug(LogTest, "Expanding unidirectional_linear pattern for test: {}", test.name);
        const std::vector<FabricNodeId> devices = device_info_provider_.get_local_node_ids();
        TT_FATAL(!devices.empty(), "Cannot expand unidirectional_linear because no devices were found.");

        for (const auto& src_node : devices) {
            for (uint32_t dim = 0; dim < this->route_manager_.get_num_mesh_dims(); ++dim) {
                // A dimension holding a single device has nowhere to send.
                if (this->route_manager_.get_mesh_shape()[dim] < 2) {
                    continue;
                }

                ParsedTrafficPatternConfig specific_pattern;
                specific_pattern.destination = ParsedDestinationConfig{
                    .hops = this->route_manager_.get_unidirectional_linear_mcast_hops(src_node, dim)};

                test.senders.push_back(ParsedSenderConfig{
                    .device = src_node, .patterns = {merge_patterns(base_pattern, specific_pattern)}});
            }
        }
    }

    // Expands neighbor_exchange: adjacent devices exchange unicast traffic.
    void expand_neighbor_exchange(ParsedTestConfig& test, const ParsedTrafficPatternConfig& base_pattern) {
        log_debug(LogTest, "Expanding neighbor_exchange pattern for test: {}", test.name);
        const auto pairs = this->route_manager_.get_neighbor_exchange_pairs();
        if (pairs.empty()) {
            return;
        }
        add_senders_from_pairs(test, pairs, base_pattern);
    }

    // Expands full_ring / half_ring patterns. On wrap-around meshes, only
    // perimeter devices participate and mcast along the perimeter ring; on
    // other meshes, each device mcasts a ring along every mesh dimension
    // (one sender entry per dimension).
    void expand_full_or_half_ring_unicast_or_multicast(
        ParsedTestConfig& test, const ParsedTrafficPatternConfig& base_pattern, HighLevelTrafficPattern pattern_type) {
        log_debug(LogTest, "Expanding full_or_half_ring pattern for test: {}", test.name);
        std::vector<FabricNodeId> devices = device_info_provider_.get_local_node_ids();
        TT_FATAL(!devices.empty(), "Cannot expand full_or_half_ring because no devices were found.");

        // Mesh-wide property, probed with any representative device.
        bool wrap_around_mesh = this->route_manager_.wrap_around_mesh(devices.front());

        std::unordered_map<RoutingDirection, uint32_t> hops;
        for (const auto& src_node : devices) {
            if (wrap_around_mesh) {
                // Get ring neighbors - returns nullopt for non-perimeter devices
                auto ring_neighbors = this->route_manager_.get_wrap_around_mesh_ring_neighbors(src_node, devices);

                // Check if the result is valid (has value)
                if (!ring_neighbors.has_value()) {
                    // Skip this device as it's not on the perimeter and can't participate in ring multicast
                    log_debug(LogTest, "Skipping device {} as it's not on the perimeter ring", src_node.chip_id);
                    continue;
                }

                // Extract the valid ring neighbors
                auto [dst_node_forward, dst_node_backward] = ring_neighbors.value();

                hops = this->route_manager_.get_wrap_around_mesh_full_or_half_ring_mcast_hops(
                    src_node, dst_node_forward, dst_node_backward, pattern_type);

                ParsedTrafficPatternConfig specific_pattern;
                specific_pattern.destination = ParsedDestinationConfig{.hops = hops};

                auto merged_pattern = merge_patterns(base_pattern, specific_pattern);

                // Append to an existing sender entry for this device, if any.
                auto it = std::find_if(test.senders.begin(), test.senders.end(), [&](const ParsedSenderConfig& s) {
                    // Compare FabricNodeId with DeviceIdentifier
                    if (std::holds_alternative<FabricNodeId>(s.device)) {
                        return std::get<FabricNodeId>(s.device) == src_node;
                    }
                    return false;
                });

                if (it != test.senders.end()) {
                    it->patterns.push_back(merged_pattern);
                } else {
                    test.senders.push_back(ParsedSenderConfig{.device = src_node, .patterns = {merged_pattern}});
                }
            } else {
                // Non-wrap meshes: build a ring along each dimension from each device.
                for (uint32_t dim = 0; dim < this->route_manager_.get_num_mesh_dims(); ++dim) {
                    // Skip dimensions with only one device
                    if (this->route_manager_.get_mesh_shape()[dim] < 2) {
                        continue;
                    }

                    hops = this->route_manager_.get_full_or_half_ring_mcast_hops(src_node, pattern_type, dim);

                    ParsedTrafficPatternConfig specific_pattern;
                    specific_pattern.destination = ParsedDestinationConfig{.hops = hops};

                    auto merged_pattern = merge_patterns(base_pattern, specific_pattern);
                    test.senders.push_back(ParsedSenderConfig{.device = src_node, .patterns = {merged_pattern}});
                }
            }
        }
    }

    // Builds the global line-sync configuration: one sync sender per device,
    // each holding the single-direction sync patterns produced by
    // create_sync_patterns_for_topology, and records the expected sync value.
    void expand_sync_patterns(ParsedTestConfig& test) {
        log_debug(
            LogTest,
            "Expanding line sync patterns for test: {} with topology: {}",
            test.name,
            static_cast<int>(test.fabric_setup.topology));

        std::vector<FabricNodeId> all_devices = device_info_provider_.get_global_node_ids();
        TT_FATAL(!all_devices.empty(), "Cannot expand line sync patterns because no devices were found.");

        // Create sync patterns based on topology - returns multiple patterns per device for mcast
        for (const auto& src_device : all_devices) {
            const auto& sync_patterns_and_sync_val_pair = create_sync_patterns_for_topology(src_device, all_devices);

            const auto& sync_patterns = sync_patterns_and_sync_val_pair.first;
            const auto& sync_val = sync_patterns_and_sync_val_pair.second;

            // Create sender config with all split sync patterns
            // Sync always uses link 0 (no override allowed)
            SenderConfig sync_sender = {.device = src_device, .patterns = sync_patterns, .link_id = 0};

            test.global_sync_configs.push_back(std::move(sync_sender));

            // global sync value
            // NOTE(review): assigned on every loop iteration, so the last
            // device's value wins — presumably identical across devices; confirm.
            test.global_sync_val = sync_val;
        }

        log_debug(
            LogTest,
            "Generated {} line sync configurations, line_syn_val: {}",
            test.global_sync_configs.size(),
            test.global_sync_val);
    }

    std::pair<std::vector<TrafficPatternConfig>, uint32_t> create_sync_patterns_for_topology(
        const FabricNodeId& src_device, const std::vector<FabricNodeId>& devices) {
        std::vector<TrafficPatternConfig> sync_patterns;

        // Common sync pattern characteristics
        TrafficPatternConfig base_sync_pattern;
        base_sync_pattern.ftype = ChipSendType::CHIP_MULTICAST;         // Global sync across devices
        base_sync_pattern.ntype = NocSendType::NOC_UNICAST_ATOMIC_INC;  // Sync signal via atomic increment
        base_sync_pattern.size = 0;                                     // No payload, just sync signal
        base_sync_pattern.num_packets = 1;                              // Single sync signal
        base_sync_pattern.atomic_inc_val = 1;                           // Increment by 1

        // Topology-specific routing - get multi-directional hops first
        auto [multi_directional_hops, global_sync_val] =
            this->route_manager_.get_sync_hops_and_val(src_device, devices);

        // Split multi-directional hops into single-direction patterns
        auto split_hops_vec = this->route_manager_.split_multicast_hops(multi_directional_hops);

        log_debug(
            LogTest,
            "Splitting sync pattern for device {} from 1 multi-directional to {} single-direction patterns",
            src_device.chip_id,
            split_hops_vec.size());

        // Create separate sync pattern for each mcast direction. This is required since test infra only handle mcast
        // for one direction. Ex, mcast to E/W will split into EAST and WEST patterns.
        sync_patterns.reserve(split_hops_vec.size());
        for (const auto& single_direction_hops : split_hops_vec) {
            TrafficPatternConfig sync_pattern = base_sync_pattern;
            sync_pattern.destination = DestinationConfig{.hops = single_direction_hops};
            sync_patterns.push_back(std::move(sync_pattern));
        }

        return {sync_patterns, global_sync_val};
    }

    // Converts (src, dst) device pairs into sender configs, grouping all
    // patterns that share a source device under a single sender entry.
    void add_senders_from_pairs(
        ParsedTestConfig& test,
        const std::vector<std::pair<FabricNodeId, FabricNodeId>>& pairs,
        const ParsedTrafficPatternConfig& base_pattern) {
        // std::map keeps the generated senders deterministically ordered by source node.
        std::map<FabricNodeId, std::vector<ParsedTrafficPatternConfig>> generated_senders;

        for (const auto& [src_node, dst_node] : pairs) {
            ParsedTrafficPatternConfig specific_pattern;
            specific_pattern.destination = ParsedDestinationConfig{.device = dst_node};
            specific_pattern.ftype = ChipSendType::CHIP_UNICAST;

            // Use try_emplace to avoid creating empty vectors unnecessarily
            auto [it, inserted] = generated_senders.try_emplace(src_node);
            it->second.emplace_back(merge_patterns(base_pattern, specific_pattern));
        }

        test.senders.reserve(test.senders.size() + generated_senders.size());
        for (auto& [src_node, patterns] : generated_senders) {
            // Move each pattern vector out of the scratch map instead of
            // copying it (the map is discarded immediately afterwards).
            test.senders.emplace_back(ParsedSenderConfig{.device = src_node, .patterns = std::move(patterns)});
        }
    }

    // Splits every multi-direction multicast pattern into one pattern per
    // direction (downstream infra only handles single-direction mcasts).
    // Senders whose patterns need no split keep their original vector
    // untouched (lazy copy-on-write).
    void split_all_unicast_or_multicast_patterns(ParsedTestConfig& test) {
        // This function iterates through all sender patterns and splits any multi-direction
        // multicast hops.
        for (auto& sender : test.senders) {
            std::vector<ParsedTrafficPatternConfig> new_patterns;
            bool sender_was_modified = false;

            for (size_t i = 0; i < sender.patterns.size(); ++i) {
                const auto& pattern = sender.patterns[i];

                // Determine if this specific pattern needs to be split.
                bool needs_split = false;
                std::vector<std::unordered_map<RoutingDirection, uint32_t>> split_hops_vec;
                if (pattern.destination.has_value() && pattern.destination.value().hops.has_value()) {
                    const auto& hops = pattern.destination.value().hops.value();
                    split_hops_vec = this->route_manager_.split_multicast_hops(hops);
                    // A single-entry result means the hops were already one-directional.
                    if (split_hops_vec.size() > 1) {
                        needs_split = true;
                    }
                }

                if (needs_split) {
                    if (!sender_was_modified) {
                        sender_was_modified = true;
                        // This is the first split for this sender.
                        // Lazily allocate and copy the patterns processed so far.
                        new_patterns.reserve(sender.patterns.size() + split_hops_vec.size() - 1);
                        new_patterns.insert(new_patterns.end(), sender.patterns.begin(), sender.patterns.begin() + i);
                    }
                    // Add the newly split patterns.
                    for (const auto& split_hop : split_hops_vec) {
                        ParsedTrafficPatternConfig new_pattern = pattern;
                        new_pattern.destination->hops = split_hop;
                        new_patterns.emplace_back(std::move(new_pattern));
                    }
                } else if (sender_was_modified) {
                    // We are in copy-mode because a previous pattern was split.
                    new_patterns.emplace_back(pattern);
                }
            }

            if (sender_was_modified) {
                sender.patterns = std::move(new_patterns);
            }
        }
    }

    // Duplicates every sender once per link so traffic exercises all links.
    // Returns false when the fabric cannot supply num_links routing planes,
    // signalling that the test should be skipped.
    bool expand_link_duplicates(ParsedTestConfig& test) {
        const uint32_t num_links = test.fabric_setup.num_links;
        // If num_links is 1, no duplication needed
        if (num_links <= 1) {
            return true;  // Success - no expansion needed
        }

        log_debug(LogTest, "Expanding link duplicates for test '{}' with {} links", test.name, num_links);

        // Validate that num_links doesn't exceed available routing planes for any device
        if (!route_manager_.validate_num_links_supported(num_links)) {
            return false;  // Indicate test should be skipped
        }

        std::vector<ParsedSenderConfig> new_senders;
        new_senders.reserve(test.senders.size() * num_links);

        for (const auto& sender : test.senders) {
            for (uint32_t link_id = 0; link_id < num_links; ++link_id) {
                ParsedSenderConfig duplicated_sender = sender;
                duplicated_sender.link_id = link_id;  // Assign link ID
                // Move the fresh copy into place instead of copying it again.
                new_senders.push_back(std::move(duplicated_sender));
            }
        }

        test.senders = std::move(new_senders);
        return true;  // Success
    }

    // Fills in any unspecified pattern fields. With the "randomize" policy,
    // missing fields get random values; otherwise they get fixed sane defaults.
    void resolve_missing_params(ParsedTestConfig& test) {
        const bool randomize =
            test.on_missing_param_policy.has_value() && test.on_missing_param_policy.value() == "randomize";

        for (auto& sender : test.senders) {
            for (auto& pattern : sender.patterns) {
                if (randomize) {
                    if (!pattern.ftype.has_value()) {
                        pattern.ftype =
                            get_random_choice<ChipSendType>({ChipSendType::CHIP_UNICAST, ChipSendType::CHIP_MULTICAST});
                    }
                    if (!pattern.ntype.has_value()) {
                        pattern.ntype = get_random_choice<NocSendType>(
                            {NocSendType::NOC_UNICAST_WRITE, NocSendType::NOC_UNICAST_ATOMIC_INC});
                    }
                    if (!pattern.size.has_value()) {
                        pattern.size = get_random_in_range(64, 2048);
                    }
                    if (!pattern.num_packets.has_value()) {
                        pattern.num_packets = get_random_in_range(10, 1000);
                    }
                    if (!pattern.destination.has_value()) {
                        if (pattern.ftype.value() == ChipSendType::CHIP_UNICAST) {
                            // Random unicast destination requires the sender resolved to a node id.
                            FabricNodeId sender_node = resolve_device_identifier(sender.device, device_info_provider_);
                            pattern.destination = ParsedDestinationConfig{
                                .device = this->route_manager_.get_random_unicast_destination(sender_node, this->gen_)};
                        } else if (pattern.ftype.value() == ChipSendType::CHIP_MULTICAST) {
                            // Multicast default: mcast to all devices.
                            FabricNodeId sender_node = resolve_device_identifier(sender.device, device_info_provider_);
                            pattern.destination = ParsedDestinationConfig{
                                .hops = this->route_manager_.get_full_mcast_hops(sender_node)};
                        }
                    }
                } else {
                    // Fixed defaults mirroring the cmdline parser.
                    if (!pattern.ftype.has_value()) {
                        pattern.ftype = ChipSendType::CHIP_UNICAST;
                    }
                    if (!pattern.ntype.has_value()) {
                        pattern.ntype = NocSendType::NOC_UNICAST_WRITE;
                    }
                    if (!pattern.size.has_value()) {
                        pattern.size = 1024;  // Default from cmdline parser
                    }
                    if (!pattern.num_packets.has_value()) {
                        pattern.num_packets = 10;  // A reasonable default
                    }
                }
            }
        }
    }

    // Non-owning collaborators supplied at construction (must outlive this object).
    IDeviceInfoProvider& device_info_provider_;  // device/topology queries
    IRouteManager& route_manager_;               // route and hop computations
    std::mt19937& gen_;                          // shared RNG for all randomized parameters
    // Randomization helpers
    // Picks one element of 'choices' uniformly at random; throws on empty input.
    template <typename T>
    T get_random_choice(const std::vector<T>& choices) {
        if (choices.empty()) {
            TT_THROW("Cannot make a random choice from an empty list.");
        }
        std::uniform_int_distribution<> index_dist(0, choices.size() - 1);
        const auto picked_index = index_dist(this->gen_);
        return choices[picked_index];
    }

    // Returns a uniformly-distributed integer in [min, max], tolerating
    // reversed bounds.
    uint32_t get_random_in_range(uint32_t min, uint32_t max) {
        const uint32_t lo = std::min(min, max);
        const uint32_t hi = std::max(min, max);
        std::uniform_int_distribution<uint32_t> distrib(lo, hi);
        return distrib(this->gen_);
    }
};

// ======================================================================================
// Serialization to YAML
// ======================================================================================

class YamlTestConfigSerializer {
public:
    static void dump(const PhysicalMeshConfig& physical_mesh_config, std::ofstream& fout) {
        YAML::Emitter out;
        out << YAML::BeginMap;

        out << YAML::Key << "physical_mesh";
        out << YAML::Value;
        to_yaml(out, physical_mesh_config);

        out << YAML::EndMap;

        fout << out.c_str() << std::endl;
    }

    static void dump(const AllocatorPolicies& policies, std::ofstream& fout) {
        YAML::Emitter out;
        out << YAML::BeginMap;

        out << YAML::Key << "allocation_policies";
        out << YAML::Value;
        to_yaml(out, policies);

        out << YAML::EndMap;

        fout << out.c_str() << std::endl;
    }

    static void dump(const std::vector<TestConfig>& test_configs, std::ofstream& fout) {
        YAML::Emitter out;
        out << YAML::BeginMap;

        out << YAML::Key << "Test";
        out << YAML::Value;

        out << YAML::BeginSeq;
        for (const auto& test_config : test_configs) {
            to_yaml(out, test_config);
        }
        out << YAML::EndSeq;

        out << YAML::EndMap;

        fout << out.c_str() << std::endl;
    }

private:
    static void to_yaml(YAML::Emitter& out, const FabricNodeId& id) {
        out << YAML::Flow;
        out << YAML::BeginSeq << *id.mesh_id << id.chip_id << YAML::EndSeq;
    }

    static void to_yaml(YAML::Emitter& out, const CoreCoord& core) {
        out << YAML::Flow;
        out << YAML::BeginSeq << core.x << core.y << YAML::EndSeq;
    }

    static void to_yaml(YAML::Emitter& out, const DestinationConfig& config) {
        out << YAML::BeginMap;
        if (config.device) {
            out << YAML::Key << "device";
            out << YAML::Value;
            to_yaml(out, config.device.value());
        }
        if (config.core) {
            out << YAML::Key << "core";
            out << YAML::Value;
            to_yaml(out, config.core.value());
        }
        if (config.hops) {
            out << YAML::Key << "hops";
            out << YAML::Value;
            out << YAML::BeginMap;
            for (const auto& [dir, count] : config.hops.value()) {
                out << YAML::Key << to_string(dir);
                out << YAML::Value << count;
            }
            out << YAML::EndMap;
        }
        if (config.target_address) {
            out << YAML::Key << "target_address";
            out << YAML::Value << config.target_address.value();
        }
        if (config.atomic_inc_address) {
            out << YAML::Key << "atomic_inc_address";
            out << YAML::Value << config.atomic_inc_address.value();
        }
        out << YAML::EndMap;
    }

    static std::string to_string(ChipSendType ftype) {
        return detail::chip_send_type_mapper.to_string(ftype, "ChipSendType");
    }

    static std::string to_string(NocSendType ntype) {
        return detail::noc_send_type_mapper.to_string(ntype, "NocSendType");
    }

    static std::string to_string(RoutingDirection dir) {
        return detail::routing_direction_mapper.to_string(dir, "RoutingDirection");
    }

    static std::string to_string(FabricTensixConfig ftype) {
        return detail::fabric_tensix_type_mapper.to_string(ftype, "FabricTensixConfig");
    }
    static std::string to_string(FabricReliabilityMode mode) {
        return detail::fabric_reliability_mode_mapper.to_string(mode, "FabricReliabilityMode");
    }

    static std::string to_string(tt::tt_fabric::Topology topology) {
        return detail::topology_mapper.to_string(topology, "Topology");
    }

    static void to_yaml(YAML::Emitter& out, const TrafficPatternConfig& config) {
        out << YAML::BeginMap;

        if (config.ftype) {
            out << YAML::Key << "ftype";
            out << YAML::Value << to_string(config.ftype.value());
        }
        if (config.ntype) {
            out << YAML::Key << "ntype";
            out << YAML::Value << to_string(config.ntype.value());
        }
        if (config.size) {
            out << YAML::Key << "size";
            out << YAML::Value << config.size.value();
        }
        if (config.num_packets) {
            out << YAML::Key << "num_packets";
            out << YAML::Value << config.num_packets.value();
        }
        if (config.destination) {
            out << YAML::Key << "destination";
            out << YAML::Value;
            to_yaml(out, config.destination.value());
        }
        if (config.atomic_inc_val) {
            out << YAML::Key << "atomic_inc_val";
            out << YAML::Value << config.atomic_inc_val.value();
        }
        if (config.mcast_start_hops) {
            out << YAML::Key << "mcast_start_hops";
            out << YAML::Value << config.mcast_start_hops.value();
        }

        out << YAML::EndMap;
    }

    static void to_yaml(YAML::Emitter& out, const SenderConfig& config) {
        out << YAML::BeginMap;
        out << YAML::Key << "device";
        out << YAML::Value;
        to_yaml(out, config.device);

        if (config.core) {
            out << YAML::Key << "core";
            out << YAML::Value;
            to_yaml(out, config.core.value());
        }

        out << YAML::Key << "link_id";
        out << YAML::Value << config.link_id;

        out << YAML::Key << "patterns";
        out << YAML::Value;
        out << YAML::BeginSeq;
        for (const auto& pattern : config.patterns) {
            to_yaml(out, pattern);
        }
        out << YAML::EndSeq;

        out << YAML::EndMap;
    }

    static void to_yaml(YAML::Emitter& out, const TestConfig& config) {
        out << YAML::BeginMap;
        out << YAML::Key << "name";
        out << YAML::Value << config.parametrized_name;  // Use parametrized name for readability

        // Optionally include original base name as metadata if different
        if (!config.name.empty() && config.name != config.parametrized_name) {
            out << YAML::Key << "base_name";
            out << YAML::Value << config.name;  // Original name for reference
        }

        if (config.seed != 0) {
            out << YAML::Key << "seed";
            out << YAML::Value << config.seed;
        }

        if (config.performance_test_mode == PerformanceTestMode::BANDWIDTH) {
            out << YAML::Key << "benchmark_mode";
            out << YAML::Value << true;
        } else if (config.performance_test_mode == PerformanceTestMode::LATENCY) {
            out << YAML::Key << "latency_test_mode";
            out << YAML::Value << true;
        }

        if (config.global_sync) {
            out << YAML::Key << "sync";
            out << YAML::Value << config.global_sync;
        }

        out << YAML::Key << "fabric_setup";
        out << YAML::Value;
        to_yaml(out, config.fabric_setup);

        // We only dump concrete senders, not the high-level patterns or randomization policies
        // as they have already been resolved into the sender list.
        out << YAML::Key << "senders";
        out << YAML::Value;
        out << YAML::BeginSeq;
        for (const auto& sender : config.senders) {
            to_yaml(out, sender);
        }
        out << YAML::EndSeq;
        out << YAML::EndMap;
    }

    static std::string to_string(CoreAllocationPolicy policy) {
        return detail::core_allocation_policy_mapper.to_string(policy, "CoreAllocationPolicy");
    }

    static void to_yaml(YAML::Emitter& out, const CoreAllocationConfig& config) {
        out << YAML::BeginMap;
        out << YAML::Key << "policy";
        out << YAML::Value << to_string(config.policy);
        out << YAML::Key << "max_configs_per_core";
        out << YAML::Value << config.max_configs_per_core;
        out << YAML::Key << "initial_pool_size";
        out << YAML::Value << config.initial_pool_size;
        out << YAML::Key << "pool_refill_size";
        out << YAML::Value << config.pool_refill_size;
        out << YAML::EndMap;
    }

    static void to_yaml(YAML::Emitter& out, const PhysicalMeshConfig& config) {
        out << YAML::BeginMap;
        out << YAML::Key << "mesh_descriptor_path";
        out << YAML::Value << config.mesh_descriptor_path;
        out << YAML::Key << "eth_coord_mapping";
        out << YAML::Value;
        to_yaml(out, config.eth_coord_mapping);
        out << YAML::EndMap;
    }

    static void to_yaml(YAML::Emitter& out, const std::vector<std::vector<EthCoord>>& mapping) {
        out << YAML::BeginSeq;
        for (const auto& row : mapping) {
            out << YAML::BeginSeq;
            for (const auto& coord : row) {
                out << YAML::Flow << YAML::BeginSeq << coord.cluster_id << coord.x << coord.y << coord.rack
                    << coord.shelf << YAML::EndSeq;
            }
            out << YAML::EndSeq;
        }
        out << YAML::EndSeq;
    }

    static void to_yaml(YAML::Emitter& out, const AllocatorPolicies& policies) {
        out << YAML::BeginMap;
        out << YAML::Key << "sender";
        out << YAML::Value;
        to_yaml(out, policies.sender_config);
        out << YAML::Key << "receiver";
        out << YAML::Value;
        to_yaml(out, policies.receiver_config);
        out << YAML::Key << "default_payload_chunk_size";
        out << YAML::Value << policies.default_payload_chunk_size;
        out << YAML::EndMap;
    }

    static void to_yaml(YAML::Emitter& out, const TestFabricSetup& config) {
        out << YAML::BeginMap;
        out << YAML::Key << "topology";
        out << YAML::Value << to_string(config.topology);
        if (config.fabric_tensix_config.has_value()) {
            out << YAML::Key << "fabric_tensix_config";
            out << YAML::Value << to_string(config.fabric_tensix_config.value());
        }
        if (config.fabric_reliability_mode.has_value()) {
            out << YAML::Key << "fabric_reliability_mode";
            out << YAML::Value << to_string(config.fabric_reliability_mode.value());
        }
        if (config.topology == Topology::Torus && config.torus_config.has_value()) {
            out << YAML::Key << "torus_config";
            out << YAML::Value << config.torus_config.value();
        }
        out << YAML::Key << "num_links";
        out << YAML::Value << config.num_links;
        out << YAML::EndMap;
    }
};

}  // namespace fabric_tests
}  // namespace tt::tt_fabric
