# This script was created by ChatGPT 5.0.

import onnx
from onnx import helper, TensorProto

def save_model(graph, name, opset=13):
    """Wrap *graph* in a ModelProto, validate it, and serialize it to *name*.

    Parameters
    ----------
    graph : onnx.GraphProto
        Graph to embed in the model.
    name : str
        Output path for the serialized .onnx file.
    opset : int
        Default-domain opset version to record (default 13).
    """
    opset_id = helper.make_operatorsetid("", opset)
    model = helper.make_model(graph, opset_imports=[opset_id])
    # Fail fast on malformed graphs before anything touches disk.
    onnx.checker.check_model(model)
    onnx.save(model, name)
    print(f"=> wrote {name}")

# F32: Arithmetic chain Add → Sub → Mul → Div
def make_arith_chain_f32():
    """Build a graph computing z = ((x + y) * 2.5) / (y - x) on 2x2 floats.

    Returns the GraphProto and the filename to save it under.
    """
    shape = [2, 2]
    x_info = helper.make_tensor_value_info("x", TensorProto.FLOAT, shape)
    y_info = helper.make_tensor_value_info("y", TensorProto.FLOAT, shape)
    z_info = helper.make_tensor_value_info("z", TensorProto.FLOAT, shape)

    # Constant 2.5 with shape [1] so the Mul exercises broadcasting.
    const_c = helper.make_tensor("c", TensorProto.FLOAT, [1], [2.5])

    nodes = [
        helper.make_node("Add", ["x", "y"], ["t_add"], name="add0"),
        helper.make_node("Sub", ["y", "x"], ["t_sub"], name="sub0"),
        helper.make_node("Mul", ["t_add", "c"], ["t_mul"], name="mul0"),
        helper.make_node("Div", ["t_mul", "t_sub"], ["z"], name="div0"),
    ]

    graph = helper.make_graph(
        nodes,
        "arith_chain_f32",
        [x_info, y_info],
        [z_info],
        initializer=[const_c],
    )
    return graph, "arith_chain_f32.onnx"

# F32: Diamond dependency Add ∥ Sub → Mul
def make_diamond_f32():
    """Build a graph computing z = (x + y) * (x - y) on 2x2 floats.

    Two independent branches (Add and Sub) share both inputs and are
    merged by a single Mul — a diamond-shaped dependency pattern.
    """
    shape = [2, 2]
    x_info = helper.make_tensor_value_info("x", TensorProto.FLOAT, shape)
    y_info = helper.make_tensor_value_info("y", TensorProto.FLOAT, shape)
    z_info = helper.make_tensor_value_info("z", TensorProto.FLOAT, shape)

    nodes = [
        helper.make_node("Add", ["x", "y"], ["t_add"], name="add0"),
        helper.make_node("Sub", ["x", "y"], ["t_sub"], name="sub0"),
        helper.make_node("Mul", ["t_add", "t_sub"], ["z"], name="mul0"),
    ]

    graph = helper.make_graph(nodes, "diamond_mix_f32", [x_info, y_info], [z_info])
    return graph, "diamond_mix_f32.onnx"

# F32: Broadcasting test (scalar & vector)
# Z = (X + alpha + row_bias) * Y
def make_broadcast_f32():
    """Build a graph exercising scalar ([1]) and vector ([2]) broadcasting.

    Computes z = (x + alpha + row_bias) * y over 2x2 float inputs.
    """
    shape = [2, 2]
    x_info = helper.make_tensor_value_info("x", TensorProto.FLOAT, shape)
    y_info = helper.make_tensor_value_info("y", TensorProto.FLOAT, shape)
    z_info = helper.make_tensor_value_info("z", TensorProto.FLOAT, shape)

    # Scalar constant, shape [1].
    alpha_init = helper.make_tensor("alpha", TensorProto.FLOAT, [1], [0.1])
    # Vector constant, shape [2]; broadcasts along the last dim.
    bias_init = helper.make_tensor("row_bias", TensorProto.FLOAT, [2], [1.0, -1.0])

    nodes = [
        helper.make_node("Add", ["x", "alpha"], ["t1"], name="add_alpha"),
        helper.make_node("Add", ["t1", "row_bias"], ["t2"], name="add_row_bias"),
        helper.make_node("Mul", ["t2", "y"], ["z"], name="mul_final"),
    ]

    graph = helper.make_graph(
        nodes,
        "broadcast_cases_f32",
        [x_info, y_info],
        [z_info],
        initializer=[alpha_init, bias_init],
    )
    return graph, "broadcast_cases_f32.onnx"

# I32: Integer mix including integer Div
# z = ((a + b) * k) / (b - a)
def make_int32_mix():
    """Build an int32 graph computing z = ((a + b) * 3) / (b - a) on length-3 vectors.

    Includes an integer Div so backends are exercised on integer division.
    """
    vec_shape = [3]
    a_info = helper.make_tensor_value_info("a", TensorProto.INT32, vec_shape)
    b_info = helper.make_tensor_value_info("b", TensorProto.INT32, vec_shape)
    z_info = helper.make_tensor_value_info("z", TensorProto.INT32, vec_shape)

    # Scalar multiplier, shape [1], value 3.
    k_init = helper.make_tensor("k", TensorProto.INT32, [1], [3])

    nodes = [
        helper.make_node("Add", ["a", "b"], ["t_add"], name="add0_i32"),
        helper.make_node("Mul", ["t_add", "k"], ["t_mul"], name="mul0_i32"),
        helper.make_node("Sub", ["b", "a"], ["t_sub"], name="sub0_i32"),
        helper.make_node("Div", ["t_mul", "t_sub"], ["z"], name="div0_i32"),
    ]

    graph = helper.make_graph(
        nodes,
        "int32_mix",
        [a_info, b_info],
        [z_info],
        initializer=[k_init],
    )
    return graph, "int32_mix.onnx"

# U8: Unsigned integer with Min and Add
# z = min(u + bias, v)
def make_uint8_min_add():
    """Build a uint8 graph computing z = min(u + 10, v) on 2x2 inputs."""
    shape = [2, 2]
    u_info = helper.make_tensor_value_info("u", TensorProto.UINT8, shape)
    v_info = helper.make_tensor_value_info("v", TensorProto.UINT8, shape)
    z_info = helper.make_tensor_value_info("z", TensorProto.UINT8, shape)

    # Scalar bias, shape [1], value 10.
    bias_init = helper.make_tensor("bias", TensorProto.UINT8, [1], [10])

    nodes = [
        helper.make_node("Add", ["u", "bias"], ["t_add"], name="add_bias_u8"),
        helper.make_node("Min", ["t_add", "v"], ["z"], name="min_u8"),
    ]

    graph = helper.make_graph(
        nodes,
        "uint8_min_add",
        [u_info, v_info],
        [z_info],
        initializer=[bias_init],
    )
    return graph, "uint8_min_add.onnx"

# F32: Max/Min branches merged via Add (single output)
# z = max(x, y) + min(x, y)
def make_maxmin_merge_f32():
    """Build a graph computing z = max(x, y) + min(x, y) on 2x2 floats."""
    shape = [2, 2]
    x_info = helper.make_tensor_value_info("x", TensorProto.FLOAT, shape)
    y_info = helper.make_tensor_value_info("y", TensorProto.FLOAT, shape)
    z_info = helper.make_tensor_value_info("z", TensorProto.FLOAT, shape)

    nodes = [
        helper.make_node("Max", ["x", "y"], ["t_max"], name="max0"),
        helper.make_node("Min", ["x", "y"], ["t_min"], name="min0"),
        helper.make_node("Add", ["t_max", "t_min"], ["z"], name="merge_add"),
    ]

    graph = helper.make_graph(nodes, "maxmin_merge_f32", [x_info, y_info], [z_info])
    return graph, "maxmin_merge_f32.onnx"

if __name__ == "__main__":
    # Build and serialize every test model at opset 13.
    for builder in (
        make_arith_chain_f32,
        make_diamond_f32,
        make_broadcast_f32,
        make_int32_mix,
        make_uint8_min_add,
        make_maxmin_merge_f32,
    ):
        graph, filename = builder()
        save_model(graph, filename, opset=13)
