/*
 *	Copyright 2023 Jan Pfeifer
 *
 *	Licensed under the Apache License, Version 2.0 (the "License");
 *	you may not use this file except in compliance with the License.
 *	You may obtain a copy of the License at
 *
 *	http://www.apache.org/licenses/LICENSE-2.0
 *
 *	Unless required by applicable law or agreed to in writing, software
 *	distributed under the License is distributed on an "AS IS" BASIS,
 *	WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *	See the License for the specific language governing permissions and
 *	limitations under the License.
 */

package context

import (
	"fmt"
	"reflect"
	"runtime"
	"slices"
	"sync"

	"github.com/gomlx/gomlx/backends"
	. "github.com/gomlx/gomlx/internal/exceptions"
	"github.com/gomlx/gomlx/pkg/core/distributed"
	"github.com/gomlx/gomlx/pkg/core/graph"
	"github.com/gomlx/gomlx/pkg/core/tensors"
	"github.com/pkg/errors"
	"k8s.io/klog/v2"
)

// Generated by `cmd/constraints_generator`:

// ExecGraphFn is a type parameter (constraint) listing the accepted graph-building function
// signatures for the MustNewExec constructor: the first parameter is always a *Context, followed
// by a *Graph, a []*Node, or 1 to 6 individual *Node inputs; the outputs can be none,
// 1 to 3 *Node values, or a single []*Node.
type ExecGraphFn interface {
	func(*Context, *Graph) |
		func(*Context, []*Node) |
		func(*Context, *Node) |
		func(*Context, *Node, *Node) |
		func(*Context, *Node, *Node, *Node) |
		func(*Context, *Node, *Node, *Node, *Node) |
		func(*Context, *Node, *Node, *Node, *Node, *Node) |
		func(*Context, *Node, *Node, *Node, *Node, *Node, *Node) |

		func(*Context, *Graph) *Node |
		func(*Context, []*Node) *Node |
		func(*Context, *Node) *Node |
		func(*Context, *Node, *Node) *Node |
		func(*Context, *Node, *Node, *Node) *Node |
		func(*Context, *Node, *Node, *Node, *Node) *Node |
		func(*Context, *Node, *Node, *Node, *Node, *Node) *Node |
		func(*Context, *Node, *Node, *Node, *Node, *Node, *Node) *Node |

		func(*Context, *Graph) (*Node, *Node) |
		func(*Context, []*Node) (*Node, *Node) |
		func(*Context, *Node) (*Node, *Node) |
		func(*Context, *Node, *Node) (*Node, *Node) |
		func(*Context, *Node, *Node, *Node) (*Node, *Node) |
		func(*Context, *Node, *Node, *Node, *Node) (*Node, *Node) |
		func(*Context, *Node, *Node, *Node, *Node, *Node) (*Node, *Node) |
		func(*Context, *Node, *Node, *Node, *Node, *Node, *Node) (*Node, *Node) |

		func(*Context, *Graph) (*Node, *Node, *Node) |
		func(*Context, []*Node) (*Node, *Node, *Node) |
		func(*Context, *Node) (*Node, *Node, *Node) |
		func(*Context, *Node, *Node) (*Node, *Node, *Node) |
		func(*Context, *Node, *Node, *Node) (*Node, *Node, *Node) |
		func(*Context, *Node, *Node, *Node, *Node) (*Node, *Node, *Node) |
		func(*Context, *Node, *Node, *Node, *Node, *Node) (*Node, *Node, *Node) |
		func(*Context, *Node, *Node, *Node, *Node, *Node, *Node) (*Node, *Node, *Node) |

		func(*Context, *Graph) []*Node |
		func(*Context, []*Node) []*Node |
		func(*Context, *Node) []*Node |
		func(*Context, *Node, *Node) []*Node |
		func(*Context, *Node, *Node, *Node) []*Node |
		func(*Context, *Node, *Node, *Node, *Node) []*Node |
		func(*Context, *Node, *Node, *Node, *Node, *Node) []*Node |
		func(*Context, *Node, *Node, *Node, *Node, *Node, *Node) []*Node
}

// ExecGraphFnOneOutput is a type parameter (constraint) for the function types accepted by the
// MustNewExec constructor that return exactly one *Node output.
// It is the subset of ExecGraphFn with a single *Node result.
type ExecGraphFnOneOutput interface {
	func(*Context, *Graph) *Node |
		func(*Context, []*Node) *Node |
		func(*Context, *Node) *Node |
		func(*Context, *Node, *Node) *Node |
		func(*Context, *Node, *Node, *Node) *Node |
		func(*Context, *Node, *Node, *Node, *Node) *Node |
		func(*Context, *Node, *Node, *Node, *Node, *Node) *Node |
		func(*Context, *Node, *Node, *Node, *Node, *Node, *Node) *Node
}

// Exec creates and executes computation graphs that take as input a
// Context as needed based on the inputs shapes, to allow the function
// to access (both read and set) variables and everything in the Context.
// Otherwise, very similar to graph.Exec.
//
// It simplifies the process of executing a graph building
// function with real values.
//
// For example, assume you wrote the "symbolic computation" (graph) function:
//
//	func LogitsGraph(ctx *context.Context, inputs *Node) *Node {
//		logits := layers.Dense(ctx.In("dense0"), inputs, 5)
//		logits = layers.Dense(ctx.In("dense1"), logits, 5)
//		logits = Sigmoid(logits)
//		return logits
//	}
//
// And then with Exec one can do:
//
//	ctx := context.New()  // New context holding the model variables.
//	var logitsFn = context.MustNewExec(backend, ctx, LogitsGraph)
//	batch := [][]float32{ {1, 2, 3}, {4, 5, 6} } // 2 examples with 3 features (shape=[2,3])
//	results, err := logitsFn.Exec(batch)
//	if err != nil {
//		panic(err)
//	}
//	fmt.Printf("Logits(%v) = %s\n", batch, results[0])
//
// A few things to note (valid to all family of Exec.Exec methods, Exec.Exec, Exec.Exec1, Exec.MustExec, etc.):
//
//   - Exec.Exec doesn't take as input the context again, it uses the value used to create the Exec object.
//   - The inputs are materialized as tensors, if they are not yet. The conversion is automatic (but slower if it
//     needs to convert before executing).
//   - The ctxGraphFn represents the "symbolic computation" (graph) function. Exec.Exec calls it the first time
//     (or whenever the input shapes change) to build the computation graph, and then JIT-compile it.
//     That means the first call (for each new input shape) is slow, but further calls are pre-compiled and executed
//     fast.
//   - The hyperparameters in the Context (Context.SetParams) are only used during the graph building and not after that.
//     We recommend freezing them before creating the Exec object.
//   - Only the variables in the Context can be updated -- either from outside a graph execution or within
//     the graph execution itself -- for instance, when training a model. Exec.Exec manages the updates automatically.
//   - While ctxGraphFn is executed in a single goroutine to build the computation graph, the actual execution of
//     the JIT-compiled program can be done concurrently -- meaning you can call Exec.Exec concurrently from
//     multiple goroutines. It's always safe, but different backends will have different optimal parallelization levels
//     if you are trying to optimize for throughput.
//
// Example: Implementing a counter in a variable:
//
//	ctx := context.New()
//	counterExec := context.MustNewExec(backend, ctx, func(ctx *context.Context, g *Graph) *Node {
//		counterVar := ctx.VariableWithValue("count", int32(10))
//		count := counterVar.ValueGraph(g)
//		count = AddScalar(count, 1)
//		counterVar.SetValueGraph(count)
//		return count
//	})
//	fmt.Println("Counting:")
//	for range 3 {
//		fmt.Printf("\tcount=%s\n", counterExec.Exec1())  // -> 11, 12, 13
//	}
//	counterVar := ctx.InspectVariable(ctx.Scope(), "count")  // Inspecting the variable from outside the graph execution.
//	fmt.Printf("- State of counter=%s\n", counterVar.Value())  // -> int32(13)
//
// Like with graph.Exec, the need to build different graphs for different
// shapes can be expensive when sizes of the inputs vary a lot.
// The usual solution is to use shapes with size in a power scale (for instance, powers of 2) and padding
// the input tensors for unused slices (often using a mask).
//
// For safety concerns, the cache of JIT-compiled graphs for different input shapes is limited.
// It can be set or disabled with SetMaxCache.
type Exec struct {
	// backend and context provided at construction; exec is the wrapped graph.Exec that
	// performs the actual caching, compilation and execution.
	backend backends.Backend
	context *Context
	exec    *graph.Exec

	// Original function that takes ctx and the converted closure
	// that only takes *Node as input.
	ctxGraphFn, graphFn                       any
	inputIsGraph, inputAsSlice, outputAsSlice bool
	inputShardingSpecs                        []*distributed.ShardingSpec
	outputShardingSpecs                       []*distributed.ShardingSpec

	// changedVars maps each graph's GraphId to their list of modified variables.
	// It's used to update the variables in the Context after the graph execution -- these variables are added
	// as extra outputs.
	changedVars   map[graph.GraphId][]*Variable
	muChangedVars sync.Mutex

	// isInitializeVariablesExec indicates this executor is being used to initialize variables.
	// Initializing variables within the ctxGraphFn would lead to an infinite recursion.
	// This checks for that.
	isInitializeVariablesExec bool
}

// NewExecAny constructs an Exec object for the given context and symbolic computation function ctxGraphFn.
//
// The ctxGraphFn is called to build the computation graphs with a Context.
// It must take a *Context input parameter followed by one or more *Node parameters as input and return one or more *Node.
// Alternatively, it can, instead of *Node inputs, take a *Graph object, when there are no input tensors.
//
// The Context ctx passed in the construction is used in all calls to ctxGraphFn, as well as during the graph execution later.
// If set to nil, it automatically creates a new empty context.
//
// Before the execution of a graph, it initializes the variables as needed, using the configured initializer.
// And variables updated in the graph (using Variable.SetValueGraph) are updated also during execution.
// More details see Exec.
func NewExecAny(backend backends.Backend, ctx *Context, ctxGraphFn any) (*Exec, error) {
	if ctx == nil {
		ctx = New()
	}
	e := &Exec{
		backend:     backend,
		context:     ctx,
		ctxGraphFn:  ctxGraphFn,
		changedVars: make(map[graph.GraphId][]*Variable),
	}

	// Reflection types used to validate the signature of ctxGraphFn.
	fnType := reflect.TypeOf(ctxGraphFn)
	if fnType.Kind() != reflect.Func {
		return nil, errors.Errorf("ctxGraphFn must be a function")
	}
	nodeType := reflect.TypeOf((*Node)(nil))
	contextType := reflect.TypeOf(ctx)
	graphType := reflect.TypeOf((*Graph)(nil))

	// The function must take a *Context followed by at least one other input.
	if fnType.NumIn() < 2 {
		return nil, errors.Errorf("at least *Context and one input argument required")
	}
	if fnType.In(0) != contextType {
		return nil, errors.Errorf(
			"the first argument for ctxGraphFn must be a *Context, got %s instead",
			fnType.In(0),
		)
	}

	// Validate the remaining inputs: either a single []*Node, a single *Graph, or only *Node arguments.
	// Note: the "only input" cases force NumIn() == 2, so the loop ends right after matching them.
	for ii := 1; ii < fnType.NumIn(); ii++ {
		argType := fnType.In(ii)
		switch {
		case argType.Kind() == reflect.Slice && argType.Elem() == nodeType:
			// Case 1: []*Node, must be the only input (besides the Context).
			if fnType.NumIn() != 2 {
				return nil, errors.Errorf(
					"[]*Node parameters are only accepted if they are the only input besides the Context, got function type %s instead",
					fnType,
				)
			}
			e.inputAsSlice = true
		case argType == graphType:
			// Case 2: *Graph, must be the only input (besides the Context).
			if fnType.NumIn() != 2 {
				return nil, errors.Errorf(
					"*Graph argument is only accepted if it is the only input besides the Context, got function type %s instead",
					fnType,
				)
			}
			e.inputIsGraph = true
		case argType != nodeType:
			return nil, errors.Errorf("input parameter %d is not of type *Node", ii)
		}
	}

	// Validate the outputs: either a single []*Node or only *Node outputs.
	for ii := 0; ii < fnType.NumOut(); ii++ {
		outType := fnType.Out(ii)
		switch {
		case outType.Kind() == reflect.Slice && outType.Elem() == nodeType:
			if fnType.NumOut() != 1 {
				return nil, errors.Errorf(
					"[]*Node parameters are only accepted as output if they are the only output, got function type %s instead",
					fnType,
				)
			}
			e.outputAsSlice = true
		case outType != nodeType:
			return nil, errors.Errorf("output parameter %d is not of type *Node", ii)
		}
	}

	// Wrap ctxGraphFn into a context-free closure and create the underlying graph.Exec with it.
	e.buildGraphFn()
	e.exec = graph.MustNewExecAny(backend, e.graphFn)
	funcName := runtime.FuncForPC(reflect.ValueOf(ctxGraphFn).Pointer()).Name()
	e.exec.WithName(fmt.Sprintf("Context.Exec:%s", funcName))
	e.exec.SetSideParamsHook(e.setSideParams)
	return e, nil
}

// buildGraphFn constructs a function graphFn that can be passed to the wrapped graph.Exec.
// This function is a closure that will call the ctxGraphFn provided by the user with the
// extra *context.Context argument, plus it prepends the output with the updated variable values --
// so that, behind the scenes, it can update the variables for the user after execution.
func (e *Exec) buildGraphFn() {
	ctxGraphFnT := reflect.TypeOf(e.ctxGraphFn)
	numIn := ctxGraphFnT.NumIn() - 1
	var node *Node
	nodeT := reflect.TypeOf(node)
	var nodeSlice []*Node
	nodeSliceT := reflect.TypeOf(nodeSlice)
	var tmpGraph *Graph
	graphT := reflect.TypeOf(tmpGraph)

	// Build input types for new graphFn: same as ctxGraphFn, but without the Context.
	var inT []reflect.Type
	if e.inputIsGraph {
		// The only input is a graph.
		inT = []reflect.Type{graphT}
	} else if e.inputAsSlice {
		// The only input is a []*Node.
		inT = []reflect.Type{nodeSliceT}
	} else {
		// One *Node input per original *Node parameter.
		inT = make([]reflect.Type, numIn)
		for ii := 0; ii < numIn; ii++ {
			inT[ii] = nodeT
		}
	}

	// Output types for a new graphFn: it is converted to a []*Node, because we will prepend
	// the changed variables as extra outputs.
	outT := []reflect.Type{nodeSliceT}

	// Builds the function that will be called without Context, by graph.Exec. It will take as
	// input a slice of *Node (or only a *Graph), and as output also a slice of *Node.
	graphFnT := reflect.FuncOf(inT, outT, false)
	e.graphFn = reflect.MakeFunc(graphFnT, func(args []reflect.Value) (results []reflect.Value) {
		// Inputs for the original ctxGraphFn: we prepend the context to the arguments.
		argsWithContext := make([]reflect.Value, len(args)+1)
		argsWithContext[0] = reflect.ValueOf(e.context)
		copy(argsWithContext[1:], args)

		// Call ctxGraphFn, the results will be a slice of *Node.
		ctxGraphFnResults := reflect.ValueOf(e.ctxGraphFn).Call(argsWithContext)

		// Find the graph: from the *Graph argument directly, or from the first input *Node.
		var g *Graph
		if e.inputIsGraph {
			g = args[0].Interface().(*Graph)
		} else if e.inputAsSlice {
			nodes := args[0].Interface().([]*Node)
			g = nodes[0].Graph()
		} else {
			node := args[0].Interface().(*Node)
			g = node.Graph()
		}
		graphId := g.GraphId()

		// Find variables that were changed and their updated graph values (*Node).
		var changedVars []*Variable
		var allValues []*Node
		e.context.EnumerateVariables(func(v *Variable) {
			if v.ChangedInGraph(g) {
				changedVars = append(changedVars, v)
				allValues = append(allValues, v.ValueGraph(g))
			}
		})
		{
			// Save list of variables changed, keyed by graphId, so ExecWithGraph can
			// match the extra outputs back to the variables.
			e.muChangedVars.Lock()
			e.changedVars[graphId] = changedVars
			e.muChangedVars.Unlock()
		}
		// Append ctxGraphFnResults to allValues.
		if e.outputAsSlice {
			// ctxGraphFn returns one value, a []*Node, easy to append.
			allValues = append(allValues, ctxGraphFnResults[0].Interface().([]*Node)...)
		} else {
			// Append one result at a time (it's ok if there are no results).
			for _, r := range ctxGraphFnResults {
				allValues = append(allValues, r.Interface().(*Node))
			}
		}

		// the results will be a []*Node, which will hold all the values.
		results = []reflect.Value{reflect.ValueOf(allValues)}

		// Mark context for reuse after the first time it is used.
		if !e.context.reuse {
			e.context = e.context.Reuse()
		}
		return
	}).Interface()
}

// Finalize clears the cache, finalizing and releasing the memory for all compiled graphs.
// The Exec object shouldn't be used after that.
// Especially if you are compiling the graph to many different shapes, try to manually finalize
// and not wait for the GC -- particularly important if you are running this in benchmarks.
func (e *Exec) Finalize() {
	// Delegates to the wrapped graph.Exec, which owns the compiled-graph cache.
	e.exec.Finalize()
}

// setSideParams is used by graph.Exec.SetSideParamsHook to set up
// the variable values as parameters just before graph execution.
//
// It first (lazily) initializes any context variables that still need initialization,
// then fills the graph parameter values for every variable used in the given graph,
// dispatching to the single-device or distributed implementation.
//
// It's assumed len(inputBuffers) = len(donate) = g.NumDevices() * g.NumParameters(), organized by device first.
// And this function needs to set the last parameters (used by the variables) for each device,
// in the order they were added to the backend.
//
// `Exec*` methods are used by those implementing an executor (context.Exec) or related tests, not normally
// needed by end users.
func (e *Exec) setSideParams(g *Graph, inputBuffers []backends.Buffer, donate []bool) error {
	// Initialize variables if needed.
	// Skipped when this Exec is itself the variable-initialization executor, to avoid recursion.
	ctx := e.context
	if !e.isInitializeVariablesExec && ctx.NeedsInitialization() {
		err := ctx.InitializeVariables(e.backend, func(initExec *Exec) error {
			return initExec.ConfigureDistributionFrom(e)
		})
		if err != nil {
			return errors.WithMessagef(err, "failed to initialize variables")
		}
	}

	numDevices := e.exec.NumDevices()
	if numDevices > 1 {
		return e.setSideParamsDistributed(g, inputBuffers, donate)
	}
	return e.setSideParamsSingleDevice(g, inputBuffers, donate)
}

// setSideParamsSingleDevice sets the side parameters for single-device execution.
func (e *Exec) setSideParamsSingleDevice(g *Graph, inputBuffers []backends.Buffer, donate []bool) error {
	ctx := e.context
	graphId := g.GraphId()
	deviceAssignment := e.exec.DeviceAssignment()
	deviceNum := backends.DeviceNum(0)
	if len(deviceAssignment) > 0 {
		deviceNum = deviceAssignment[0]
	}

	for v := range ctx.IterVariables() {
		nodes, found := v.graphToNodes.Load(graphId)
		if !found {
			continue
		}
		if nodes == nil || nodes.paramNode == nil || nodes.paramNode.Type() != graph.NodeTypeParameter {
			return errors.Errorf("invalid paramNode for variable %q", v.ParameterName())
		}
		handle := nodes.paramNode.GetParameterHandle()

		if v.ChangedInGraph(g) {
			// We donate the buffer, since we are getting a new one on the output.
			value, err := v.Value()
			if err != nil {
				return err
			}
			inputBuffers[handle], err = value.DonateBuffer(e.backend, deviceNum)
			if err != nil {
				return err
			}
			err = v.Reset()
			if err != nil {
				return err
			}
			donate[handle] = true
		} else {
			if !v.HasValue() {
				if e.isInitializeVariablesExec {
					Panicf("variable %q used and not initialized during variable initialization, this would lead to "+
						"recursive initialization of variables, and is not supported", v.ScopeAndName())
				} else {
					Panicf("variable %q failed to initialize", v.ScopeAndName())
				}
			}
			value, err := v.Value()
			if err != nil {
				return err
			}
			inputBuffers[handle], err = value.Buffer(e.backend, deviceNum)
			if err != nil {
				return err
			}
			donate[handle] = false
		}
	}
	return nil
}

// setSideParamsDistributed sets the side parameters for distributed execution (AutoSharding or SPMD).
// In distributed mode, inputBuffers and donate are organized as:
// [device0_param0, device0_param1, ..., device1_param0, device1_param1, ...]
// For each variable parameter, we need to set numDevices buffers from its distributed shards.
func (e *Exec) setSideParamsDistributed(g *Graph, inputBuffers []backends.Buffer, donate []bool) error {
	ctx := e.context
	graphId := g.GraphId()
	numDevices := e.exec.NumDevices()
	numParams := g.NumParameters()
	deviceAssignment := e.exec.DeviceAssignment()

	for v := range ctx.IterVariables() {
		nodes, found := v.graphToNodes.Load(graphId)
		if !found {
			// Variable not used by this graph.
			continue
		}
		if nodes == nil || nodes.paramNode == nil || nodes.paramNode.Type() != graph.NodeTypeParameter {
			return errors.Errorf("invalid paramNode for variable %q", v.ParameterName())
		}
		handle := int(nodes.paramNode.GetParameterHandle())

		if !v.HasValue() {
			if e.isInitializeVariablesExec {
				return errors.Errorf(
					"variable %q used and not initialized during variable initialization, this would lead to "+
						"recursive initialization of variables, and is not supported", v.ScopeAndName())
			} else {
				return errors.Errorf("variable %q failed to initialize", v.ScopeAndName())
			}
		}

		// Get shards for this variable: either from distValue or by replicating value.
		dTensor, err := v.DistributedValue()
		if err != nil {
			return errors.WithMessagef(err, "failed to get distributed value for variable %q", v.ScopeAndName())
		}
		shards := dTensor.Shards()
		changedInGraph := v.ChangedInGraph(g)
		if changedInGraph {
			// Donate buffers since we'll get new ones on output.
			for deviceIdx := range numDevices {
				// Buffers are organized device-major: bufIdx indexes [device][param].
				bufIdx := deviceIdx*numParams + handle
				deviceNum := backends.DeviceNum(deviceIdx)
				if len(deviceAssignment) > deviceIdx {
					deviceNum = deviceAssignment[deviceIdx]
				}
				inputBuffers[bufIdx], err = shards[deviceIdx].DonateBuffer(e.backend, deviceNum)
				if err != nil {
					return errors.WithMessagef(err, "failed to donate buffer for variable %q on device %d",
						v.ScopeAndName(), deviceIdx)
				}
				donate[bufIdx] = true
			}
			// Reset the variable since we donated all shards.
			err = v.Reset()
			if err != nil {
				return err
			}
		} else {
			// Variable not changed: pass buffers without donating them.
			for deviceIdx := range numDevices {
				bufIdx := deviceIdx*numParams + handle
				deviceNum := backends.DeviceNum(deviceIdx)
				if len(deviceAssignment) > deviceIdx {
					deviceNum = deviceAssignment[deviceIdx]
				}
				inputBuffers[bufIdx], err = shards[deviceIdx].Buffer(e.backend, deviceNum)
				if err != nil {
					return errors.WithMessagef(err, "failed to get buffer for variable %q on device %d",
						v.ScopeAndName(), deviceIdx)
				}
				donate[bufIdx] = false
			}
		}
	}
	return nil
}

// ConfigureDistributionFrom configures the distribution of the executor from another executor.
// At the end e will have been configure exactly like e2.
func (e *Exec) ConfigureDistributionFrom(e2 *Exec) error {
	switch e2.exec.DistributionStrategy() {
	case distributed.None:
		return nil
	case distributed.AutoSharding:
		e.AutoSharding(e2.Meshes()...)
	case distributed.SPMD:
		e.SPMD(e2.Meshes()[0])
	}
	deviceAssignment := e2.DeviceAssignment()
	e.WithDeviceAssignment(deviceAssignment)
	return nil
}

// SetNodeLogger sets the function to be called for the nodes
// marked for logging during execution. If set to nil,
// nothing will be logged.
func (e *Exec) SetNodeLogger(loggerFn graph.LoggerFn) {
	e.exec.SetNodeLogger(loggerFn)
}

// GetNodeLogger returns the currently registered LoggerFn (see SetNodeLogger).
func (e *Exec) GetNodeLogger() graph.LoggerFn {
	return e.exec.GetNodeLogger()
}

// WithDeviceAssignment specifies which concrete devices to use when compiling computation graphs.
//
// These must be valid numbers for the backend and must match the number of devices of the
// largest mesh given to WithAutoSharding or WithSPMD, or one fixed device for non-portable single-device
// execution.
//
// The default assignment is simply using the devices in the order they were added to the backend
// (sequential DeviceNum values, starting from 0).
//
// For single-device execution (distributed strategy "None"), the default is to make it portable.
// If the backend supports that, it can be executed in any device with ExecOnDevice().
//
// It returns a reference to itself so calls can be cascaded.
func (e *Exec) WithDeviceAssignment(devices []backends.DeviceNum) *Exec {
	e.exec.WithDeviceAssignment(devices)
	return e
}

// DeviceAssignment returns the current device assignment used by this Exec
// (see WithDeviceAssignment).
// It returns nil if no assignment was provided.
func (e *Exec) DeviceAssignment() []backends.DeviceNum {
	return e.exec.DeviceAssignment()
}

// DistributionStrategy returns the distribution strategy used by this Exec.
//
// The default is distributed.None, which means that graphs constructed by this Exec will not be distributed,
// and it will be executed on the device specified by Exec.WithDevice (defaults to 0).
// See SPMD and AutoSharding to change it.
func (e *Exec) DistributionStrategy() distributed.Strategy {
	return e.exec.DistributionStrategy()
}

// NumDevices returns the number of devices used by this Exec.
// It depends on the strategy and meshes used.
// The default is 1 (no distribution strategy).
func (e *Exec) NumDevices() int {
	return e.exec.NumDevices()
}

// SPMD sets the distribution strategy to SPMD, which means that graphs constructed by this Exec will be replicated
// across the devices specified in the mesh.
// It also sets the associated Context's default sharding spec to replicate across the mesh.
//
// A nil mesh will cause a panic.
//
// It returns a reference to itself, so configuration calls can be cascaded.
func (e *Exec) SPMD(mesh *distributed.DeviceMesh) *Exec {
	e.exec.SPMD(mesh)
	e.context.data.defaultShardingSpec = distributed.NewReplicatedShardingSpec(mesh)
	return e
}

// AutoSharding sets the distribution strategy to AutoSharding and records the meshes that will be used in the
// computation graph(s).
// The first mesh is also used for the associated Context's default (replicated) sharding spec.
//
// At least one mesh must be given; a nil or missing mesh will cause a panic.
//
// It returns a reference to itself, so configuration calls can be cascaded.
func (e *Exec) AutoSharding(meshes ...*distributed.DeviceMesh) *Exec {
	if len(meshes) == 0 {
		// Fail with a clear message instead of an index-out-of-range panic on meshes[0] below.
		Panicf("Exec.AutoSharding requires at least one mesh")
	}
	e.exec.AutoSharding(meshes...)
	e.context.data.defaultShardingSpec = distributed.NewReplicatedShardingSpec(meshes[0])
	return e
}

// Meshes returns the slice of currently configured meshes (see SPMD and AutoSharding).
// It returns nil if no meshes were provided (e.g., for non-distributed execution).
func (e *Exec) Meshes() []*distributed.DeviceMesh {
	return e.exec.Meshes()
}

// WithInputShardingSpecs sets the sharding specs for the inputs.
//
// This is used for distributed computations with AutoSharding.
// If the function takes variable inputs (`[]*Node`), then the last spec provided is used for all remaining inputs.
//
// The specs are not validated here; any errors will only be surfaced during the execution.
//
// It returns a reference to itself so calls can be cascaded.
func (e *Exec) WithInputShardingSpecs(specs ...*distributed.ShardingSpec) *Exec {
	// Keep a local copy and forward to the wrapped graph.Exec.
	e.inputShardingSpecs = specs
	e.exec.WithInputShardingSpecs(specs...)
	return e
}

// WithOutputShardingSpecs sets the sharding specs for the outputs.
//
// If the function returns a variable number of outputs (`[]*Node`), then the last spec provided is used for all remaining outputs.
// This is used for distributed computations with AutoSharding.
//
// The specs are not validated here; any errors will only be surfaced during the execution.
//
// It returns a reference to itself so calls can be cascaded.
func (e *Exec) WithOutputShardingSpecs(specs ...*distributed.ShardingSpec) *Exec {
	// Keep a local copy and forward to the wrapped graph.Exec.
	e.outputShardingSpecs = specs
	e.exec.WithOutputShardingSpecs(specs...)
	return e
}

// DefaultShardingSpec returns the default sharding spec configured for the associated Context object
// (see SetDefaultShardingSpec, SPMD and AutoSharding).
// It returns nil if no default sharding spec was provided (e.g., for non-distributed execution).
func (e *Exec) DefaultShardingSpec() *distributed.ShardingSpec {
	return e.context.data.defaultShardingSpec
}

// SetDefaultShardingSpec sets the default sharding spec for the associated Context object.
// It returns an error if the mesh of the sharding spec given was not configured with Exec.SPMD or Exec.AutoSharding.
//
// If you pass a nil spec and distributed execution is configured, it sets the default sharding spec to replicate all axes,
// the predefined default.
//
// If not using distributed execution you don't need to set this.
func (e *Exec) SetDefaultShardingSpec(spec *distributed.ShardingSpec) error {
	if spec == nil {
		// Nil spec: for distributed execution fall back to the predefined replicate-everything default.
		if e.exec.DistributionStrategy() != distributed.None {
			spec = distributed.NewReplicatedShardingSpec(e.exec.Meshes()[0])
		}
		e.context.data.defaultShardingSpec = spec
		return nil
	}

	// A non-nil spec only makes sense for distributed execution, and its mesh must be one of
	// the meshes already configured in this Exec.
	if e.exec.DistributionStrategy() == distributed.None {
		return errors.Errorf("cannot set non-nil default sharding spec for non-distributed execution")
	}
	mesh := spec.Mesh
	if !slices.Contains(e.exec.Meshes(), mesh) {
		return errors.Errorf("spec given uses a mesh %q that was not configured with Exec.SPMD or Exec.AutoSharding", mesh.Name())
	}
	if err := spec.Validate(); err != nil {
		return err
	}
	e.context.data.defaultShardingSpec = spec
	return nil
}

// WithName sets the name of Exec, used to provide the name to graphs created.
// This should be called before any invocations of MustExec().
// It returns a reference to itself so calls can be cascaded.
func (e *Exec) WithName(name string) *Exec {
	e.exec.WithName(name)
	return e
}

// Name returns the Exec name, a string used as a prefix for Graph construction.
// See WithName to change it.
func (e *Exec) Name() string {
	return e.exec.Name()
}

// SetMaxCache sets the maximum size of the cache of JIT-compiled graphs
// (one entry per distinct combination of input shapes).
// Set it to -1 to have unlimited cache size.
// It returns a reference to itself so calls can be cascaded.
func (e *Exec) SetMaxCache(maxCacheSize int) *Exec {
	e.exec.SetMaxCache(maxCacheSize)
	return e
}

// Context returns the associated Context object, usually created
// during the creation of the Exec object. It can be set to something
// different with SetContext().
func (e *Exec) Context() *Context {
	return e.context
}

// SetContext associates the given Context with the Exec object.
// It should be called before the first call to Exec is made.
// Notice that only after the first time the context is used to build a graph
// is it set to Reuse. If the Context variables were already created,
// it should be marked with Context.Reuse.
// It returns a reference to itself so calls can be cascaded.
func (e *Exec) SetContext(context *Context) *Exec {
	e.context = context
	return e
}

// Exec parses the arguments into tensors (if they are not yet) and executes
// the graph corresponding to the shapes of the arguments.
//
// Notice it uses the Context object used during creation -- if needed, you can change it with SetContext.
//
// If a graph does not yet exist, one is created (using ctxGraphFn provided during creation), compiled, and cached
// for these shapes of the inputs.
// After the very first invocation of Exec, the context is marked as Context.Reuse().
//
// It returns the outputs in a slice. See Exec1, Exec2, ..., Exec4 as aliases when you expect a fixed number of outputs.
func (e *Exec) Exec(args ...any) ([]*tensors.Tensor, error) {
	// Thin wrapper over ExecWithGraph that discards the graph.
	results, _, err := e.ExecWithGraph(args...)
	return results, err
}

// ExecWithGraph is similar to Exec, but it also returns the computation graph used in the call.
//
// Since Exec creates different computation graphs when the inputs shapes change,
// this can help disambiguate in case the user needs to use the Graph for something else.
//
// After execution, it updates the context variables changed by the graph (their new values are
// returned as extra leading outputs, see buildGraphFn) and strips those extra outputs before
// returning to the caller.
//
// It returns an error if something goes wrong.
func (e *Exec) ExecWithGraph(args ...any) (outputs []*tensors.Tensor, g *Graph, err error) {
	outputs, g, err = e.exec.ExecWithGraph(args...)
	if err != nil {
		return nil, nil, err
	}

	// Separate the changed variables new values and set the variables accordingly:
	// the changed variables outputs come first.
	e.muChangedVars.Lock()
	changedVars := e.changedVars[g.GraphId()]
	e.muChangedVars.Unlock()
	numDevices := e.exec.NumDevices()

	// For distributed execution, outputs are organized as:
	// [device0_output0, device0_output1, ..., device1_output0, device1_output1, ...]
	// where the first len(changedVars) outputs per device are the changed variables.
	numOutputsPerDevice := len(outputs) / numDevices

	if len(changedVars) > numOutputsPerDevice {
		return nil, nil, errors.Errorf(
			"not enough outputs of the graph for updated variables: expected at least %d per device, got %d",
			len(changedVars),
			numOutputsPerDevice,
		)
	}

	if numDevices > 1 {
		outputs, err = e.collectOutputsForDistributed(outputs, changedVars, numDevices, numOutputsPerDevice)
	} else {
		outputs, err = e.collectOutputs(outputs, changedVars)
	}
	if err != nil {
		return nil, nil, err
	}
	return
}

// collectOutputs processes outputs for single-device execution.
// The first len(changedVars) outputs are the new values for the changed variables: each variable
// is updated with its new value (its shape must not have changed), and the remaining outputs --
// the ones the user's graph function actually returned -- are given back to the caller.
//
// Only the first SetValue error aborts the call; subsequent errors are logged.
func (e *Exec) collectOutputs(outputs []*tensors.Tensor, changedVars []*Variable) ([]*tensors.Tensor, error) {
	var firstErr error
	for idx, variable := range changedVars {
		newValue := outputs[idx]
		if !variable.shape.Equal(newValue.Shape()) {
			// A shape change would corrupt the variable; fail immediately.
			return nil, errors.Errorf(
				"variable %q changed shape in graph execution: expected %v, got %v",
				variable.ScopeAndName(),
				variable.shape,
				newValue.Shape(),
			)
		}
		if err := variable.SetValue(newValue); err != nil {
			err = errors.WithMessagef(err, "failed updating value for %q", variable.ScopeAndName())
			if firstErr == nil {
				firstErr = err
			} else {
				klog.Errorf("Exec error: %v", err)
			}
		}
	}
	if firstErr != nil {
		return nil, firstErr
	}
	// Strip the variable updates; what remains are the user-visible outputs.
	return outputs[len(changedVars):], nil
}

// collectOutputsForDistributed processes outputs for distributed execution.
// It collects shards for each changed variable from all devices, creates distributed.Tensor objects,
// and returns the rearranged outputs (excluding variables).
//
// Inputs are organized device-major: outputs[deviceIdx*numOutputsPerDevice + outputIdx], where the
// first len(changedVars) outputs of each device are the new shards of the changed variables.
// Only the first variable-update error aborts the call; subsequent errors are logged.
func (e *Exec) collectOutputsForDistributed(
	outputs []*tensors.Tensor, changedVars []*Variable, numDevices, numOutputsPerDevice int) (
	[]*tensors.Tensor, error) {
	var firstErr error

	// Collect shards for each changed variable and create distributed.Tensor.
	for varIdx, v := range changedVars {
		// Collect shards for this variable from all devices.
		shards := make([]*tensors.Tensor, numDevices)
		for deviceIdx := range numDevices {
			shards[deviceIdx] = outputs[deviceIdx*numOutputsPerDevice+varIdx]
		}

		// Get the sharding spec for this variable, falling back to the context's default.
		shardingSpec := v.shardingSpec
		if shardingSpec == nil {
			shardingSpec = e.context.data.defaultShardingSpec
		}
		if shardingSpec == nil {
			err := errors.Errorf("variable %q has no sharding spec for distributed execution", v.ScopeAndName())
			if firstErr == nil {
				firstErr = err
			} else {
				klog.Errorf("Exec error: %v", err)
			}
			continue
		}

		// Create distributed.Tensor from the shards.
		distValue, err := distributed.NewTensor(shardingSpec, shards)
		if err != nil {
			err = errors.WithMessagef(err, "failed to create distributed tensor for variable %q", v.ScopeAndName())
			if firstErr == nil {
				firstErr = err
			} else {
				klog.Errorf("Exec error: %v", err)
			}
			continue
		}

		// Set the variable's distributed value.
		err = v.SetDistributedValue(distValue)
		if err != nil {
			err = errors.WithMessagef(err, "failed updating distributed value for %q", v.ScopeAndName())
			if firstErr == nil {
				firstErr = err
			} else {
				klog.Errorf("Exec error: %v", err)
			}
		}
	}

	if firstErr != nil {
		return nil, firstErr
	}

	// Rearrange outputs to exclude changed variables:
	// Result is [device0_output0, device0_output1, ..., device1_output0, device1_output1, ...]
	// where the user-visible outputs of each device start after the changed variables.
	numParamsPerDevice := numOutputsPerDevice - len(changedVars)
	newOutputs := make([]*tensors.Tensor, numDevices*numParamsPerDevice)
	for deviceIdx := range numDevices {
		for paramIdx := range numParamsPerDevice {
			srcIdx := deviceIdx*numOutputsPerDevice + len(changedVars) + paramIdx
			dstIdx := deviceIdx*numParamsPerDevice + paramIdx
			newOutputs[dstIdx] = outputs[srcIdx]
		}
	}
	return newOutputs, nil
}

// PreCompile will build the computation graph, JIT-compile and cache it, but not yet execute.
//
// Useful when one wants to measure separately the time for graph compilation and for its execution.
func (e *Exec) PreCompile(args ...any) error {
	return e.exec.PreCompile(args...)
}
