package model

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"shyxy-model-agent/core/config"
	"shyxy-model-agent/core/model"
	"shyxy-model-agent/core/services"
	"shyxy-model-agent/core/types"
	"shyxy-model-agent/pkg/grpc"
	"shyxy-model-agent/pkg/utils"
	"strings"
	"sync"
	"time"

	"github.com/phayes/freeport"
	"github.com/rs/zerolog/log"
)

// ModelLoader owns the set of gRPC-backed model processes: it starts
// them, tracks them by model ID, health-checks them, and shuts them
// down. All map access is serialized through mu.
type ModelLoader struct {
	mu            sync.Mutex        // guards models
	singletonLock sync.Mutex        // held while a backend is active when singletonMode is on
	singletonMode bool              // if true, only one backend may be active at a time
	models        map[string]*Model // loaded models keyed by model ID
	wd            *WatchDog         // optional watchdog handed to gRPC clients; may be nil
}

// NewModelLoader builds an empty loader. When singleActiveBackend is
// true, the loader enforces that at most one backend runs at a time.
func NewModelLoader(singleActiveBackend bool) *ModelLoader {
	return &ModelLoader{
		singletonMode: singleActiveBackend,
		models:        map[string]*Model{},
	}
}

// ListModels returns a snapshot slice of every currently loaded model.
// It errors only if the internal map was never initialized.
func (ml *ModelLoader) ListModels() ([]*Model, error) {
	ml.mu.Lock()
	defer ml.mu.Unlock()

	if ml.models == nil {
		return nil, fmt.Errorf("no models loaded")
	}

	var out []*Model
	for _, m := range ml.models {
		out = append(out, m)
	}
	return out, nil
}

// ListModelByID looks up a single loaded model by its ID.
func (ml *ModelLoader) ListModelByID(modelID string) (*Model, error) {
	ml.mu.Lock()
	defer ml.mu.Unlock()

	if ml.models == nil {
		return nil, fmt.Errorf("no models loaded")
	}
	// Local renamed to m to avoid shadowing the imported model package.
	if m, ok := ml.models[modelID]; ok {
		return m, nil
	}
	return nil, fmt.Errorf("model not found")
}

// SetWatchDog installs the watchdog that is passed to every gRPC client
// created from this loader (see the m.GRPC(..., ml.wd) call sites).
func (ml *ModelLoader) SetWatchDog(wd *WatchDog) {
	ml.wd = wd
}

// backendPath resolves the on-disk location of a backend's gRPC
// executable inside the asset directory layout:
// <assetDir>/backend-assets/grpc/<backend>.
func backendPath(assetDir, backend string) string {
	segments := []string{assetDir, "backend-assets", "grpc", backend}
	return filepath.Join(segments...)
}

// retryTimeout mirrors the 2-minute health-check context used in
// CheckIsLoaded. NOTE(review): confirm all intended call sites use it.
const retryTimeout = 2 * time.Minute

// grpcModel returns a loader function that starts the gRPC backend
// process for `backend`, waits for its health check, asks it to load
// the named model, records the running instance in the database, and
// returns the connected *Model client.
func (ml *ModelLoader) grpcModel(dbLoader *config.DBLoader, backend string, o *Options) func(string, string) (*Model, error) {

	// generateInstanceID builds a unique instance identifier from the
	// model name, a machine tag, and a numeric discriminator.
	generateInstanceID := func(modelName, machineTag string, index int) string {
		return fmt.Sprintf("%s-%s-%d", modelName, machineTag, index)
	}

	return func(modelID, modelName string) (*Model, error) {
		log.Debug().Msgf("Loading Model %s with gRPC  : %+v", modelID, *o)

		getFreeAddress := func() (string, error) {
			port, err := freeport.GetFreePort()
			if err != nil {
				return "", fmt.Errorf("failed allocating free ports: %s", err.Error())
			}
			return fmt.Sprintf("127.0.0.1:%d", port), nil
		}

		grpcProcess := backendPath(o.assetDir, backend)
		// Security check: never execute a binary outside the asset dir.
		if err := utils.VerifyPath(grpcProcess, o.assetDir); err != nil {
			return nil, fmt.Errorf("referring to a backend not in asset dir: %s", err.Error())
		}

		// Check if the backend executable exists
		if _, err := os.Stat(grpcProcess); os.IsNotExist(err) {
			return nil, fmt.Errorf("backend not found: %s", grpcProcess)
		}

		serverAddress, err := getFreeAddress()
		if err != nil {
			return nil, fmt.Errorf("failed allocating free ports: %s", err.Error())
		}

		var args []string
		if o.args != nil {
			args = o.args
		}
		args = append(args, "--modelName", modelName)
		if strings.EqualFold(o.modelInfo.Type, types.ModelTypeOnline) {
			baseUrl, apiKey, err := services.GetOnlineModelStartInfo(o.modelInfo.ID, dbLoader.OnlineModelRepo())
			// BUG FIX: check the error before using the results. Also do
			// NOT log the API key — it is a secret.
			if err != nil {
				return nil, fmt.Errorf("failed to get online model start info: %s", err.Error())
			}
			log.Info().Msgf("Starting online model with baseUrl: %s", baseUrl)
			args = append(args, "--baseUrl", baseUrl)
			if apiKey != "" {
				args = append(args, "--apiKey", apiKey)
			}
		}
		// Make sure the process is executable in any circumstance
		process, err := ml.startProcess(grpcProcess, modelID, serverAddress, args...)
		if err != nil {
			return nil, err
		}

		log.Debug().Msgf("GRPC Service Started")

		client := NewModel(modelID, serverAddress, process)

		log.Debug().Msgf("Wait for the service to start up")
		log.Debug().Msgf("Options: %+v", o.gRPCOptions)

		// Poll the health endpoint until ready or attempts are exhausted.
		ready := false
		for i := 0; i < o.grpcAttempts; i++ {
			alive, err := client.GRPC(o.parallelRequests, ml.wd).HealthCheck(context.Background())
			if alive {
				log.Debug().Msgf("GRPC Service Ready")
				ready = true
				break
			}
			if err != nil && i == o.grpcAttempts-1 {
				log.Error().Err(err).Msg("failed starting/connecting to the gRPC service")
			}
			// Don't waste a delay after the final attempt.
			if i < o.grpcAttempts-1 {
				time.Sleep(time.Duration(o.grpcAttemptsDelay) * time.Second)
			}
		}

		if !ready {
			log.Debug().Msgf("GRPC Service NOT ready")
			// Reap the process so it doesn't linger.
			if proc := client.Process(); proc != nil {
				proc.Stop()
			}
			return nil, fmt.Errorf("grpc service not ready")
		}

		options := o.gRPCOptions
		options.Model = modelName

		log.Debug().Msgf("GRPC: Loading model with options: %+v", options)

		res, err := client.GRPC(o.parallelRequests, ml.wd).LoadModel(o.context, options)
		if err != nil {
			if proc := client.Process(); proc != nil {
				proc.Stop()
			}
			return nil, fmt.Errorf("could not load model: %w", err)
		}
		if !res.Success {
			if proc := client.Process(); proc != nil {
				proc.Stop()
			}
			return nil, fmt.Errorf("could not load model (no success): %s", res.Message)
		}

		// Record the running instance; the timestamp acts as discriminator.
		instance := &model.GrpcInstance{
			InstanceID:  generateInstanceID(o.modelInfo.Name, "local", int(time.Now().Unix())),
			ModelInfoID: o.modelInfo.ID,
			ModelName:   o.modelInfo.Name,
			Status:      "running",
		}

		// NOTE(review): the result of Create is ignored — confirm its
		// signature and surface a DB failure if it returns an error.
		dbLoader.GrpcInstanceRepo().Create(instance)

		if strings.EqualFold(o.modelInfo.Type, types.ModelTypeOnline) {
			// NOTE(review): UpdateStatusByID result is ignored — verify.
			dbLoader.ModelInfoRepo().UpdateStatusByID(o.modelInfo.ID, types.RunningStatus)
		}

		return client, nil
	}
}

// LoadModel returns the cached model for modelID, or invokes loader to
// create it and caches the result. Safe for concurrent use.
// (e.g. modelID = qwen-max_1, modelName = qwen-max)
func (ml *ModelLoader) LoadModel(modelID, modelName string, loader func(string, string) (*Model, error)) (*Model, error) {
	// Fast path: model already loaded and healthy.
	if model := ml.CheckIsLoaded(modelID); model != nil {
		return model, nil
	}

	ml.mu.Lock()
	defer ml.mu.Unlock()

	// BUG FIX: re-check under the lock. CheckIsLoaded releases ml.mu
	// before we re-acquire it here, so a concurrent caller may have
	// loaded the same model in between; without this check we would
	// load it twice and overwrite (and leak) the first process.
	if m, ok := ml.models[modelID]; ok {
		return m, nil
	}

	model, err := loader(modelID, modelName)
	if err != nil {
		return nil, fmt.Errorf("failed to load model with internal loader: %w", err)
	}

	if model == nil {
		return nil, fmt.Errorf("loader didn't return a model")
	}

	ml.models[modelID] = model

	return model, nil
}

// ShutdownModel stops and removes the process backing modelName.
func (ml *ModelLoader) ShutdownModel(modelName string) error {
	ml.mu.Lock()
	err := ml.deleteProcess(modelName)
	ml.mu.Unlock()
	return err
}

// CheckIsLoaded returns the cached model for s if it is loaded and its
// gRPC service responds to a health check. If the service is dead and
// its process has exited, the stale entry is reaped and nil is returned
// so the caller re-loads it. If the entry exists but has no process
// handle, the (possibly unresponsive) model is returned as-is.
func (ml *ModelLoader) CheckIsLoaded(s string) *Model {
	ml.mu.Lock()
	defer ml.mu.Unlock()
	m, ok := ml.models[s]
	if !ok {
		return nil
	}

	log.Debug().Msgf("Model already loaded in memory: %s", s)
	client := m.GRPC(false, ml.wd)

	log.Debug().Msgf("Checking model availability (%s)", s)
	// Use the shared retryTimeout (2 minutes) instead of a duplicated literal.
	cTimeout, cancel := context.WithTimeout(context.Background(), retryTimeout)
	defer cancel()

	alive, err := client.HealthCheck(cTimeout)
	if !alive {
		// BUG FIX: err may be nil even when alive is false; the old code
		// called err.Error() unconditionally and could panic here.
		if err != nil {
			log.Warn().Msgf("GRPC Model not responding: %s", err.Error())
		} else {
			log.Warn().Msgf("GRPC Model not responding: %s", s)
		}
		log.Warn().Msgf("Deleting the process in order to recreate it")
		process := m.Process()
		if process == nil {
			log.Error().Msgf("Process not found for '%s' and the model is not responding anymore !", s)
			return m
		}
		if !process.IsAlive() {
			log.Debug().Msgf("GRPC Process is not responding: %s", s)
			// stop and delete the process, this forces to re-load the model and re-create again the service
			err := ml.deleteProcess(s)
			if err != nil {
				log.Error().Err(err).Str("process", s).Msg("error stopping process")
			}
			return nil
		}
	}

	return m
}

// stopActiveBackends enforces single-active-backend mode: when enabled,
// every backend except the one identified by modelID is shut down.
func (ml *ModelLoader) stopActiveBackends(modelID string, singleActiveBackend bool) {
	if !singleActiveBackend {
		return
	}

	// Only one backend may be active: stop everything but the keeper.
	log.Debug().Msgf("Stopping all backends except '%s'", modelID)
	if err := ml.StopGRPC(allExcept(modelID)); err != nil {
		log.Error().Err(err).Str("keptModel", modelID).Msg("error while shutting down all backends except for the keptModel - greedyloader continuing")
	}
}

// Close releases the singleton backend lock taken by lockBackend.
// It is a no-op when singleton mode is disabled. Callers must pair it
// with a prior lockBackend; unlocking an unheld mutex panics.
func (ml *ModelLoader) Close() {
	if ml.singletonMode {
		ml.singletonLock.Unlock()
	}
}

// lockBackend acquires the singleton backend lock; a no-op when
// singleton mode is disabled. Released via Close.
func (ml *ModelLoader) lockBackend() {
	if ml.singletonMode {
		ml.singletonLock.Lock()
	}
}
// Load resolves the model's backend from the database, ensures its gRPC
// process is running (reusing an already-loaded model when possible),
// and returns a backend client. In singleton mode the singleton lock is
// taken on entry; it is released on every error path and kept on success.
func (ml *ModelLoader) Load(dbLoader *config.DBLoader, opts ...Option) (grpc.Backend, error) {
	ml.lockBackend() // grab the singleton lock if needed

	o := NewOptions(opts...)
	modelInfo, err := services.GetModelInfoByName(o.modelID, dbLoader.ModelInfoRepo())
	if err != nil {
		ml.Close() // make sure to release the lock in case of failure
		return nil, fmt.Errorf("failed to find model info in db: %s", err.Error())
	}
	log.Info().Msgf("Loading model name: %s, type: %s, backend: %s", modelInfo.Name, modelInfo.Type, modelInfo.GrpcExecutableFileName)
	o.backendString = modelInfo.GrpcExecutableFileName
	o.modelInfo = modelInfo

	// Return earlier if we have a model already loaded
	// (avoid looping through all the backends)
	if m := ml.CheckIsLoaded(o.modelID); m != nil {
		log.Debug().Msgf("Model '%s' already loaded", o.modelID)

		return m.GRPC(o.parallelRequests, ml.wd), nil
	}

	ml.stopActiveBackends(o.modelID, ml.singletonMode)

	if o.backendString == "" {
		ml.Close() // make sure to release the lock in case of failure
		return nil, fmt.Errorf("could not load model - no backend specified")
	}

	log.Info().Str("modelID", o.modelID).Str("backend", o.backendString).Str("o.model", o.model).Msg("BackendLoader starting")

	backend := strings.ToLower(o.backendString)

	model, err := ml.LoadModel(o.modelID, o.model, ml.grpcModel(dbLoader, backend, o))
	if err != nil {
		// BUG FIX: this path previously returned without releasing the
		// singleton lock, deadlocking every subsequent Load call in
		// singleton mode.
		ml.Close()
		return nil, err
	}

	return model.GRPC(o.parallelRequests, ml.wd), nil
}
