/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package prober

import (
	"fmt"
	"testing"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/client-go/kubernetes/fake"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/kubernetes/pkg/features"
	kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
	"k8s.io/kubernetes/pkg/kubelet/prober/results"
	"k8s.io/kubernetes/pkg/kubelet/status"
	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
	kubeletutil "k8s.io/kubernetes/pkg/kubelet/util"
	"k8s.io/kubernetes/pkg/probe"
	"k8s.io/kubernetes/test/utils/ktesting"
)

func init() {
	// NOTE(review): this init is empty — it looks like leftover scaffolding
	// from earlier test setup (e.g. flag or feature-gate registration);
	// consider removing it.
}

// newTestWorkerWithRestartableInitContainer builds a worker whose probed
// container is a restartable (sidecar-style) init container: the test pod's
// first container is converted into an init container with
// RestartPolicy=Always, and a plain "main-container" takes its place in the
// regular container list.
func newTestWorkerWithRestartableInitContainer(m *manager, probeType probeType) *worker {
	always := v1.ContainerRestartPolicyAlways

	pod := getTestPod()

	// Derive the sidecar from the existing test container so its probe
	// configuration carries over.
	sidecar := pod.Spec.Containers[0]
	sidecar.Name = testContainerName
	sidecar.RestartPolicy = &always

	pod.Spec.InitContainers = []v1.Container{sidecar}
	pod.Spec.Containers = []v1.Container{{
		Name: "main-container",
	}}

	return newWorker(m, probeType, pod, sidecar)
}

// TestDoProbe exercises worker.doProbe for each probe type against a table of
// pod/container states (no status, failed pod, deleting pod, missing
// container status, waiting container, terminated container, running
// container, elapsed initial delay). For every case it checks both the
// "keep probing" return value and whether/what result was recorded in the
// corresponding results manager.
func TestDoProbe(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)
	m := newTestManager()

	for _, probeType := range [...]probeType{liveness, readiness, startup} {
		// Test statuses.
		runningStatus := getTestRunningStatusWithStarted(probeType != startup)
		pendingStatus := getTestRunningStatusWithStarted(probeType != startup)
		pendingStatus.ContainerStatuses[0].State.Running = nil
		terminatedStatus := getTestRunningStatusWithStarted(probeType != startup)
		terminatedStatus.ContainerStatuses[0].State.Running = nil
		terminatedStatus.ContainerStatuses[0].State.Terminated = &v1.ContainerStateTerminated{
			StartedAt: metav1.Now(),
		}
		otherStatus := getTestRunningStatusWithStarted(probeType != startup)
		otherStatus.ContainerStatuses[0].Name = "otherContainer"
		failedStatus := getTestRunningStatusWithStarted(probeType != startup)
		failedStatus.Phase = v1.PodFailed

		// expectContinue is keyed by probe type name because the expected
		// decision can differ per probe type (e.g. on pod deletion only the
		// readiness worker keeps going).
		tests := []struct {
			probe                v1.Probe
			podStatus            *v1.PodStatus
			expectContinue       map[string]bool
			expectSet            bool
			expectedResult       results.Result
			setDeletionTimestamp bool
		}{
			{ // No status.
				expectContinue: map[string]bool{
					liveness.String():  true,
					readiness.String(): true,
					startup.String():   true,
				},
			},
			{ // Pod failed
				podStatus: &failedStatus,
			},
			{ // Pod deletion
				podStatus:            &runningStatus,
				setDeletionTimestamp: true,
				expectSet:            true,
				expectContinue: map[string]bool{
					readiness.String(): true,
				},
				expectedResult: results.Success,
			},
			{ // No container status
				podStatus: &otherStatus,
				expectContinue: map[string]bool{
					liveness.String():  true,
					readiness.String(): true,
					startup.String():   true,
				},
			},
			{ // Container waiting
				podStatus: &pendingStatus,
				expectContinue: map[string]bool{
					liveness.String():  true,
					readiness.String(): true,
					startup.String():   true,
				},
				expectSet:      true,
				expectedResult: results.Failure,
			},
			{ // Container terminated
				podStatus:      &terminatedStatus,
				expectSet:      true,
				expectedResult: results.Failure,
			},
			{ // Probe successful.
				podStatus: &runningStatus,
				expectContinue: map[string]bool{
					liveness.String():  true,
					readiness.String(): true,
					startup.String():   true,
				},
				expectSet:      true,
				expectedResult: results.Success,
			},
			{ // Initial delay passed
				podStatus: &runningStatus,
				probe: v1.Probe{
					InitialDelaySeconds: -100,
				},
				expectContinue: map[string]bool{
					liveness.String():  true,
					readiness.String(): true,
					startup.String():   true,
				},
				expectSet:      true,
				expectedResult: results.Success,
			},
		}

		for i, test := range tests {
			w := newTestWorker(m, probeType, test.probe)
			if test.podStatus != nil {
				m.statusManager.SetPodStatus(logger, w.pod, *test.podStatus)
			}
			if test.setDeletionTimestamp {
				now := metav1.Now()
				w.pod.ObjectMeta.DeletionTimestamp = &now
			}
			if c := w.doProbe(ctx); c != test.expectContinue[probeType.String()] {
				t.Errorf("[%s-%d] Expected continue to be %v but got %v", probeType, i, test.expectContinue[probeType.String()], c)
			}
			result, ok := resultsManager(m, probeType).Get(testContainerID)
			if ok != test.expectSet {
				t.Errorf("[%s-%d] Expected to have result: %v but got %v", probeType, i, test.expectSet, ok)
			}
			if result != test.expectedResult {
				t.Errorf("[%s-%d] Expected result: %v but got %v", probeType, i, test.expectedResult, result)
			}

			// Reset shared state (fresh status manager, recorded probe
			// result removed) so the next table case starts clean.
			m.statusManager = status.NewManager(&fake.Clientset{}, kubepod.NewBasicPodManager(), &statustest.FakePodDeletionSafetyProvider{}, kubeletutil.NewPodStartupLatencyTracker())
			resultsManager(m, probeType).Remove(testContainerID)
		}
	}
}

// TestDoProbeWithContainerRestartRules enables the ContainerRestartRules
// feature gate, re-runs the base TestDoProbe suite under it (the base
// behavior must be unchanged), and then verifies that for a terminated
// container the "keep probing" decision honors the container-level restart
// policy and restart rules rather than the pod-level policy alone.
func TestDoProbeWithContainerRestartRules(t *testing.T) {
	featuregatetesting.SetFeatureGatesDuringTest(t, utilfeature.DefaultFeatureGate, featuregatetesting.FeatureOverrides{
		features.ContainerRestartRules: true,
	})
	// Re-run the base suite with the gate on; it must still pass.
	TestDoProbe(t)

	var (
		restartPolicyAlways    = v1.ContainerRestartPolicyAlways
		restartPolicyNever     = v1.ContainerRestartPolicyNever
		restartPolicyOnFailure = v1.ContainerRestartPolicyOnFailure
	)

	logger, ctx := ktesting.NewTestContext(t)
	m := newTestManager()
	for _, probeType := range [...]probeType{liveness, readiness, startup} {
		testcases := []struct {
			name           string
			container      v1.Container
			podStatus      v1.PodStatus
			expectContinue bool
		}{
			{
				name: "container failed with container restartPolicy=OnFailure",
				container: v1.Container{
					RestartPolicy: &restartPolicyOnFailure,
				},
				podStatus:      getTestRunningStatusWithFailedContainer(),
				expectContinue: true,
			},
			{
				name: "container succeeded with containerRestartPolicy=OnFailure",
				container: v1.Container{
					RestartPolicy: &restartPolicyOnFailure,
				},
				podStatus:      getTestRunningStatusWithSucceededContainer(),
				expectContinue: false,
			},
			{
				name: "container failed with containerRestartPolicy=Always",
				container: v1.Container{
					RestartPolicy: &restartPolicyAlways,
				},
				podStatus:      getTestRunningStatusWithFailedContainer(),
				expectContinue: true,
			},
			{
				name: "container succeeded with containerRestartPolicy=Always",
				container: v1.Container{
					RestartPolicy: &restartPolicyAlways,
				},
				podStatus:      getTestRunningStatusWithSucceededContainer(),
				expectContinue: true,
			},
			{
				name: "container failed with containerRestartPolicy=Never",
				container: v1.Container{
					RestartPolicy: &restartPolicyNever,
				},
				podStatus:      getTestRunningStatusWithFailedContainer(),
				expectContinue: false,
			},
			{
				name: "container succeeded with containerRestartPolicy=Never",
				container: v1.Container{
					RestartPolicy: &restartPolicyNever,
				},
				podStatus:      getTestRunningStatusWithSucceededContainer(),
				expectContinue: false,
			},
			{
				name: "container terminated with matching restartPolicyRules",
				container: v1.Container{
					RestartPolicy: &restartPolicyNever,
					RestartPolicyRules: []v1.ContainerRestartRule{{
						Action: v1.ContainerRestartRuleActionRestart,
						ExitCodes: &v1.ContainerRestartRuleOnExitCodes{
							Operator: v1.ContainerRestartRuleOnExitCodesOpIn,
							Values:   []int32{1},
						},
					}},
				},
				podStatus:      getTestRunningStatusWithFailedContainer(),
				expectContinue: true,
			},
			{
				name: "container terminated with non-matching restartPolicyRules",
				container: v1.Container{
					RestartPolicy: &restartPolicyNever,
					RestartPolicyRules: []v1.ContainerRestartRule{{
						Action: v1.ContainerRestartRuleActionRestart,
						ExitCodes: &v1.ContainerRestartRuleOnExitCodes{
							Operator: v1.ContainerRestartRuleOnExitCodesOpIn,
							Values:   []int32{99},
						},
					}},
				},
				podStatus:      getTestRunningStatusWithFailedContainer(),
				expectContinue: false,
			},
		}

		for _, tc := range testcases {
			// Copy the case's restart policy/rules onto the test pod's
			// probed container before building the worker.
			pod := getTestPod()
			setTestProbe(pod, probeType, v1.Probe{})
			c := pod.Spec.Containers[0]
			c.RestartPolicy = tc.container.RestartPolicy
			c.RestartPolicyRules = tc.container.RestartPolicyRules
			pod.Spec.Containers[0] = c
			w := newWorker(m, probeType, pod, pod.Spec.Containers[0])

			m.statusManager.SetPodStatus(logger, w.pod, tc.podStatus)

			if c := w.doProbe(ctx); c != tc.expectContinue {
				t.Errorf("[%s-%s] Expected continue to be %v but got %v", probeType, tc.name, tc.expectContinue, c)
			}
		}
	}
}

// TestDoProbeWithContainerRestartAllContainers enables the gates needed for
// the RestartAllContainers restart-rule action, re-runs the earlier doProbe
// suites under them, and then checks that probing continues when a container
// is (or will be) restarted because some container's RestartAllContainers
// rule fired — including when the probed container itself was terminated or
// cleaned up (back to Waiting/Pending) as a consequence.
func TestDoProbeWithContainerRestartAllContainers(t *testing.T) {
	featuregatetesting.SetFeatureGatesDuringTest(t, utilfeature.DefaultFeatureGate, featuregatetesting.FeatureOverrides{
		features.ContainerRestartRules:                true,
		features.NodeDeclaredFeatures:                 true,
		features.RestartAllContainersOnContainerExits: true,
	})
	// Earlier suites must still pass with all three gates on.
	TestDoProbe(t)
	TestDoProbeWithContainerRestartRules(t)

	var (
		restartPolicyNever = v1.ContainerRestartPolicyNever
	)

	logger, ctx := ktesting.NewTestContext(t)
	m := newTestManager()
	for _, probeType := range [...]probeType{liveness, readiness, startup} {
		testcases := []struct {
			name           string
			pod            func() v1.Pod
			podStatus      func() v1.PodStatus
			expectContinue bool
		}{
			{
				name: "container terminated with matching restartAllContainers",
				pod: func() v1.Pod {
					pod := getTestPod()
					setTestProbe(pod, probeType, v1.Probe{})
					c := pod.Spec.Containers[0]
					c.RestartPolicy = &restartPolicyNever
					c.RestartPolicyRules = []v1.ContainerRestartRule{{
						Action: v1.ContainerRestartRuleActionRestartAllContainers,
						ExitCodes: &v1.ContainerRestartRuleOnExitCodes{
							Operator: v1.ContainerRestartRuleOnExitCodesOpIn,
							Values:   []int32{1},
						},
					}},
					pod.Spec.Containers[0] = c
					return *pod
				},
				podStatus:      getTestRunningStatusWithFailedContainer,
				expectContinue: true,
			},
			{
				name: "container terminated by restartAllContainers",
				pod: func() v1.Pod {
					pod := getTestPod()
					setTestProbe(pod, probeType, v1.Probe{})
					triggerContainer := v1.Container{
						Name:          "trigger",
						RestartPolicy: &restartPolicyNever,
						RestartPolicyRules: []v1.ContainerRestartRule{{
							Action: v1.ContainerRestartRuleActionRestartAllContainers,
							ExitCodes: &v1.ContainerRestartRuleOnExitCodes{
								Operator: v1.ContainerRestartRuleOnExitCodesOpIn,
								Values:   []int32{1},
							},
						}},
					}
					pod.Spec.Containers = append(pod.Spec.Containers, triggerContainer)
					return *pod
				},
				podStatus: func() v1.PodStatus {
					status := getTestRunningStatusWithFailedContainer()
					// Mark the trigger container as terminated.
					status.ContainerStatuses = append(status.ContainerStatuses, v1.ContainerStatus{
						Name: "trigger",
						State: v1.ContainerState{
							Terminated: &v1.ContainerStateTerminated{
								ExitCode: 1,
							},
						},
					})
					return status
				},
				expectContinue: true,
			},
			{
				name: "container cleaned up by restartAllContainers",
				pod: func() v1.Pod {
					pod := getTestPod()
					setTestProbe(pod, probeType, v1.Probe{})
					triggerContainer := v1.Container{
						Name:          "trigger",
						RestartPolicy: &restartPolicyNever,
						RestartPolicyRules: []v1.ContainerRestartRule{{
							Action: v1.ContainerRestartRuleActionRestartAllContainers,
							ExitCodes: &v1.ContainerRestartRuleOnExitCodes{
								Operator: v1.ContainerRestartRuleOnExitCodesOpIn,
								Values:   []int32{1},
							},
						}},
					}
					pod.Spec.Containers = append(pod.Spec.Containers, triggerContainer)
					return *pod
				},
				// After cleanup, the container will be in Waiting state, and pod in Pending state
				podStatus:      getTestPendingStatus,
				expectContinue: true,
			},
		}

		for _, tc := range testcases {
			pod := tc.pod()
			podStatus := tc.podStatus()
			w := newWorker(m, probeType, &pod, pod.Spec.Containers[0])

			m.statusManager.SetPodStatus(logger, w.pod, podStatus)

			if c := w.doProbe(ctx); c != tc.expectContinue {
				t.Errorf("[%s: %s] Expected continue to be %v but got %v", probeType, tc.name, tc.expectContinue, c)
			}
		}
	}
}

// TestInitialDelay verifies that a worker does not actually probe before
// InitialDelaySeconds has elapsed, and that in the meantime each probe type
// reports its default result (liveness: Success, readiness: Failure,
// startup: Unknown). Once the container's start time is old enough, the real
// probe result (Success) is reported.
func TestInitialDelay(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)
	m := newTestManager()

	// Default result reported while the initial delay has not yet passed.
	defaults := map[probeType]results.Result{
		liveness:  results.Success,
		readiness: results.Failure,
		startup:   results.Unknown,
	}

	for _, pt := range [...]probeType{liveness, readiness, startup} {
		w := newTestWorker(m, pt, v1.Probe{
			InitialDelaySeconds: 10,
		})
		m.statusManager.SetPodStatus(logger, w.pod, getTestRunningStatusWithStarted(pt != startup))

		expectContinue(t, w, w.doProbe(ctx), "during initial delay")
		expectResult(t, w, defaults[pt], "during initial delay")

		// Pretend the container started 100 seconds ago so the delay is over.
		aged := getTestRunningStatusWithStarted(pt != startup)
		aged.ContainerStatuses[0].State.Running.StartedAt.Time =
			time.Now().Add(-100 * time.Second)
		m.statusManager.SetPodStatus(logger, w.pod, aged)

		// With the delay elapsed, the probe runs for real and succeeds.
		expectContinue(t, w, w.doProbe(ctx), "after initial delay")
		expectResult(t, w, results.Success, "after initial delay")
	}
}

// TestFailureThreshold verifies that a readiness worker only flips its result
// to Failure after FailureThreshold (3) consecutive probe failures, while a
// single success is enough to report Success (SuccessThreshold: 1). Two
// rounds confirm the behavior repeats after recovery.
func TestFailureThreshold(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)
	m := newTestManager()
	w := newTestWorker(m, readiness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3})
	m.statusManager.SetPodStatus(logger, w.pod, getTestRunningStatus())

	for round := 0; round < 2; round++ {
		// Healthy prober: every probe reports Success immediately.
		m.prober.exec = fakeExecProber{probe.Success, nil}
		for n := 0; n < 3; n++ {
			msg := fmt.Sprintf("%d success (%d)", n+1, round)
			expectContinue(t, w, w.doProbe(ctx), msg)
			expectResult(t, w, results.Success, msg)
		}

		// Prober starts failing; the first two failures stay below the
		// threshold, so the cached result is still Success.
		m.prober.exec = fakeExecProber{probe.Failure, nil}
		for n := 0; n < 2; n++ {
			msg := fmt.Sprintf("%d failing (%d)", n+1, round)
			expectContinue(t, w, w.doProbe(ctx), msg)
			expectResult(t, w, results.Success, msg)
		}

		// The third consecutive failure crosses the threshold; this and all
		// following probes report Failure.
		for n := 0; n < 3; n++ {
			msg := fmt.Sprintf("%d failure (%d)", n+3, round)
			expectContinue(t, w, w.doProbe(ctx), msg)
			expectResult(t, w, results.Failure, msg)
		}
	}
}

// TestSuccessThreshold verifies that a readiness worker only flips its result
// to Success after SuccessThreshold (3) consecutive successes, while a single
// failure is enough to report Failure (FailureThreshold: 1). Two rounds
// confirm the behavior repeats after a flake.
func TestSuccessThreshold(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)
	m := newTestManager()
	w := newTestWorker(m, readiness, v1.Probe{SuccessThreshold: 3, FailureThreshold: 1})
	m.statusManager.SetPodStatus(logger, w.pod, getTestRunningStatus())

	// Seed the results manager with Failure so the threshold must be crossed.
	w.resultsManager.Set(testContainerID, results.Failure, &v1.Pod{})

	for round := 0; round < 2; round++ {
		// Two successes are still below SuccessThreshold; result stays Failure.
		for n := 0; n < 2; n++ {
			msg := fmt.Sprintf("%d success (%d)", n+1, round)
			expectContinue(t, w, w.doProbe(ctx), msg)
			expectResult(t, w, results.Failure, msg)
		}

		// The third (and later) consecutive successes cross the threshold.
		for n := 0; n < 3; n++ {
			msg := fmt.Sprintf("%d success (%d)", n+3, round)
			expectContinue(t, w, w.doProbe(ctx), msg)
			expectResult(t, w, results.Success, msg)
		}

		// One flake trips FailureThreshold (1) straight away.
		m.prober.exec = fakeExecProber{probe.Failure, nil}
		msg := fmt.Sprintf("1 failure (%d)", round)
		expectContinue(t, w, w.doProbe(ctx), msg)
		expectResult(t, w, results.Failure, msg)

		// Restore the healthy prober for the next round.
		m.prober.exec = fakeExecProber{probe.Success, nil}
	}
}

// TestStartupProbeSuccessThreshold verifies that a startup worker keeps
// probing until SuccessThreshold consecutive successes are observed, then
// goes on hold (no further probes) and resets resultRun to 0.
func TestStartupProbeSuccessThreshold(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)
	m := newTestManager()
	const (
		successThreshold = 1
		failureThreshold = 3
	)
	w := newTestWorker(m, startup, v1.Probe{SuccessThreshold: successThreshold, FailureThreshold: failureThreshold})
	m.statusManager.SetPodStatus(logger, w.pod, getTestNotRunningStatus())
	m.prober.exec = fakeExecProber{probe.Success, nil}

	// Until successThreshold is met, the probe must not be on hold and keeps
	// being executed.
	for i := 0; i < successThreshold; i++ {
		if w.onHold {
			t.Errorf("Prober should not be on hold")
		}
		msg := fmt.Sprintf("%d success", i+1)
		expectContinue(t, w, w.doProbe(ctx), msg)
		expectResult(t, w, results.Success, msg)
	}

	// Once successThreshold is met, the probe goes on hold and is not
	// executed anymore.
	if !w.onHold {
		t.Errorf("Prober should be on hold because successThreshold is exceeded")
	}
	// Meeting or exceeding successThreshold resets resultRun to 0.
	if w.resultRun != 0 {
		t.Errorf("Prober resultRun should be 0, but %d", w.resultRun)
	}
}

// TestStartupProbeFailureThreshold verifies that a startup worker leaves the
// cached result at Unknown until FailureThreshold consecutive failures are
// seen, then records Failure, resets resultRun to 0, and goes on hold.
func TestStartupProbeFailureThreshold(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)
	m := newTestManager()
	successThreshold := 1
	failureThreshold := 3
	w := newTestWorker(m, startup, v1.Probe{SuccessThreshold: int32(successThreshold), FailureThreshold: int32(failureThreshold)})
	m.statusManager.SetPodStatus(logger, w.pod, getTestNotRunningStatus())
	m.prober.exec = fakeExecProber{probe.Failure, nil}

	for i := 0; i < failureThreshold+1; i++ {
		if i < failureThreshold {
			// Probe should not be on hold and will continue to be executed
			// until failureThreshold is met
			if w.onHold {
				t.Errorf("Prober should not be on hold")
			}
			msg := fmt.Sprintf("%d failure", i+1)
			expectContinue(t, w, w.doProbe(ctx), msg)
			switch i {
			case 0, 1:
				// At this point, the expected result is Unknown
				// because w.resultsManager.Set() will be called after FailureThreshold is reached
				expectResult(t, w, results.Unknown, msg)
				// resultRun should be incremented until failureThreshold is met
				if w.resultRun != i+1 {
					t.Errorf("Prober resultRun should be %d, but %d", i+1, w.resultRun)
				}
			case 2:
				// The expected result is Failure
				// because w.resultsManager.Set() will be called due to resultRun reaching failureThreshold,
				// updating the cached result to Failure.
				// After that, resultRun will be reset to 0.
				expectResult(t, w, results.Failure, msg)
				// Meeting failureThreshold should cause resultRun to reset to 0
				if w.resultRun != 0 {
					t.Errorf("Prober resultRun should be 0, but %d", w.resultRun)
				}
			}
		} else {
			// Probe should be on hold and will not be executed anymore
			// when failureThreshold is met
			if !w.onHold {
				t.Errorf("Prober should be on hold because failureThreshold is exceeded")
			}
			// Exceeding failureThreshold should cause resultRun to reset to 0
			if w.resultRun != 0 {
				t.Errorf("Prober resultRun should be 0, but %d", w.resultRun)
			}
		}
	}
}

// TestCleanUp starts a real worker goroutine per probe type, waits for it to
// record a Success result, then verifies that stop() is safe to call
// repeatedly and that worker exit clears both the recorded result and the
// manager's worker-map entry.
func TestCleanUp(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)
	m := newTestManager()

	for _, probeType := range [...]probeType{liveness, readiness, startup} {
		key := probeKey{testPodUID, testContainerName, probeType}
		w := newTestWorker(m, probeType, v1.Probe{})
		m.statusManager.SetPodStatus(logger, w.pod, getTestRunningStatusWithStarted(probeType != startup))
		go w.run(ctx)
		m.workers[key] = w

		// Wait for worker to run.
		// NOTE(review): wait.Poll is deprecated upstream; consider
		// wait.PollUntilContextTimeout when touching this next.
		condition := func() (bool, error) {
			ready, _ := resultsManager(m, probeType).Get(testContainerID)
			return ready == results.Success, nil
		}
		if ready, _ := condition(); !ready {
			if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, condition); err != nil {
				t.Fatalf("[%s] Error waiting for worker ready: %v", probeType, err)
			}
		}

		for i := 0; i < 10; i++ {
			w.stop() // Stop should be callable multiple times without consequence.
		}
		if err := waitForWorkerExit(t, m, []probeKey{key}); err != nil {
			t.Fatalf("[%s] error waiting for worker exit: %v", probeType, err)
		}

		if _, ok := resultsManager(m, probeType).Get(testContainerID); ok {
			t.Errorf("[%s] Expected result to be cleared.", probeType)
		}
		if _, ok := m.workers[key]; ok {
			t.Errorf("[%s] Expected worker to be cleared.", probeType)
		}
	}
}

// expectResult asserts that the result recorded for the worker's container
// equals expectedResult; msg is included in any failure output.
func expectResult(t *testing.T, w *worker, expectedResult results.Result, msg string) {
	got, found := resultsManager(w.probeManager, w.probeType).Get(w.containerID)
	switch {
	case !found:
		t.Errorf("[%s - %s] Expected result to be set, but was not set", w.probeType, msg)
	case got != expectedResult:
		t.Errorf("[%s - %s] Expected result to be %v, but was %v",
			w.probeType, msg, expectedResult, got)
	}
}

// expectContinue asserts that doProbe reported it should keep running.
func expectContinue(t *testing.T, w *worker, c bool, msg string) {
	if c {
		return
	}
	t.Errorf("[%s - %s] Expected to continue, but did not", w.probeType, msg)
}

// resultsManager returns the manager's results cache for the given probe
// type, panicking on an unknown type.
func resultsManager(m *manager, probeType probeType) results.Manager {
	switch probeType {
	case liveness:
		return m.livenessManager
	case readiness:
		return m.readinessManager
	case startup:
		return m.startupManager
	default:
		panic(fmt.Errorf("Unhandled case: %v", probeType))
	}
}

// TestOnHoldOnLivenessOrStartupCheckFailure verifies that after a liveness or
// startup check failure the worker goes on hold (skips further probes, so
// the cached Failure persists even if the prober would now succeed), and
// that the hold is lifted when the container ID changes (i.e. the container
// was restarted). After the lift, liveness resumes probing while a
// successful startup probe puts the worker back on hold.
func TestOnHoldOnLivenessOrStartupCheckFailure(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)
	m := newTestManager()

	for _, probeType := range [...]probeType{liveness, startup} {
		w := newTestWorker(m, probeType, v1.Probe{SuccessThreshold: 1, FailureThreshold: 1})
		status := getTestRunningStatusWithStarted(probeType != startup)
		m.statusManager.SetPodStatus(logger, w.pod, status)

		// First probe should fail.
		m.prober.exec = fakeExecProber{probe.Failure, nil}
		msg := "first probe"
		expectContinue(t, w, w.doProbe(ctx), msg)
		expectResult(t, w, results.Failure, msg)
		if !w.onHold {
			t.Errorf("Prober should be on hold due to %s check failure", probeType)
		}
		// Set fakeExecProber to return success. However, the result will remain
		// failure because the worker is on hold and won't probe.
		m.prober.exec = fakeExecProber{probe.Success, nil}
		msg = "while on hold"
		expectContinue(t, w, w.doProbe(ctx), msg)
		expectResult(t, w, results.Failure, msg)
		if !w.onHold {
			t.Errorf("Prober should be on hold due to %s check failure", probeType)
		}

		// Set a new container ID to lift the hold. The next probe will succeed.
		status.ContainerStatuses[0].ContainerID = "test://newCont_ID"
		m.statusManager.SetPodStatus(logger, w.pod, status)
		msg = "hold lifted"
		expectContinue(t, w, w.doProbe(ctx), msg)
		expectResult(t, w, results.Success, msg)
		if probeType == liveness && w.onHold {
			t.Errorf("Prober should not be on hold anymore")
		} else if probeType == startup && !w.onHold {
			t.Errorf("Prober should be on hold due to %s check success", probeType)
		}
	}
}

// TestResultRunOnLivenessCheckFailure tracks worker.resultRun through a
// success followed by three failures with FailureThreshold=3: the cached
// result stays Success while failures are below the threshold, flips to
// Failure on the third consecutive failure, and resultRun resets to 0.
func TestResultRunOnLivenessCheckFailure(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)
	m := newTestManager()
	w := newTestWorker(m, liveness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3})
	m.statusManager.SetPodStatus(logger, w.pod, getTestRunningStatus())

	m.prober.exec = fakeExecProber{probe.Success, nil}
	msg := "initial probe success"
	expectContinue(t, w, w.doProbe(ctx), msg)
	expectResult(t, w, results.Success, msg)
	if w.resultRun != 1 {
		t.Errorf("Prober resultRun should be 1")
	}

	// First failure: below threshold, cached result unchanged, resultRun
	// restarts counting at 1 (the run of consecutive identical results).
	m.prober.exec = fakeExecProber{probe.Failure, nil}
	msg = "probe failure, result success"
	expectContinue(t, w, w.doProbe(ctx), msg)
	expectResult(t, w, results.Success, msg)
	if w.resultRun != 1 {
		t.Errorf("Prober resultRun should be 1")
	}

	m.prober.exec = fakeExecProber{probe.Failure, nil}
	msg = "2nd probe failure, result success"
	expectContinue(t, w, w.doProbe(ctx), msg)
	expectResult(t, w, results.Success, msg)
	if w.resultRun != 2 {
		t.Errorf("Prober resultRun should be 2")
	}

	// Exceeding FailureThreshold should cause resultRun to
	// reset to 0 so that the probe on the restarted pod
	// also gets FailureThreshold attempts to succeed.
	m.prober.exec = fakeExecProber{probe.Failure, nil}
	msg = "3rd probe failure, result failure"
	expectContinue(t, w, w.doProbe(ctx), msg)
	expectResult(t, w, results.Failure, msg)
	if w.resultRun != 0 {
		t.Errorf("Prober resultRun should be reset to 0")
	}
}

// TestResultRunOnStartupCheckFailure tracks worker.resultRun for a startup
// probe with FailureThreshold=3: the cached result stays Unknown while
// failures are below the threshold, flips to Failure on the third
// consecutive failure, and resultRun resets to 0.
func TestResultRunOnStartupCheckFailure(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)

	m := newTestManager()
	w := newTestWorker(m, startup, v1.Probe{SuccessThreshold: 1, FailureThreshold: 3})
	m.statusManager.SetPodStatus(logger, w.pod, getTestRunningStatusWithStarted(false))

	// Below FailureThreshold leaves probe state unchanged
	// which is failed for startup at first.
	m.prober.exec = fakeExecProber{probe.Failure, nil}
	msg := "probe failure, result unknown"
	expectContinue(t, w, w.doProbe(ctx), msg)
	expectResult(t, w, results.Unknown, msg)
	if w.resultRun != 1 {
		t.Errorf("Prober resultRun should be 1")
	}

	m.prober.exec = fakeExecProber{probe.Failure, nil}
	msg = "2nd probe failure, result unknown"
	expectContinue(t, w, w.doProbe(ctx), msg)
	expectResult(t, w, results.Unknown, msg)
	if w.resultRun != 2 {
		t.Errorf("Prober resultRun should be 2")
	}

	// Exceeding FailureThreshold should cause resultRun to
	// reset to 0 so that the probe on the restarted pod
	// also gets FailureThreshold attempts to succeed.
	m.prober.exec = fakeExecProber{probe.Failure, nil}
	msg = "3rd probe failure, result failure"
	expectContinue(t, w, w.doProbe(ctx), msg)
	expectResult(t, w, results.Failure, msg)
	if w.resultRun != 0 {
		t.Errorf("Prober resultRun should be reset to 0")
	}
}

// TestDoProbe_TerminatedRestartableInitContainerWithRestartPolicyNever checks
// that probing continues for a terminated restartable init container
// (container-level RestartPolicy=Always) even when the pod itself has
// RestartPolicy=Never, and that the recorded result is Failure.
func TestDoProbe_TerminatedRestartableInitContainerWithRestartPolicyNever(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)
	m := newTestManager()

	// Worker probing a sidecar-style init container; the pod itself never
	// restarts.
	w := newTestWorkerWithRestartableInitContainer(m, startup)
	w.pod.Spec.RestartPolicy = v1.RestartPolicyNever

	// Report the init container as terminated, and the regular container as
	// not running.
	podStatus := getTestRunningStatus()
	podStatus.InitContainerStatuses = []v1.ContainerStatus{{
		Name:        testContainerName,
		ContainerID: "test://test_container_id",
		State: v1.ContainerState{
			Terminated: &v1.ContainerStateTerminated{
				StartedAt: metav1.Now(),
			},
		},
	}}
	podStatus.ContainerStatuses[0].State.Running = nil

	m.statusManager.SetPodStatus(logger, w.pod, podStatus)

	// The restartable init container will be restarted, so the worker must
	// keep probing despite pod RestartPolicy=Never.
	if !w.doProbe(ctx) {
		t.Error("Expected to continue probing for terminated restartable init container with pod restart policy Never")
	}

	// The terminated container's result must be recorded as Failure.
	expectResult(t, w, results.Failure, "restartable init container with pod restart policy Never")
}

// TestDoProbe_TerminatedContainerWithRestartPolicyNever checks the
// pre-existing behavior for a regular container (no container-level restart
// policy): once terminated under pod RestartPolicy=Never, probing stops and
// the recorded result is Failure.
func TestDoProbe_TerminatedContainerWithRestartPolicyNever(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)
	m := newTestManager()

	w := newTestWorker(m, startup, v1.Probe{})

	// Plain container: no container-level restart policy, pod never restarts.
	w.container.RestartPolicy = nil
	w.pod.Spec.RestartPolicy = v1.RestartPolicyNever

	// Report the container as terminated (and not running).
	podStatus := getTestRunningStatus()
	podStatus.ContainerStatuses[0].State.Running = nil
	podStatus.ContainerStatuses[0].State.Terminated = &v1.ContainerStateTerminated{
		StartedAt: metav1.Now(),
	}

	m.statusManager.SetPodStatus(logger, w.pod, podStatus)

	// The container will never restart, so the worker must stop probing.
	if w.doProbe(ctx) {
		t.Error("Expected to stop probing for regular container with pod RestartPolicy=Never")
	}

	// The terminated container's result must be recorded as Failure.
	expectResult(t, w, results.Failure, "regular container with pod restart policy Never")
}

// TestLivenessProbeDisabledByStarted verifies that liveness probing is
// effectively disabled (reports Success) while the container has not yet
// been marked as started, and that the same failing probe produces Failure
// once startup is marked complete.
func TestLivenessProbeDisabledByStarted(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)
	m := newTestManager()
	w := newTestWorker(m, liveness, v1.Probe{SuccessThreshold: 1, FailureThreshold: 1})
	m.statusManager.SetPodStatus(logger, w.pod, getTestRunningStatusWithStarted(false))

	// Not started yet: the failing liveness probe is ignored.
	m.prober.exec = fakeExecProber{probe.Failure, nil}
	phase := "Not started, probe failure, result success"
	expectContinue(t, w, w.doProbe(ctx), phase)
	expectResult(t, w, results.Success, phase)

	// Mark the container as started; the same failing probe now counts.
	m.statusManager.SetContainerStartup(logger, w.pod.UID, w.containerID, true)
	m.prober.exec = fakeExecProber{probe.Failure, nil}
	phase = "Started, probe failure, result failure"
	expectContinue(t, w, w.doProbe(ctx), phase)
	expectResult(t, w, results.Failure, phase)
}

// TestStartupProbeDisabledByStarted verifies that a startup probe runs
// normally before the container is marked as started (Unknown below the
// failure threshold, Success on a successful probe) and is effectively
// disabled afterward: once started, a failing probe leaves the result at
// Success.
func TestStartupProbeDisabledByStarted(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)

	m := newTestManager()
	w := newTestWorker(m, startup, v1.Probe{SuccessThreshold: 1, FailureThreshold: 2})
	m.statusManager.SetPodStatus(logger, w.pod, getTestRunningStatusWithStarted(false))

	// One failure is below FailureThreshold (2): result stays Unknown.
	m.prober.exec = fakeExecProber{probe.Failure, nil}
	phase := "Not started, probe failure, result unknown"
	expectContinue(t, w, w.doProbe(ctx), phase)
	expectResult(t, w, results.Unknown, phase)

	// A successful probe flips the result to Success.
	m.prober.exec = fakeExecProber{probe.Success, nil}
	phase = "Started, probe success, result success"
	expectContinue(t, w, w.doProbe(ctx), phase)
	expectResult(t, w, results.Success, phase)

	// Mark the container as started; the startup probe is now disabled, so a
	// failing prober no longer affects the result.
	m.statusManager.SetContainerStartup(logger, w.pod.UID, w.containerID, true)
	m.prober.exec = fakeExecProber{probe.Failure, nil}
	phase = "Started, probe failure, result success"
	expectContinue(t, w, w.doProbe(ctx), phase)
	expectResult(t, w, results.Success, phase)
}

// TestChangeContainerStatusOnKubeletRestart checks whether doProbe records an
// initial result while the probe is still within its initial delay, across
// every probe type, with the ChangeContainerStatusOnKubeletRestart feature
// gate on/off and with the container's StartedAt before or after the
// manager's start time (before m.start presumably models a container that
// predates this kubelet process, i.e. a kubelet restart — confirm against
// doProbe's implementation).
func TestChangeContainerStatusOnKubeletRestart(t *testing.T) {
	logger, ctx := ktesting.NewTestContext(t)

	tests := []struct {
		name           string
		featureEnabled bool
		isRestart      bool
		probeType      probeType
		initialValue   results.Result
		expectSet      bool
	}{
		{
			name:           "feature enabled, is restart, readiness",
			featureEnabled: true,
			isRestart:      true,
			probeType:      readiness,
			initialValue:   results.Failure,
			expectSet:      true,
		},
		{
			name:           "feature enabled, is restart, liveness",
			featureEnabled: true,
			isRestart:      true,
			probeType:      liveness,
			initialValue:   results.Success,
			expectSet:      true,
		},
		{
			name:           "feature enabled, is restart, startup",
			featureEnabled: true,
			isRestart:      true,
			probeType:      startup,
			initialValue:   results.Unknown,
			expectSet:      true,
		},
		{
			name:           "feature enabled, not restart, readiness",
			featureEnabled: true,
			isRestart:      false,
			probeType:      readiness,
			initialValue:   results.Failure,
			expectSet:      true,
		},
		{
			name:           "feature enabled, not restart, liveness",
			featureEnabled: true,
			isRestart:      false,
			probeType:      liveness,
			initialValue:   results.Success,
			expectSet:      true,
		},
		{
			name:           "feature enabled, not restart, startup",
			featureEnabled: true,
			isRestart:      false,
			probeType:      startup,
			initialValue:   results.Unknown,
			expectSet:      true,
		},
		{
			name:           "feature disabled, is restart, readiness",
			featureEnabled: false,
			isRestart:      true,
			probeType:      readiness,
			expectSet:      false,
		},
		{
			name:           "feature disabled, is restart, liveness",
			featureEnabled: false,
			isRestart:      true,
			probeType:      liveness,
			expectSet:      false,
		},
		{
			name:           "feature disabled, is restart, startup",
			featureEnabled: false,
			isRestart:      true,
			probeType:      startup,
			expectSet:      false,
		},
		{
			name:           "feature disabled, not restart, readiness",
			featureEnabled: false,
			isRestart:      false,
			probeType:      readiness,
			initialValue:   results.Failure,
			expectSet:      true,
		},
		{
			name:           "feature disabled, not restart, liveness",
			featureEnabled: false,
			isRestart:      false,
			probeType:      liveness,
			initialValue:   results.Success,
			expectSet:      true,
		},
		{
			name:           "feature disabled, not restart, startup",
			featureEnabled: false,
			isRestart:      false,
			probeType:      startup,
			initialValue:   results.Unknown,
			expectSet:      true,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.ChangeContainerStatusOnKubeletRestart, tc.featureEnabled)

			m := newTestManager()
			podStatus := getTestRunningStatus()
			podStatus.ContainerStatuses[0].ContainerID = "test://container-id"
			// Position the container's start time relative to the manager's
			// start time to model the restart / no-restart cases.
			if tc.isRestart {
				podStatus.ContainerStatuses[0].State.Running.StartedAt = metav1.Time{Time: m.start.Add(-5 * time.Minute)}
			} else {
				podStatus.ContainerStatuses[0].State.Running.StartedAt = metav1.Time{Time: m.start.Add(5 * time.Minute)}
			}

			// Large InitialDelaySeconds keeps the actual probe from running,
			// so any recorded result is the initial value written by doProbe.
			w := newTestWorker(m, tc.probeType, v1.Probe{InitialDelaySeconds: 1000})
			m.statusManager.SetPodStatus(logger, w.pod, podStatus)

			w.doProbe(ctx)

			containerID := kubecontainer.ParseContainerID(podStatus.ContainerStatuses[0].ContainerID)
			result, ok := resultsManager(m, tc.probeType).Get(containerID)

			if ok != tc.expectSet {
				t.Errorf("Expected result to be set: %v, but got: %v", tc.expectSet, ok)
			}
			if tc.expectSet && result != tc.initialValue {
				t.Errorf("Expected result %v, but got: %v", tc.initialValue, result)
			}
		})
	}
}
