// SPDX-License-Identifier: Apache-2.0
// Copyright Authors of Cilium

package iptables

import (
	"bufio"
	"context"
	"fmt"
	"log/slog"
	"net"
	"net/netip"
	"os"
	"regexp"
	"slices"
	"strconv"
	"strings"

	"github.com/blang/semver/v4"
	"github.com/cilium/hive/cell"
	"github.com/cilium/hive/job"
	"github.com/cilium/statedb"
	"github.com/mattn/go-shellwords"
	"github.com/vishvananda/netlink"
	"k8s.io/utils/clock"

	"github.com/cilium/cilium/daemon/cmd/cni"
	"github.com/cilium/cilium/pkg/byteorder"
	"github.com/cilium/cilium/pkg/cidr"
	"github.com/cilium/cilium/pkg/command/exec"
	"github.com/cilium/cilium/pkg/container/set"
	"github.com/cilium/cilium/pkg/datapath/iptables/ipset"
	"github.com/cilium/cilium/pkg/datapath/linux/linux_defaults"
	"github.com/cilium/cilium/pkg/datapath/linux/route"
	"github.com/cilium/cilium/pkg/datapath/linux/safenetlink"
	"github.com/cilium/cilium/pkg/datapath/linux/sysctl"
	"github.com/cilium/cilium/pkg/datapath/tables"
	"github.com/cilium/cilium/pkg/datapath/tunnel"
	datapath "github.com/cilium/cilium/pkg/datapath/types"
	"github.com/cilium/cilium/pkg/defaults"
	"github.com/cilium/cilium/pkg/fqdn/proxy/ipfamily"
	ipamOption "github.com/cilium/cilium/pkg/ipam/option"
	lb "github.com/cilium/cilium/pkg/loadbalancer"
	"github.com/cilium/cilium/pkg/lock"
	"github.com/cilium/cilium/pkg/logging/logfields"
	"github.com/cilium/cilium/pkg/node"
	"github.com/cilium/cilium/pkg/time"
	"github.com/cilium/cilium/pkg/versioncheck"
)

const (
	// oldCiliumPrefix is prepended to Cilium chain names when they are
	// renamed aside during a rules refresh (see renameChains/removeRules),
	// so new chains can be installed before the old ones are deleted.
	oldCiliumPrefix       = "OLD_"
	// Names of the Cilium-owned chains. For the non-filter tables the
	// lowercase suffix names the iptables table the chain lives in.
	ciliumInputChain      = "CILIUM_INPUT"
	ciliumOutputChain     = "CILIUM_OUTPUT"
	ciliumOutputRawChain  = "CILIUM_OUTPUT_raw"
	ciliumPostNatChain    = "CILIUM_POST_nat"
	ciliumOutputNatChain  = "CILIUM_OUTPUT_nat"
	ciliumPreNatChain     = "CILIUM_PRE_nat"
	ciliumPostMangleChain = "CILIUM_POST_mangle"
	ciliumPreMangleChain  = "CILIUM_PRE_mangle"
	ciliumPreRawChain     = "CILIUM_PRE_raw"
	ciliumForwardChain    = "CILIUM_FORWARD"
	// feederDescription is the comment marker placed on rules in standard
	// chains that jump ("feed") into the Cilium chains above.
	feederDescription     = "cilium-feeder:"
	// encryptionDescription marks encryption no-track rules.
	// NOTE(review): not referenced in this chunk — confirm usage elsewhere
	// in the package.
	encryptionDescription = "cilium-encryption-notrack:"
)

var (
	// Minimum iptables versions supporting the -w and -w<seconds> flags,
	// used by ipt.initArgs to pick the lock-wait arguments.
	isWaitMinVersion        = versioncheck.MustCompile(">=1.4.20")
	isWaitSecondsMinVersion = versioncheck.MustCompile(">=1.4.22")
	// noTrackPorts expands a single port number into the pair of L4
	// addresses (one TCP, one UDP) that no-track rules are installed for.
	noTrackPorts            = func(port uint16) []*lb.L4Addr {
		return []*lb.L4Addr{
			{
				Protocol: lb.TCP,
				Port:     port,
			},
			{
				Protocol: lb.UDP,
				Port:     port,
			},
		}
	}

	// noTrackSupportedProtos lists the L4 protocols supported by the
	// no-track rule machinery.
	noTrackSupportedProtos = []lb.L4Type{
		lb.TCP, lb.UDP,
	}
)

const (
	// waitString is the iptables flag that makes the program wait for the
	// xtables lock instead of failing when another process holds it.
	waitString = "-w"
)

// runnable abstracts the execution of an iptables-family program so the
// rule-manipulation helpers can operate on either IP family.
type runnable interface {
	// runProgOutput runs the program with the given arguments and returns
	// its standard output.
	runProgOutput(args []string) (string, error)
	// runProg runs the program with the given arguments, discarding output.
	runProg(args []string) error
}

// iptablesInterface extends runnable with accessors for the underlying
// program name and the node-IP ipset associated with the IP family.
type iptablesInterface interface {
	runnable

	getProg() string
	getIpset() string
}

// ipt wraps a single iptables-family binary together with the node ipset
// it pairs with and the xtables lock-wait arguments to prepend to every
// invocation.
type ipt struct {
	logger   *slog.Logger
	prog     string   // binary name, e.g. "iptables" or "ip6tables"
	ipset    string   // Cilium node-IP ipset name for this family
	waitArgs []string // "-w" [seconds] flags, populated by initArgs
}

// initLogger sets the logger used for command logging. It must be called
// before any runProg* invocation, which dereference ipt.logger.
func (ipt *ipt) initLogger(logger *slog.Logger) {
	ipt.logger = logger
}

// initArgs probes the installed iptables version and records the xtables
// lock-wait flags it supports: "-w <seconds>" (>=1.4.22), bare "-w"
// (>=1.4.20), or nothing. On probe failure no wait flags are used.
func (ipt *ipt) initArgs(ctx context.Context, waitSeconds int) {
	version, err := ipt.getVersion(ctx)
	if err != nil {
		return
	}
	switch {
	case isWaitSecondsMinVersion(version):
		ipt.waitArgs = []string{waitString, strconv.Itoa(waitSeconds)}
	case isWaitMinVersion(version):
		ipt.waitArgs = []string{waitString}
	}
}

// getProg returns the name of the wrapped iptables binary.
func (ipt *ipt) getProg() string {
	return ipt.prog
}

// getIpset returns the name of the Cilium node-IP ipset for this family.
func (ipt *ipt) getIpset() string {
	return ipt.ipset
}

// iptablesVersionRegex extracts the version number (e.g. "1.8.7") from the
// output of "<prog> --version" ("iptables v1.8.7 (nf_tables)"). Compiled
// once at package scope instead of on every call.
var iptablesVersionRegex = regexp.MustCompile(`v([0-9]+(\.[0-9]+)+)`)

// getVersion runs "<prog> --version" and parses the reported version,
// used to decide which xtables lock flags the binary supports.
func (ipt *ipt) getVersion(ctx context.Context) (semver.Version, error) {
	b, err := exec.CommandContext(ctx, ipt.prog, "--version").CombinedOutput(ipt.logger, false)
	if err != nil {
		return semver.Version{}, err
	}
	vString := iptablesVersionRegex.FindStringSubmatch(string(b))
	if vString == nil {
		return semver.Version{}, fmt.Errorf("no iptables version found in string: %s", string(b))
	}
	return versioncheck.Version(vString[1])
}

// runProgOutput runs the iptables program with the given arguments,
// prepending the lock-wait flags, and returns its standard output.
func (ipt *ipt) runProgOutput(args []string) (string, error) {
	fullCommand := ipt.getProg() + " " + strings.Join(args, " ")

	ipt.logger.Debug("Running command", logfields.Cmd, fullCommand)

	// Add wait argument to deal with concurrent calls that would fail otherwise
	cmdArgs := make([]string, 0, len(ipt.waitArgs)+len(args))
	cmdArgs = append(cmdArgs, ipt.waitArgs...)
	cmdArgs = append(cmdArgs, args...)

	out, err := exec.WithTimeout(defaults.ExecTimeout, ipt.prog, cmdArgs...).Output(ipt.logger, false)
	if err != nil {
		return "", fmt.Errorf("unable to run '%s' iptables command: %w", fullCommand, err)
	}

	return string(out), nil
}

// runProg runs the iptables program with the given arguments (plus the
// lock-wait flags), discarding its output.
func (ipt *ipt) runProg(args []string) error {
	_, err := ipt.runProgOutput(args)
	return err
}

// reverseRule converts an append ("-A ...") or insert ("-I ...") rule into
// the corresponding delete ("-D ...") and splits it into argv tokens.
// Any other line (e.g. "-N" chain declarations) yields an empty slice.
func reverseRule(rule string) ([]string, error) {
	// From: -A/-I POSTROUTING -m comment [...]
	// To:   -D    POSTROUTING -m comment [...]
	for _, insertFlag := range []string{"-A", "-I"} {
		if rest, found := strings.CutPrefix(rule, insertFlag); found {
			return shellwords.Parse("-D" + rest)
		}
	}

	return []string{}, nil
}

// ruleReferencesDisabledChain reports whether the rule mentions (as a
// space-delimited, upper-cased token) any chain for which feeder rules are
// disabled, returning the first matching configured chain name.
func ruleReferencesDisabledChain(disableIptablesFeederRules []string, rule string) (bool, string) {
	for _, disabled := range disableIptablesFeederRules {
		token := " " + strings.ToUpper(disabled) + " "
		if strings.Contains(rule, token) {
			return true, disabled
		}
	}

	return false, ""
}

// isDisabledChain reports whether the given chain is listed (case
// insensitively) among the chains with disabled feeder rules.
func isDisabledChain(disableIptablesFeederRules []string, chain string) bool {
	for _, disabled := range disableIptablesFeederRules {
		if !strings.EqualFold(chain, disabled) {
			continue
		}
		return true
	}

	return false
}

// removeCiliumRules deletes from the given table every rule whose text
// contains the given match (e.g. "CILIUM_" or "OLD_CILIUM_"), covering both
// rules inside Cilium chains and feeder rules calling into them. Feeder
// rules for chains explicitly disabled by configuration are left in place.
func (m *Manager) removeCiliumRules(table string, prog runnable, match string) error {
	rules, err := prog.runProgOutput([]string{"-t", table, "-S"})
	if err != nil {
		return err
	}

	scanner := bufio.NewScanner(strings.NewReader(rules))
	for scanner.Scan() {
		rule := scanner.Text()

		// All rules installed by cilium either belong to a chain with
		// the name CILIUM_ or call a chain with the name CILIUM_:
		// -A CILIUM_FORWARD -o cilium_host -m comment --comment "cilium: any->cluster on cilium_host forward accept" -j ACCEPT
		// -A POSTROUTING -m comment --comment "cilium-feeder: CILIUM_POST" -j CILIUM_POST
		if !strings.Contains(rule, match) {
			continue
		}

		// do not remove feeder for chains that are set to be disabled
		// ie catch the beginning of the rule like -A POSTROUTING to match it against
		// disabled chains
		if skip, disabledChain := ruleReferencesDisabledChain(m.cfg.DisableIptablesFeederRules, rule); skip {
			m.logger.Info(
				"Skipping the removal of feeder chain",
				logfields.Chain, disabledChain,
			)
			continue
		}

		reversedRule, err := reverseRule(rule)
		if err != nil {
			m.logger.Warn(
				"Unable to parse rule into slice. Leaving rule behind.",
				logfields.Error, err,
				logfields.Prog, prog,
			)
			continue
		}

		if len(reversedRule) > 0 {
			deleteRule := append([]string{"-t", table}, reversedRule...)
			if err := prog.runProg(deleteRule); err != nil {
				return err
			}
		}
	}
	// Surface scanner failures (e.g. a rule line exceeding the scanner's
	// token limit) instead of silently stopping and leaving rules behind.
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("scanning rules of table %s: %w", table, err)
	}

	return nil
}

// podAndNameSpace identifies a pod by name and namespace; used as the map
// key of noTrackHostPortsByPod.
type podAndNameSpace struct {
	podName, namespace string
}

// noTrackHostPortsByPod stores the ports passed in with the annotation /no-track-host-ports
// indexed by pod name+namespace
type noTrackHostPortsByPod map[podAndNameSpace]set.Set[lb.L4Addr]

// flatten merges the per-pod port sets into a single set containing the
// no-track ports of all pods.
func (ports noTrackHostPortsByPod) flatten() set.Set[lb.L4Addr] {
	result := set.Set[lb.L4Addr]{}

	for _, p := range ports {
		result.Merge(p)
	}

	return result
}

// exclude returns a shallow copy of the map without the entry for the
// given pod, leaving the receiver untouched.
func (ports noTrackHostPortsByPod) exclude(key podAndNameSpace) noTrackHostPortsByPod {
	remaining := make(noTrackHostPortsByPod, len(ports))

	for pod, portSet := range ports {
		if pod != key {
			remaining[pod] = portSet
		}
	}

	return remaining
}

// Manager manages the iptables-related configuration for Cilium.
type Manager struct {
	logger *slog.Logger
	// This lock ensures there are no concurrent executions of the doInstallRules() and
	// GetProxyPort() methods.
	lock lock.Mutex

	// per-IP-family iptables program wrappers
	ip4tables, ip6tables iptablesInterface

	sysctl sysctl.Sysctl

	cfg       Config
	sharedCfg SharedConfig

	// argsInit gates the reconciliation loop on the asynchronous
	// initialization steps (wait-args probing and the Start hook).
	argsInit  *lock.StoppableWaitGroup
	// startDone marks the Start hook's contribution to argsInit complete.
	startDone lock.DoneFunc

	// anything that can trigger a reconciliation
	reconcilerParams reconcilerParams

	// haveIp6tables is true when the required ip6tables tables are usable
	// and kernel IPv6 support is not disabled (probed in Start).
	haveIp6tables        bool
	// haveSocketMatch is true when the xt_socket iptables match is available.
	haveSocketMatch      bool
	// haveBPFSocketAssign mirrors SharedConfig.EnableBPFTProxy.
	haveBPFSocketAssign  bool
	// ipEarlyDemuxDisabled records that ip_early_demux was disabled via sysctl.
	ipEarlyDemuxDisabled bool
	cniConfigManager     cni.CNIConfigManager
}

// reconcilerParams bundles the inputs of the reconciliation loop: time
// source, node/device state, and the channels delivering reconciliation
// requests for proxies and no-track rules.
type reconcilerParams struct {
	clock               clock.WithTicker
	localNodeStore      *node.LocalNodeStore
	db                  *statedb.DB
	devices             statedb.Table[*tables.Device]
	proxies             chan reconciliationRequest[proxyInfo]
	addNoTrackPod       chan reconciliationRequest[noTrackPodInfo]
	delNoTrackPod       chan reconciliationRequest[noTrackPodInfo]
	addNoTrackHostPorts chan reconciliationRequest[noTrackHostPortsPodInfo]
	delNoTrackHostPorts chan reconciliationRequest[podAndNameSpace]
}

// params lists the dependencies injected by the hive into
// newIptablesManager.
type params struct {
	cell.In

	Logger    *slog.Logger
	Lifecycle cell.Lifecycle

	Sysctl           sysctl.Sysctl
	CNIConfigManager cni.CNIConfigManager
	LocalNodeStore   *node.LocalNodeStore

	Cfg       Config
	SharedCfg SharedConfig

	JobGroup job.Group
	DB       *statedb.DB
	Devices  statedb.Table[*tables.Device]

	TunnelCfg tunnel.Config
}

// newIptablesManager constructs the iptables Manager: it wires up the
// per-family iptables wrappers, registers the lifecycle hook that probes
// the lock-wait arguments, and starts the reconciliation loop job once
// that initialization (and Start) has completed.
func newIptablesManager(p params) datapath.IptablesManager {
	iptMgr := &Manager{
		logger:    p.Logger,
		sysctl:    p.Sysctl,
		cfg:       p.Cfg,
		sharedCfg: p.SharedCfg,
		argsInit:  lock.NewStoppableWaitGroup(),
		reconcilerParams: reconcilerParams{
			clock:               clock.RealClock{},
			localNodeStore:      p.LocalNodeStore,
			db:                  p.DB,
			devices:             p.Devices,
			proxies:             make(chan reconciliationRequest[proxyInfo]),
			addNoTrackPod:       make(chan reconciliationRequest[noTrackPodInfo]),
			delNoTrackPod:       make(chan reconciliationRequest[noTrackPodInfo]),
			addNoTrackHostPorts: make(chan reconciliationRequest[noTrackHostPortsPodInfo]),
			delNoTrackHostPorts: make(chan reconciliationRequest[podAndNameSpace]),
		},
		haveIp6tables:    true,
		cniConfigManager: p.CNIConfigManager,
	}

	ip4tables := &ipt{prog: "iptables", ipset: ipset.CiliumNodeIPSetV4}
	ip6tables := &ipt{prog: "ip6tables", ipset: ipset.CiliumNodeIPSetV6}

	iptMgr.ip4tables = ip4tables
	iptMgr.ip6tables = ip6tables

	// The iptables manager cannot start properly if masquerading is enabled
	// with no egress masquerading interfaces for any IPAM mode other than
	// ClusterPool or Kubernetes.
	//
	// This is because a PodCIDR is only guaranteed to be available with the
	// above IPAM modes; without a PodCIDR the iptables MASQ rules MUST be
	// interface-specific and cannot rely on selecting a source
	// range. See the iptables MASQ rule creation bits for more detail.
	if (iptMgr.sharedCfg.IptablesMasqueradingIPv4Enabled || iptMgr.sharedCfg.IptablesMasqueradingIPv6Enabled) &&
		len(iptMgr.sharedCfg.MasqueradeInterfaces) == 0 && (iptMgr.sharedCfg.IPAM != ipamOption.IPAMClusterPool && iptMgr.sharedCfg.IPAM != ipamOption.IPAMKubernetes) {
		panic("Egress masquerading interfaces cannot be empty when IP masquerading is enabled with IPAM mode other than ClusterPool or Kubernetes")
	}

	// init iptables/ip6tables wait arguments before using them in the reconciler or in the manager (e.g: GetProxyPorts)
	initDone := iptMgr.argsInit.Add()
	p.Lifecycle.Append(cell.Hook{
		OnStart: func(ctx cell.HookContext) error {
			defer initDone()
			ip4tables.initLogger(p.Logger)
			ip6tables.initLogger(p.Logger)

			ip4tables.initArgs(ctx, int(p.Cfg.IPTablesLockTimeout/time.Second))
			if p.SharedCfg.EnableIPv6 {
				ip6tables.initArgs(ctx, int(p.Cfg.IPTablesLockTimeout/time.Second))
			}
			return nil
		},
	})

	// init haveIp6tables argument before using it in a reconciliation loop
	iptMgr.startDone = iptMgr.argsInit.Add()

	p.JobGroup.Add(
		job.OneShot("iptables-reconciliation-loop", func(ctx context.Context, health cell.Health) error {
			// each job runs in an independent goroutine, so we need to explicitly wait for both
			// ip{4,6}tables and haveIp6tables initialization before starting the reconciler.
			iptMgr.argsInit.Stop()
			select {
			case <-ctx.Done():
				return nil
			case <-iptMgr.argsInit.WaitChannel():
			}
			return reconciliationLoop(
				ctx, p.Logger, health,
				iptMgr.sharedCfg.InstallIptRules, &iptMgr.reconcilerParams,
				iptMgr.doInstallRules,
				iptMgr.doInstallProxyRules,
				iptMgr.installNoTrackRules,
				iptMgr.removeNoTrackRules,
				iptMgr.setNoTrackHostPorts,
				iptMgr.removeNoTrackHostPorts,
			)
		}),
	)

	// Add the manager after the reconciler, otherwise there is a deadlock on shutdown
	// between closing and draining the channels.
	p.Lifecycle.Append(iptMgr)

	return iptMgr
}

// Start initializes the iptables manager and checks for iptables kernel modules availability.
// It probes the iptables/ip6tables tables Cilium needs, IPv6 kernel
// support, and the xt_socket match, recording the results in the Manager
// flags used by the rule-installation code.
func (m *Manager) Start(ctx cell.HookContext) error {
	defer m.startDone()

	if os.Getenv("CILIUM_PREPEND_IPTABLES_CHAIN") != "" {
		m.logger.Warn("CILIUM_PREPEND_IPTABLES_CHAIN env var has been deprecated. Please use 'CILIUM_PREPEND_IPTABLES_CHAINS' " +
			"env var or '--prepend-iptables-chains' command line flag instead")
	}

	if err := enableIPForwarding(m.sysctl, m.sharedCfg.EnableIPv6); err != nil {
		m.logger.Warn("enabling IP forwarding via sysctl failed", logfields.Error, err)
	}

	if m.sharedCfg.EnableIPSec && m.sharedCfg.EnableL7Proxy {
		m.disableIPEarlyDemux()
	}

	// Probe the availability of each IPv4 table used by Cilium; failures
	// are only warned about since rule installation may be disabled.
	for _, table := range []string{"nat", "mangle", "raw", "filter"} {
		if err := m.ip4tables.runProg([]string{"-t", table, "-L", "-n"}); err != nil {
			if m.sharedCfg.InstallIptRules {
				m.logger.Warn("iptables table is not available on this system",
					logfields.Error, err,
					logfields.Table, table,
				)
			}
		}
	}

	// Probe the ip6tables tables; any failure disables IPv6 iptables
	// support for the lifetime of this manager.
	for _, table := range []string{"mangle", "raw", "filter"} {
		if err := m.ip6tables.runProg([]string{"-t", table, "-L", "-n"}); err != nil {
			if m.sharedCfg.InstallIptRules {
				m.logger.Debug("ip6tables table is not available on this system",
					logfields.Error, err,
					logfields.Table, table,
				)
			}
			m.haveIp6tables = false
		}
	}
	if !m.haveIp6tables {
		if m.sharedCfg.EnableIPv6 {
			return fmt.Errorf("IPv6 is enabled, but the needed ip6tables tables are unavailable (try disabling IPv6 in Cilium or installing ip6tables and kernel modules: ip6_tables, ip6table_mangle, ip6table_raw, ip6table_filter)")
		}
	} else {
		// Even with usable ip6tables, IPv6 may be disabled in the kernel
		// ("disable=1" module parameter); treat that as no IPv6 support.
		ipv6Disabled, err := os.ReadFile("/sys/module/ipv6/parameters/disable")
		if err != nil {
			if m.sharedCfg.EnableIPv6 {
				return fmt.Errorf(
					"IPv6 is enabled but IPv6 kernel support probing failed with: %w", err)
			}
			m.logger.Warn(
				"Unable to read /sys/module/ipv6/parameters/disable, disabling IPv6 iptables support",
				logfields.Error, err,
			)
			m.haveIp6tables = false
		} else if strings.TrimSuffix(string(ipv6Disabled), "\n") == "1" {
			m.logger.Debug(
				"Kernel does not support IPv6, disabling IPv6 iptables support")
			m.haveIp6tables = false
		}
	}

	if err := m.ip4tables.runProg([]string{"-t", "mangle", "-L", "-m", "socket", "-n"}); err != nil {
		if m.sharedCfg.InstallIptRules {
			m.logger.Warn("iptables match socket is not available (try installing xt_socket kernel module)", logfields.Error, err)
		}
		if !m.sharedCfg.TunnelingEnabled {
			// xt_socket module is needed to circumvent an explicit drop in ip_forward()
			// logic for packets for which a local socket is found by ip early
			// demux. xt_socket performs a local socket match and sets an skb mark on
			// match, which will divert the packet to the local stack using our policy
			// routing rule, thus avoiding being processed by ip_forward() at all.
			//
			// If xt_socket module does not exist we can disable ip early demux to to
			// avoid the explicit drop in ip_forward(). This is not needed in tunneling
			// modes, as then we'll set the skb mark in the bpf logic before the policy
			// routing stage so that the packet is routed locally instead of being
			// forwarded by ip_forward().
			//
			// We would not need the xt_socket at all if the datapath universally would
			// set the "to proxy" skb mark bits on before the packet hits policy routing
			// stage. Currently this is not true for endpoint routing modes.
			if m.sharedCfg.EnableXTSocketFallback {
				m.disableIPEarlyDemux()
			}
		}
	} else {
		m.haveSocketMatch = true
	}
	m.haveBPFSocketAssign = m.sharedCfg.EnableBPFTProxy

	return nil
}

// Stop closes all reconciliation request channels, signaling the
// reconciliation loop to drain pending requests and terminate.
func (m *Manager) Stop(ctx cell.HookContext) error {
	close(m.reconcilerParams.proxies)
	close(m.reconcilerParams.addNoTrackPod)
	close(m.reconcilerParams.delNoTrackPod)
	close(m.reconcilerParams.addNoTrackHostPorts)
	close(m.reconcilerParams.delNoTrackHostPorts)
	return nil
}

// disableIPEarlyDemux disables the net.ipv4.ip_early_demux sysctl so that
// proxied traffic is not picked up by early demux before policy routing.
// Idempotent: subsequent calls after a successful disable are no-ops.
func (m *Manager) disableIPEarlyDemux() {
	if m.ipEarlyDemuxDisabled {
		return
	}

	if m.sysctl.Disable([]string{"net", "ipv4", "ip_early_demux"}) != nil {
		m.logger.Warn("Could not disable ip_early_demux, traffic redirected due to an HTTP policy or visibility may be dropped unexpectedly")
		return
	}

	m.ipEarlyDemuxDisabled = true
	m.logger.Info("Disabled ip_early_demux to allow proxy redirection with original source/destination address without xt_socket support also in non-tunneled datapath modes.")
}

// SupportsOriginalSourceAddr tells if an L7 proxy can use POD's original source address and port in
// the upstream connection to allow the destination to properly derive the source security ID from
// the source IP address.
func (m *Manager) SupportsOriginalSourceAddr() bool {
	// Original source address use works if xt_socket match is supported, or if ip early demux
	// is disabled
	return m.haveSocketMatch || m.ipEarlyDemuxDisabled
}

// removeRules removes iptables rules installed by Cilium whose chain names
// carry the given prefix ("" for current rules, oldCiliumPrefix for the
// renamed-aside ones), then deletes the corresponding chains.
func (m *Manager) removeRules(prefix string) error {
	match := prefix + "CILIUM_"

	// Set of tables that have had iptables rules in any Cilium version
	for _, table := range []string{"nat", "mangle", "raw", "filter"} {
		if err := m.removeCiliumRules(table, m.ip4tables, match); err != nil {
			return err
		}
		if !m.haveIp6tables {
			continue
		}
		if err := m.removeCiliumRules(table, m.ip6tables, match); err != nil {
			return err
		}
	}

	for _, chain := range ciliumChains {
		chain.name = prefix + chain.name
		if err := chain.remove(true, m.haveIp6tables, m.ip4tables, m.ip6tables); err != nil {
			return err
		}
	}

	return nil
}

// renameChains renames every Cilium-owned chain by prepending the given
// prefix, so fresh chains can be installed while the old ones still exist.
func (m *Manager) renameChains(prefix string) error {
	for _, chain := range ciliumChains {
		newName := prefix + chain.name
		if err := chain.rename(true, m.haveIp6tables, newName, m.ip4tables, m.ip6tables); err != nil {
			return err
		}
	}

	return nil
}

// inboundProxyRedirectRule returns the mangle-table rule (built with the
// given iptables command, e.g. "-A" or "-D") that marks transparent-socket
// traffic with the to-proxy mark so it is routed to the local stack.
func (m *Manager) inboundProxyRedirectRule(cmd string) []string {
	// Mark host proxy transparent connections to be routed to the local stack.
	// This comes before the TPROXY rules in the chain, and setting the mark
	// without the proxy port number will make the TPROXY rule to not match,
	// as we do not want to try to tproxy packets that are going to the stack
	// already.
	// This rule is needed for couple of reasons:
	// 1. route return traffic to the proxy
	// 2. route original direction traffic that would otherwise be intercepted
	//    by ip_early_demux
	// Explicitly support chaining Envoy listeners via the loopback device by
	// excluding traffic for the loopback device.
	toProxyMark := fmt.Sprintf("%#08x", linux_defaults.MagicMarkIsToProxy)
	matchFromIPSecEncrypt := fmt.Sprintf("%#08x/%#08x", linux_defaults.RouteMarkEncrypt, linux_defaults.RouteMarkMask)
	matchSkipTProxy := fmt.Sprintf("%#08x/%#08x", linux_defaults.MarkSkipTProxy, linux_defaults.RouteMarkMask)
	return []string{
		"-t", "mangle",
		cmd, ciliumPreMangleChain,
		"-m", "socket", "--transparent",
		"!", "-o", "lo",
		"-m", "mark", "!", "--mark", matchFromIPSecEncrypt,
		"-m", "mark", "!", "--mark", matchSkipTProxy,
		"-m", "comment", "--comment", "cilium: any->pod redirect proxied traffic to host proxy",
		"-j", "MARK",
		"--set-mark", toProxyMark}
}

// iptProxyRule appends a TPROXY rule to CILIUM_PRE_mangle that redirects
// proxy-marked traffic of the given protocol to the proxy `name` listening
// at ip:proxyPort, unless an equivalent rule is already present in the
// given dump of the current mangle table rules.
func (m *Manager) iptProxyRule(rules string, prog runnable, l4proto, ip string, proxyPort uint16, name string) error {
	// Match
	port := uint32(byteorder.HostToNetwork16(proxyPort)) << 16
	markMatch := fmt.Sprintf("%#x", linux_defaults.MagicMarkIsToProxy|port)
	// TPROXY params
	tProxyMark := fmt.Sprintf("%#x", linux_defaults.MagicMarkIsToProxy)
	tProxyPort := strconv.Itoa(int(proxyPort))

	// Quote the IP so that e.g. the dots of an IPv4 address match literally
	// instead of acting as regexp wildcards, which could otherwise make this
	// check match an existing rule for a different --on-ip address.
	existingRuleRegex := regexp.MustCompile(fmt.Sprintf("-A CILIUM_PRE_mangle -p %s -m mark --mark %s.*--on-ip %s", l4proto, markMatch, regexp.QuoteMeta(ip)))
	if existingRuleRegex.MatchString(rules) {
		return nil
	}

	// NOTE: Proxy port restoration depends on the comment string below, see doGetProxyPorts()
	rule := []string{
		"-t", "mangle",
		"-A", ciliumPreMangleChain,
		"-p", l4proto,
		"-m", "mark", "--mark", markMatch,
		"-m", "comment", "--comment", "cilium: TPROXY to host " + name + " proxy",
		"-j", "TPROXY",
		"--tproxy-mark", tProxyMark,
		"--on-ip", ip,
		"--on-port", tProxyPort,
	}
	return prog.runProg(rule)
}

// addCiliumTunnelRules adds the iptables rules for the tunnel (node to node) traffic.
// here we have both:
// - a NOTRACK rule (to avoid creating ct entry in the kernel for tunnel traffic)
// - an ACCEPT rule in the output chain in the filter table (to explicitly allow it to pass)
func (m *Manager) addCiliumTunnelRules() (err error) {
	if !m.sharedCfg.TunnelingEnabled {
		return nil
	}

	port := m.sharedCfg.TunnelPort
	if port == 0 {
		return nil
	}

	if err := m.addCiliumAcceptTunnelRules(port); err != nil {
		return err
	}

	return m.installTunnelNoTrackRules(port)
}

// addCiliumAcceptTunnelRules adds the ACCEPT rule in the cilium output
// chain of the filter table for UDP traffic destined to `tunnelPort`, for
// each enabled IP family.
func (m *Manager) addCiliumAcceptTunnelRules(tunnelPort uint16) (err error) {
	rule := []string{
		"-t", "filter",
		"-A", ciliumOutputChain,
		"-p", "udp",
		"--dport", strconv.Itoa(int(tunnelPort)),
		"-m", "comment", "--comment", "cilium: ACCEPT for tunnel traffic",
		"-j", "ACCEPT",
	}

	for _, family := range []struct {
		enabled bool
		prog    iptablesInterface
	}{
		{m.sharedCfg.EnableIPv4, m.ip4tables},
		{m.sharedCfg.EnableIPv6, m.ip6tables},
	} {
		if !family.enabled {
			continue
		}
		if err := family.prog.runProg(rule); err != nil {
			return err
		}
	}

	return nil
}

// installTunnelNoTrackRules adds the NOTRACK rule in the cilium raw
// prerouting and output raw chains for UDP traffic destined to
// `tunnelPort`, for each enabled IP family.
func (m *Manager) installTunnelNoTrackRules(tunnelPort uint16) error {
	// notrackRule builds the NOTRACK rule for the given raw-table chain.
	notrackRule := func(chain string) []string {
		return []string{
			"-t", "raw",
			"-A", chain,
			"-p", "udp",
			"--dport", strconv.Itoa(int(tunnelPort)),
			"-m", "comment", "--comment", "cilium: NOTRACK for tunnel traffic",
			"-j", "CT", "--notrack",
		}
	}

	for _, family := range []struct {
		enabled bool
		prog    iptablesInterface
	}{
		{m.sharedCfg.EnableIPv4, m.ip4tables},
		{m.sharedCfg.EnableIPv6, m.ip6tables},
	} {
		if !family.enabled {
			continue
		}
		// Prerouting first, then output, matching the historical order.
		for _, chain := range []string{ciliumPreRawChain, ciliumOutputRawChain} {
			if err := family.prog.runProg(notrackRule(chain)); err != nil {
				return err
			}
		}
	}

	return nil
}

// installStaticProxyRules installs the proxy-port independent rules needed
// by the L7 proxy: NOTRACK rules for proxied traffic, matching explicit
// ACCEPTs for DROP-defaulting filter chains, and (when the xt_socket match
// is available) the inbound TPROXY redirect rule. The rules are installed
// for each enabled IP family.
func (m *Manager) installStaticProxyRules() error {
	if m.sharedCfg.EnableIPv4 {
		if err := m.installStaticProxyRulesForProg(m.ip4tables, true); err != nil {
			return err
		}
	}

	if m.sharedCfg.EnableIPv6 {
		// Unlike IPv4, IPv6 historically does not install the explicit
		// ACCEPT for L7 proxy upstream traffic.
		if err := m.installStaticProxyRulesForProg(m.ip6tables, false); err != nil {
			return err
		}
	}

	return nil
}

// installStaticProxyRulesForProg installs the static proxy rules for one IP
// family. acceptL7Upstream additionally installs an explicit ACCEPT for L7
// proxy upstream traffic (IPv4 only, preserving historical behavior).
func (m *Manager) installStaticProxyRulesForProg(prog iptablesInterface, acceptL7Upstream bool) error {
	// match traffic to a proxy (upper 16 bits has the proxy port, which is masked out)
	matchToProxy := fmt.Sprintf("%#08x/%#08x", linux_defaults.MagicMarkIsToProxy, linux_defaults.MagicMarkHostMask)
	// proxy return traffic has 0 ID in the mask
	matchProxyReply := fmt.Sprintf("%#08x/%#08x", linux_defaults.MagicMarkIsProxy, linux_defaults.MagicMarkProxyNoIDMask)
	// proxy forward traffic
	matchProxyForward := fmt.Sprintf("%#08x/%#08x", linux_defaults.MagicMarkEgress, linux_defaults.MagicMarkHostMask)
	// L7 proxy upstream return traffic has Endpoint ID in the mask
	matchL7ProxyUpstream := fmt.Sprintf("%#08x/%#08x", linux_defaults.MagicMarkIsProxyEPID, linux_defaults.MagicMarkProxyMask)
	// match traffic from a proxy (either in forward or in return direction)
	matchFromProxy := fmt.Sprintf("%#08x/%#08x", linux_defaults.MagicMarkIsProxy, linux_defaults.MagicMarkProxyMask)

	rules := [][]string{
		// No conntrack for traffic to proxy
		{
			"-t", "raw",
			"-A", ciliumPreRawChain,
			"-m", "mark", "--mark", matchToProxy,
			"-m", "comment", "--comment", "cilium: NOTRACK for proxy traffic",
			"-j", "CT", "--notrack"},
		// Explicit ACCEPT for the proxy traffic. Needed when the INPUT defaults to DROP.
		// Matching needs to be the same as for the NOTRACK rule above.
		{
			"-t", "filter",
			"-A", ciliumInputChain,
			"-m", "mark", "--mark", matchToProxy,
			"-m", "comment", "--comment", "cilium: ACCEPT for proxy traffic",
			"-j", "ACCEPT"},
		// No conntrack for proxy return traffic that is heading to lxc+
		{
			"-t", "raw",
			"-A", ciliumOutputRawChain,
			"-o", "lxc+",
			"-m", "mark", "--mark", matchProxyReply,
			"-m", "comment", "--comment", "cilium: NOTRACK for proxy return traffic",
			"-j", "CT", "--notrack"},
		// No conntrack for proxy return traffic that is heading to cilium_host
		{
			"-t", "raw",
			"-A", ciliumOutputRawChain,
			"-o", defaults.HostDevice,
			"-m", "mark", "--mark", matchProxyReply,
			"-m", "comment", "--comment", "cilium: NOTRACK for proxy return traffic",
			"-j", "CT", "--notrack"},
	}

	// No conntrack for proxy forward traffic that is heading to cilium_host
	if m.sharedCfg.EnableIPSec {
		rules = append(rules, []string{
			"-t", "raw",
			"-A", ciliumOutputRawChain,
			"-o", defaults.HostDevice,
			"-m", "mark", "--mark", matchProxyForward,
			"-m", "comment", "--comment", "cilium: NOTRACK for proxy forward traffic",
			"-j", "CT", "--notrack"})
	}

	rules = append(rules,
		// No conntrack for proxy upstream traffic that is heading to lxc+
		[]string{
			"-t", "raw",
			"-A", ciliumOutputRawChain,
			"-o", "lxc+",
			"-m", "mark", "--mark", matchL7ProxyUpstream,
			"-m", "comment", "--comment", "cilium: NOTRACK for L7 proxy upstream traffic",
			"-j", "CT", "--notrack"},
		// No conntrack for proxy upstream traffic that is heading to cilium_host
		[]string{
			"-t", "raw",
			"-A", ciliumOutputRawChain,
			"-o", defaults.HostDevice,
			"-m", "mark", "--mark", matchL7ProxyUpstream,
			"-m", "comment", "--comment", "cilium: NOTRACK for L7 proxy upstream traffic",
			"-j", "CT", "--notrack"},
		// Explicit ACCEPT for the proxy return traffic. Needed when the OUTPUT defaults to DROP.
		// Matching needs to be the same as for the NOTRACK rule above.
		[]string{
			"-t", "filter",
			"-A", ciliumOutputChain,
			"-m", "mark", "--mark", matchFromProxy,
			"-m", "comment", "--comment", "cilium: ACCEPT for proxy traffic",
			"-j", "ACCEPT"})

	// Explicit ACCEPT for the l7 proxy upstream traffic. Needed when the OUTPUT defaults to DROP.
	// TODO: See if this is really needed. We do not have an ACCEPT for normal proxy upstream traffic.
	if acceptL7Upstream {
		rules = append(rules, []string{
			"-t", "filter",
			"-A", ciliumOutputChain,
			"-m", "mark", "--mark", matchL7ProxyUpstream,
			"-m", "comment", "--comment", "cilium: ACCEPT for l7 proxy upstream traffic",
			"-j", "ACCEPT"})
	}

	if m.haveSocketMatch {
		// Direct inbound TPROXYed traffic towards the socket
		rules = append(rules, m.inboundProxyRedirectRule("-A"))
	}

	for _, rule := range rules {
		if err := prog.runProg(rule); err != nil {
			return err
		}
	}

	return nil
}

// doCopyProxyRules re-installs into newChain every rule of the given table
// that matches both the regexp and the substring, rewriting the first
// occurrence of oldChain to newChain. Used to carry TPROXY rules over a
// chain rename (see copyProxyRules).
func (m *Manager) doCopyProxyRules(prog iptablesInterface, table string, re *regexp.Regexp, match, oldChain, newChain string) error {
	rules, err := prog.runProgOutput([]string{"-t", table, "-S"})
	if err != nil {
		return err
	}

	scanner := bufio.NewScanner(strings.NewReader(rules))
	for scanner.Scan() {
		rule := scanner.Text()
		if !re.MatchString(rule) || !strings.Contains(rule, match) {
			continue
		}

		args, err := shellwords.Parse(strings.Replace(rule, oldChain, newChain, 1))
		if err != nil {
			m.logger.Warn(
				"Unable to parse TPROXY rule, disruption to traffic selected by L7 policy possible",
				logfields.Error, err,
				logfields.Object, rule,
				logfields.Table, table,
				logfields.Prog, prog.getProg(),
			)
			continue
		}

		copyRule := append([]string{"-t", table}, args...)
		if err := prog.runProg(copyRule); err != nil {
			return err
		}
	}
	// Surface scanner failures (e.g. a rule line exceeding the scanner's
	// token limit) instead of silently truncating the copy.
	if err := scanner.Err(); err != nil {
		return fmt.Errorf("scanning rules of table %s: %w", table, err)
	}

	return nil
}

// tproxyMatch matches the Cilium TPROXY rules in the CILIUM_PRE_mangle
// chain (see the rule layout in iptProxyRule).
var tproxyMatch = regexp.MustCompile("CILIUM_PRE_mangle .*cilium: TPROXY")

// copyProxyRules copies the TPROXY rules matching `match` from oldChain
// into the current CILIUM_PRE_mangle chain, for each enabled IP family.
func (m *Manager) copyProxyRules(oldChain string, match string) error {
	for _, family := range []struct {
		enabled bool
		prog    iptablesInterface
	}{
		{m.sharedCfg.EnableIPv4, m.ip4tables},
		{m.sharedCfg.EnableIPv6, m.ip6tables},
	} {
		if !family.enabled {
			continue
		}
		if err := m.doCopyProxyRules(family.prog, "mangle", tproxyMatch, match, oldChain, ciliumPreMangleChain); err != nil {
			return err
		}
	}

	return nil
}

// Redirect packets to the host proxy via TPROXY, as directed by the Cilium
// datapath bpf programs via skb marks. After installing the TCP and UDP rules
// for the current port/IP, any stale rules for the same proxy name (pointing
// at a different port/IP) are deleted.
func (m *Manager) addProxyRules(prog runnable, ip string, proxyPort uint16, name string) error {
	rules, err := prog.runProgOutput([]string{"-t", "mangle", "-S"})
	if err != nil {
		return err
	}

	for _, proto := range []string{"tcp", "udp"} {
		if err := m.iptProxyRule(rules, prog, proto, ip, proxyPort, name); err != nil {
			return err
		}
	}

	// Delete all other rules for this same proxy name.
	// These may accumulate if there is a bind failure on a previously used port.
	currentTarget := fmt.Sprintf("TPROXY --on-port %d --on-ip %s ", proxyPort, ip)
	nameComment := "cilium: TPROXY to host " + name + " proxy"
	sc := bufio.NewScanner(strings.NewReader(rules))
	for sc.Scan() {
		rule := sc.Text()
		isStale := strings.Contains(rule, "-A CILIUM_PRE_mangle ") &&
			strings.Contains(rule, nameComment) &&
			!strings.Contains(rule, currentTarget)
		if !isStale {
			continue
		}

		// Turn the listed append rule into the corresponding delete command.
		args, parseErr := shellwords.Parse(strings.Replace(rule, "-A", "-D", 1))
		if parseErr != nil {
			m.logger.Warn(
				"Unable to parse TPROXY rule",
				logfields.Error, parseErr,
				logfields.Prog, prog,
				logfields.Object, rule,
			)
			continue
		}

		if err := prog.runProg(append([]string{"-t", "mangle"}, args...)); err != nil {
			return err
		}
	}

	return nil
}

// endpointNoTrackRules adds (cmd "-A") or deletes (cmd "-D") the NOTRACK and
// ACCEPT rules that skip kernel conntrack for traffic to/from the given
// endpoint IP and port. Each rule is applied best-effort: failures are logged
// as warnings and processing continues; only the error from the last rule (if
// any) is returned, matching the original behavior.
func (m *Manager) endpointNoTrackRules(prog runnable, cmd string, IP string, port *lb.L4Addr) error {
	var err error

	protocol := strings.ToLower(port.Protocol)
	p := strconv.FormatUint(uint64(port.Port), 10)

	// currently the only use case for this is node-local-dns
	// with LRP, node-local-dns should be deployed as a non-host-namespaced
	// pod and we want to skip kernel conntrack for any traffic between the
	// application pod and the node-local-dns pod
	// There are 4 types of packets that we want to skip conntrack:
	// 1. From a non-host pod to the node-local-dns pod
	// 2. From the node-local-dns pod to a non-host pod
	// 3. From a hostNetwork pod to the node-local-dns pod
	// 4. From the node-local-dns pod to a hostNetwork pod
	rules := [][]string{
		// 1. non-host pod -> node-local-dns (NOTRACK in raw, ACCEPT in forward)
		{"-t", "raw", cmd, ciliumPreRawChain, "-p", protocol, "-d", IP, "--dport", p, "-j", "CT", "--notrack"},
		{"-t", "filter", cmd, ciliumForwardChain, "-p", protocol, "-d", IP, "--dport", p, "-j", "ACCEPT"},
		// 2. node-local-dns -> non-host pod
		{"-t", "raw", cmd, ciliumPreRawChain, "-p", protocol, "-s", IP, "--sport", p, "-j", "CT", "--notrack"},
		{"-t", "filter", cmd, ciliumForwardChain, "-p", protocol, "-s", IP, "--sport", p, "-j", "ACCEPT"},
		// 3. host namespaced pod -> node-local-dns
		{"-t", "raw", cmd, ciliumOutputRawChain, "-p", protocol, "-d", IP, "--dport", p, "-j", "CT", "--notrack"},
		{"-t", "filter", cmd, ciliumOutputChain, "-p", protocol, "-d", IP, "--dport", p, "-j", "ACCEPT"},
		// 4. node-local-dns -> host namespaced pod (the prerouting NOTRACK in
		// case 2 covers the raw table for this direction)
		{"-t", "filter", cmd, ciliumInputChain, "-p", protocol, "-s", IP, "--sport", p, "-j", "ACCEPT"},
		// Compatibility rules for host-namespaced node-local-dns deployed in
		// the legacy mode without LRP.
		{"-t", "raw", cmd, ciliumOutputRawChain, "-p", protocol, "-s", IP, "--sport", p, "-j", "CT", "--notrack"},
		{"-t", "filter", cmd, ciliumOutputChain, "-p", protocol, "-s", IP, "--sport", p, "-j", "ACCEPT"},
		{"-t", "filter", cmd, ciliumInputChain, "-p", protocol, "-d", IP, "--dport", p, "-j", "ACCEPT"},
	}

	for _, args := range rules {
		if err = prog.runProg(args); err != nil {
			m.logger.Warn("Failed to enforce endpoint notrack", logfields.Error, err)
		}
	}
	// Best-effort semantics: earlier failures were only logged; err holds the
	// outcome of the final rule.
	return err
}

// InstallNoTrackRules is explicitly called when a pod has valid "policy.cilium.io/no-track-port" annotation.
// When InstallNoConntrackIptRules flag is set, a super set of v4 NOTRACK rules will be automatically
// installed upon agent bootstrap (via function addNoTrackPodTrafficRules) and this function will be skipped.
// When InstallNoConntrackIptRules is not set, this function will be executed to install NOTRACK rules.
// The rules installed by this function is very specific, for now, the only user is node-local-dns pods.
// Blocks until the reconciler has processed the request.
func (m *Manager) InstallNoTrackRules(ip netip.Addr, port uint16) {
	if m.skipPodTrafficConntrack(ip) {
		return
	}

	done := make(chan struct{})
	m.reconcilerParams.addNoTrackPod <- reconciliationRequest[noTrackPodInfo]{noTrackPodInfo{ip, port}, done}
	<-done
}

// RemoveNoTrackRules undoes the per-endpoint NOTRACK rules for the given IP
// and port. See comments for InstallNoTrackRules. Blocks until the reconciler
// has processed the request.
func (m *Manager) RemoveNoTrackRules(ip netip.Addr, port uint16) {
	if m.skipPodTrafficConntrack(ip) {
		return
	}

	done := make(chan struct{})
	m.reconcilerParams.delNoTrackPod <- reconciliationRequest[noTrackPodInfo]{noTrackPodInfo{ip, port}, done}
	<-done
}

// AddNoTrackHostPorts requests NOTRACK rules for the given host ports of the
// named pod. No-op unless InstallNoConntrackIptRules is enabled. Blocks until
// the reconciler has processed the request.
func (m *Manager) AddNoTrackHostPorts(namespace, name string, ports []string) {
	if !m.sharedCfg.InstallNoConntrackIptRules {
		return
	}

	pod := podAndNameSpace{podName: name, namespace: namespace}

	done := make(chan struct{})
	m.reconcilerParams.addNoTrackHostPorts <- reconciliationRequest[noTrackHostPortsPodInfo]{noTrackHostPortsPodInfo{pod, ports}, done}
	<-done
}

// RemoveNoTrackHostPorts removes the NOTRACK host-port rules previously
// requested for the named pod. No-op unless InstallNoConntrackIptRules is
// enabled. Blocks until the reconciler has processed the request.
func (m *Manager) RemoveNoTrackHostPorts(namespace, name string) {
	if !m.sharedCfg.InstallNoConntrackIptRules {
		return
	}

	pod := podAndNameSpace{podName: name, namespace: namespace}

	done := make(chan struct{})
	m.reconcilerParams.delNoTrackHostPorts <- reconciliationRequest[podAndNameSpace]{pod, done}
	<-done
}

// InstallProxyRules requests installation of the TPROXY redirect rules for the
// named proxy on the given port. Blocks until the reconciler has processed the
// request.
func (m *Manager) InstallProxyRules(proxyPort uint16, name string) {
	done := make(chan struct{})
	m.reconcilerParams.proxies <- reconciliationRequest[proxyInfo]{proxyInfo{name, proxyPort}, done}
	<-done
}

// doInstallProxyRules installs the TPROXY rules for the named proxy on each
// enabled IP family, binding to the respective loopback address.
func (m *Manager) doInstallProxyRules(proxyPort uint16, name string) error {
	// We could fail if netfilter was compiled out from the kernel, so bail
	// out without error in this case, too.
	if m.haveBPFSocketAssign || !m.sharedCfg.InstallIptRules {
		return nil
	}

	if m.sharedCfg.EnableIPv4 {
		if err := m.addProxyRules(m.ip4tables, "127.0.0.1", proxyPort, name); err != nil {
			return err
		}
	}

	if !m.sharedCfg.EnableIPv6 {
		return nil
	}
	return m.addProxyRules(m.ip6tables, "::1", proxyPort, name)
}

// GetProxyPorts enumerates all existing TPROXY rules in the datapath installed earlier with
// InstallProxyRules and returns all proxy ports found. The IPv4 program is
// consulted when IPv4 is enabled, otherwise the IPv6 one.
func (m *Manager) GetProxyPorts() map[string]uint16 {
	if m.sharedCfg.EnableIPv4 {
		return m.doGetProxyPorts(m.ip4tables)
	}
	return m.doGetProxyPorts(m.ip6tables)
}

func (m *Manager) doGetProxyPorts(prog iptablesInterface) map[string]uint16 {
	portMap := make(map[string]uint16)

	m.lock.Lock()
	defer m.lock.Unlock()

	rules, err := prog.runProgOutput([]string{"-t", "mangle", "-n", "-L", ciliumPreMangleChain})
	if err != nil {
		return portMap
	}

	re := regexp.MustCompile(
		"/\\* cilium: TPROXY to host ([^ ]+) proxy \\*/ TPROXY redirect " +
			"(0.0.0.0|" + ipfamily.IPv4().Localhost +
			"|::|" + ipfamily.IPv6().Localhost + ")" +
			":([1-9][0-9]*) mark",
	)
	strs := re.FindAllString(rules, -1)
	for _, str := range strs {
		// Pick the name and port number from each match
		name := re.ReplaceAllString(str, "$1")
		portStr := re.ReplaceAllString(str, "$3")
		portUInt64, err := strconv.ParseUint(portStr, 10, 16)
		if err == nil {
			portMap[name] = uint16(portUInt64)
		}
	}
	return portMap
}

// getDeliveryInterface returns the interface (or wildcard pattern) on which
// traffic is delivered to local endpoints, depending on the routing/IPAM mode.
func (m *Manager) getDeliveryInterface(ifName string) string {
	if m.sharedCfg.EnableEndpointRoutes {
		// aws-cni creates container interfaces with names like eni621c0fc8425.
		if m.cniConfigManager.GetChainingMode() == "aws-cni" {
			return "eni+"
		}
		return "lxc+"
	}

	if m.sharedCfg.IPAM == ipamOption.IPAMENI || m.sharedCfg.IPAM == ipamOption.IPAMAlibabaCloud {
		return "lxc+"
	}

	return ifName
}

// installForwardChainRules installs the forward-chain ACCEPT rules for each
// enabled IP family.
func (m *Manager) installForwardChainRules(ifName, localDeliveryInterface, forwardChain string) error {
	if m.sharedCfg.EnableIPv4 {
		if err := m.installForwardChainRulesIpX(m.ip4tables, ifName, localDeliveryInterface, forwardChain); err != nil {
			return err
		}
	}

	if !m.sharedCfg.EnableIPv6 {
		return nil
	}
	return m.installForwardChainRulesIpX(m.ip6tables, ifName, localDeliveryInterface, forwardChain)
}

// installForwardChainRulesIpX installs the ACCEPT rules into the given filter
// chain for a single iptables program.
//
// While kube-proxy does change the policy of the iptables FORWARD chain
// it doesn't seem to handle all cases, e.g. host network pods that use
// the node IP which would still end up in default DENY. Similarly, for
// plain Docker setup, we would otherwise hit default DENY in FORWARD chain.
// Also, k8s 1.15 introduced "-m conntrack --ctstate INVALID -j DROP" which
// in the direct routing case can drop EP replies.
//
// Therefore, add the rules below to avoid having a user to manually opt-in.
// See also: https://github.com/kubernetes/kubernetes/issues/39823
// In here can only be basic ACCEPT rules, nothing more complicated.
//
// The 2nd and 3rd rule are for the case of nodeport traffic where the backend is
// remote. The traffic flow in FORWARD is as follows:
//
//   - Node serving nodeport request:
//     IN=eno1 OUT=cilium_host
//     IN=cilium_host OUT=eno1
//
//   - Node running backend:
//     IN=eno1 OUT=cilium_host
//     IN=lxc... OUT=eno1
func (m *Manager) installForwardChainRulesIpX(prog runnable, ifName, localDeliveryInterface, forwardChain string) error {
	matchers := [][]string{
		{"-o", ifName, "-m", "comment", "--comment", "cilium: any->cluster on " + ifName + " forward accept"},
		{"-i", ifName, "-m", "comment", "--comment", "cilium: cluster->any on " + ifName + " forward accept (nodeport)"},
		{"-i", "lxc+", "-m", "comment", "--comment", "cilium: cluster->any on lxc+ forward accept"},
	}

	// Proxy return traffic to a remote source needs '-i cilium_net'.
	if ifName == defaults.HostDevice {
		ifPeerName := defaults.SecondHostDevice
		matchers = append(matchers,
			[]string{"-i", ifPeerName, "-m", "comment", "--comment", "cilium: cluster->any on " + ifPeerName + " forward accept (nodeport)"})
	}

	// In case the delivery interface and the host interface are not the
	// same (enable-endpoint-routes), a separate set of rules to allow
	// from/to delivery interface is required.
	if localDeliveryInterface != ifName {
		matchers = append(matchers,
			[]string{"-o", localDeliveryInterface, "-m", "comment", "--comment", "cilium: any->cluster on " + localDeliveryInterface + " forward accept"},
			[]string{"-i", localDeliveryInterface, "-m", "comment", "--comment", "cilium: cluster->any on " + localDeliveryInterface + " forward accept (nodeport)"})
	}

	for _, matcher := range matchers {
		args := append([]string{"-A", forwardChain}, matcher...)
		args = append(args, "-j", "ACCEPT")
		if err := prog.runProg(args); err != nil {
			return err
		}
	}
	return nil
}

// installMasqueradeRules installs the SNAT/masquerade rules into the nat
// table's CILIUM_POST_nat chain for a single IP family (prog selects the
// iptables or ip6tables wrapper). allocRange is the local pod allocation
// CIDR, snatDstExclusionCIDR is excluded from masquerading, and
// hostMasqueradeIP is the SNAT source address for host->cluster traffic.
func (m *Manager) installMasqueradeRules(
	prog iptablesInterface, nativeDevices []string,
	localDeliveryInterface, snatDstExclusionCIDR, allocRange, hostMasqueradeIP string,
) error {
	devices := nativeDevices

	if m.sharedCfg.NodeIpsetNeeded {
		cmds := nodeIpsetNATCmds(allocRange, prog.getIpset(), m.sharedCfg.MasqueradeInterfaces)
		for _, cmd := range cmds {
			if err := prog.runProg(cmd); err != nil {
				return err
			}
		}
	}

	// Masquerade egress traffic leaving the node based on source routing
	//
	// If this option is enabled, then it takes precedence over the catch-all
	// MASQUERADE further below.
	if m.sharedCfg.EnableMasqueradeRouteSource {
		var defaultRoutes []netlink.Route

		if len(m.sharedCfg.MasqueradeInterfaces) > 0 {
			devices = m.sharedCfg.MasqueradeInterfaces
		}
		family := netlink.FAMILY_V4
		if prog == m.ip6tables {
			family = netlink.FAMILY_V6
		}
		// Two-pass loop: the first pass installs rules for specific routes
		// and collects default routes; the goto below then re-runs the loop
		// over only the collected default routes, so the more specific SNAT
		// rules always come first in the chain.
		initialPass := true
		if routes, err := safenetlink.RouteList(nil, family); err == nil {
		nextPass:
			for _, r := range routes {
				var link netlink.Link
				match := false
				if r.LinkIndex > 0 {
					link, err = netlink.LinkByIndex(r.LinkIndex)
					if err != nil {
						continue
					}
					// Routes are dedicated to the specific interface, so we
					// need to install the SNAT rules also for that interface
					// via -o. If we cannot correlate to anything because no
					// devices were specified, we need to bail out.
					if len(devices) == 0 {
						return fmt.Errorf("cannot correlate source route device for generating masquerading rules")
					}
					for _, device := range devices {
						filter := tables.DeviceFilter{device}
						// NOTE(review): `m` here shadows the Manager receiver
						// for the rest of this inner loop body; it holds the
						// boolean filter-match result.
						m, reverse := filter.Match(link.Attrs().Name)
						if m {
							match = !reverse
							break
						}
					}
				} else {
					// There might be next hop groups where ifindex is zero
					// and the underlying next hop devices might not be known
					// to Cilium. In this case, assume match and don't encode
					// -o device.
					match = true
				}
				// Parsed on every iteration even though the input string is
				// loop-invariant; a parse failure merely disables the
				// exclusion-CIDR comparison below.
				_, exclusionCIDR, err := net.ParseCIDR(snatDstExclusionCIDR)
				if !match || r.Src == nil || (err == nil && cidr.Equal(r.Dst, exclusionCIDR)) {
					continue
				}
				// Defer default routes to the second pass (see above).
				if initialPass && cidr.Equal(r.Dst, cidr.ZeroNet(r.Family)) {
					defaultRoutes = append(defaultRoutes, r)
					continue
				}
				progArgs := []string{
					"-t", "nat",
					"-A", ciliumPostNatChain,
					"-s", allocRange,
				}
				if cidr.Equal(r.Dst, cidr.ZeroNet(r.Family)) {
					progArgs = append(
						progArgs,
						"!", "-d", snatDstExclusionCIDR)
				} else {
					progArgs = append(
						progArgs,
						"-d", r.Dst.String())
				}
				if link != nil {
					progArgs = append(
						progArgs,
						"-o", link.Attrs().Name)
				} else {
					progArgs = append(
						progArgs,
						"!", "-o", "cilium_+")
				}
				progArgs = append(
					progArgs,
					"-m", "comment", "--comment", "cilium snat non-cluster via source route",
					"-j", "SNAT",
					"--to-source", r.Src.String())
				if m.cfg.IPTablesRandomFully {
					progArgs = append(progArgs, "--random-fully")
				}
				if err := prog.runProg(progArgs); err != nil {
					return err
				}
			}
			if initialPass {
				initialPass = false
				routes = defaultRoutes
				goto nextPass
			}
		}
	} else {
		// Masquerade all egress traffic leaving the node (catch-all)
		//
		// This rule must be first as the node ipset rule as it has different
		// exclusion criteria than the other rules in this table.
		//
		// The following conditions must be met:
		// * May not leave on a cilium_ interface, this excludes all
		//   tunnel traffic
		// * Must originate from an IP in the local allocation range
		// * Must not be reply if BPF NodePort is enabled
		// * Tunnel mode:
		//   * May not be targeted to an IP in the local allocation
		//     range
		// * Non-tunnel mode:
		//   * May not be targeted to an IP in the cluster range
		cmds := allEgressMasqueradeCmds(allocRange, snatDstExclusionCIDR, m.sharedCfg.MasqueradeInterfaces,
			m.cfg.IPTablesRandomFully)
		for _, cmd := range cmds {
			if err := prog.runProg(cmd); err != nil {
				return err
			}
		}
	}

	// The following rule exclude traffic from the remaining rules in this chain.
	// If this rule matches, none of the remaining rules in this chain
	// are considered.

	// Exclude proxy return traffic from the masquarade rules.
	if err := prog.runProg([]string{
		"-t", "nat",
		"-A", ciliumPostNatChain,
		// Don't match proxy (return) traffic
		"-m", "mark", "--mark", fmt.Sprintf("%#08x/%#08x", linux_defaults.MagicMarkIsProxy, linux_defaults.MagicMarkProxyMask),
		"-m", "comment", "--comment", "exclude proxy return traffic from masquerade",
		"-j", "ACCEPT"}); err != nil {
		return err
	}

	if m.sharedCfg.TunnelingEnabled {
		// Masquerade all traffic from the host into the ifName
		// interface if the source is not in the node's pod CIDR.
		//
		// The following conditions must be met:
		// * Must be targeted for the ifName interface
		// * Must be targeted to an IP that is not local
		// * May not already be originating from the node's pod CIDR.
		if err := prog.runProg([]string{
			"-t", "nat",
			"-A", ciliumPostNatChain,
			"!", "-s", allocRange,
			"!", "-d", allocRange,
			"-o", defaults.HostDevice,
			"-m", "comment", "--comment", "cilium host->cluster masquerade",
			"-j", "SNAT", "--to-source", hostMasqueradeIP}); err != nil {
			return err
		}
	}

	// Select the loopback address matching the IP family handled by prog.
	loopbackAddr := "127.0.0.1"
	if prog == m.ip6tables {
		loopbackAddr = "::1"
	}

	// Masquerade all traffic from the host into local
	// endpoints if the source is 127.0.0.1. This is
	// required to force replies out of the endpoint's
	// network namespace.
	//
	// The following conditions must be met:
	// * Must be targeted for local endpoint
	// * Must be from 127.0.0.1
	if err := prog.runProg([]string{
		"-t", "nat",
		"-A", ciliumPostNatChain,
		"-s", loopbackAddr,
		"-o", localDeliveryInterface,
		"-m", "comment", "--comment", "cilium host->cluster from " + loopbackAddr + " masquerade",
		"-j", "SNAT", "--to-source", hostMasqueradeIP}); err != nil {
		return err
	}

	// Masquerade all traffic that originated from a local
	// pod and thus carries a security identity and that
	// was also DNAT'ed. It must be masqueraded to ensure
	// that reverse NAT can be performed. Otherwise the
	// reply traffic would be sent directly to the pod
	// without traversing the Linux stack again.
	//
	// This is only done if EnableEndpointRoutes is
	// disabled, if EnableEndpointRoutes is enabled, then
	// all traffic always passes through the stack anyway.
	//
	// This is required for:
	//  - portmap/host if both source and destination are
	//    on the same node
	//  - some proxy if source and server are on the same node
	if !m.sharedCfg.EnableEndpointRoutes {
		if err := prog.runProg([]string{
			"-t", "nat",
			"-A", ciliumPostNatChain,
			"-m", "mark", "--mark", fmt.Sprintf("%#08x/%#08x", linux_defaults.MagicMarkIdentity, linux_defaults.MagicMarkHostMask),
			"-o", localDeliveryInterface,
			"-m", "conntrack", "--ctstate", "DNAT",
			"-m", "comment", "--comment", "hairpin traffic that originated from a local pod",
			"-j", "SNAT", "--to-source", hostMasqueradeIP}); err != nil {
			return err
		}
	}

	return nil
}

// installHostTrafficMarkRule marks all packets sourced from processes running
// on the host with a special marker so that we can differentiate traffic
// sourced locally vs. traffic from the outside world that was masqueraded to
// appear like it's from the host.
//
// Originally we set this mark only for traffic destined to the
// ifName device, to ensure that any traffic directly reaching
// to a Cilium-managed IP could be classified as from the host.
//
// However, there's another case where a local process attempts to
// reach a service IP which is backed by a Cilium-managed pod. The
// service implementation is outside of Cilium's control, for example,
// handled by kube-proxy. We can tag even this traffic with a magic
// mark, then when the service implementation proxies it back into
// Cilium the BPF will see this mark and understand that the packet
// originated from the host.
func (m *Manager) installHostTrafficMarkRule(prog runnable) error {
	// mark renders a value/mask pair in the iptables --mark syntax.
	mark := func(value, mask any) string {
		return fmt.Sprintf("%#08x/%#08x", value, mask)
	}

	return prog.runProg([]string{
		"-t", "filter",
		"-A", ciliumOutputChain,
		"-m", "mark", "!", "--mark", mark(linux_defaults.RouteMarkDecrypt, linux_defaults.RouteMarkMask), // Don't match ipsec traffic
		"-m", "mark", "!", "--mark", mark(linux_defaults.RouteMarkEncrypt, linux_defaults.RouteMarkMask), // Don't match Cilium's encrypted traffic
		"-m", "mark", "!", "--mark", mark(linux_defaults.MagicMarkOverlay, linux_defaults.MagicMarkHostMask), // Don't match Cilium's overlay traffic
		"-m", "mark", "!", "--mark", mark(linux_defaults.MagicMarkIsProxy, linux_defaults.MagicMarkProxyMask), // Don't match proxy traffic
		"-m", "mark", "!", "--mark", mark(linux_defaults.MagicMarkIsProxyEPID, linux_defaults.MagicMarkProxyMask), // Don't match proxy traffic
		"-m", "comment", "--comment", "cilium: host->any mark as from host",
		"-j", "MARK", "--set-xmark", mark(linux_defaults.MagicMarkHost, linux_defaults.MagicMarkHostMask)})
}

// doInstallRules atomically replaces Cilium's iptables rules: the existing
// chains are renamed to an OLD_ backup prefix, the desired rules are
// installed, proxy rules are migrated/installed, and finally the backup
// chains are removed. The manager lock is held for the whole operation.
func (m *Manager) doInstallRules(state desiredState, firstInit bool) error {
	m.lock.Lock()
	defer m.lock.Unlock()

	// Make sure we have no old "backups"
	if err := m.removeRules(oldCiliumPrefix); err != nil {
		// We could fail if netfilter was compiled out from the kernel
		if !state.installRules {
			return nil
		}
		return fmt.Errorf("failed to remove old backup rules: %w", err)
	}

	// Move current chains out of the way so traffic keeps flowing through the
	// renamed copies while the new rules are being installed.
	if err := m.renameChains(oldCiliumPrefix); err != nil {
		return fmt.Errorf("failed to rename chains: %w", err)
	}

	// install rules if needed
	if state.installRules {
		if err := m.installRules(state); err != nil {
			return fmt.Errorf("failed to install rules: %w", err)
		}

		// copy old proxy rules over at initialization
		if firstInit {
			if err := m.copyProxyRules(oldCiliumPrefix+ciliumPreMangleChain, "cilium-dns-egress"); err != nil {
				return fmt.Errorf("cannot copy old proxy rules, disruption to traffic selected by L7 policy possible: %w", err)
			}
		}

		// Install TPROXY rules for every proxy known in the desired state.
		for _, proxy := range state.proxies {
			if err := m.doInstallProxyRules(proxy.port, proxy.name); err != nil {
				return fmt.Errorf("cannot install proxy rules for %s: %w", proxy.name, err)
			}
		}
	}

	// Drop the renamed backup chains now that the new rules are in place.
	if err := m.removeRules(oldCiliumPrefix); err != nil {
		return fmt.Errorf("failed to remove old rules: %w", err)
	}

	return nil
}

// installRules installs iptables rules for Cilium in specific use-cases
// (most specifically, interaction with kube-proxy).
//
// The installation order matters: custom chains are created first, then the
// various rule groups, and the feeder rules hooking the chains into the
// built-in iptables chains are installed last so traffic only enters the
// Cilium chains once they are fully populated.
func (m *Manager) installRules(state desiredState) error {
	// Install new rules
	for _, c := range ciliumChains {
		if err := c.add(m.sharedCfg.EnableIPv4, m.sharedCfg.EnableIPv6, m.ip4tables, m.ip6tables); err != nil {
			// do not return error for chain creation that are linked to disabled feeder rules
			if isDisabledChain(m.cfg.DisableIptablesFeederRules, c.hook) {
				m.logger.Warn(
					fmt.Sprintf("ignoring creation of chain since feeder rules for %s is disabled", c.hook),
					logfields.Chain, c.name,
				)
				continue
			}

			return fmt.Errorf("cannot add custom chain %s: %w", c.name, err)
		}
	}

	if err := m.addCiliumTunnelRules(); err != nil {
		return fmt.Errorf("cannot install tunnel rules: %w", err)
	}

	if err := m.installStaticProxyRules(); err != nil {
		return fmt.Errorf("cannot install static proxy rules: %w", err)
	}

	if err := m.addCiliumAcceptEncryptionRules(); err != nil {
		return fmt.Errorf("cannot install encryption rules: %w", err)
	}

	localDeliveryInterface := m.getDeliveryInterface(defaults.HostDevice)

	if err := m.installForwardChainRules(defaults.HostDevice, localDeliveryInterface, ciliumForwardChain); err != nil {
		return fmt.Errorf("cannot install forward chain rules to %s: %w", ciliumForwardChain, err)
	}

	if m.sharedCfg.EnableIPv4 {
		if err := m.installHostTrafficMarkRule(m.ip4tables); err != nil {
			return fmt.Errorf("cannot install host traffic mark rule: %w", err)
		}

		// Masquerading rules need the node's internal IPv4 as SNAT source.
		if m.sharedCfg.IptablesMasqueradingIPv4Enabled && state.localNodeInfo.internalIPv4 != nil {
			if err := m.installMasqueradeRules(m.ip4tables, state.devices.UnsortedList(), localDeliveryInterface,
				m.remoteSNATDstAddrExclusionCIDR(state.localNodeInfo.ipv4NativeRoutingCIDR, state.localNodeInfo.ipv4AllocCIDR),
				state.localNodeInfo.ipv4AllocCIDR,
				state.localNodeInfo.internalIPv4.String(),
			); err != nil {
				return fmt.Errorf("cannot install masquerade rules: %w", err)
			}
		}
	}

	if m.sharedCfg.EnableIPv6 {
		if err := m.installHostTrafficMarkRule(m.ip6tables); err != nil {
			return fmt.Errorf("cannot install host traffic mark rule: %w", err)
		}

		// Same as above, for the IPv6 family.
		if m.sharedCfg.IptablesMasqueradingIPv6Enabled && state.localNodeInfo.internalIPv6 != nil {
			if err := m.installMasqueradeRules(m.ip6tables, state.devices.UnsortedList(), localDeliveryInterface,
				m.remoteSNATDstAddrExclusionCIDR(state.localNodeInfo.ipv6NativeRoutingCIDR, state.localNodeInfo.ipv6AllocCIDR),
				state.localNodeInfo.ipv6AllocCIDR,
				state.localNodeInfo.internalIPv6.String(),
			); err != nil {
				return fmt.Errorf("cannot install masquerade rules: %w", err)
			}
		}
	}

	// AWS ENI requires to mark packets ingressing on the primary interface
	// and route them back the same way even if the pod responding is using
	// the IP of a different interface. Please see note in Reinitialize()
	// in pkg/datapath/loader for more details.
	if m.sharedCfg.IPAM == ipamOption.IPAMENI || m.sharedCfg.IPAM == ipamOption.IPAMAlibabaCloud {
		if err := m.addCiliumENIRules(); err != nil {
			return fmt.Errorf("cannot install rules for ENI multi-node NodePort: %w", err)
		}
	}

	if m.sharedCfg.EnableIPSec || m.sharedCfg.EnableWireguard {
		if err := m.addCiliumNoTrackEncryptionRules(); err != nil {
			return fmt.Errorf("cannot install encryption rules: %w", err)
		}
	}

	// Broad pod-traffic NOTRACK rules are IPv4-only, keyed off the native
	// routing CIDR.
	podsCIDR := state.localNodeInfo.ipv4NativeRoutingCIDR
	if m.sharedCfg.InstallNoConntrackIptRules && podsCIDR != "" {
		if err := m.addNoTrackPodTrafficRules(m.ip4tables, podsCIDR); err != nil {
			return fmt.Errorf("cannot install pod traffic no CT rules: %w", err)
		}
	}

	// Per-endpoint NOTRACK rules requested via pod annotations.
	for noTrackPodInfo := range state.noTrackPods {
		if err := m.installNoTrackRules(noTrackPodInfo.ip, noTrackPodInfo.port); err != nil {
			return err
		}
	}

	// Host-port NOTRACK rules, grouped per protocol.
	noTrackPorts := groupL4AddrsByProto(state.noTrackHostPorts.flatten().AsSlice())
	for _, proto := range noTrackSupportedProtos {
		if ports, ok := noTrackPorts[proto]; ok && len(ports) > 0 {
			if err := m.installHostNoTrackRules(proto, ports); err != nil {
				return err
			}
		}

	}

	// Hook the populated Cilium chains into the built-in chains last.
	for _, c := range ciliumChains {
		// do not install feeder for chains that are set to be disabled
		if isDisabledChain(m.cfg.DisableIptablesFeederRules, c.hook) {
			m.logger.Info(
				"Skipping the install of feeder rule",
				logfields.Chain, c.hook,
			)
			continue
		}

		if err := c.installFeeder(m.sharedCfg.EnableIPv4, m.sharedCfg.EnableIPv6, m.cfg.PrependIptablesChains, m.ip4tables, m.ip6tables); err != nil {
			return fmt.Errorf("cannot install feeder rule: %w", err)
		}
	}

	return nil
}

// remoteSNATDstAddrExclusionCIDR returns the CIDR excluded from remote SNAT:
// the native routing CIDR when configured, otherwise the allocation CIDR.
func (m *Manager) remoteSNATDstAddrExclusionCIDR(nativeRoutingCIDR, allocCIDR string) string {
	if nativeRoutingCIDR == "" {
		// no ip{v4,v6}-native-routing-cidr configured, fall back to allocCIDR
		return allocCIDR
	}
	return nativeRoutingCIDR
}

// ciliumNoTrackEncryptionRules applies (with the given iptables command flag,
// e.g. "-I") NOTRACK rules for packets carrying the encrypt/decrypt route
// marks in the raw-table PREROUTING and OUTPUT Cilium chains.
func (m *Manager) ciliumNoTrackEncryptionRules(prog iptablesInterface, input string) error {
	matchDecrypt := fmt.Sprintf("%#08x/%#08x", linux_defaults.RouteMarkDecrypt, linux_defaults.RouteMarkMask)
	matchEncrypt := fmt.Sprintf("%#08x/%#08x", linux_defaults.RouteMarkEncrypt, linux_defaults.RouteMarkMask)

	for _, chain := range []string{ciliumPreRawChain, ciliumOutputRawChain} {
		for _, match := range []string{matchDecrypt, matchEncrypt} {
			if err := prog.runProg([]string{
				"-t", "raw", input, chain,
				"-m", "mark", "--mark", match,
				"-m", "comment", "--comment", encryptionDescription,
				"-j", "CT", "--notrack"}); err != nil {
				return err
			}
		}
	}
	return nil
}

// Exclude crypto traffic from the filter and nat table rules.
// This avoids encryption bits and keyID, 0x*d00 for decryption
// and 0x*e00 for encryption, colliding with existing rules. Needed
// for kube-proxy for example.
func (m *Manager) addCiliumAcceptEncryptionRules() error {
	if !m.sharedCfg.EnableIPSec && !m.sharedCfg.EnableWireguard {
		return nil
	}

	matchDecrypt := fmt.Sprintf("%#08x/%#08x", linux_defaults.RouteMarkDecrypt, linux_defaults.RouteMarkMask)
	matchEncrypt := fmt.Sprintf("%#08x/%#08x", linux_defaults.RouteMarkEncrypt, linux_defaults.RouteMarkMask)

	// insertAcceptEncrypt appends ACCEPT rules for the encrypt mark followed
	// by the decrypt mark to the given table/chain.
	insertAcceptEncrypt := func(ipt iptablesInterface, table, chain string) error {
		comment := "exclude encrypt/decrypt marks from " + table + " " + chain + " chain"
		for _, match := range []string{matchEncrypt, matchDecrypt} {
			if err := ipt.runProg([]string{
				"-t", table,
				"-A", chain,
				"-m", "mark", "--mark", match,
				"-m", "comment", "--comment", comment,
				"-j", "ACCEPT"}); err != nil {
				return err
			}
		}
		return nil
	}

	for _, chain := range ciliumChains {
		if chain.table != "filter" && chain.table != "nat" {
			continue
		}
		if m.sharedCfg.EnableIPv4 {
			if err := insertAcceptEncrypt(m.ip4tables, chain.table, chain.name); err != nil {
				return err
			}
		}
		// ip6tables chain exists only if chain.ipv6 is true
		if m.sharedCfg.EnableIPv6 && chain.ipv6 {
			if err := insertAcceptEncrypt(m.ip6tables, chain.table, chain.name); err != nil {
				return err
			}
		}
	}
	return nil
}

// addCiliumNoTrackEncryptionRules inserts the encryption NOTRACK rules for
// each enabled IP family.
func (m *Manager) addCiliumNoTrackEncryptionRules() error {
	if m.sharedCfg.EnableIPv4 {
		if err := m.ciliumNoTrackEncryptionRules(m.ip4tables, "-I"); err != nil {
			return err
		}
	}
	if !m.sharedCfg.EnableIPv6 {
		return nil
	}
	return m.ciliumNoTrackEncryptionRules(m.ip6tables, "-I")
}

// installNoTrackRules appends the per-endpoint NOTRACK rules for the given
// address and port, picking the iptables program matching the IP family.
func (m *Manager) installNoTrackRules(addr netip.Addr, port uint16) error {
	// Do not install per endpoint NOTRACK rules if we are already skipping
	// conntrack for all pod traffic.
	if m.skipPodTrafficConntrack(addr) {
		return nil
	}

	ipt := m.ip4tables
	if addr.Is6() {
		ipt = m.ip6tables
	}
	ip := addr.String()
	for _, portSpec := range noTrackPorts(port) {
		if err := m.endpointNoTrackRules(ipt, "-A", ip, portSpec); err != nil {
			return err
		}
	}
	return nil
}

// removeNoTrackRules deletes the per-endpoint NOTRACK rules for the given
// address over the set of ports derived from port. It is a no-op when
// conntrack is already skipped for all pod traffic.
func (m *Manager) removeNoTrackRules(addr netip.Addr, port uint16) error {
	// Nothing to do if per-endpoint rules are redundant or disabled.
	if m.skipPodTrafficConntrack(addr) {
		return nil
	}

	// Pick the iptables program matching the address family.
	ipt := m.ip4tables
	if addr.Is6() {
		ipt = m.ip6tables
	}

	ip := addr.String()
	for _, p := range noTrackPorts(port) {
		if err := m.endpointNoTrackRules(ipt, "-D", ip, p); err != nil {
			return err
		}
	}
	return nil
}

// skipPodTrafficConntrack reports whether per-endpoint NOTRACK rules are
// unnecessary: either iptables rule installation is disabled entirely, or
// (for IPv4 addresses) conntrack is already bypassed for all pod traffic
// via InstallNoConntrackIptRules.
func (m *Manager) skipPodTrafficConntrack(addr netip.Addr) bool {
	return !m.sharedCfg.InstallIptRules ||
		(addr.Is4() && m.sharedCfg.InstallNoConntrackIptRules)
}

// addNoTrackPodTrafficRules inserts NOTRACK rules into the raw table so
// that traffic both from and to podsCIDR bypasses connection tracking.
func (m *Manager) addNoTrackPodTrafficRules(prog runnable, podsCIDR string) error {
	for _, chain := range []string{ciliumPreRawChain, ciliumOutputRawChain} {
		// One rule matching the pod CIDR as source, one as destination.
		for _, match := range []string{"-s", "-d"} {
			err := prog.runProg([]string{
				"-t", "raw",
				"-I", chain,
				match, podsCIDR,
				"-m", "comment", "--comment", "cilium: NOTRACK for pod traffic",
				"-j", "CT", "--notrack"})
			if err != nil {
				return err
			}
		}
	}
	return nil
}

// addCiliumENIRules installs mangle-table rules for the primary ENI:
// connections arriving on the default-route interface towards a local
// address get a connmark set, and the mark is restored on traffic coming in
// from lxc+ devices. IPv4 only; a no-op otherwise.
func (m *Manager) addCiliumENIRules() error {
	if !m.sharedCfg.EnableIPv4 {
		return nil
	}

	iface, err := route.NodeDeviceWithDefaultRoute(m.logger, m.sharedCfg.EnableIPv4, m.sharedCfg.EnableIPv6)
	if err != nil {
		return fmt.Errorf("failed to find interface with default route: %w", err)
	}

	var (
		mark = fmt.Sprintf("%#08x", linux_defaults.MarkMultinodeNodeport)
		mask = fmt.Sprintf("%#08x", linux_defaults.MaskMultinodeNodeport)
	)

	// Note: these rules need the xt_connmark module (iptables usually
	// loads it when required, unless loading modules after boot has been
	// disabled).
	if err := m.ip4tables.runProg([]string{
		"-t", "mangle",
		"-A", ciliumPreMangleChain,
		"-i", iface.Attrs().Name,
		"-m", "comment", "--comment", "cilium: primary ENI",
		"-m", "addrtype", "--dst-type", "LOCAL", "--limit-iface-in",
		"-j", "CONNMARK", "--set-xmark", mark + "/" + mask}); err != nil {
		return err
	}

	return m.ip4tables.runProg([]string{
		"-t", "mangle",
		"-A", ciliumPreMangleChain,
		"-i", "lxc+",
		"-m", "comment", "--comment", "cilium: primary ENI",
		"-j", "CONNMARK", "--restore-mark", "--nfmask", mark, "--ctmask", mask})
}

// nodeIpsetNATCmds returns the iptables commands excluding traffic towards
// cluster nodes (members of the given ipset) from masquerading.
//
// With no explicit masquerade interfaces, a single rule matching the pod
// allocation CIDR as source is returned. Otherwise one rule per interface
// is returned, mirroring the base condition of the "cilium masquerade
// non-cluster" rule, as the allocRange might not be valid in such setups
// (e.g. in ENI mode).
func nodeIpsetNATCmds(allocRange string, ipset string, masqueradeInterfaces []string) [][]string {
	// build assembles one complete rule. Each call allocates a fresh slice:
	// the previous implementation appended to a shared prefix slice, which
	// only avoided aliasing because the literal's cap equaled its len.
	build := func(match, arg string) []string {
		cmd := make([]string, 0, 16)
		cmd = append(cmd,
			"-t", "nat",
			"-A", ciliumPostNatChain,
			match, arg,
			"-m", "set", "--match-set", ipset, "dst",
			"-m", "comment", "--comment", "exclude traffic to cluster nodes from masquerade",
			"-j", "ACCEPT",
		)
		return cmd
	}

	if len(masqueradeInterfaces) == 0 {
		return [][]string{build("-s", allocRange)}
	}

	cmds := make([][]string, 0, len(masqueradeInterfaces))
	for _, iface := range masqueradeInterfaces {
		cmds = append(cmds, build("-o", iface))
	}
	return cmds
}

// allEgressMasqueradeCmds returns the "cilium masquerade non-cluster"
// iptables commands for egress traffic. Traffic destined to
// snatDstExclusionCIDR is never masqueraded.
//
// With no explicit masquerade interfaces a single rule matching the pod
// allocation CIDR as source (excluding cilium_+ devices) is returned;
// otherwise one rule per interface. When iptablesRandomFully is set,
// --random-fully is appended to every rule.
func allEgressMasqueradeCmds(allocRange string, snatDstExclusionCIDR string,
	masqueradeInterfaces []string, iptablesRandomFully bool) [][]string {
	// build assembles one complete rule from the given match arguments. Each
	// call allocates a fresh slice: the previous implementation appended to a
	// shared prefix slice, which only avoided aliasing because the literal's
	// cap equaled its len.
	build := func(matchArgs ...string) []string {
		cmd := make([]string, 0, 20)
		cmd = append(cmd,
			"-t", "nat",
			"-A", ciliumPostNatChain,
			"!", "-d", snatDstExclusionCIDR,
		)
		cmd = append(cmd, matchArgs...)
		cmd = append(cmd,
			"-m", "comment", "--comment", "cilium masquerade non-cluster",
			"-j", "MASQUERADE",
		)
		if iptablesRandomFully {
			cmd = append(cmd, "--random-fully")
		}
		return cmd
	}

	if len(masqueradeInterfaces) == 0 {
		return [][]string{build("-s", allocRange, "!", "-o", "cilium_+")}
	}

	cmds := make([][]string, 0, len(masqueradeInterfaces))
	for _, iface := range masqueradeInterfaces {
		cmds = append(cmds, build("-o", iface))
	}
	return cmds
}

// hostNoTrackMultiPorts installs or removes a notrack rule matching multiple ports.
// the use case for this is to skip conntrack when a pod uses hostNetwork to improve performance (pps/rps)
// since conntrack affects the performance under load - which can occur under DDoS or traffic spikes for instance.
func (m *Manager) hostNoTrackMultiPorts(prog iptablesInterface, cmd, proto string, ports []uint16) error {
	// Sort a copy of the ports so the caller's slice is not mutated
	// (previously slices.Sort reordered the input argument in place),
	// and render them as a comma-separated multiport list.
	sorted := slices.Clone(ports)
	slices.Sort(sorted)
	strPorts := make([]string, len(sorted))
	for i, p := range sorted {
		strPorts[i] = strconv.FormatUint(uint64(p), 10)
	}
	portList := strings.Join(strPorts, ",")
	protoName := strings.ToLower(proto)

	// Inbound traffic to these destination ports bypasses conntrack.
	if err := prog.runProg([]string{
		"-t", "raw",
		cmd, ciliumPreRawChain,
		"-p", protoName,
		"--match", "multiport",
		"--dports", portList,
		"-m", "comment", "--comment", "cilium no-track-host-ports",
		"-j", "CT",
		"--notrack"}); err != nil {
		return err
	}

	// Return traffic sourced from these ports bypasses conntrack as well.
	return prog.runProg([]string{
		"-t", "raw",
		cmd, ciliumOutputRawChain,
		"-p", protoName,
		"--match", "multiport",
		"--sports", portList,
		"-m", "comment", "--comment", "cilium no-track-host-ports return traffic",
		"-j", "CT",
		"--notrack"})
}

// groupL4AddrsByProto iterates over a slice of ports and returns a map with the port numbers
// grouped by protocol.
func groupL4AddrsByProto(ports []lb.L4Addr) map[lb.L4Type][]uint16 {
	grouped := make(map[lb.L4Type][]uint16)
	for _, addr := range ports {
		grouped[addr.Protocol] = append(grouped[addr.Protocol], addr.Port)
	}
	return grouped
}

// replaceNoTrackHostPortRules replaces noTrackHostPort rules on a state change. the new ruleset is added, and the previous one is removed.
func (m *Manager) replaceNoTrackHostPortRules(oldPorts, newPorts map[lb.L4Type][]uint16) error {
	for _, proto := range noTrackSupportedProtos {
		previous := set.NewSet(oldPorts[proto]...)
		desired := set.NewSet(newPorts[proto]...)

		// Skip protocols whose port set is unchanged.
		if desired.Equal(previous) {
			continue
		}

		// The new ruleset is installed before the old one is removed.
		if !desired.Empty() {
			if err := m.installHostNoTrackRules(proto, desired.AsSlice()); err != nil {
				return err
			}
		}
		if !previous.Empty() {
			if err := m.cleanupHostNoTrackRules(proto, previous.AsSlice()); err != nil {
				return err
			}
		}
	}
	return nil
}

// installHostNoTrackRules installs a hostNoTrack multiport rule
func (m *Manager) installHostNoTrackRules(proto lb.L4Type, p []uint16) error {
	if m.sharedCfg.EnableIPv4 {
		if err := m.hostNoTrackMultiPorts(m.ip4tables, "-A", proto, p); err != nil {
			return err
		}
	}
	if !m.sharedCfg.EnableIPv6 {
		return nil
	}
	return m.hostNoTrackMultiPorts(m.ip6tables, "-A", proto, p)
}

// cleanupHostNoTrackRules cleans up a hostNoTrack multiport rule
func (m *Manager) cleanupHostNoTrackRules(proto lb.L4Type, p []uint16) error {
	if m.sharedCfg.EnableIPv4 {
		if err := m.hostNoTrackMultiPorts(m.ip4tables, "-D", proto, p); err != nil {
			return err
		}
	}
	if !m.sharedCfg.EnableIPv6 {
		return nil
	}
	return m.hostNoTrackMultiPorts(m.ip6tables, "-D", proto, p)
}

// removeNoTrackHostPorts removes notrack rules if the global set changes after removing an entry for the pod.
func (m *Manager) removeNoTrackHostPorts(currentState noTrackHostPortsByPod, podName podAndNameSpace) error {
	// Snapshot the installed ruleset, drop the pod's entry from the state
	// (this mutates the caller's map), then diff against the result.
	before := groupL4AddrsByProto(currentState.flatten().AsSlice())
	delete(currentState, podName)
	after := groupL4AddrsByProto(currentState.flatten().AsSlice())

	return m.replaceNoTrackHostPortRules(before, after)
}

// setNoTrackHostPorts ensures that the notrack rules for host network pods are in place.
// it removes the previous ruleset and adds the new ruleset if the global set of ports have changed.
func (m *Manager) setNoTrackHostPorts(currentState noTrackHostPortsByPod, podName podAndNameSpace, ports []string) error {
	parsedPorts := make([]lb.L4Addr, 0, len(ports))

	for _, p := range ports {
		if p == "" {
			continue
		}

		parsed, err := lb.L4AddrFromString(p)
		if err != nil {
			return fmt.Errorf("failed to parse port/proto for %s: %w", p, err)
		}

		// Only TCP and UDP are accepted for no-track-host-ports.
		switch parsed.Protocol {
		case lb.TCP, lb.UDP:
			parsedPorts = append(parsedPorts, parsed)
		default:
			// Message fixed: previously read "is not unsupported" (double negative).
			return fmt.Errorf("protocol %s is not supported for no-track-host-ports", parsed.Protocol)
		}
	}

	newSet := set.NewSet(parsedPorts...)
	if newSet.Empty() {
		// An empty port set is equivalent to removing the pod's entry.
		return m.removeNoTrackHostPorts(currentState, podName)
	}

	currentPodPorts, ok := currentState[podName]
	if ok && currentPodPorts.Equal(newSet) {
		// no changes
		return nil
	}

	// grab the previously installed state
	oldPorts := groupL4AddrsByProto(currentState.flatten().AsSlice())

	// update current state, since we now know it has changed (or is a new entry altogether)
	currentState[podName] = newSet

	newPorts := groupL4AddrsByProto(currentState.flatten().AsSlice())

	return m.replaceNoTrackHostPortRules(oldPorts, newPorts)
}
