---
version: v1alpha1
machine:
  ca:
    crt: op://kubernetes/talos/MACHINE_CA_CRT
    {% if ENV.IS_CONTROLLER %}
    key: op://kubernetes/talos/MACHINE_CA_KEY
    {% endif %}
  features:
    apidCheckExtKeyUsage: true
    diskQuotaSupport: true
    hostDNS:
      enabled: true
      forwardKubeDNSToHost: true # Requires Cilium socketLB features
      resolveMemberNames: true
    kubePrism:
      enabled: true
      port: 7445
    {% if ENV.IS_CONTROLLER %}
    kubernetesTalosAPIAccess:
      allowedKubernetesNamespaces:
        - actions-runner-system
        - system-upgrade
      allowedRoles:
        - os:admin
      enabled: true
    {% endif %}
    rbac: true
    stableHostname: true
  files:
    - op: create
      path: /etc/cri/conf.d/20-customization.part
      content: |
        [plugins."io.containerd.cri.v1.images"]
          discard_unpacked_layers = false
        [plugins."io.containerd.cri.v1.runtime"]
          cdi_spec_dirs = ["/var/cdi/static", "/var/cdi/dynamic"]
          device_ownership_from_security_context = true
    - op: overwrite
      path: /etc/nfsmount.conf
      permissions: 0o644
      content: |
        [ NFSMount_Global_Options ]
        nfsvers=4.2
        hard=True
        nconnect=16
        noatime=True
  install:
    diskSelector:
      model: Samsung SSD 870
    image: factory.talos.dev/metal-installer/cc720ffb8efc8f12637b8660a86448a88b3f89f516995fb9c67bf80564e723b1:v1.11.5
    wipe: false
  kernel:
    modules:
      - name: nbd
      - name: thunderbolt
      - name: thunderbolt_net
  kubelet:
    defaultRuntimeSeccompProfileEnabled: true
    disableManifestsDirectory: true
    extraConfig:
      featureGates:
        ImageVolume: true
      serializeImagePulls: false
    image: ghcr.io/siderolabs/kubelet:v1.34.2
    nodeIP:
      validSubnets:
        - 192.168.42.0/24
  network:
    interfaces:
      - interface: bond0
        bond:
          deviceSelectors:
            - driver: igc
              hardwareAddr: 88:ae:dd:72:*
          mode: active-backup
        dhcp: true
        mtu: 9000
        vlans:
          - # IOT
            vlanId: 70
            dhcp: false
            mtu: 9000
          - # VPN
            vlanId: 90
            dhcp: false
            mtu: 9000
  nodeLabels:
    node.kubernetes.io/gpu: "true"
    topology.kubernetes.io/region: main
  sysctls:
    fs.inotify.max_user_instances: "8192"
    fs.inotify.max_user_watches: "1048576"
    net.core.default_qdisc: fq
    net.core.rmem_max: "67108864"
    net.core.wmem_max: "67108864"
    net.ipv4.neigh.default.gc_thresh1: "4096"
    net.ipv4.neigh.default.gc_thresh2: "8192"
    net.ipv4.neigh.default.gc_thresh3: "16384"
    net.ipv4.ping_group_range: "0 2147483647"
    net.ipv4.tcp_congestion_control: bbr
    net.ipv4.tcp_fastopen: "3"
    net.ipv4.tcp_mtu_probing: "1"
    net.ipv4.tcp_notsent_lowat: "131072"
    net.ipv4.tcp_rmem: "4096 87380 33554432"
    net.ipv4.tcp_slow_start_after_idle: "0"
    net.ipv4.tcp_window_scaling: "1"
    net.ipv4.tcp_wmem: "4096 65536 33554432"
    sunrpc.tcp_max_slot_table_entries: "128"
    sunrpc.tcp_slot_table_entries: "128"
    user.max_user_namespaces: "11255"
    vm.nr_hugepages: "1024"
  token: op://kubernetes/talos/MACHINE_TOKEN
  udev:
    rules:
      - # Thunderbolt - Disable Power Management
        ACTION=="add", SUBSYSTEM=="thunderbolt", ATTR{power/control}="on"
cluster:
  ca:
    crt: op://kubernetes/talos/CLUSTER_CA_CRT
    {% if ENV.IS_CONTROLLER %}
    key: op://kubernetes/talos/CLUSTER_CA_KEY
    {% endif %}
  clusterName: main
  controlPlane:
    endpoint: https://k8s.internal:6443
  discovery:
    enabled: true
    registries:
      kubernetes:
        disabled: true
      service:
        disabled: false
  id: op://kubernetes/talos/CLUSTER_ID
  network:
    cni:
      name: none
    dnsDomain: cluster.local
    podSubnets:
      - 10.42.0.0/16
    serviceSubnets:
      - 10.43.0.0/16
  secret: op://kubernetes/talos/CLUSTER_SECRET
  token: op://kubernetes/talos/CLUSTER_TOKEN
  {% if ENV.IS_CONTROLLER %}
  aggregatorCA:
    crt: op://kubernetes/talos/CLUSTER_AGGREGATORCA_CRT
    key: op://kubernetes/talos/CLUSTER_AGGREGATORCA_KEY
  allowSchedulingOnControlPlanes: true
  apiServer:
    auditPolicy:
      apiVersion: audit.k8s.io/v1
      kind: Policy
      rules:
        - level: Metadata
    certSANs:
      - k8s.internal
    disablePodSecurityPolicy: true
    extraArgs:
      enable-aggregator-routing: "true"
      feature-gates: ImageVolume=true,MutatingAdmissionPolicy=true
      runtime-config: admissionregistration.k8s.io/v1beta1=true
    image: registry.k8s.io/kube-apiserver:v1.34.2
  controllerManager:
    extraArgs:
      bind-address: 0.0.0.0
    image: registry.k8s.io/kube-controller-manager:v1.34.2
  coreDNS:
    disabled: true
  etcd:
    advertisedSubnets:
      - 192.168.42.0/24
    ca:
      crt: op://kubernetes/talos/CLUSTER_ETCD_CA_CRT
      key: op://kubernetes/talos/CLUSTER_ETCD_CA_KEY
    extraArgs:
      listen-metrics-urls: http://0.0.0.0:2381
  proxy:
    disabled: true
    image: registry.k8s.io/kube-proxy:v1.34.2
  scheduler:
    config:
      apiVersion: kubescheduler.config.k8s.io/v1
      kind: KubeSchedulerConfiguration
      profiles:
        - schedulerName: default-scheduler
          plugins:
            score:
              disabled:
                - name: ImageLocality
          pluginConfig:
            - name: PodTopologySpread
              args:
                defaultingType: List
                defaultConstraints:
                  - maxSkew: 1
                    topologyKey: kubernetes.io/hostname
                    whenUnsatisfiable: ScheduleAnyway
    extraArgs:
      bind-address: 0.0.0.0
    image: registry.k8s.io/kube-scheduler:v1.34.2
  secretboxEncryptionSecret: op://kubernetes/talos/CLUSTER_SECRETBOXENCRYPTIONSECRET
  serviceAccount:
    key: op://kubernetes/talos/CLUSTER_SERVICEACCOUNT_KEY
  {% endif %}
---
apiVersion: v1alpha1
kind: UserVolumeConfig
name: local-hostpath
provisioning:
  diskSelector:
    match: disk.model == "Corsair MP600 MICRO" && !system_disk
  minSize: 1TB
---
apiVersion: v1alpha1
kind: WatchdogTimerConfig
device: /dev/watchdog0
timeout: 5m
