import { V1Affinity, V1EphemeralContainer, V1HostAlias, V1PodAffinityTerm, V1PodDNSConfig, V1PodReadinessGate, V1PodTemplateSpec, V1Toleration, V1TopologySpreadConstraint, V1Volume } from "@kubernetes/client-node"
import { DiffResult, ExecuteContext, KVString, PlanContextData } from "yam-types"
import { DeployWorkload } from "../types"
import ContainerFactory from "./container"

/**
 * Translates a yam-engine workload definition into a kubernetes
 * {@link V1PodTemplateSpec}, merging engine built-ins (affinity scattering,
 * generated volumes, image pull secrets) with pass-through native pod fields.
 */
export default class PodTemplateFactory {

  private readonly workloadName: string
  private readonly planContextData: PlanContextData
  private applyCtx: ExecuteContext
  private readonly workloadSpec: DeployWorkload
  private readonly containerFactory: ContainerFactory
  private readonly diff: DiffResult<DeployWorkload>

  /**
   * @param planContextData plan-level context (app name, workload name, service account name)
   * @param workloadSpec    workload definition to render into a pod template
   * @param diff            diff against the previously applied workload, used to warn on
   *                        changes that will not take effect in-place
   */
  constructor(planContextData: PlanContextData, workloadSpec: DeployWorkload, diff: DiffResult<DeployWorkload>) {
    this.planContextData = planContextData
    this.workloadSpec = workloadSpec
    this.workloadName = planContextData.workloadName
    this.diff = diff
    this.containerFactory = new ContainerFactory(planContextData, workloadSpec)
  }

  /**
   * Builds the full pod template for this workload.
   *
   * @param applyCtx execution context; its logger is attached to the container
   *                 factory and used for warnings during generation
   * @returns pod template with engine-generated containers, volumes and affinity,
   *          plus all native pod fields copied through from the workload spec
   */
  generatePodTemplate(applyCtx: ExecuteContext): V1PodTemplateSpec {
    this.applyCtx = applyCtx
    this.containerFactory.setLogger(applyCtx.log)
    const app = this.planContextData.app
    const spec = this.workloadSpec
    const { initContainers, containers, containsLatest } = this.containerFactory.generatePodContainers()
    const annotations: KVString = {}
    if (containsLatest) {
      // if any container image contains 'latest' tag, once re-deployed, the Pod should be re-created anyway
      // additional annotation could archive this, and imagePullPolicy will be always if contains 'latest'
      annotations['restartedAt'] = new Date().toISOString()
    }
    return {
      metadata: { labels: { app, workload: this.workloadName, version: spec.version || ''}, annotations },
      spec: {
        // yam-engine built-in definitions, extension of native kubernetes API
        initContainers,
        containers,
        volumes: this.generatePodTemplateVolumes(),
        affinity: this.generateAffinitySpec(),
        // comma-separated secret names; blank tokens (trailing/double commas) are dropped
        imagePullSecrets: spec.imagePullSecrets?.split(',').map(x => x.trim()).filter(x => x.length > 0).map(name => ({ name })),
        // Jobs/CronJobs must not restart unconditionally; everything else is a long-running service
        restartPolicy: spec.type.includes('Job') ? 'OnFailure' : 'Always',
        securityContext: spec.advancedFeatures?.podSecurityContext,
        serviceAccountName: this.planContextData.serviceAccountName,

        // compatible with native kubernetes podTemplate definitions
        hostname: spec.hostname as string,
        hostAliases: spec.hostAliases as V1HostAlias[],
        ephemeralContainers: spec.ephemeralContainers as V1EphemeralContainer[],
        // with hostNetwork enabled, plain 'ClusterFirst' would bypass cluster DNS
        dnsPolicy: spec.dnsPolicy as string || (spec.advancedFeatures?.hostNetwork ? 'ClusterFirstWithHostNet' : 'ClusterFirst'),
        dnsConfig: spec.dnsConfig as V1PodDNSConfig,
        nodeName: spec.nodeName as string,
        overhead: spec.overhead as KVString,
        preemptionPolicy: spec.preemptionPolicy as string,
        priority: spec.priority as number,
        priorityClassName: spec.priorityClassName as string,
        readinessGates: spec.readinessGates as V1PodReadinessGate[],
        activeDeadlineSeconds: spec.activeDeadlineSeconds as number,
        automountServiceAccountToken: spec.automountServiceAccountToken as boolean,
        enableServiceLinks: spec.enableServiceLinks as boolean,
        terminationGracePeriodSeconds: spec.gracefulPeriodSeconds || 30,
        hostIPC: spec.advancedFeatures?.hostIPC,
        hostNetwork: spec.advancedFeatures?.hostNetwork,
        hostPID: spec.advancedFeatures?.hostPID,
        runtimeClassName: spec.runtimeClassName as string,
        schedulerName: spec.schedulerName as string,
        setHostnameAsFQDN: spec.setHostnameAsFQDN as boolean,
        shareProcessNamespace: spec.shareProcessNamespace as boolean,
        subdomain: spec.subdomain as string,
        tolerations: spec.tolerations as V1Toleration[],
        topologySpreadConstraints: spec.topologySpreadConstraints as V1TopologySpreadConstraint[]
      }
    }
  }

  /**
   * Builds the affinity block: user-provided affinity, overlaid with
   * nodeAffinity derived from `nodeSelector` ("k=v" or bare "k" meaning k=true)
   * and podAntiAffinity that scatters replicas across nodes (required when
   * `forceScatterPods` is set, preferred otherwise).
   *
   * The user-supplied `workloadSpec.affinity` object is shallow-copied, never
   * mutated, so repeated generation stays idempotent.
   */
  generateAffinitySpec(): V1Affinity {
    const selectors = this.workloadSpec.nodeSelector
    // shallow copy: spreading undefined yields {}, and we must not mutate the caller's spec
    const result: V1Affinity = { ...(this.workloadSpec.affinity as V1Affinity | undefined) }
    // nodeAffinity to enforce pods scheduled to correct node groups
    if (selectors) {
      const matchExpressions = selectors.split(',')
        .map(token => token.trim())
        .filter(token => token.length > 0) // ignore blanks from trailing/double commas
        .map(token => {
          const parts = token.split('=')
          if (parts.length > 1) {
            return { key: parts[0].trim(), operator: 'In', values: [parts[1].trim()] }
          }
          // bare label name is shorthand for label=true
          return { key: token, operator: 'In', values: ["true"] }
        })
      result.nodeAffinity = {
        requiredDuringSchedulingIgnoredDuringExecution: {
          nodeSelectorTerms: [{ matchExpressions }]
        }
      }
    }
    // podAntiAffinity to prefer/enforce pods scattered
    if (this.diff.modifiedItems.length > 0) {
      // NOTE(review): only the first modified item is inspected — presumably there is
      // one diff item per workload; confirm against the diff producer
      const previousScatterStrategy = this.diff.modifiedItems[0].previous.advancedFeatures?.forceScatterPods
      const currentScatterStrategy = this.diff.modifiedItems[0].current.advancedFeatures?.forceScatterPods
      if (previousScatterStrategy && !currentScatterStrategy) {
        // applyCtx is only set by generatePodTemplate; guard so a direct call cannot crash here
        this.applyCtx?.log.warn(`change 'forceScatterPods' may not take effect, please edit/replace the 'affinity' block of the deployment directly`)
      }
    }
    // term matching all pods of this workload, keyed by hostname => one pod per node
    const scatter: V1PodAffinityTerm = {
      topologyKey: "kubernetes.io/hostname",
      labelSelector: {
        matchExpressions: [{
          key: 'workload',
          operator: 'In',
          values: [this.workloadName]
        }]
      }
    }
    if (this.workloadSpec.advancedFeatures?.forceScatterPods) {
      result.podAntiAffinity = {
        requiredDuringSchedulingIgnoredDuringExecution: [scatter]
      }
    } else {
      result.podAntiAffinity = {
        preferredDuringSchedulingIgnoredDuringExecution: [{
          podAffinityTerm: scatter,
          weight: 100
        }]
      }
    }
    return result
  }

  /**
   * Builds the pod volume list: user-declared volumes plus one volume per
   * engine-managed mount (emptyDir, hostPath, or PVC-backed storage).
   *
   * The user-supplied `workloadSpec.volumes` array is copied, not appended to,
   * so repeated generation does not accumulate duplicate volumes.
   */
  generatePodTemplateVolumes(): V1Volume[] {
    // copy to avoid mutating the caller-owned array
    const volumes: V1Volume[] = this.workloadSpec.volumes ? [...(this.workloadSpec.volumes as V1Volume[])] : []
    this.workloadSpec.mounts?.forEach(m => {
      if (m.type === "emptyDir") {
        volumes.push({
          name: m.name,
          emptyDir: {}
        })
      } else if (m.type === "hostPath") {
        volumes.push({
          name: m.name,
          hostPath: {
            path: m.hostPath || m.path, // fall back to the container mount path
            type: m.hostPathType || 'DirectoryOrCreate'
          }
        })
      } else if (m.type === "storage" && this.workloadSpec.type !== "StatefulSet") {
        // as for StatefulSet, just specify volumeMounts is fine, because storage declared in 'volumeClaimTemplates'
        volumes.push({
          name: m.name,
          persistentVolumeClaim: {
            claimName: this.workloadName + '-' + m.name
          }
        })
      }
    })
    return volumes
  }
}