
import {
  KubernetesObject, loadAllYaml, loadYaml, V1beta1PodDisruptionBudget, V1ClusterRole, V1ClusterRoleBinding, V1CronJobSpec, V1DaemonSetSpec,
  V1DeploymentSpec, V1DeploymentStrategy, V1JobSpec, V1PersistentVolumeClaim, V1PodTemplate, V1PodTemplateSpec, V1Role, V1RoleBinding, V1ServiceAccount, V1StatefulSetSpec, V1StatefulSetUpdateStrategy
} from "@kubernetes/client-node"
import { DiffResult, ExecuteContext, K8SResourceType, KV, Operator, PlanContext, PlanContextData } from "yam-types"
import { DeployWorkload, KubernetesPermissionRequest } from "../types.d"
import { DEFAULT_JOB_COMPLETIONS, DEFAULT_REPLICA_DEPLOYMENT, DEFAULT_REPLICA_STATEFUL, DEFAULT_REV_HISTORY_LIMIT, PERSISTENT_VOLUME_DEFAULT_SIZE, RBAC_API_GROUP, ROLE_VERB_ALIAS_MAP } from "./constants"
import PodTemplateFactory from "./pod-template"

export default class DeployWorkloadOperator implements Operator<DeployWorkload> {

  private workloadName: string
  private planContextData: PlanContextData
  private applyCtx: ExecuteContext
  private workloadSpec: DeployWorkload
  private podTemplateFactory: PodTemplateFactory
  private settings: KV

  /**
   * Operator entry point. Picks the workload out of the diff, applies the
   * CI-driven version override, then registers one deferred action that
   * composes all kubernetes resources, plus one action per extra yaml file.
   *
   * @param plan plan context carrying log, data and action registration
   * @param diff diff result whose first current item is the workload spec
   * @param settings operator-level settings (e.g. PDB defaults)
   */
  async operate(plan: PlanContext, diff: DiffResult<DeployWorkload>, settings: KV): Promise<void> {
    this.settings = settings
    this.workloadSpec = diff.currentItems[0]
    if (!this.workloadSpec) {
      plan.log.warn(`no workload to deploy, skipped`)
      return
    }

    // version override, by CI tool
    const provisionedVersion = plan.data.customizedValues["version"]
    if (provisionedVersion) {
      plan.log.info(`workload version is overridden by command line, version is: ${provisionedVersion}`)
      this.workloadSpec.version = provisionedVersion as string
    }

    this.planContextData = plan.data
    this.workloadName = plan.data.workloadName
    this.podTemplateFactory = new PodTemplateFactory(this.planContextData, this.workloadSpec, diff)

    // compose yaml files
    plan.log.info(`build spec for workload type ${this.workloadSpec.type}, workload changed: ${diff.hasDiff}`)
    plan.action(this.buildWorkloadResources, "build-workload-resources:" + plan.data.workloadName, this)

    // append additional files
    if (this.workloadSpec.extraFiles) {
      await this.handleExtraFiles(plan, this.workloadSpec.extraFiles)
    }
  }

  /**
   * Deferred action body: builds every resource that belongs to the workload,
   * in dependency-friendly order (ServiceAccount first, PDB last).
   */
  async buildWorkloadResources(ctx: ExecuteContext): Promise<void> {
    this.applyCtx = ctx

    // Provide individual ServiceAccount for each application
    this.buildServiceAccount()

    // When requesting K8S API server permissions, assign role and bind role to ServiceAccount
    this.buildRoleAndRoleBinding()

    // Compose the deployment/statefulset/daemonset resource object
    this.buildWorkloadMain()

    // Add PDB to avoid service unavailable issue when draining nodes or eviction
    this.buildPodDisruptionBudget()
  }

  /** Emit a dedicated ServiceAccount so pods do not run under 'default'. */
  buildServiceAccount(): void {
    const serviceAccountName = this.planContextData.serviceAccountName
    this.applyCtx.mergeToYaml({
      name: serviceAccountName,
      content: {
        kind: K8SResourceType.ServiceAccount,
        metadata: {
          name: serviceAccountName
        }
      } as V1ServiceAccount
    })
  }

  /**
   * Compose the main workload object (Deployment / StatefulSet / DaemonSet /
   * CronJob / Job) and merge it into the yaml output.
   *
   * @throws Error when the workload type has no handler or the CronJob
   *         schedule is missing
   */
  buildWorkloadMain(): void {
    const wl = this.workloadSpec
    const workloadPrototype = {
      kind: wl.type,
      metadata: {
        name: this.workloadName,
        labels: { version: wl.version || '' } as KV
      },
      spec: {
        selector: { matchLabels: { workload: this.workloadName } }
      }
    }
    const restart = this.planContextData.customizedValues["restart"] === "true"
    if (restart) {
      // a fresh timestamp label changes the pod template hash and forces a rollout
      workloadPrototype.metadata.labels.restart = new Date().toISOString()
      this.applyCtx.log.info(`'restart' flag detected, will force restart workload.`)
    }

    // compose pod template for all types of workloads
    const podTemplate: V1PodTemplateSpec = this.podTemplateFactory.generatePodTemplate(this.applyCtx)

    // for statefulset, use volumeClaimTemplates. for other workloads, append separate PVC resource to yaml list
    const volumeClaims = this.generateVolumeClaimTemplate()

    // handler map for different workloads
    const workloadPrototypeHandler = {
      /**
       * Deployment
       */
      [K8SResourceType.Deployment]: (spec: V1DeploymentSpec) => {
        spec.template = podTemplate
        spec.minReadySeconds = wl.minReadySeconds
        spec.replicas = wl.replicas || DEFAULT_REPLICA_DEPLOYMENT
        spec.revisionHistoryLimit = DEFAULT_REV_HISTORY_LIMIT
        spec.strategy = this.generateDeploymentRollingStrategy(spec.replicas)

        // compatible with kubernetes native API
        spec.progressDeadlineSeconds = wl.progressDeadlineSeconds as number
      },

      /**
       * StatefulSet
       */
      [K8SResourceType.StatefulSet]: (spec: V1StatefulSetSpec) => {
        spec.template = podTemplate
        spec.replicas = wl.replicas || DEFAULT_REPLICA_STATEFUL
        spec.revisionHistoryLimit = DEFAULT_REV_HISTORY_LIMIT
        spec.volumeClaimTemplates = volumeClaims
        // rolling update from index '0', indicates update all, not blue/green release
        spec.updateStrategy = wl.updateStrategy ? (wl.updateStrategy as V1StatefulSetUpdateStrategy) : {
          rollingUpdate: {
            partition: 0
          }
        }
        spec.podManagementPolicy = wl.podManagementPolicy as string
        spec.serviceName = wl.serviceName as string
      },

      /**
       * DaemonSet
       */
      [K8SResourceType.DaemonSet]: (spec: V1DaemonSetSpec) => {
        spec.template = podTemplate
        spec.minReadySeconds = wl.minReadySeconds
        spec.revisionHistoryLimit = DEFAULT_REV_HISTORY_LIMIT
        // bugfix: only emit the fields actually configured — 'new Object(undefined)'
        // produced '{}' for maxSurge/maxUnavailable when no rollingUpdate was given
        const rollingUpdate: { maxSurge?: object, maxUnavailable?: object } = {}
        if (wl.rollingUpdate?.maxSurge != null) {
          rollingUpdate.maxSurge = new Object(wl.rollingUpdate.maxSurge)
        }
        if (wl.rollingUpdate?.maxUnavailable != null) {
          rollingUpdate.maxUnavailable = new Object(wl.rollingUpdate.maxUnavailable)
        }
        spec.updateStrategy = { rollingUpdate }
      },

      /**
       * CronJob
       */
      [K8SResourceType.CronJob]: (spec: V1CronJobSpec) => {
        if (!wl.job?.cron) {
          throw new Error('empty/invalid cron expression for CronJob')
        }
        spec.schedule = wl.job?.cron
        spec.jobTemplate = {
          metadata: { labels: { workload: this.workloadName } },
          spec: this.generateJobTemplate(podTemplate)
        }
        spec.concurrencyPolicy = wl.job?.concurrencyPolicy
        spec.failedJobsHistoryLimit = wl.job?.failedJobsHistoryLimit as number
        spec.startingDeadlineSeconds = wl.job?.startingDeadlineSeconds as number
        spec.successfulJobsHistoryLimit = wl.job?.successfulJobsHistoryLimit as number
        spec.suspend = wl.job?.suspend as boolean
      },

      /**
       * Job
       */
      [K8SResourceType.Job]: (spec: V1JobSpec) => {
        Object.assign(spec, this.generateJobTemplate(podTemplate))
      },
    }
    const handler = workloadPrototypeHandler[wl.type]
    if (!handler) {
      // fail with a clear message instead of 'handler is not a function'
      throw new Error(`unsupported workload type: ${wl.type}`)
    }
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    handler(workloadPrototype.spec as any)
    this.applyCtx.mergeToYaml({
      name: this.workloadName,
      content: workloadPrototype
    })
  }

  /**
   * Split the requested permissions into cluster-scoped and namespaced groups
   * and generate a (Cluster)Role + (Cluster)RoleBinding pair for each group.
   */
  buildRoleAndRoleBinding(): void {
    const pms = this.workloadSpec.permissions
    if (!pms) {
      return
    }
    const groups = this.applyCtx._.groupBy(pms, (i) => i.clusterLevel ? 'true' : 'false')
    // bugfix: lodash groupBy omits absent keys entirely, so guard with '?.' —
    // 'groups["true"].length' threw when every permission was namespaced (and vice versa)
    if (groups["true"]?.length) {
      // cluster role / cluster role binding
      const clusterRules = groups["true"]
      const roleName = this.workloadName + '-cluster-role'
      const roleBindingName = this.workloadName + '-cluster-binding'
      this.generateRoleAndRoleBinding(true, clusterRules, roleName, roleBindingName)
    }
    if (groups["false"]?.length) {
      // namespaced role / role binding
      const namespacedRules = groups["false"]
      const roleName = this.workloadName + '-role'
      const roleBindingName = this.workloadName + '-binding'
      this.generateRoleAndRoleBinding(false, namespacedRules, roleName, roleBindingName)
    }
  }

  /**
   * Emit one (Cluster)Role with the given rules plus a (Cluster)RoleBinding
   * that binds it to the workload's ServiceAccount.
   *
   * @param clusterLevel true → ClusterRole/ClusterRoleBinding, false → namespaced pair
   * @param rules permission requests; comma-separated fields are split and trimmed
   * @param roleName metadata.name for the generated role
   * @param roleBindingName metadata.name for the generated binding
   */
  generateRoleAndRoleBinding(clusterLevel: boolean, rules: KubernetesPermissionRequest[], roleName: string, roleBindingName: string): void {
    const splitAndTrim = (str?: string) => str?.split(',').map(s => s.trim())
    const role: V1Role | V1ClusterRole = {
      kind: clusterLevel ? K8SResourceType.ClusterRole : K8SResourceType.Role,
      metadata: {
        name: roleName,
        labels: {
          workload: this.workloadName
        }
      },
      rules: rules.map(x => {
        let verbs: string[] = []
        splitAndTrim(x.verbs)?.forEach(v => {
          // expand verb aliases (e.g. read/write shorthands) before keeping the literal verb
          if (ROLE_VERB_ALIAS_MAP[v]) {
            verbs = verbs.concat(ROLE_VERB_ALIAS_MAP[v])
          }
          verbs.push(v)
        })
        return {
          apiGroups: splitAndTrim(x.group) || [""],
          resources: splitAndTrim(x.resources),
          resourceNames: splitAndTrim(x.resourceNames),
          verbs
        }
      })
    }
    const roleBinding: V1RoleBinding | V1ClusterRoleBinding = {
      kind: clusterLevel ? K8SResourceType.ClusterRoleBinding : K8SResourceType.RoleBinding,
      metadata: {
        name: roleBindingName,
        labels: {
          workload: this.workloadName
        }
      },
      roleRef: {
        apiGroup: RBAC_API_GROUP,
        // bugfix: reference the role generated above — the previous code always used
        // kind 'Role' and the hardcoded '<workload>-role' name, so a ClusterRoleBinding
        // pointed at a role that does not exist
        kind: clusterLevel ? K8SResourceType.ClusterRole : K8SResourceType.Role,
        name: roleName
      },
      subjects: [{
        // bugfix: ServiceAccount subjects belong to the core API group (""); the API
        // server rejects 'rbac.authorization.k8s.io' for subject kind ServiceAccount.
        // NOTE(review): ClusterRoleBinding subjects also require 'namespace' — assumed
        // to be injected by the mergeToYaml pipeline, verify downstream.
        apiGroup: "",
        kind: K8SResourceType.ServiceAccount,
        name: this.planContextData.serviceAccountName
      }]
    }
    this.applyCtx.mergeToYaml({
      name: roleName,
      content: role
    })
    this.applyCtx.mergeToYaml({
      name: roleBindingName,
      content: roleBinding
    })
  }

  /**
   * Build PVC specs for every 'storage' mount. For StatefulSet they are
   * returned for use in 'volumeClaimTemplates'; for other workloads each PVC
   * is emitted as a standalone resource.
   *
   * @returns the accumulated claim templates, or undefined when there are no mounts
   * @throws Error when a DaemonSet requests persistent storage
   */
  generateVolumeClaimTemplate(): V1PersistentVolumeClaim[] | undefined {
    const mounts = this.workloadSpec.mounts
    if (!mounts) {
      return
    }
    const storageClaims: V1PersistentVolumeClaim[] = this.workloadSpec.volumeClaimTemplates ?
      (this.workloadSpec.volumeClaimTemplates as V1PersistentVolumeClaim[]) : []
    mounts.forEach(m => {
      if (m.type === "storage") {
        const isStatefulSet = this.workloadSpec.type === "StatefulSet"
        if (this.workloadSpec.type === "DaemonSet") {
          throw new Error('can not bound persistent volume for daemonset')
        }
        if (!isStatefulSet && (this.workloadSpec.replicas || 2) > 1) {
          this.applyCtx.log.warn(`persistent volume: ${m.name} may fail when attempt to mount to multiple Pod replications`)
        }
        const pvcName = isStatefulSet ? m.name : (this.workloadName + '-' + m.name)
        const pvc: V1PersistentVolumeClaim = {
          metadata: {
            name: pvcName,
            labels: {
              app: this.planContextData.app,
              workload: this.workloadName
            }
          },
          spec: {
            storageClassName: m.storageClass,
            accessModes: [m.storageAccessMode || 'ReadWriteOnce'],
            volumeMode: 'Filesystem',
            resources: {
              requests: {
                storage: m.size || PERSISTENT_VOLUME_DEFAULT_SIZE
              }
            }
          }
        }
        // StatefulSet has field 'volumeClaimTemplates', but other workloads doesn't, create pvc by yaml
        // bugfix: previously the standalone PVC was emitted unconditionally, duplicating
        // the StatefulSet volumeClaimTemplates with a conflicting standalone resource
        if (!isStatefulSet) {
          this.applyCtx.mergeToYaml({
            name: pvc.metadata?.name || '',
            content: pvc
          })
        }
        // record pvc templates for StatefulSet
        storageClaims.push(pvc)
      }
    })
    return storageClaims
  }

  /**
   * Choose a rolling-update strategy sized to the replica count; an explicit
   * spec.rollingUpdate always wins over the computed defaults.
   *
   * @param r effective replica count of the deployment
   */
  generateDeploymentRollingStrategy(r: number): V1DeploymentStrategy {
    let defaultSurge: string | number
    let defaultUnavailable: string | number
    if (r < 4) {
      // when replica is small, allow larger surge to boost Pod upgrade process
      defaultSurge = r
      defaultUnavailable = '50%'
    } else if (r <= 32) {
      // when replica is medium, increase percentage to boost Pod upgrade process
      defaultSurge = '50%'
      defaultUnavailable = '50%'
    } else {
      // default value of native kubernetes, suitable for large scale deployment
      // reducing the pressure of image distribution
      defaultSurge = '25%'
      defaultUnavailable = '25%'
    }
    // 'new Object' boxes the int-or-string value to satisfy the client's 'object' typing
    return {
      rollingUpdate: {
        maxSurge: new Object(this.workloadSpec.rollingUpdate?.maxSurge || defaultSurge),
        maxUnavailable: new Object(this.workloadSpec.rollingUpdate?.maxUnavailable || defaultUnavailable),
      }
    }
  }

  /**
   * Build the V1JobSpec shared by Job and CronJob from the workload's job
   * configuration.
   *
   * @param podTemplate pod template spec produced by PodTemplateFactory
   */
  generateJobTemplate(podTemplate: V1PodTemplateSpec): V1JobSpec {
    const jobConf = this.workloadSpec.job
    return {
      template: podTemplate,
      backoffLimit: jobConf?.backoffLimit,
      ttlSecondsAfterFinished: jobConf?.ttlSeconds,
      completionMode: jobConf?.indexedPod ? 'Indexed' : 'NonIndexed',
      completions: (jobConf?.completions as number) || this.workloadSpec.replicas || DEFAULT_JOB_COMPLETIONS,
      parallelism: jobConf?.parallelism,

      activeDeadlineSeconds: jobConf?.activeDeadlineSeconds as number,
      manualSelector: jobConf?.manualSelector as boolean,
      suspend: jobConf?.suspend as boolean
    }
  }

  /**
   * Emit a PodDisruptionBudget for long-running workloads (skipped for Job /
   * CronJob). A default of 50% applies unless disabled via settings.
   */
  buildPodDisruptionBudget(): void {
    const spec = this.workloadSpec
    if (spec.type.includes('Job')) {
      return
    }
    if (this.settings.disableDefaultDisruptionBudget && !spec.disruptionBudgetPercent) {
      return
    }
    let pdbPercent = spec.disruptionBudgetPercent || '50%'
    if (!pdbPercent.endsWith('%')) {
      // bugfix: was "pdbPercent += + '%'" — the unary plus coerced '%' to NaN,
      // appending the literal string "NaN" instead of a percent sign
      pdbPercent += '%'
    }
    const pdbName = this.workloadName + '-pdb'
    this.applyCtx.mergeToYaml({
      name: pdbName,
      content: {
        kind: K8SResourceType.PodDisruptionBudget,
        metadata: {
          name: pdbName
        },
        spec: {
          maxUnavailable: new Object(pdbPercent),
          selector: {
            matchLabels: {
              workload: this.workloadName
            }
          }
        }
      } as V1beta1PodDisruptionBudget
    })
  }

  /**
   * Parse each extra yaml file and register a merge action per document.
   *
   * Single-document files keep the 'extra-file<name>' action id; multi-document
   * files get an '-<index>' suffix per document.
   */
  async handleExtraFiles(plan: PlanContext, extraFiles: string[]): Promise<void> {
    for (const extraFile of extraFiles) {
      const rawResource = await plan.renderTemplate(extraFile, false)
      try {
        // bugfix: parse eagerly — previously loadYaml ran inside the deferred action
        // callback, so this try/catch could never intercept a multi-document parse
        // failure and the loadAllYaml fallback was dead code
        const resource = loadYaml(rawResource) as KubernetesObject
        plan.action(async (ctx) => {
          ctx.mergeToYaml({
            name: 'extra-file' + extraFile,
            content: resource
          })
        }, 'add-extra-file:' + extraFile, this)
      } catch (e) {
        const resources = loadAllYaml(rawResource) as KubernetesObject[]
        // if loadAll succeed
        resources.forEach((resource, idx) => {
          plan.action(async (ctx) => {
            ctx.mergeToYaml({
              name: 'extra-file' + extraFile + '-' + idx,
              content: resource
            })
          }, 'add-extra-file:' + extraFile + '-' + idx, this)
        })
      }
    }
  }
}

