#!/usr/bin/env python3

# Namespace metadata fragment spliced into NAMESPACE_YAML at the {{nsAnnos}}
# placeholder.  The leading two spaces re-indent the stripped literal so that
# 'annotations:' nests under a namespace's 'metadata:' key.
NS_ANNOTATIONS = '  ' + '''
  annotations:
    istio-injection: "disabled"
    pod.network.openshift.io/multitenant.change-network: "global"
  labels:
    istio-injection: "disabled"
'''.strip()

# Pod tolerations appended verbatim to the workload templates below; the
# '\n      ' prefix re-indents the stripped literal to pod-spec depth so it
# continues the preceding YAML document.
DEPLOYMENT_TOLERATIONS = '\n      ' + '''
      tolerations:
      - key: "node-role.kubernetes.io/skiff"
        effect: NoSchedule
        operator: Exists
      - key: "node-role.kubernetes.io/master"
        effect: NoSchedule
        operator: Exists
'''.strip()

# Affinity fragment appended to the workload templates below.  Pins pods to
# nodes labeled skiff/nlb-control-panel=true and spreads replicas across
# hosts via pod anti-affinity.  Callers substitute {{labelKey}}/{{labelValue}}
# with the workload's own selector label before appending.
DEPLOYMENT_AFFINITY = '\n      ' +  '''
      affinity:
        nodeAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            nodeSelectorTerms:
            - matchExpressions:
              - key: skiff/nlb-control-panel
                operator: In
                values:
                - "true"
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
          - labelSelector:
              matchExpressions:
              - key: {{labelKey}}
                operator: In
                values:
                - {{labelValue}}
            topologyKey: kubernetes.io/hostname
'''.strip()

# The two namespaces used by the system: nlb-system (control plane) and
# nlb-runtime (per-tenant config).  {{nsAnnos}} is replaced with
# NS_ANNOTATIONS (see nlbNamespace below).
NAMESPACE_YAML = '''
apiVersion: v1
kind: Namespace
metadata:
  name: nlb-system
{{nsAnnos}}

---
apiVersion: v1
kind: Namespace
metadata:
  name: nlb-runtime
{{nsAnnos}}
'''.strip()

# Baseline RBAC: the operator service account plus a cluster role granting
# full control of core service/pod/configmap resources, write access to
# endpoints/events, and read access to everything else in the core group.
BASIC_SERVICE_ACCOUNT_YAML = '''
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nlb-operator-service-account
  namespace: nlb-system

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nlb-operator-cluster-role
rules:
  - apiGroups:
      - ""
    resources:
      - pods
      - services
      - services/status
      - services/finalizers
      - configmaps
    verbs:
      - "*"
  - apiGroups:
      - ""
    resources:
      - endpoints
      - events
    verbs:
      - get
      - list
      - watch
      - create
      - patch
      - update
  - apiGroups:
      - ""
    resources:
      - "*"
    verbs:
      - get
      - list
      - watch

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nlb-operator-cluster-role-binding
  namespace: nlb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nlb-operator-cluster-role
subjects:
  - kind: ServiceAccount
    name: nlb-operator-service-account
    namespace: nlb-system
'''.strip()

# SECURITY NOTE(review): this embeds a base64 dockerconfigjson containing a
# registry username/password for hub.c.163.com in plain source.  Consider
# moving the credential out of the codebase.
IMAGE_PULL_SECRET_YAML = '''
apiVersion: v1
kind: Secret
metadata:
  name: nlb-image-repo-secret
  namespace: nlb-system
type: kubernetes.io/dockerconfigjson
data:
  .dockerconfigjson: eyJhdXRocyI6eyJodWIuYy4xNjMuY29tIjp7InVzZXJuYW1lIjoiejU1Mzk5MjQ1M0AxNjMuY29tIiwicGFzc3dvcmQiOiJubGJwYXNzdzByZCIsImVtYWlsIjoiejU1Mzk5MjQ1M0AxNjMuY29tIiwiYXV0aCI6ImVqVTFNems1TWpRMU0wQXhOak11WTI5dE9tNXNZbkJoYzNOM01ISmsifX19
'''.strip()

# JSON fragment injected at {{imagePullSecrets}} in the workload templates
# when the default registry (and hence the secret above) is in use.
IMAGE_PULL_SECRETS_JSON = '[ { "name": "nlb-image-repo-secret" } ]'

# ConfigMap consumed by the service controller: cloud endpoints/credentials
# under the "general" key.  All {{...}} placeholders are substituted by the
# generator before emission.
SERVICE_CONTROLLER_CONFIG_YAML = '''
apiVersion: v1
kind: ConfigMap
metadata:
  name: netease.nlb.nlbgw
  namespace: nlb-system
data:
  general: |
    {
      "KeystoneUrlPrefix": "{{keystone}}",
      "ServiceUser": "{{serviceUser}}",
      "ServicePass": "{{servicePass}}",
      "GwTenantId": "{{gwTenantId}}",
      "ProtonApiUrlPrefix": "{{proton}}",
      "PaaSVPCUrlPrefix": "{{paasVPC}}"
    }
'''.strip()

# Variant of the ConfigMap above carrying only the "external" endpoint key.
SERVICE_CONTROLLER_CONFIG_EXTERNAL_YAML = '''
apiVersion: v1
kind: ConfigMap
metadata:
  name: netease.nlb.nlbgw
  namespace: nlb-system
data:
  external: {{externalUrl}}
'''.strip()

# Per availability-zone/network data entry appended to the ConfigMap's data
# section; {{json}} holds the gateway description.
SERVICE_CONTROLLER_CONFIG_AZ_NETWORK = '''
  "{{az}}.{{network}}": |
    {{json}}
'''.strip() # json = { GwAgentId, GwHostId, GwList: [ { HostPort } ], IPPool: [] }

# Cloud-provider config pointing the controller at the tenant and the
# namespace (nlb-runtime) that holds per-tenant network ConfigMaps.
USER_REF_CONFIG_YAML = '''
apiVersion: v1
kind: ConfigMap
metadata:
  name: cloud-configuration
  namespace: nlb-system
data:
  cloudconfig: |
    {
      "Tenant": "{{userTenantId}}",
      "ClusterConfigNamespace": "nlb-runtime"
    }
'''.strip()

# Per-tenant network defaults stored in nlb-runtime, keyed by tenant id.
USER_CONFIG_YAML = '''
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: nlb-runtime
  name: {{userTenantId}}-network
data:
  netease.com_loadbalancer_az: "{{defaultAz}}"
  netease.com_loadbalancer_bandwidth: "{{defaultBandwidth}}"
  netease.com_loadbalancer_network: "{{defaultNetwork}}"
  network_id: "{{networkId}}"
  subnet_ids: "{{subnetId}}"
  tenant_id: "{{userTenantId}}"
'''.strip()

# NSE endpoint (host:port) advertised to the control plane.
NSE_CONFIG_YAML = '''
apiVersion: v1
kind: ConfigMap
metadata:
  name: netease.nse
  namespace: nlb-system
data:
  address: "{{nseHostPort}}"
'''.strip()

# StatefulSet running the service controller on the host network, with the
# shared tolerations appended and the affinity fragment specialized to its
# own k8s-app label so replicas spread across control-panel nodes.
SERVICE_CONTROLLER_DEPLOYMENT = '''
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nlb-service-controller
  namespace: nlb-system
  labels:
    k8s-app: nlb-service-controller
spec:
  replicas: {{replicas}}
  selector:
    matchLabels:
      k8s-app: nlb-service-controller
  serviceName: nlb-service-controller
  template:
    metadata:
      labels:
        k8s-app: nlb-service-controller
    spec:
      volumes:
        - name: config
          configMap:
            name: cloud-configuration
      hostNetwork: true
      serviceAccountName: nlb-operator-service-account
      imagePullSecrets: {{imagePullSecrets}}
      containers:
        - name: nlb-service-controller
          args:
            - --cloud-config=/etc/kubernetes/cloudconfig
            - --concurrent-service-syncs=5
            - --leader-elect-resource-lock=configmaps
            - --cluster-name={{clusterName}}
          env:
            - name: USING_CLASSICAL
              value: "{{usingClassical}}"
          image: "{{imagePrefix}}/nlb-service-controller:{{version}}"
          imagePullPolicy: {{imagePullPolicy}}
          livenessProbe:
            failureThreshold: 5
            httpGet:
              host: 127.0.0.1
              path: /healthz
              port: 22253
              scheme: HTTP
            initialDelaySeconds: 15
            timeoutSeconds: 15
          volumeMounts:
            - name: config
              mountPath: /etc/kubernetes/
          resources:
            limits:
              cpu: 1000m
              memory: 2Gi
'''.strip() + DEPLOYMENT_TOLERATIONS + \
DEPLOYMENT_AFFINITY. \
    replace('{{labelKey}}', 'k8s-app'). \
    replace('{{labelValue}}', 'nlb-service-controller')

# Header of the "env" ConfigMap; the data entries below are concatenated onto
# it by the generator.
ENV_CONFIG_YAML = '''
apiVersion: v1
kind: ConfigMap
metadata:
  name: env
  namespace: nlb-system
data:
'''.strip()

# Per-AZ list of available network types.
NETWORK_TYPES_CONFIG = '''
  networkTypes.{{az}}: |
    {{network}}
'''.strip()

# Per-AZ/network IP pool entry.
IP_CONFIG = '''
  ips.{{az}}.{{network}}: |
    {{ip}}
'''.strip()

# Port-mapping entry.  NOTE(review): unlike its siblings this literal is not
# .strip()-ed, so it keeps surrounding newlines — presumably so entries can
# be concatenated directly; confirm before changing.
PORTMAP_CONFIG = '''
  portmap.{{ip}}: "{{mapped}}"
'''

# Per-(az, network) envoy gateway: a NodePort Service plus a single-replica
# StatefulSet pinned to {{node}}, running an istio pilot ("discovery") and an
# istio proxy container.  NOTE(review): the very low port numbers (e.g.
# targetPort 2, httpAddr :6, 127.4.4.4:4) look like an internal loopback/
# placeholder convention of the patched istio images — verify before reuse.
ENVOY_YAML = '''
apiVersion: v1
kind: Service
metadata:
  name: nlb-ingressgateway-{{az}}-{{network}}-{{suffix}}
  namespace: nlb-system
spec:
  selector:
    app: nlb-ingressgateway
    nlb: ingressgateway
    az: "{{az}}"
    network: "{{network}}"
  ports:
  - name: admin
    port: 80
    targetPort: 2
    nodePort: {{nodePort}}
  type: NodePort

---
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nlb-ingressgateway-{{az}}-{{network}}-{{suffix}}
  namespace: nlb-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nlb-ingressgateway
      nlb: ingressgateway
      az: "{{az}}"
      network: "{{network}}"
  serviceName: nlb-ingressgateway-{{az}}-{{network}}-{{suffix}}
  template:
    metadata:
      annotations:
        sidecar.istio.io/inject: "false"
      labels:
        app: nlb-ingressgateway
        nlb: ingressgateway
        az: "{{az}}"
        network: "{{network}}"
    spec:
      hostNetwork: {{hostNetwork}}
      imagePullSecrets: {{imagePullSecrets}}
      nodeSelector:
        kubernetes.io/hostname: "{{node}}"
      containers:
      - name: discovery
        env:
        - name: CLOUD_PROVIDER
          value: '{{cloudProvider}}'
        args:
        - discovery
        - --grpcAddr=127.4.4.4:4
        - --monitoringAddr=127.5.5.5:4
        - --httpAddr=0.0.0.0:6
        - --ctrlz_address=127.7.7.7
        - --ctrlz_port=4
        - --log_output_level=default:info
        - --domain
        - cluster.local
        - --secureGrpcAddr
        - ""
        - --trust-domain=cluster.local
        - --keepaliveMaxServerConnectionAge
        - 30m
        - --plugins
        - health
        - --disable-install-crds
        image: {{imagePrefix}}/nlb-istio-pilot:{{pilotVersion}}
        imagePullPolicy: {{pilotImagePullPolicy}}
        livenessProbe:
          failureThreshold: 2
          httpGet:
            path: /ready
            port: 6
            scheme: HTTP
          initialDelaySeconds: 5
          periodSeconds: 10
          successThreshold: 1
          timeoutSeconds: 5
        resources:
          requests:
            cpu: 10m
            memory: 100Mi
      - name: proxy
        args:
        - proxy
        - router
        - --domain
        - $(POD_NAMESPACE).svc.cluster.local
        - --proxyLogLevel=warning
        - --proxyComponentLogLevel=misc:error
        - --log_output_level=default:info
        - --drainDuration
        - 45s
        - --parentShutdownDuration
        - 1m0s
        - --connectTimeout
        - 10s
        - --serviceCluster
        - nlb-ingressgateway
        - --proxyAdminPort
        - "2"
        - --statusPort
        - "3"
        - --controlPlaneAuthPolicy
        - NONE
        - --discoveryAddress
        - 127.4.4.4:4
        - --trust-domain=cluster.local
        - --stsPort
        - "0"
        env:
        - name: NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: ISTIO_METAJSON_LABELS
          value: |
            {"app":"nlb-ingressgateway","nlb":"ingressgateway","az":"{{az}}","network":"{{network}}"}
        - name: SDS_ENABLED
          value: "false"
        image: {{imagePrefix}}/nlb-istio-proxy:{{proxyVersion}}
        imagePullPolicy: {{proxyImagePullPolicy}}
        readinessProbe:
          failureThreshold: 10
          httpGet:
            path: /healthz/ready
            port: 3
            scheme: HTTP
          initialDelaySeconds: 1
          periodSeconds: 2
          successThreshold: 1
          timeoutSeconds: 1
        resources:
          requests:
            cpu: 10m
            memory: 40Mi
        securityContext:
          capabilities:
            add: ['NET_ADMIN']
      restartPolicy: Always
      serviceAccountName: nlb-operator-service-account
'''.strip() + DEPLOYMENT_TOLERATIONS

# CustomResourceDefinition (apiextensions v1beta1) for
# loadbalancers.nlb.netease.com with an OpenAPI v3 validation schema and
# printer columns.  NOTE(review): v1beta1 normally expects
# `subresources.status: {}`; the schema nested under it here is likely
# ignored by the apiserver — confirm against the target cluster version.
CRD = '''
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: loadbalancers.nlb.netease.com
spec:
  group: nlb.netease.com
  versions:
  - name: v1alpha1
    served: true
    storage: true
  scope: Namespaced
  names:
    plural: loadbalancers
    singular: loadbalancer
    kind: LoadBalancer
    shortNames:
    - nlb
  validation:
    openAPIV3Schema:
      type: object
      required:
      - spec
      properties:
        spec:
          type: object
          required:
          - network
          properties:
            bandwidth:
              type: integer
              minimum: 1
              maximum: 16000
            network:
              type: string
              # available enum values may vary
              # among different environments
              minLength: 1
              maxLength: 16
            enable:
              type: boolean
            listeners:
              type: array
              items:
                type: object
                required:
                - port
                properties:
                  name:
                    type: string
                    pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?'
                    minLength: 1
                    maxLength: 20
                  protocol:
                    type: string
                    enum: [ 'tcp' ]
                  port:
                    type: integer
                    # reserve some ports
                    minimum: 10
                    maximum: 65500
                  algorithm:
                    type: string
                    enum: [ 'roundrobin', 'leastconn' ]
                  routes:
                    type: array
                    items:
                      type: object
                      required: []
                      properties:
                        cluster:
                          type: string
                          pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?'
                          minLength: 1
                          maxLength: 20
                  strictGwRoute:
                    type: boolean
            clusters:
              type: array
              items:
                type: object
                required:
                - name
                - selector
                - targetPort
                properties:
                  name:
                    type: string
                    pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?'
                    minLength: 1
                    maxLength: 20
                  selector:
                    type: object
                    additionalProperties:
                      type: string
                  targetPort:
                    type: integer
                    minimum: 1
                    maximum: 65535
                  subsets:
                    type: array
                    items:
                      type: object
                      required:
                      - name
                      - labels
                      - weight
                      properties:
                        name:
                          type: string
                          pattern: '[a-z0-9]([-a-z0-9]*[a-z0-9])?'
                          minLength: 1
                          maxLength: 20
                        labels:
                          type: object
                          additionalProperties:
                            type: string
                        weight:
                          type: integer
                          minimum: 0
                          maximum: 100
  subresources:
    status:
      type: object
      properties:
        ipv4:
          type: string
        phase:
          type: string
  additionalPrinterColumns:
  - name: External-IP
    type: string
    JSONPath: .status.ipv4
  - name: Phase
    type: string
    JSONPath: .status.phase
  - name: Age
    type: date
    JSONPath: .metadata.creationTimestamp
'''.strip()

# Extra RBAC granting the operator full control of the LoadBalancer CRD
# resources; bound to the same service account created in
# BASIC_SERVICE_ACCOUNT_YAML.  Emitted only when the operator is enabled.
OPERATOR_SERVICE_ACCOUNT = '''
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nlb-operator-crd-cluster-role
rules:
  - apiGroups:
      - nlb.netease.com
    resources:
      - loadbalancers
      - loadbalancers/status
      - loadbalancers/finalizers
    verbs:
      - "*"

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: nlb-operator-crd-cluster-role-binding
  namespace: nlb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: nlb-operator-crd-cluster-role
subjects:
  - kind: ServiceAccount
    name: nlb-operator-service-account
    namespace: nlb-system
'''.strip()

# StatefulSet running the nlb operator, with shared tolerations appended and
# the affinity fragment specialized to its k8s-app label.
OPERATOR_YAML = '''
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: nlb-operator
  namespace: nlb-system
  labels:
    k8s-app: nlb-operator
spec:
  replicas: {{replicas}}
  selector:
    matchLabels:
      k8s-app: nlb-operator
  serviceName: nlb-operator
  template:
    metadata:
      labels:
        k8s-app: nlb-operator
    spec:
      hostNetwork: false
      serviceAccountName: nlb-operator-service-account
      imagePullSecrets: {{imagePullSecrets}}
      containers:
        - name: nlb-operator
          args: []
          image: "{{imagePrefix}}/nlb-operator:{{version}}"
          imagePullPolicy: {{imagePullPolicy}}
          env:
          - name: WATCH_NAMESPACE
            value: ''
          - name: POD_NAME
            valueFrom:
              fieldRef:
                fieldPath: metadata.name
          - name: CLOUD_PROVIDER
            value: '{{cloudProvider}}'
          readinessProbe:
            failureThreshold: 5
            tcpSocket:
              port: 8383
            initialDelaySeconds: 15
            timeoutSeconds: 15
          resources:
            limits:
              cpu: 1000m
              memory: 2Gi
'''.strip() + DEPLOYMENT_TOLERATIONS + \
DEPLOYMENT_AFFINITY. \
    replace('{{labelKey}}', 'k8s-app'). \
    replace('{{labelValue}}', 'nlb-operator')

# keepalived VRRP configuration template for gateway VIP failover.  The
# health check curls the envoy admin readiness endpoint; {{track_script}} is
# replaced with KEEPALIVED_TRACK_SCRIPT (or presumably left empty when the
# check is not wanted — confirm at the call site).
KEEPALIVED_CONF = '''
global_defs {
    router_id {{az}}-{{network}}
    script_user root
}
vrrp_script check_envoy {
    script "/usr/bin/curl --connect-timeout 1 --max-time 2 127.0.0.1:2/ready"
    interval 5
}
vrrp_instance VI_{{vrrp}} {
    state {{role}}
    interface {{nic}}
    virtual_router_id {{vrrp}}
    priority {{priority}}
    advert_int 1
    garp_master_refresh 30
    authentication {
        auth_type PASS
        auth_pass keepalived@{{vrrp}}
    }
    {{track_script}}
    virtual_ipaddress {
{{ips}}
    }
    unicast_peer {
{{peerIps}}
    }
}
'''.strip()

# Optional track_script stanza spliced into KEEPALIVED_CONF above.
KEEPALIVED_TRACK_SCRIPT = '''
    track_script {
        check_envoy
    }
'''.strip()

# Shell command template opening TCP traffic to a gateway VIP via firewalld.
FIREWALLD = '''
firewall-cmd --direct --permanent --add-rule ipv4 filter INPUT 0 --destination {{vip}} --protocol tcp -j ACCEPT
'''.strip()
# Companion command applying the permanent rules above.
FIREWALLD_RELOAD = '''
firewall-cmd --reload
'''.strip()

# Prometheus exporter for the dpvs gateways: NodePort Service plus a
# host-network Deployment, with shared tolerations and the affinity fragment
# specialized to its nlb-app label.
DPVS_EXPORTER = '''
apiVersion: v1
kind: Service
metadata:
  name: dpvs-exporter
  namespace: nlb-system
spec:
  selector:
    nlb-app: dpvs-exporter
  ports:
  - name: prometheus
    port: 80
    targetPort: 8
    nodePort: {{nodePort}}
  type: NodePort

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: dpvs-exporter
  namespace: nlb-system
spec:
  replicas: 1
  selector:
    matchLabels:
      nlb-app: dpvs-exporter
  template:
    metadata:
      labels:
        nlb-app: dpvs-exporter
    spec:
      hostNetwork: true
      serviceAccountName: nlb-operator-service-account
      imagePullSecrets: {{imagePullSecrets}}
      containers:
        - name: dpvs-exporter
          args:
          - '--gateways'
          - '{{gwList}}'
          - '--zone'
          - '{{clusterName}}'
          - '--external'
          - '{{external}}'
          image: "{{imagePrefix}}/dpvs-exporter:{{version}}"
          imagePullPolicy: {{imagePullPolicy}}
          livenessProbe:
            failureThreshold: 5
            tcpSocket:
              port: 8
            initialDelaySeconds: 15
            timeoutSeconds: 15
          resources:
            limits:
              cpu: 500m
              memory: 2Gi
'''.strip() + DEPLOYMENT_TOLERATIONS + \
DEPLOYMENT_AFFINITY. \
    replace('{{labelKey}}', 'nlb-app'). \
    replace('{{labelValue}}', 'dpvs-exporter')

# Metrics adaptor bridging gateway exporters to Prometheus: ClusterIP Service
# plus a host-network Deployment listening on 22290, with shared tolerations
# and the affinity fragment specialized to its nlb-app label.
EXPORTER_ADAPTOR = '''
apiVersion: v1
kind: Service
metadata:
  name: exporter-adaptor
  namespace: nlb-system
spec:
  selector:
    nlb-app: exporter-adaptor
  ports:
  - name: prometheus
    port: 80
    targetPort: 22290
  type: ClusterIP

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: exporter-adaptor
  namespace: nlb-system
spec:
  replicas: 1
  selector:
    matchLabels:
      nlb-app: exporter-adaptor
  template:
    metadata:
      labels:
        nlb-app: exporter-adaptor
    spec:
      hostNetwork: true
      serviceAccountName: nlb-operator-service-account
      imagePullSecrets: {{imagePullSecrets}}
      containers:
        - name: exporter-adaptor
          args:
          - '-t'
          - '{{adaptorType}}'
          - '-a'
          - '{{adaptorAddresses}}'
          - '-p'
          - '22290'
          env:
          - name: PYTHONUNBUFFERED
            value: '1'
          image: "{{imagePrefix}}/nlb-exporter-adaptor:{{version}}"
          imagePullPolicy: {{imagePullPolicy}}
          livenessProbe:
            failureThreshold: 5
            tcpSocket:
              port: 22290
            initialDelaySeconds: 15
            timeoutSeconds: 15
          resources:
            limits:
              cpu: 500m
              memory: 2Gi
'''.strip() + DEPLOYMENT_TOLERATIONS + \
DEPLOYMENT_AFFINITY. \
    replace('{{labelKey}}', 'nlb-app'). \
    replace('{{labelValue}}', 'exporter-adaptor')

# Complete manifest set for the mutating-webhook sidecar injector: service
# account + RBAC, TLS secret, the sidecar template ConfigMap, the
# MutatingWebhookConfiguration, a Service, and the injector Deployment.
# SECURITY NOTE(review): the Secret embeds a TLS certificate AND its RSA
# private key in source, and the webhook caBundle is the matching hardcoded
# CA — these should be generated/rotated per install, not committed.
SIDECAR_INJECTOR = '''
apiVersion: v1
kind: ServiceAccount
metadata:
  name: nlb-sidecar-injector
  namespace: nlb-system

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: nlb-sidecar-injector
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get","watch","list"]

---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: nlb-sidecar-injector
subjects:
  - kind: ServiceAccount
    name: nlb-sidecar-injector
    namespace: nlb-system
roleRef:
  kind: ClusterRole
  name: nlb-sidecar-injector
  apiGroup: rbac.authorization.k8s.io

---
apiVersion: v1
kind: Secret
metadata:
  name: nlb-sidecar-injector
  namespace: nlb-system
data:
  sidecar-injector.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUUyakNDQXNLZ0F3SUJBZ0lKQUxEWGRJeGVaUmVVTUEwR0NTcUdTSWIzRFFFQkN3VUFNSUdITVFzd0NRWUQKVlFRR0V3SkRUakVSTUE4R0ExVUVDQXdJV21obGFtbGhibWN4RVRBUEJnTlZCQWNNQ0VoaGJtZDZhRzkxTVJBdwpEZ1lEVlFRS0RBZE9aWFJsWVhObE1TQXdIZ1lEVlFRRERCZHViR0l0YzJsa1pXTmhjaTFwYm1wbFkzUnZjaTFEClFURWVNQndHQ1NxR1NJYjNEUUVKQVJZUGJuVnNiRUJmWDBSUFRVRkpUbDlmTUNBWERUSXdNRGd4T0RBNU1qZzAKTkZvWUR6UTNOVGd3TnpFMU1Ea3lPRFEwV2pCa01Rc3dDUVlEVlFRR0V3SkRUakVSTUE4R0ExVUVDQXdJV21obAphbWxoYm1jeEVUQVBCZ05WQkFjTUNFaGhibWQ2YUc5MU1SQXdEZ1lEVlFRS0RBZE9aWFJsWVhObE1SMHdHd1lEClZRUUREQlJ1YkdJdGMybGtaV05oY2kxcGJtcGxZM1J2Y2pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVAKQURDQ0FRb0NnZ0VCQUtBMHBUZHhIWkZ4cWwrWVMvZGJ5ZkJIc1RGczJGTHJ3d2kwUWxwdlpVWG1kMTZGbjFVUQpFUlNXLzVEdWg0K256cTVZVGFlVnZIcVlsM1BMc1c5L0lnOXFadWJ6OGY2bUJFaUR1bUd6M0wxZDhaSDJJR1EzCjBWcGFIN0xxZG5lbzk2S3hFUzNkV2ZzWlBXQ0FVaDhFeUs3OS9MWFB3SFRsMHRlSmpMaVJKVzQ0bnZ5K0VodnYKaHB1eW1yQTVlVitGYnJBc2pJajNEcm1lVGdZcXQ3UmF0VkNtZ0NxZXJSVWhWVFAxcjBnSjFRVnJ2Y0Y4K1lQNApoMGtpNVE4MVdUZHBSV25DUnRjOTVMR1kxMXJRTHZJR2psL0RQS3UwOW5pc1dxQTd6UE9GTnNzWTN3Y0ZTYW04CjNrVkdDUkMxOWV0emcwQ3NNalBzWlYwcnJ2eXRWQWJhR1FzQ0F3RUFBYU5wTUdjd1pRWURWUjBSQkY0d1hJSVUKYm14aUxYTnBaR1ZqWVhJdGFXNXFaV04wYjNLQ0gyNXNZaTF6YVdSbFkyRnlMV2x1YW1WamRHOXlMbTVzWWkxegplWE4wWlcyQ0kyNXNZaTF6YVdSbFkyRnlMV2x1YW1WamRHOXlMbTVzWWkxemVYTjBaVzB1YzNaak1BMEdDU3FHClNJYjNEUUVCQ3dVQUE0SUNBUUJHZVU2ZjU2RmhBQTlhTXRqUHdEcnhWSGExRCtGVDFVbU5vQ3JCdzFsRUphYm4KdlhjMEg3V3BtUFlORFFLZW4ybDNrUTdwVGNQOVd0SjBGSEJuWk51UHRzdHVGWmRueVhGOGxySk9rM2ROSEwzWQovT3dlV25Zak5oUEttL3J2NVhRWHM4SzdHN3RTcUdnaXZvV2tlT1N0YnM0R0gwd0pnRmcxZk12blpRcm5BWDlpCnFRR1BSdU9BdXRwd0c1SU9tRE1uK0RLaS9icklCcVlvRWdncnhhRUwxN2tVeUVaTk5xU3BxekRWTWxLN3JRdmMKQ3lnNXV0SHNvdVBSMmNlSk1LRGpZaDUvVHltbEN3WnUraDU4T2hCb3VWdFNSSHRSbVd1TmlKUjA0THhrRTJVagprbDlBRG9RbmpCd1NnWjBVMUVtUXdkbjlxT0dVOEhQYVRiMlN1bkRaRTRjUlVCVzJkK1hLTHV4VWRtdTVaSWQ4CnJsN1BtVU1kQVNvR2M0Q3pWN2w1V2JYdldaNUR2a016a1dCNWQzaklVbXdZeE9ieStRd1ZReml5Ui8xREtOdGsKdlcwT0ZvT1dlc0hrSmJjNHZwTmdCdWdM
cDh4NDRjOHlSN3RTTFZPMzNRcExEdVVCRzg0bXdrcUg0ODJLVzdQRgoyNDl0cTFDU3NyL2ZuTlpzSG9XS3J5U2VGY2YzbUozM0F2OHVKMklXREdzZEdnSHhTelV4eUE4OWJ6QUNqd1dwCmFqSElYZTI4N0haUC9OLyt6RWVzdjJoNllxRmJUMVhzUjljNHpRaURnQlFHUUFwOWZZcm0vVE9yVGlzcWplQ0IKdGtoQlRVdmMwMFdKOVRWNUw3bmRxNGNNVlJzNVRldjVzelFEZkhnUlVZVzlaYjgreTZIQ0o5RWxRTUx6a1E9PQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0tCg==
  sidecar-injector.key: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb3dJQkFBS0NBUUVBb0RTbE4zRWRrWEdxWDVoTDkxdko4RWV4TVd6WVV1dkRDTFJDV205bFJlWjNYb1dmClZSQVJGSmIva082SGo2Zk9ybGhOcDVXOGVwaVhjOHV4YjM4aUQycG01dlB4L3FZRVNJTzZZYlBjdlYzeGtmWWcKWkRmUldsb2ZzdXAyZDZqM29yRVJMZDFaK3hrOVlJQlNId1RJcnYzOHRjL0FkT1hTMTRtTXVKRWxiamllL0w0UwpHKytHbTdLYXNEbDVYNFZ1c0N5TWlQY091WjVPQmlxM3RGcTFVS2FBS3A2dEZTRlZNL1d2U0FuVkJXdTl3WHo1CmcvaUhTU0xsRHpWWk4ybEZhY0pHMXoza3NaalhXdEF1OGdhT1g4TThxN1QyZUt4YW9Edk04NFUyeXhqZkJ3VkoKcWJ6ZVJVWUpFTFgxNjNPRFFLd3lNK3hsWFN1dS9LMVVCdG9aQ3dJREFRQUJBb0lCQURoUkNWc3ArbGpzZWhYTQp0YmVvbnA1R3RYVklhK3NLVWE4M25yT253UlFIWmlMbUdoS082Vkl4bW5sYVFIRXhiaWcvaGx6SStFNm9ZV1VsClQ2U0FMbnI0emwrVmxaWU15aWlybGROT3BPUTBwL3pGWnJHVUpaWDdaSFZDa282UjZTdFp6MDR1SjdsckwvWkMKWlp2ZDFLWWZScnlxNFY2Mmc1MmREc2k2elV1bk1Xa3RUV3ZsQXNRR3BMS2xGRCtjS1d1L05XVlBqcjhVdlpRYQppcHRvR0F0MUkyUzI4RjRSWitldXNYejYrdDQ4SDhGNjNTTlRCVE9zaEtxMU9wbDkzbXovaHZPWVZEVUdMUWtIClZVVUlTTmVlVzhoaDZrOW5uNWpYbHJ6aVdDeDR0VGwwaDI5cElvQkx2dGdpN0s4ZnJrbndMcGNLOVNoV2NESG8KRWgxUkQ1RUNnWUVBekdhb0lMcnhDRUNPTVExQmRMdWFpeDQ2UHhlV25Pc1ZXV0lGb2NGWCtVWWJGT3puMnZNRAp6SFdObHRLVHZBdEl6Y3JVcFhWQld1V1R2dlloR0hDKysrMHNlS2FQeGpZMDVPSzd5a1JXazk4THN1ZjFsRTNhCi9QWEIxQ1ZlR1hSQm1ac1VlRnJOSU5yLzZKcHFzQWU4SEZVN0V0S0NsMDFPcTk5eTFUY2MxRThDZ1lFQXlLWGgKRUtLTThXdlJJWFhnNVNRYUZPTHlUK0c3UUhaSjRFdjFSaENzRVFKVXE5N2JsdnZOcnQ1S3FHTFZFQ1ArNllBcwpkMTZjRGRoZlYwUXg4OExuY3VjZzF4eWVwendBdk9VOWwvSU55YjU4bXpsK2w2Vm9NS2dtNXhaZWg2YUc0OG9FCnJSVnAvdlhpSUsrR0Yxb0M4czJBTDBlRzlmWlBYeW1yeXptVWRJVUNnWUFQc242TTdPNWZ6TjFlcFA3MjRRVncKVTZUemZYTkNuOGNtOHlFdGwvU1NuekNyUGJwY2xUY2dkNUtsRjR6aTd4YnJQdjF2cTM2V1pYVWdRRFN3ZE5BbAo5c1BvakJvVWhjc3dITDJCUGhoTUNPQVVJRDQ2NG5QRDQzZThQQlpxN0FPeHNUZzBPZnp3M2dDek0xQ29MaC9qCktTUjlRSTlvNFRYK2V3Mk5sRXJ4ZndLQmdGM3poaXBDNHRvY2tRVW83QkIvVWhNYm5UaVlTeFRKRHBpUEdKOGgKS1dDaGhjSWdqUlBxZmduMm5tQ3dQbkhiV1A0Ky9STTc0cmg4NjAreVorMXJBdk1EbEpZRkdhalc2TVdLVmNmegpHRSt4K1NpNXlEUDd5SWlZQS9kS3V5S3JjQmZ6UFROS2hxSlN3ZnBxRmRDVFhqUVEwNGZQNXpyK2hYWGF6azFQCmZkcjlBb0dCQUpVblkwT1NneTVz
elQ4OGtEWHdHUGtGZ2lBZklSV0tpZTkrTXZnQ2tKSUZsTzI4bFl2ODZxdWwKS3hldTZiTHB4Vk93OUcvOFhhS2N4R1lxZ3Y3NVN5VDAxS244cVIrYUJFL0pOMW50QWE2Q1g0UHdsRUhRMlYxUgpEdy9FQkh1bW5iWlU2VHJpMC9peGFBZVJCdWZGbGFZMGlyYlhoQVh0S2k4RlExWDBibFpHCi0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
type: Opaque

---
apiVersion: v1
kind: ConfigMap
metadata:
  name: sidecar-backend-proxy
  namespace: nlb-system
data:
  nlb-backend-agent.yaml: |
    name: nlb-backend-agent
    skipHostNetwork: true
    containers:
    - name: nlb-proxy
      image: {{agentImagePrefix}}/proxy-agent:{{agentVersion}}
      args:
      - 'backend' # mode
      - '1'       # thread count
      - '80'      # to-backend-mark
      - 'none'    # method
      - '27746'   # bind-port
      imagePullPolicy: {{agentImagePullPolicy}}
      securityContext:
        capabilities:
          add: ['NET_ADMIN']
      livenessProbe:
        failureThreshold: 2
        tcpSocket:
          port: 27746
        initialDelaySeconds: 5
        timeoutSeconds: 5
    - name: nlb-iptables
      image: {{iptablesImagePrefix}}/proxy-agent-iptables:{{iptablesVersion}}
      args:
      - 'mode=backend'
      - 'cap_mark=80'
      - 'set_mark=40'
      - 'mask=c0'
      - 'table=101'
      - 'priority=19950'
      - 'port=27746'
      imagePullPolicy: {{iptablesImagePullPolicy}}
      securityContext:
        capabilities:
          add: ['NET_ADMIN']

---
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
  name: "nlb-sidecar-injector-webhook"
webhooks:
- name: "injector.nlb.netease.com"
  failurePolicy: "Ignore"
  rules:
  - operations: [ "CREATE" ]
    apiGroups: [""]
    apiVersions: ["v1"]
    resources: ["pods"]
  clientConfig:
    service:
      name: "nlb-sidecar-injector"
      namespace: "nlb-system"
      path: "/mutate"
    caBundle: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUZqakNDQTNZQ0NRREsyRy9ZVTg0cnZqQU5CZ2txaGtpRzl3MEJBUXNGQURDQmh6RUxNQWtHQTFVRUJoTUMKUTA0eEVUQVBCZ05WQkFnTUNGcG9aV3BwWVc1bk1SRXdEd1lEVlFRSERBaElZVzVuZW1odmRURVFNQTRHQTFVRQpDZ3dIVG1WMFpXRnpaVEVnTUI0R0ExVUVBd3dYYm14aUxYTnBaR1ZqWVhJdGFXNXFaV04wYjNJdFEwRXhIakFjCkJna3Foa2lHOXcwQkNRRVdEMjUxYkd4QVgxOUVUMDFCU1U1Zlh6QWdGdzB5TURBNE1UZ3dPVEk0TXpoYUdBODAKTnpVNE1EY3hOVEE1TWpnek9Gb3dnWWN4Q3pBSkJnTlZCQVlUQWtOT01SRXdEd1lEVlFRSURBaGFhR1ZxYVdGdQpaekVSTUE4R0ExVUVCd3dJU0dGdVozcG9iM1V4RURBT0JnTlZCQW9NQjA1bGRHVmhjMlV4SURBZUJnTlZCQU1NCkYyNXNZaTF6YVdSbFkyRnlMV2x1YW1WamRHOXlMVU5CTVI0d0hBWUpLb1pJaHZjTkFRa0JGZzl1ZFd4c1FGOWYKUkU5TlFVbE9YMTh3Z2dJaU1BMEdDU3FHU0liM0RRRUJBUVVBQTRJQ0R3QXdnZ0lLQW9JQ0FRQy91VmdmcXlLRgoveEhuUGxvaDRxam5FSmo5R2pHRlFtVjd4ajJ4V2wxOUdyR09lZVpleG9MWllNZG5xcGFtbnk4QUFMMlVjc2JOCmxFQ3ZUdEVDcVovWDhkTE10R2V1bVk1cmxSZ0FRajRINmtnU2R2ZE5PeEVmZzA0QXFpbkVYbGtOOVVYUUljNUIKY3ArM0F3QWpjWnFsM0RrLzFFUFk0VDN2c1FRZWk3UjhvOThUbzBNVXJndmZKbG82aEtBRGRXVkZianRqYU9GRApPa3BIUjUwaHYvbSt4Y0lWb2RiRGI0UTI0a0JmcWM5OE1NaVRsT2FtZkFtOGJoMlgrQVRLQm9RNGFHU3hVUHJ4ClBld0M5UHU5ckhaeHIzeFZRbDFuMWF2MU5TRW9iY1B1QXFZYlBydmpBV3NSZGlYTXZrM3p2K1VsaXdmMkZPNjcKSGhDK2h1SWo4T3F2c2xXTE9oVk00VWdEaTJiWjc0dHF0c2p1Qk90bXl6ZEd2ZUtKcDRsYU9PWC9TL2d3L1FTOQpEWmFrUDFxY2lsWTI0NzIzOFp6TDhEZ3ZHbXdzR1JsWDRwK2VFQ2FwdFY5dWEva0FhL0o5aTJCcDNDdXozSEEwCitIQkNKdkJBRUp2VlJSb3VQRXRxTlB4ZFR1cG1nMDBsZHZaMFF4MmlsYURPaGdYVXJJT3BVV1M2QjBYeU52M0wKV290dFZ3Y1BhNGNwSmQzYnRDOVd3dTNtRnFROVZ0cjJyVzVaWWJmZGw1UUE5am0yVGRRc2hJMDlEWkFTUXpJVwpITkx5S1EvL09xVGQ1d05uZURxUDMzR2h3ZG0yQis2Y09vWlhXV2FHZzA1cy9lWDFzb3pYWlNaeEZZQ0dDbWJBCnl2UlVGaU5pQm1mdFJ6YkFpb0RwVjdnZGxibEo3K3RmcXdJREFRQUJNQTBHQ1NxR1NJYjNEUUVCQ3dVQUE0SUMKQVFDdVU4V3I3YTA3Qkt1OGx5Vi9YWHVWWU1tVnhHNVRyUEdCNnlPNTN5Q1k4VGR1bm8zWXByRmNlTTlWVEdnbgo5elBoVzdtVFBHQjNaMTVTTG5qR1l6SmFReGFWMWxlR0ZaWWJUL3lNK0trNGdWbExIOGF5WnJVbkYxYm94VDZwCm5tbWU3NnlXOG1xQVczUGxWbWVQbUpZaWt1ZkwwbkVYQXA1ZDUzU0hXajVVRTFZbGxZTnJpdXVHZTQ5Vkh0cFAKT3Z6ZXFlRm4rZ2RUekJSN1ZKZHBGbmd6YkRSMlR3RU
5YVnFqeVk1WnVsdDRveW1PK2l1OVVzSFQ2SWxzQS9reQo4endBZHNibnlMZTYyeHVZMVZ1SjU4c1FmT1BNK3RBODhwK0ZCeGNIcHQvbTZUZ1lYZ1NJOGtKOUFuTDJMZ1lLCkQ2ekI5TE1ISmNGbnluemlJUENCajdNL2k5bzY4YTBab1VseXNBSGxKRnU0Qmk1TTUrNmE2b1QyVmNxRWhJNHoKcE9wTCtNK2ZEL0tseDF1anAxVFkwK2ZVcFd0L0J1U01maXhuZXBxRVdLYjJtTEZ4T1JkUWV5S2poNmxHQVMxcQpyQkVIZm8rMFVhRjFPUHVTTE5mZmliazBqcmhFUytEcHl1cTRtUmhCejZtL0pLZm9yTFZPQ2xPKzkzQS9mWU84CkN1czAvSlA2dFQzTHVITVF2TUdkT3hqUkhaS2ZqRDRHNExPZjg4Z3N4eUxZQUttOXVNUFVPSWo5OXpjc3FiTmsKaEF1bjA3Ymo0N2lwbUtBL1RUajVnZFMwNUVHaWdkeVQwc2VxeG56bzZPQkw0TkQxYy94NGdGTFMzMG1ZTEV5UAorSjk0ZENrbmN5azZTOHZJYUkrcTdTRmxBOGg4VzdCQjZzZ3hwR3ZyVHdDNnFRPT0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=

---
apiVersion: v1
kind: Service
metadata:
  name: nlb-sidecar-injector
  namespace: nlb-system
spec:
  type: ClusterIP
  ports:
  - name: https
    port: 443
    targetPort: https
    protocol: TCP
  - name: http-metrics
    port: 80
    targetPort: http-metrics
    protocol: TCP
  selector:
    nlb-app: nlb-sidecar-injector

---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: "nlb-sidecar-injector"
  namespace: "nlb-system"
spec:
  replicas: {{injectorReplicas}}
  selector:
    matchLabels:
      nlb-app: nlb-sidecar-injector
  template:
    metadata:
      labels:
        nlb-app: "nlb-sidecar-injector"
    spec:
      serviceAccountName: nlb-sidecar-injector
      volumes:
      - name: secrets
        secret:
          secretName: nlb-sidecar-injector
      - name: confs
        configMap:
          name: sidecar-backend-proxy
      containers:
      - name: "nlb-sidecar-injector"
        imagePullPolicy: {{injectorImagePullPolicy}}
        image: {{injectorImagePrefix}}/nlb-sidecar-injector:{{injectorVersion}}
        args:
        - --v=2
        - --lifecycle-port=9000
        - --tls-port=9443
        - --annotation-namespace=injector.nlb.netease.com
        - --config-directory=/conf
        - --tls-cert-file=/var/lib/secrets/sidecar-injector.crt
        - --tls-key-file=/var/lib/secrets/sidecar-injector.key
        - --configmap-labels=app=nlb-sidecar-injector
        - --configmap-namespace=nlb-system
        ports:
        - name: https
          containerPort: 9443
        - name: http-metrics
          containerPort: 9000
        volumeMounts:
        - name: secrets
          mountPath: /var/lib/secrets
          readOnly : true
        - name: confs
          mountPath: /conf
          readOnly: true
        livenessProbe:
          httpGet:
            scheme: HTTPS
            path: /health
            port: https
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 3
        resources:
          requests:
            cpu: "0.5"
            memory: 1Gi
          limits:
            cpu: "0.5"
            memory: 2Gi
'''.strip() + DEPLOYMENT_TOLERATIONS

import sys
import os
import json

def getImagePrefix(o):
    """Return the container image prefix configured under standardization.image."""
    image_conf = o['standardization']['image']
    return image_conf['prefix']

def crd(ctx, o):
    """Append the CRD manifest, but only for operator-enabled deployments."""
    if not o['operatorConfig']['enabled']:
        return
    ctx['yaml'].append(CRD)

def nlbNamespace(ctx, o):
    """Append the nlb-system / nlb-runtime Namespace manifests (always emitted)."""
    rendered = NAMESPACE_YAML.replace('{{nsAnnos}}', NS_ANNOTATIONS)
    ctx['yaml'].append(rendered)

def operatorEnabled(o):
    """Report whether the nlb operator component is enabled in the config."""
    operator_conf = o['operatorConfig']
    return operator_conf['enabled']

def serviceAccount(ctx, o):
    """Append service-account / RBAC manifests; the operator's only when enabled."""
    ctx['yaml'].append(BASIC_SERVICE_ACCOUNT_YAML)
    if not operatorEnabled(o):
        return
    ctx['yaml'].append(OPERATOR_SERVICE_ACCOUNT)

def requireImageSecret(o):
    """True when images come from the private hub that needs a pull secret."""
    prefix = o['standardization']['image']['prefix']
    return prefix == 'hub.c.163.com/z553992453'

def imageSecret(ctx, o):
    """Append the docker-registry pull-secret manifest when the registry needs it."""
    if not requireImageSecret(o):
        return
    ctx['yaml'].append(IMAGE_PULL_SECRET_YAML)

def isVPC(o):
    """True for 'vpc' networking, False for 'classical'; raise on anything else."""
    kind = o['networkConfig']['kind']
    if kind not in ('classical', 'vpc'):
        raise Exception('unknown networkConfig kind: ' + str(kind))
    return kind == 'vpc'

def isClassical(o):
    """True for 'classical' networking, False for 'vpc'; raise on anything else."""
    kind = o['networkConfig']['kind']
    if kind not in ('classical', 'vpc'):
        raise Exception('unknown networkConfig kind: ' + str(kind))
    return kind == 'classical'

def isDpvs(o):
    """True when the gateway uses dpvs ('dpvs' or 'dpvs+envoy'), False for 'envoy'."""
    kind = o['gatewayConfig']['kind']
    if kind in ('dpvs', 'dpvs+envoy'):
        return True
    if kind == 'envoy':
        return False
    raise Exception('unknown gatewayConfig kind ' + str(kind))

def isEnvoy(o):
    """True when the gateway uses envoy ('envoy' or 'dpvs+envoy'), False for 'dpvs'."""
    kind = o['gatewayConfig']['kind']
    if kind in ('envoy', 'dpvs+envoy'):
        return True
    if kind == 'dpvs':
        return False
    raise Exception('unknown gatewayConfig kind ' + str(kind))

def moreThanEnvoy(o):
    """The deployment is 'more than envoy' exactly when a dpvs data path exists."""
    has_dpvs = isDpvs(o)
    return has_dpvs

def isExternal(o):
    """Whether external gateway-config storage is enabled."""
    external_conf = o['networkConfig']['externalConfig']
    return external_conf['enabled']

def getPlatform(o):
    """Return os.platform; for non-envoy gateways a missing key defaults to ''.

    NOTE(review): envoy setups read the key unconditionally — looks like
    envoy configs are expected to always carry os.platform; confirm.
    """
    if isEnvoy(o):
        return o['os']['platform']
    return o['os'].get('platform', '')

def isSelinux(o):
    """Whether selinux handling is enabled; key optional (False) for non-envoy."""
    if isEnvoy(o):
        return o['os']['selinux']
    return o['os'].get('selinux', False)

def isTransparentProxy(o):
    """Whether transparent proxying is enabled; section optional for non-envoy."""
    if not isEnvoy(o) and 'transparentProxy' not in o:
        return False
    return o['transparentProxy']['enabled']

def dpvsSystemConfig(ctx, o):
    """Render the dpvs service-controller system configuration.

    Fills SERVICE_CONTROLLER_CONFIG_YAML with VPC credentials (placeholders
    stay 'not used' under classical networking) and appends one gateway
    section per az/network.  When externalConfig is enabled, the same data
    is instead written to the file 'netease.nlb.nlbgw' — which the admin
    must serve at the external url — and only a small pointer manifest
    (SERVICE_CONTROLLER_CONFIG_EXTERNAL_YAML) goes into ctx['yaml'].
    """
    if not isDpvs(o):
        return
    useExternal = isExternal(o)
    externalConfig = {}  # external-mode file payload: section name -> json string
    s = SERVICE_CONTROLLER_CONFIG_YAML
    # VPC-only settings; the dummy values keep the rendered yaml valid for classical mode
    keystone = 'not used'
    serviceUser = 'not used'
    servicePass = 'not used'
    gwTenantId = 'not used'
    proton = 'not used'
    paasVPC = 'not used'
    if isVPC(o):
        keystone = o['networkConfig']['vpcConfig']['keystone']
        proton = o['networkConfig']['vpcConfig']['proton']
        paasVPC = o['networkConfig']['vpcConfig']['paasVPC']
        serviceUser = o['networkConfig']['vpcConfig']['serviceUser']
        servicePass = o['networkConfig']['vpcConfig']['servicePass']
        gwTenantId = o['networkConfig']['vpcConfig']['gwTenantId']
    s = s.replace('{{keystone}}', keystone)
    s = s.replace('{{serviceUser}}', serviceUser)
    s = s.replace('{{servicePass}}', servicePass)
    s = s.replace('{{gwTenantId}}', gwTenantId)
    s = s.replace('{{proton}}', proton)
    s = s.replace('{{paasVPC}}', paasVPC)
    externalConfig['general'] = json.dumps({
        'KeystoneUrlPrefix': keystone,
        'ProtonApiUrlPrefix': proton,
        'PaaSVPCUrlPrefix': paasVPC,
        'ServiceUser': serviceUser,
        'ServicePass': servicePass,
        'GwTenantId': gwTenantId,
    })
    # per-az / per-network gateway sections appended to the configmap body
    sAzNet = ''
    for azo in o['networkConfig']['azs']:
        az = azo['az']
        for neto in azo['networks']:
            net = neto['network']
            if isClassical(o):
                # classical networks are keyed with an 'eth_' prefix
                net = 'eth_' + net
            sAzNet += '\n  "' + az + '.' + net + '": |'
            j = {}
            if net == 'private':
                # only the 'private' network carries real agent/host ids
                j['GwAgentId'] = azo['vpcConfig']['privateGwAgentId']
                j['GwHostId'] = azo['vpcConfig']['privateGwHostId']
            else:
                j['GwAgentId'] = 'not used'
                j['GwHostId'] = 'not used'
            j['GwList'] = []
            for gwHostPort in neto['dpvsConfig']['gwHostPort']:
                j['GwList'].append({
                    'HostPort': gwHostPort
                })
            j['IPPool'] = []
            for ip in neto['dpvsConfig']['ippool']:
                j['IPPool'].append(ip)
            lines = json.dumps(j, indent=4, sort_keys=True).split('\n')
            for l in lines:
                # indent the json under the yaml block scalar ('|')
                sAzNet += '\n' + '    ' + l
            # same section, compact form, for the external file
            externalConfig[az + '.' + net] = json.dumps(j)
    s += sAzNet
    if useExternal:
        se = SERVICE_CONTROLLER_CONFIG_EXTERNAL_YAML
        url = o['networkConfig']['externalConfig']['url']
        se = se.replace('{{externalUrl}}', url)
        ctx['yaml'].append(se)
        filename = 'netease.nlb.nlbgw'
        ctx['files'].append({
            'name': filename,
            'content': json.dumps(externalConfig)
        })
        ctx['doc'].append({
            'type': 'dpvsSystemConfig',
            'content': 'you need to ensure that ' + url + ' returns content of file ' + filename
        })
    else:
        ctx['yaml'].append(s)

def envSystemConfig(ctx, o):
    """Render the shared env config (ENV_CONFIG_YAML) for envoy and/or operator.

    Appends, per az: the list of network types ('networkTypes.<az>');
    per network: the vip pool ('ips.<az>.<net>'); and per vip: a distinct
    base number ('portmap.<ip>').  Emitted whenever envoy is in use or the
    operator is enabled.
    """
    if not isEnvoy(o) and not operatorEnabled(o):
        return
    env = ENV_CONFIG_YAML
    networkTypes = ''
    ips = ''
    portmap = ''
    portmapNum = 1000  # first vip's number; each subsequent vip gets its own 100-wide slot
    for azo in o['networkConfig']['azs']:
        az = azo['az']
        azNetworkTypes = ''
        for neto in azo['networks']:
            net = neto['network']
            azNetworkTypes += '\n    ' + net
            ips += '\n  ips.' + az + '.' + net + ': |'
            # the vip pool comes from whichever gateway kind is active
            if isEnvoy(o):
                ippool = neto['envoyConfig']['ippool']
            elif isDpvs(o):
                ippool = neto['dpvsConfig']['ippool']
            else:
                ippool = []
            for ip in ippool:
                ips += '\n    ' + ip
                portmap += '\n  portmap.' + ip + ': "' + str(portmapNum) + '"'
                portmapNum += 100
        # the configured default az is also published under the alias 'any'
        if o['userConfig']['defaultAz'] == az and az != 'any':
            networkTypes += '\n  networkTypes.any: |' + azNetworkTypes
        networkTypes += '\n  networkTypes.' + az + ': |' + azNetworkTypes
    env = env + networkTypes
    env = env + ips
    if isEnvoy(o) and not moreThanEnvoy(o):
        # pure-envoy installs additionally tag the controller class
        env = env + '\n  envoy-controller-class: nlbgw'
    env = env + portmap
    ctx['yaml'].append(env)

def dpvsUserConfig(ctx, o):
    """Render the dpvs user-facing configs.

    Appends two manifests: USER_REF_CONFIG_YAML (tenant reference) and
    USER_CONFIG_YAML (user defaults: az, bandwidth, network, plus the VPC
    network/subnet ids).  VPC placeholders stay 'not used'/'no-tenant'
    under classical networking.
    """
    if not isDpvs(o):
        return
    ref = USER_REF_CONFIG_YAML
    userTenantId = 'no-tenant'
    if isVPC(o):
        userTenantId = o['userConfig']['vpcConfig']['tenantId']
    ref = ref.replace('{{userTenantId}}', userTenantId)
    ctx['yaml'].append(ref)
    # second manifest: the per-user defaults
    conf = USER_CONFIG_YAML
    defaultAz = o['userConfig']['defaultAz']
    defaultBandwidth = o['userConfig']['defaultBandwidth']
    defaultNetwork = o['userConfig']['defaultNetwork']
    networkId = 'not used'
    subnetId = 'not used'
    userTenantId = 'no-tenant'
    if isVPC(o):
        networkId = o['userConfig']['vpcConfig']['networkId']
        subnetId = o['userConfig']['vpcConfig']['subnetId']
        userTenantId = o['userConfig']['vpcConfig']['tenantId']
    conf = conf.replace('{{defaultAz}}', defaultAz)
    conf = conf.replace('{{defaultBandwidth}}', str(defaultBandwidth))
    conf = conf.replace('{{defaultNetwork}}', defaultNetwork)
    conf = conf.replace('{{networkId}}', networkId)
    conf = conf.replace('{{subnetId}}', subnetId)
    conf = conf.replace('{{userTenantId}}', userTenantId)
    ctx['yaml'].append(conf)

def dpvsController(ctx, o):
    """Render the dpvs service-controller Deployment manifest."""
    if not isDpvs(o):
        return
    ctl = SERVICE_CONTROLLER_DEPLOYMENT
    replicas = o['controllerConfig']['replicas']
    clusterName = o['controllerConfig']['clusterName']
    version = o['controllerConfig']['dpvsConfig']['version']
    imagePullSecrets = '[]'
    if requireImageSecret(o):
        imagePullSecrets = IMAGE_PULL_SECRETS_JSON
    imagePullPolicy = 'IfNotPresent'
    # version "0.0.1" is always re-pulled (presumably a mutable dev tag — confirm)
    if version == '0.0.1':
        imagePullPolicy = 'Always'
    usingClassical = 'no'
    if isClassical(o):
        usingClassical = 'yes'
    ctl = ctl.replace('{{imagePullSecrets}}', imagePullSecrets)
    ctl = ctl.replace('{{imagePrefix}}', getImagePrefix(o))
    ctl = ctl.replace('{{replicas}}', str(replicas))
    ctl = ctl.replace('{{clusterName}}', clusterName)
    ctl = ctl.replace('{{version}}', version)
    ctl = ctl.replace('{{imagePullPolicy}}', imagePullPolicy)
    ctl = ctl.replace('{{usingClassical}}', usingClassical)
    ctx['yaml'].append(ctl)

def operator(ctx, o):
    """Render the nlb operator Deployment manifest (no-op when disabled)."""
    if not operatorEnabled(o):
        return
    op = OPERATOR_YAML
    imagePullSecrets = '[]'
    if requireImageSecret(o):
        imagePullSecrets = IMAGE_PULL_SECRETS_JSON
    replicas = o['operatorConfig']['replicas']
    version = o['operatorConfig']['version']
    imagePullPolicy = 'IfNotPresent'
    # version "0.0.1" is always re-pulled, matching the convention used by
    # the other deployments in this file
    if version == '0.0.1':
        imagePullPolicy = 'Always'
    op = op.replace('{{imagePullSecrets}}', imagePullSecrets)
    op = op.replace('{{imagePrefix}}', getImagePrefix(o))
    op = op.replace('{{replicas}}', str(replicas))
    op = op.replace('{{version}}', version)
    op = op.replace('{{imagePullPolicy}}', imagePullPolicy)
    op = op.replace('{{cloudProvider}}', getPlatform(o))
    ctx['yaml'].append(op)

def envoyGateway(ctx, o):
    """Render one ENVOY_YAML manifest per entry of every network's envoyList.

    Each manifest is parameterized with az/network/suffix plus the entry's
    node name and nodePort; when no platform is configured the
    {{hostNetwork}} placeholder is set to 'true'.
    """
    if not isEnvoy(o):
        return
    for azo in o['networkConfig']['azs']:
        az = azo['az']
        for neto in azo['networks']:
            net = neto['network']
            for eno in neto['envoyConfig']['envoyList']:
                suffix = eno['suffix']
                node = eno['node']['name']
                nodePort = str(eno['node']['nodePort'])
                envoy = ENVOY_YAML
                # registry pull secret only for the private hub
                imagePullSecrets = '[]'
                if requireImageSecret(o):
                    imagePullSecrets = IMAGE_PULL_SECRETS_JSON
                envoy = envoy.replace('{{imagePullSecrets}}', imagePullSecrets)
                # pilot image settings; "0.0.1" is always re-pulled (dev-tag convention)
                pilotVersion = o['controllerConfig']['envoyConfig']['version']
                pilotImagePullPolicy = 'IfNotPresent'
                if pilotVersion == '0.0.1':
                    pilotImagePullPolicy = 'Always'
                envoy = envoy.replace('{{imagePrefix}}', getImagePrefix(o))
                envoy = envoy.replace('{{pilotVersion}}', pilotVersion)
                envoy = envoy.replace('{{pilotImagePullPolicy}}', pilotImagePullPolicy)
                # placement / naming placeholders
                envoy = envoy.replace('{{az}}', az)
                envoy = envoy.replace('{{network}}', net)
                envoy = envoy.replace('{{suffix}}', suffix)
                envoy = envoy.replace('{{node}}', node)
                envoy = envoy.replace('{{nodePort}}', nodePort)
                proxyVersion = o['gatewayConfig']['envoyConfig']['version']
                proxyImagePullPolicy = 'IfNotPresent'
                if proxyVersion == '0.0.1':
                    proxyImagePullPolicy = 'Always'
                envoy = envoy.replace('{{proxyVersion}}', proxyVersion)
                envoy = envoy.replace('{{proxyImagePullPolicy}}', proxyImagePullPolicy)
                # an empty platform enables host networking in the template
                platform = getPlatform(o)
                envoy = envoy.replace('{{cloudProvider}}', platform)
                if platform == '':
                    envoy = envoy.replace('{{hostNetwork}}', 'true')
                else:
                    envoy = envoy.replace('{{hostNetwork}}', 'false')
                #
                ctx['yaml'].append(envoy)

def isKeepalived(o):
    """True when the envoy announce kind is 'keepalived'; False for 'bgp' or non-envoy."""
    if not isEnvoy(o):
        return False
    kind = o['gatewayConfig']['envoyConfig']['announce']['kind']
    if kind not in ('keepalived', 'bgp'):
        raise Exception('unknown announce kind ' + kind)
    return kind == 'keepalived'

def isBGP(o):
    """True when the envoy announce kind is 'bgp'; False for 'keepalived' or non-envoy."""
    if not isEnvoy(o):
        return False
    kind = o['gatewayConfig']['envoyConfig']['announce']['kind']
    if kind not in ('keepalived', 'bgp'):
        raise Exception('unknown announce kind ' + kind)
    return kind == 'bgp'

def transparentProxy(ctx, o):
    if not isEnvoy(o):
        return
    if not isTransparentProxy(o):
        return
    if o['transparentProxy']['kind'] != 'proxy-agent':
        raise Exception('unknown transparentProxy type')
    s = SIDECAR_INJECTOR
    agentConf = o['transparentProxy']['proxyAgentConfig']['agent']
    agentVersion = agentConf['version']
    if agentVersion == '0.0.1':
        agentImagePullPolicy = 'Always'
    else:
        agentImagePullPolicy = 'IfNotPresent'
    iptablesConf = o['transparentProxy']['proxyAgentConfig']['iptables']
    iptablesVersion = iptablesConf['version']
    if iptablesVersion == '0.0.1':
        iptablesImagePullPolicy = 'Always'
    else:
        iptablesImagePullPolicy = 'IfNotPresent'
    injectorConf = o['transparentProxy']['proxyAgentConfig']['injector']
    injectorReplicas = injectorConf['replicas']
    injectorVersion = injectorConf['version']
    if injectorVersion == '0.0.1':
        injectorImagePullPolicy = 'Always'
    else:
        injectorImagePullPolicy = 'IfNotPresent'

    s = s.replace('{{agentImagePrefix}}', getImagePrefix(o))
    s = s.replace('{{agentVersion}}', agentVersion)
    s = s.replace('{{agentImagePullPolicy}}', agentImagePullPolicy)
    s = s.replace('{{iptablesImagePrefix}}', getImagePrefix(o))
    s = s.replace('{{iptablesVersion}}', iptablesVersion)
    s = s.replace('{{iptablesImagePullPolicy}}', iptablesImagePullPolicy)
    s = s.replace('{{injectorImagePrefix}}', getImagePrefix(o))
    s = s.replace('{{injectorVersion}}', injectorVersion)
    s = s.replace('{{injectorImagePullPolicy}}', injectorImagePullPolicy)
    s = s.replace('{{injectorReplicas}}', str(injectorReplicas))
    ctx['yaml'].append(s)

def keepalived(ctx, o):
    """Generate a keepalived.conf per envoy node plus install instructions.

    For each network, the first node in envoyList is rendered as MASTER
    (priority 100) and every later one as BACKUP (priority 50); all share
    the network's vrrp id and the full vip pool.  Output goes to
    ctx['files'] and ctx['doc'] only — nothing is appended to ctx['yaml'].
    """
    if not isEnvoy(o):
        return
    if not isKeepalived(o):
        return
    for azo in o['networkConfig']['azs']:
        az = azo['az']
        for neto in azo['networks']:
            net = neto['network']
            vrrp = neto['keepalivedConfig']['vrrp']
            role = 'MASTER'  # first node of each network is the master
            ipsStr = '        ' + '\n        '.join(neto['envoyConfig']['ippool'])
            peerIps = []
            for envo in neto['envoyConfig']['envoyList']:
                peerIps.append(envo['node']['ip'])
            for envo in neto['envoyConfig']['envoyList']:
                curIp = envo['node']['ip']
                host = envo['node']['name']
                nic = envo['node']['nic']
                priority = 100
                if role != 'MASTER':
                    priority = 50
                # peers for this node = every other node in the same network
                curPeerIps = []
                for ip in peerIps:
                    if ip != curIp:
                        curPeerIps.append(ip)
                peerIpsStr = '        ' + '\n        '.join(curPeerIps)
                # fill the per-node template
                k = KEEPALIVED_CONF
                k = k.replace('{{az}}', az)
                k = k.replace('{{network}}', net)
                k = k.replace('{{vrrp}}', str(vrrp))
                k = k.replace('{{role}}', role)
                k = k.replace('{{nic}}', nic)
                k = k.replace('{{priority}}', str(priority))
                k = k.replace('{{ips}}', ipsStr)
                k = k.replace('{{peerIps}}', peerIpsStr)
                # KEEPALIVED_TRACK_SCRIPT is only inserted when no platform is
                # configured (bare metal); on a cloud platform the slot stays empty
                platform = getPlatform(o)
                if platform == '':
                    k = k.replace('{{track_script}}', KEEPALIVED_TRACK_SCRIPT)
                else:
                    k = k.replace('{{track_script}}', '')
                # emit the file plus the manual install instructions
                genName = 'keepalived.' + host + '.conf'
                ctx['files'].append({
                    'name': genName,
                    'content': k + '\n'
                })
                ctx['doc'].append({
                    'type': 'keepalived',
                    'content': 'install keepalived on node ' + host + ', ' + \
                               'and put keepalived configuration ' + genName + ' ' + \
                               'at /etc/keepalived/keepalived.conf on node ' + host + ', ' + \
                               'then run service keepalived restart'
                })
                # every node after the first becomes a backup
                role = 'BACKUP'

def nse(ctx, o):
    """Render the NSE config manifest when nseConfig is enabled."""
    conf = o['nseConfig']
    if not conf['enabled']:
        return
    manifest = NSE_CONFIG_YAML.replace('{{nseHostPort}}', conf['nseHostPort'])
    ctx['yaml'].append(manifest)

def exporter(ctx, o):
    """Render the metrics-exporter manifests.

    Two parts: (1) for dpvs, the DPVS_EXPORTER deployment with the list of
    gateway hosts rewritten to the exporter stat port; (2) for dpvs and/or
    envoy, the EXPORTER_ADAPTOR deployment, whose adaptor type and target
    addresses depend on the gateway kind — envoy settings win for
    'dpvs+envoy'.
    """
    if isDpvs(o):
        ex = DPVS_EXPORTER
        imagePullSecrets = '[]'
        if requireImageSecret(o):
            imagePullSecrets = IMAGE_PULL_SECRETS_JSON
        version = o['prometheus']['dpvsConfig']['exporter']['version']
        imagePullPolicy = 'IfNotPresent'
        # "0.0.1" is always re-pulled (dev-tag convention used file-wide)
        if version == '0.0.1':
            imagePullPolicy = 'Always'
        ex = ex.replace('{{imagePrefix}}', getImagePrefix(o))
        ex = ex.replace('{{nodePort}}', str(o['prometheus']['dpvsConfig']['nodePort']))
        clusterName = o['controllerConfig']['clusterName']
        ex = ex.replace('{{imagePullSecrets}}', imagePullSecrets)
        ex = ex.replace('{{version}}', version)
        ex = ex.replace('{{imagePullPolicy}}', imagePullPolicy)
        ex = ex.replace('{{clusterName}}', clusterName)
        external = ''
        if isExternal(o):
            external = o['networkConfig']['externalConfig']['url']
        ex = ex.replace('{{external}}', external)
        # every gateway host, with its port replaced by the exporter stat port
        gwList = []
        statPort = o['prometheus']['dpvsConfig']['exporter']['port']
        for azo in o['networkConfig']['azs']:
            for neto in azo['networks']:
                for hostport in neto['dpvsConfig']['gwHostPort']:
                    hostport = hostport.split(':')[0] + ':' + str(statPort)
                    gwList.append(hostport)
        ex = ex.replace('{{gwList}}', ','.join(gwList))
        ctx['yaml'].append(ex)
    if not isDpvs(o) and not isEnvoy(o):
        return
    if isDpvs(o):
        adaptorType = 'dpvs'
        adaptorAddresses = '127.0.0.1:' + str(o['prometheus']['dpvsConfig']['nodePort'])
    if isEnvoy(o): # envoy settings deliberately override the dpvs ones for 'dpvs+envoy'
        adaptorType = 'envoy'
        adaptorAddresses = ''
        isFirst = True
        # comma-joined local addresses, one per envoy node's nodePort
        for azo in o['networkConfig']['azs']:
            for neto in azo['networks']:
                for envoyo in neto['envoyConfig']['envoyList']:
                    if isFirst:
                        isFirst = False
                    else:
                        adaptorAddresses += ','
                    adaptorAddresses += '127.0.0.1:' + str(envoyo['node']['nodePort'])
    imagePullSecrets = '[]'
    if requireImageSecret(o):
        imagePullSecrets = IMAGE_PULL_SECRETS_JSON
    version = o['prometheus']['exporterAdaptor']['version']
    imagePullPolicy = 'IfNotPresent'
    if version == '0.0.1':
        imagePullPolicy = 'Always'
    ad = EXPORTER_ADAPTOR
    ad = ad.replace('{{imagePullSecrets}}', imagePullSecrets)
    ad = ad.replace('{{adaptorType}}', adaptorType)
    ad = ad.replace('{{adaptorAddresses}}', adaptorAddresses)
    ad = ad.replace('{{imagePrefix}}', getImagePrefix(o))
    ad = ad.replace('{{version}}', version)
    ad = ad.replace('{{imagePullPolicy}}', imagePullPolicy)
    ctx['yaml'].append(ad)

def firewalld(ctx, o):
    """Generate per-node firewalld scripts that open the envoy vip netflow.

    Only applies on centos with an envoy gateway.  For every envoy node one
    shell script is written to ctx['files'] (allow each vip in the network's
    ippool, then reload firewalld) plus a ctx['doc'] entry telling the admin
    to run it on that node.
    """
    if o['os']['distribution'] != 'centos':
        return
    if not isEnvoy(o):
        return
    for azo in o['networkConfig']['azs']:
        for neto in azo['networks']:
            # the script body depends only on the network's ippool, so build
            # it once per network instead of once per envoy node
            content = ''
            for ip in neto['envoyConfig']['ippool']:
                content += FIREWALLD.replace('{{vip}}', ip) + '\n'
            content += FIREWALLD_RELOAD + '\n'
            for envo in neto['envoyConfig']['envoyList']:
                node = envo['node']['name']
                shName = 'firewalld-allow-vips-' + node + '.sh'
                ctx['files'].append({
                    'name': shName,
                    'content': content
                })
                ctx['doc'].append({
                    'type': 'firewalld',
                    'content': 'run script ' + shName + ' on node ' + node + ' to allow vip netflow'
                })

def nlbgw(ctx, o):
    """Write one gateway storage file per az/network: nlbgw.storage.<az>.<net>.

    Each file carries the az, the comma-joined gateway host:port list,
    agent/host ids (real values only for the 'private' network) and the
    shared VPC credentials ('not used' under classical networking).
    Output goes to ctx['files'] only.
    """
    if not isDpvs(o):
        return
    # credentials merged into every section; real values only exist for vpc
    general = {
        'GwTenantId': 'not used',
        'KeystoneUrlPrefix': 'not used',
        'ProtonApiUrlPrefix': 'not used',
        'PaaSVPCUrlPrefix': 'not used',
        'ServicePass': 'not used',
        'ServiceUser': 'not used'
    }
    if isVPC(o):
        general['GwTenantId'] = o['networkConfig']['vpcConfig']['gwTenantId']
        general['KeystoneUrlPrefix'] = o['networkConfig']['vpcConfig']['keystone']
        general['ProtonApiUrlPrefix'] = o['networkConfig']['vpcConfig']['proton']
        general['PaaSVPCUrlPrefix'] = o['networkConfig']['vpcConfig']['paasVPC']
        general['ServicePass'] = o['networkConfig']['vpcConfig']['servicePass']
        general['ServiceUser'] = o['networkConfig']['vpcConfig']['serviceUser']
    for azo in o['networkConfig']['azs']:
        for neto in azo['networks']:
            gwList = []
            for x in neto['dpvsConfig']['gwHostPort']:
                gwList.append(x)
            content = {
                'Az': azo['az'],
                'GwList': ','.join(gwList),
                'GwAgentId': 'not used',
                'GwHostId': 'not used',
            }
            if neto['network'] == 'private':
                content['GwAgentId'] = azo['vpcConfig']['privateGwAgentId']
                content['GwHostId'] = azo['vpcConfig']['privateGwHostId']
            # merge in the shared credential block
            for k in general:
                content[k] = general[k]
            ctx['files'].append({
                'name': 'nlbgw.storage.' + azo['az'] + '.' + neto['network'],
                'content': json.dumps(content, indent=4, sort_keys=True)
            })

def selinux(ctx, o):
    """Emit selinux instructions: keepalived needs a boolean set so its
    health checks may open connections on each envoy node."""
    if not isSelinux(o):
        return
    # only the keepalived announce mode needs the exception; the check does
    # not depend on the loop variables, so evaluate it once up front instead
    # of inside the innermost loop
    if not isKeepalived(o):
        return
    for azo in o['networkConfig']['azs']:
        for neto in azo['networks']:
            for envo in neto['envoyConfig']['envoyList']:
                ctx['doc'].append({
                    'type': 'selinux',
                    'content': 'run command setsebool -P keepalived_connect_any 1 on node ' + envo['node']['name'] + ' to allow keepalived to run health checks',
                })

# Generation pipeline: handle() invokes each entry's 'handler' in this order
# with (ctx, o).  Every handler decides for itself whether the config applies,
# and appends manifests to ctx['yaml'], generated files to ctx['files'] and
# operator instructions to ctx['doc'].
handlers = [
    { 'name': 'crd',               'handler': crd },
    { 'name': 'nlb-namespace',     'handler': nlbNamespace },
    { 'name': 'service-account',   'handler': serviceAccount },
    { 'name': 'image-secret',      'handler': imageSecret },
    { 'name': 'dpvsSystemConfig',  'handler': dpvsSystemConfig },
    { 'name': 'envSystemConfig',   'handler': envSystemConfig },
    { 'name': 'dpvsUserConfig',    'handler': dpvsUserConfig },
    { 'name': 'dpvsController',    'handler': dpvsController },
    { 'name': 'operator',          'handler': operator },
    { 'name': 'envoyGateway',      'handler': envoyGateway },
    { 'name': 'transparent-proxy', 'handler': transparentProxy },
    { 'name': 'keepalived',        'handler': keepalived },
    { 'name': 'nse',               'handler': nse },
    { 'name': 'exporter',          'handler': exporter },
    { 'name': 'firewalld',         'handler': firewalld },
    { 'name': 'nlbgw',             'handler': nlbgw },
    { 'name': 'selinux',           'handler': selinux }
]

def handle(ctx, o):
    """Run every registered handler against config *o*, then write outputs.

    Side effects: writes nlb.yaml (all manifests, '---' separated), writes
    each generated extra file into the current directory, and prints the
    collected doc entries.  ctx is (re)initialized here and mutated by the
    handlers.
    """
    ctx['yaml'] = []   # rendered manifest strings
    ctx['files'] = []  # [ { name: xxx, content: '...' } ]
    ctx['doc'] = []    # [ { type: xxx, content: '...' } ]
    #
    ctx['doc'].append({
        'type': 'k8s',
        'content': 'kubectl apply -f nlb.yaml'
    })
    #
    for handler in handlers:
        print('processing: ' + handler['name'])
        handler['handler'](ctx, o)
    # 'with' guarantees the output files are closed even if a write raises
    with open('nlb.yaml', 'w') as yaml_f:
        for x in ctx['yaml']:
            yaml_f.write('---\n')
            yaml_f.write(x)
            yaml_f.write('\n\n')
    for fo in ctx['files']:
        with open(fo['name'], 'w') as f:
            f.write(fo['content'].strip() + '\n')
    print("")
    print("================================")
    print("")
    for d in ctx['doc']:
        print(d['type'] + ': ' + d['content'])

def main(argv):
    """CLI entry point; *argv* is sys.argv[1:].

    Expects exactly one argument: the path of the config json.  Returns a
    process exit code (0 on success, 1 on bad usage); file/parse errors
    propagate as exceptions.
    """
    if len(argv) == 0:
        print('you need to specify the config json')
        return 1
    if len(argv) != 1:
        print('unexpected extra argument')
        return 1
    # context manager closes the file even if parsing raises
    with open(argv[0], 'r') as f:
        o = json.load(f)
    ctx = {}
    handle(ctx, o)
    return 0

if __name__ == "__main__":
    # sys.exit instead of the bare exit() builtin: exit() is injected by the
    # site module and is not guaranteed to exist (e.g. under python -S)
    sys.exit(main(sys.argv[1:]))
