# language: zh-CN
# Epic 12.1: 云原生部署支持
# 需求ID: REQ-012.1
# SRS需求: 系统应支持云原生部署模式，包括Docker容器化、Kubernetes编排、Helm Chart管理、CI/CD集成、自动扩缩容、服务网格等现代云原生技术栈
# Feature ID: FT-012.1
# 关联Epic: EP-012 (云原生与集成)
# 优先级: 高
# 复杂度: 高

# 用户故事:
# US-062: 作为开发者，我希望支持Docker容器化部署，以便实现应用的标准化打包和部署
# US-063: 作为运维工程师，我希望支持Kubernetes部署配置和Helm Chart管理，以便实现应用的容器编排和管理
# US-064: 作为DevOps工程师，我希望集成CI/CD流水线和GitOps工作流，以便实现自动化的构建、测试和部署流程
# US-065: 作为系统管理员，我希望实现自动扩缩容和服务网格，以便根据负载自动调整资源配置和管理微服务通信

# 验收标准:
# - 支持Docker多阶段构建，生成优化的容器镜像
# - 提供完整的Kubernetes部署配置，包括Deployment、Service、Ingress等
# - 支持Helm Chart管理，简化应用部署和配置管理
# - 集成CI/CD流水线，支持自动化构建、测试、部署
# - 实现HPA和VPA自动扩缩容机制
# - 支持服务网格(Istio)，提供流量管理和安全策略
# - 提供监控告警和日志聚合功能
# - 支持多环境部署和蓝绿/金丝雀发布策略

功能: 云原生部署
  作为DevOps工程师
  我希望系统支持云原生部署
  以便在现代云环境中高效运行和管理应用

  背景:
    假设我是一个DevOps工程师
    并且我需要在云环境中部署应用
    并且我需要支持容器化和微服务架构
    并且我需要实现自动化运维

  # US-062: Docker容器化支持
  场景: Docker容器化支持
    假设我需要将应用容器化
    当我配置Docker部署时:
      """
      # Dockerfile (自动生成)
      FROM node:18-alpine AS builder
      
      WORKDIR /app
      COPY package*.json ./
      # 构建阶段需要完整依赖（含devDependencies）才能执行npm run build
      RUN npm ci
      
      COPY . .
      RUN npm run build
      # 构建后裁剪devDependencies，使运行时阶段复制的node_modules只含生产依赖
      RUN npm prune --omit=dev
      
      FROM node:18-alpine AS runtime
      
      # 安全配置
      RUN addgroup -g 1001 -S nodejs
      RUN adduser -S nextjs -u 1001
      
      WORKDIR /app
      
      # 复制构建产物
      COPY --from=builder --chown=nextjs:nodejs /app/dist ./dist
      COPY --from=builder --chown=nextjs:nodejs /app/node_modules ./node_modules
      COPY --from=builder --chown=nextjs:nodejs /app/package.json ./package.json
      
      # 健康检查（node:18-alpine 不自带curl，使用busybox内置的wget）
      HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
        CMD wget -q --spider http://localhost:3000/health || exit 1
      
      USER nextjs
      
      EXPOSE 3000
      
      ENV NODE_ENV=production
      ENV PORT=3000
      
      CMD ["npm", "start"]
      
      # docker-compose.yml
      version: '3.8'
      
      services:
        app:
          build:
            context: .
            dockerfile: Dockerfile
            target: runtime
          ports:
            - "3000:3000"
          environment:
            - NODE_ENV=production
            - DATABASE_URL=${DATABASE_URL}
            - REDIS_URL=${REDIS_URL}
          depends_on:
            - postgres
            - redis
          restart: unless-stopped
          
        postgres:
          image: postgres:15-alpine
          environment:
            POSTGRES_DB: myapp
            POSTGRES_USER: ${DB_USER}
            POSTGRES_PASSWORD: ${DB_PASSWORD}
          volumes:
            - postgres_data:/var/lib/postgresql/data
            - ./init.sql:/docker-entrypoint-initdb.d/init.sql
          restart: unless-stopped
          
        redis:
          image: redis:7-alpine
          command: redis-server --appendonly yes
          volumes:
            - redis_data:/data
          restart: unless-stopped
      
      volumes:
        postgres_data:
        redis_data:
      """
    那么应该生成优化的容器镜像:
      """
      {
        "containerBuild": {
          "imageSize": {
            "total": "245MB",
            "layers": {
              "base": "85MB",
              "dependencies": "120MB",
              "application": "40MB"
            }
          },
          "securityScan": {
            "vulnerabilities": {
              "critical": 0,
              "high": 0,
              "medium": 2,
              "low": 5
            },
            "compliance": "passed"
          },
          "performance": {
            "buildTime": "2m 15s",
            "startupTime": "3.2s",
            "memoryUsage": "128MB"
          }
        }
      }
      """
    并且应该支持多阶段构建
    并且应该包含安全配置
    并且应该支持健康检查
    并且应该优化镜像大小

  # US-063: Kubernetes部署配置
  场景: Kubernetes部署配置
    假设我需要在Kubernetes中部署应用
    当我配置K8s资源时:
      """
      # deployment.yaml
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: myapp
        namespace: production
        labels:
          app: myapp
          version: v1.0.0
      spec:
        replicas: 3
        strategy:
          type: RollingUpdate
          rollingUpdate:
            maxSurge: 1
            maxUnavailable: 0
        selector:
          matchLabels:
            app: myapp
        template:
          metadata:
            labels:
              app: myapp
              version: v1.0.0
          spec:
            containers:
            - name: myapp
              image: myregistry.com/myapp:v1.0.0
              ports:
              - containerPort: 3000
                name: http
              env:
              - name: NODE_ENV
                value: "production"
              - name: DATABASE_URL
                valueFrom:
                  secretKeyRef:
                    name: myapp-secrets
                    key: database-url
              resources:
                requests:
                  memory: "256Mi"
                  cpu: "250m"
                limits:
                  memory: "512Mi"
                  cpu: "500m"
              livenessProbe:
                httpGet:
                  path: /health
                  port: 3000
                initialDelaySeconds: 30
                periodSeconds: 10
              readinessProbe:
                httpGet:
                  path: /ready
                  port: 3000
                initialDelaySeconds: 5
                periodSeconds: 5
              securityContext:
                runAsNonRoot: true
                runAsUser: 1001
                allowPrivilegeEscalation: false
                readOnlyRootFilesystem: true
            imagePullSecrets:
            - name: registry-secret
      
      ---
      # service.yaml
      apiVersion: v1
      kind: Service
      metadata:
        name: myapp-service
        namespace: production
      spec:
        selector:
          app: myapp
        ports:
        - port: 80
          targetPort: 3000
          protocol: TCP
        type: ClusterIP
      
      ---
      # ingress.yaml
      apiVersion: networking.k8s.io/v1
      kind: Ingress
      metadata:
        name: myapp-ingress
        namespace: production
        annotations:
          # kubernetes.io/ingress.class 注解已弃用，改用 spec.ingressClassName
          cert-manager.io/cluster-issuer: letsencrypt-prod
          nginx.ingress.kubernetes.io/rate-limit: "100"
      spec:
        ingressClassName: nginx
        tls:
        - hosts:
          - api.myapp.com
          secretName: myapp-tls
        rules:
        - host: api.myapp.com
          http:
            paths:
            - path: /
              pathType: Prefix
              backend:
                service:
                  name: myapp-service
                  port:
                    number: 80
      """
    那么应该部署高可用的应用:
      """
      {
        "kubernetesDeployment": {
          "status": "deployed",
          "replicas": {
            "desired": 3,
            "ready": 3,
            "available": 3
          },
          "resources": {
            "cpu": {
              "requested": "750m",
              "used": "420m",
              "utilization": "56%"
            },
            "memory": {
              "requested": "768Mi",
              "used": "445Mi",
              "utilization": "58%"
            }
          },
          "health": {
            "livenessProbe": "passing",
            "readinessProbe": "passing",
            "overallHealth": "healthy"
          },
          "networking": {
            "service": "myapp-service",
            "ingress": "myapp-ingress",
            "tls": "enabled",
            "loadBalancer": "nginx"
          }
        }
      }
      """
    并且应该支持滚动更新
    并且应该配置资源限制
    并且应该设置健康检查
    并且应该支持自动扩缩容

  # US-063: Helm Chart管理
  场景: Helm Chart管理
    假设我需要使用Helm管理部署
    当我创建Helm Chart时:
      """
      # Chart.yaml
      apiVersion: v2
      name: myapp
      description: A Helm chart for MyApp
      type: application
      version: 0.1.0
      appVersion: "1.0.0"
      
      dependencies:
      - name: postgresql
        version: "11.9.13"
        repository: "https://charts.bitnami.com/bitnami"
        condition: postgresql.enabled
      - name: redis
        version: "17.3.7"
        repository: "https://charts.bitnami.com/bitnami"
        condition: redis.enabled
      
      # values.yaml
      replicaCount: 3
      
      image:
        repository: myregistry.com/myapp
        pullPolicy: IfNotPresent
        tag: ""
      
      service:
        type: ClusterIP
        port: 80
        targetPort: 3000
      
      ingress:
        enabled: true
        className: nginx
        annotations:
          cert-manager.io/cluster-issuer: letsencrypt-prod
        hosts:
        - host: api.myapp.com
          paths:
          - path: /
            pathType: Prefix
        tls:
        - secretName: myapp-tls
          hosts:
          - api.myapp.com
      
      resources:
        limits:
          cpu: 500m
          memory: 512Mi
        requests:
          cpu: 250m
          memory: 256Mi
      
      autoscaling:
        enabled: true
        minReplicas: 2
        maxReplicas: 10
        targetCPUUtilizationPercentage: 70
        targetMemoryUtilizationPercentage: 80
      
      postgresql:
        enabled: true
        auth:
          postgresPassword: "changeme"
          database: "myapp"
        primary:
          persistence:
            enabled: true
            size: 20Gi
      
      redis:
        enabled: true
        auth:
          enabled: false
        master:
          persistence:
            enabled: true
            size: 8Gi
      
      # templates/deployment.yaml
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: {{ include "myapp.fullname" . }}
        labels:
          {{- include "myapp.labels" . | nindent 4 }}
      spec:
        {{- if not .Values.autoscaling.enabled }}
        replicas: {{ .Values.replicaCount }}
        {{- end }}
        selector:
          matchLabels:
            {{- include "myapp.selectorLabels" . | nindent 6 }}
        template:
          metadata:
            labels:
              {{- include "myapp.selectorLabels" . | nindent 8 }}
          spec:
            containers:
            - name: {{ .Chart.Name }}
              image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
              imagePullPolicy: {{ .Values.image.pullPolicy }}
              ports:
              - name: http
                containerPort: {{ .Values.service.targetPort }}
                protocol: TCP
              env:
              - name: DATABASE_URL
                value: "postgresql://postgres:{{ .Values.postgresql.auth.postgresPassword }}@{{ include "myapp.fullname" . }}-postgresql:5432/{{ .Values.postgresql.auth.database }}"
              - name: REDIS_URL
                value: "redis://{{ include "myapp.fullname" . }}-redis-master:6379"
              resources:
                {{- toYaml .Values.resources | nindent 12 }}
      """
    那么应该简化部署管理:
      """
      # 部署命令
      helm install myapp ./myapp-chart \
        --namespace production \
        --create-namespace \
        --set image.tag=v1.0.0 \
        --set ingress.hosts[0].host=api.myapp.com
      
      # 升级命令
      helm upgrade myapp ./myapp-chart \
        --namespace production \
        --set image.tag=v1.1.0 \
        --reuse-values
      
      # 回滚命令
      helm rollback myapp 1 --namespace production
      
      {
        "helmDeployment": {
          "release": "myapp",
          "namespace": "production",
          "revision": 3,
          "status": "deployed",
          "chart": "myapp-0.1.0",
          "appVersion": "1.1.0",
          "resources": {
            "deployments": 1,
            "services": 3,
            "ingresses": 1,
            "secrets": 2,
            "configmaps": 1
          },
          "dependencies": {
            "postgresql": "deployed",
            "redis": "deployed"
          }
        }
      }
      """
    并且应该支持依赖管理
    并且应该支持配置模板化
    并且应该支持版本管理
    并且应该支持回滚操作

  # US-064: CI/CD流水线集成
  场景: CI/CD流水线集成
    假设我需要集成CI/CD流水线
    当我配置GitLab CI时:
      """
      # .gitlab-ci.yml
      stages:
        - test
        - build
        - security
        - deploy
      
      variables:
        DOCKER_DRIVER: overlay2
        DOCKER_TLS_CERTDIR: "/certs"
        REGISTRY: $CI_REGISTRY
        IMAGE_NAME: $CI_REGISTRY_IMAGE
        KUBECONFIG: /tmp/kubeconfig
      
      # 测试阶段
      test:unit:
        stage: test
        image: node:18-alpine
        script:
          - npm ci
          - npm run test:unit
          - npm run test:coverage
        coverage: '/Lines\s*:\s*(\d+\.?\d*)%/'
        artifacts:
          reports:
            coverage_report:
              coverage_format: cobertura
              path: coverage/cobertura-coverage.xml
      
      test:integration:
        stage: test
        image: docker:20.10.16
        services:
          - docker:20.10.16-dind
          - postgres:15-alpine
        variables:
          POSTGRES_DB: test_db
          POSTGRES_USER: test_user
          POSTGRES_PASSWORD: test_pass
          DATABASE_URL: postgresql://test_user:test_pass@postgres:5432/test_db
        script:
          - docker-compose -f docker-compose.test.yml up -d
          - docker-compose -f docker-compose.test.yml exec -T app npm run test:integration
        after_script:
          - docker-compose -f docker-compose.test.yml down
      
      # 构建阶段
      build:docker:
        stage: build
        image: docker:20.10.16
        services:
          - docker:20.10.16-dind
        before_script:
          - echo $CI_REGISTRY_PASSWORD | docker login -u $CI_REGISTRY_USER --password-stdin $CI_REGISTRY
        script:
          - docker build -t $IMAGE_NAME:$CI_COMMIT_SHA .
          - docker build -t $IMAGE_NAME:latest .
          - docker push $IMAGE_NAME:$CI_COMMIT_SHA
          - docker push $IMAGE_NAME:latest
        only:
          - main
          - develop
      
      # 安全扫描
      security:container:
        stage: security
        image: aquasec/trivy:latest
        script:
          - trivy image --exit-code 1 --severity HIGH,CRITICAL $IMAGE_NAME:$CI_COMMIT_SHA
        allow_failure: false
        only:
          - main
      
      security:sast:
        stage: security
        image: securecodewarrior/gitlab-sast:latest
        script:
          - gitlab-sast-scan
        artifacts:
          reports:
            sast: gl-sast-report.json
      
      # 部署阶段
      deploy:staging:
        stage: deploy
        image: bitnami/kubectl:latest
        environment:
          name: staging
          url: https://staging-api.myapp.com
        before_script:
          - echo $KUBE_CONFIG_STAGING | base64 -d > $KUBECONFIG
          - kubectl config use-context staging
        script:
          # 使用折叠标量(>-)代替行尾反斜杠：YAML普通标量折行会保留字面反斜杠，破坏shell命令
          - >-
            helm upgrade --install myapp-staging ./helm-chart
            --namespace staging
            --create-namespace
            --set image.tag=$CI_COMMIT_SHA
            --set ingress.hosts[0].host=staging-api.myapp.com
            --wait --timeout=300s
        only:
          - develop
      
      deploy:production:
        stage: deploy
        image: bitnami/kubectl:latest
        environment:
          name: production
          url: https://api.myapp.com
        before_script:
          - echo $KUBE_CONFIG_PROD | base64 -d > $KUBECONFIG
          - kubectl config use-context production
        script:
          # 同staging：使用折叠标量避免行尾反斜杠被并入YAML标量
          - >-
            helm upgrade --install myapp ./helm-chart
            --namespace production
            --create-namespace
            --set image.tag=$CI_COMMIT_SHA
            --set ingress.hosts[0].host=api.myapp.com
            --wait --timeout=600s
        when: manual
        only:
          - main
      """
    那么应该自动化部署流程:
      """
      {
        "cicdPipeline": {
          "pipelineId": "12345",
          "status": "success",
          "stages": {
            "test": {
              "unit": { "status": "passed", "coverage": "87%", "duration": "2m 15s" },
              "integration": { "status": "passed", "duration": "5m 30s" }
            },
            "build": {
              "docker": { 
                "status": "passed", 
                "imageSize": "245MB", 
                "duration": "3m 45s",
                "tags": ["abc123def", "latest"]
              }
            },
            "security": {
              "container": { "status": "passed", "vulnerabilities": 0 },
              "sast": { "status": "passed", "issues": 2 }
            },
            "deploy": {
              "staging": { 
                "status": "deployed", 
                "url": "https://staging-api.myapp.com",
                "duration": "2m 10s"
              },
              "production": { "status": "pending_manual" }
            }
          },
          "totalDuration": "13m 40s",
          "artifacts": {
            "testReports": "available",
            "coverageReport": "available",
            "securityReport": "available",
            "dockerImage": "myregistry.com/myapp:abc123def"
          }
        }
      }
      """
    并且应该支持多环境部署
    并且应该包含安全扫描
    并且应该支持自动回滚
    并且应该生成部署报告

  # US-065: 服务网格集成
  场景: 服务网格集成
    假设我需要集成Istio服务网格
    当我配置服务网格时:
      """
      # istio-gateway.yaml
      apiVersion: networking.istio.io/v1beta1
      kind: Gateway
      metadata:
        name: myapp-gateway
        namespace: production
      spec:
        selector:
          istio: ingressgateway
        servers:
        - port:
            number: 443
            name: https
            protocol: HTTPS
          tls:
            mode: SIMPLE
            credentialName: myapp-tls
          hosts:
          - api.myapp.com
        - port:
            number: 80
            name: http
            protocol: HTTP
          hosts:
          - api.myapp.com
          tls:
            httpsRedirect: true
      
      ---
      # virtual-service.yaml
      apiVersion: networking.istio.io/v1beta1
      kind: VirtualService
      metadata:
        name: myapp-vs
        namespace: production
      spec:
        hosts:
        - api.myapp.com
        gateways:
        - myapp-gateway
        http:
        - match:
          - uri:
              prefix: /api/v2
          route:
          - destination:
              host: myapp-v2-service
              port:
                number: 80
            weight: 100
        - match:
          - uri:
              prefix: /api
          route:
          - destination:
              host: myapp-service
              port:
                number: 80
            weight: 90
          - destination:
              host: myapp-v2-service
              port:
                number: 80
            weight: 10
          fault:
            delay:
              percentage:
                value: 0.1
              fixedDelay: 5s
          retries:
            attempts: 3
            perTryTimeout: 2s
      
      ---
      # destination-rule.yaml
      apiVersion: networking.istio.io/v1beta1
      kind: DestinationRule
      metadata:
        name: myapp-dr
        namespace: production
      spec:
        host: myapp-service
        trafficPolicy:
          connectionPool:
            tcp:
              maxConnections: 100
            http:
              http1MaxPendingRequests: 50
              maxRequestsPerConnection: 10
          loadBalancer:
            simple: LEAST_CONN
          outlierDetection:
            # consecutiveErrors 已弃用(Istio 1.4+)，改用 consecutive5xxErrors
            consecutive5xxErrors: 3
            interval: 30s
            baseEjectionTime: 30s
            maxEjectionPercent: 50
        subsets:
        - name: v1
          labels:
            version: v1
        - name: v2
          labels:
            version: v2
      
      ---
      # peer-authentication.yaml
      apiVersion: security.istio.io/v1beta1
      kind: PeerAuthentication
      metadata:
        name: myapp-pa
        namespace: production
      spec:
        selector:
          matchLabels:
            app: myapp
        mtls:
          mode: STRICT
      
      ---
      # authorization-policy.yaml
      apiVersion: security.istio.io/v1beta1
      kind: AuthorizationPolicy
      metadata:
        name: myapp-authz
        namespace: production
      spec:
        selector:
          matchLabels:
            app: myapp
        rules:
        - from:
          - source:
              principals: ["cluster.local/ns/production/sa/frontend"]
        - to:
          - operation:
              methods: ["GET", "POST"]
              paths: ["/api/*"]
        - when:
          - key: request.headers[authorization]
            values: ["Bearer *"]
      """
    那么应该提供高级流量管理:
      """
      {
        "serviceMesh": {
          "gateway": {
            "status": "active",
            "hosts": ["api.myapp.com"],
            "tls": "enabled",
            "httpsRedirect": true
          },
          "trafficManagement": {
            "canaryDeployment": {
              "v1Weight": 90,
              "v2Weight": 10,
              "successRate": 0.995,
              "errorRate": 0.005
            },
            "loadBalancing": "LEAST_CONN",
            "circuitBreaker": {
              "consecutiveErrors": 3,
              "ejectionRate": 0.15
            },
            "retries": {
              "attempts": 3,
              "timeout": "2s",
              "successRate": 0.98
            }
          },
          "security": {
            "mtls": {
              "mode": "STRICT",
              "certificateRotation": "automatic"
            },
            "authorization": {
              "policies": 3,
              "deniedRequests": 12,
              "allowedRequests": 98567
            }
          },
          "observability": {
            "tracing": "enabled",
            "metrics": "enabled",
            "logging": "enabled",
            "dashboards": ["kiali", "grafana", "jaeger"]
          }
        }
      }
      """
    并且应该支持金丝雀部署
    并且应该提供mTLS安全
    并且应该支持流量策略
    并且应该提供可观测性

  # 监控和可观测性（对应验收标准：提供监控告警和日志聚合功能，无单独用户故事编号）
  场景: 监控和可观测性
    假设我需要实现全面监控
    当我配置监控系统时:
      """
      # prometheus-config.yaml
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: prometheus-config
        namespace: monitoring
      data:
        prometheus.yml: |
          global:
            scrape_interval: 15s
            evaluation_interval: 15s
          
          rule_files:
            - "/etc/prometheus/rules/*.yml"
          
          scrape_configs:
          - job_name: 'kubernetes-pods'
            kubernetes_sd_configs:
            - role: pod
            relabel_configs:
            - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape]
              action: keep
              regex: true
            - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path]
              action: replace
              target_label: __metrics_path__
              regex: (.+)
          
          - job_name: 'myapp'
            static_configs:
            - targets: ['myapp-service:80']
            metrics_path: '/metrics'
            scrape_interval: 10s
      
      # grafana-dashboard.json
      {
        "dashboard": {
          "title": "MyApp Metrics",
          "panels": [
            {
              "title": "Request Rate",
              "type": "graph",
              "targets": [
                {
                  "expr": "rate(http_requests_total{job=\"myapp\"}[5m])",
                  "legendFormat": "{{method}} {{status}}"
                }
              ]
            },
            {
              "title": "Response Time",
              "type": "graph",
              "targets": [
                {
                  "expr": "histogram_quantile(0.95, rate(http_request_duration_seconds_bucket{job=\"myapp\"}[5m]))",
                  "legendFormat": "95th percentile"
                }
              ]
            },
            {
              "title": "Error Rate",
              "type": "singlestat",
              "targets": [
                {
                  "expr": "rate(http_requests_total{job=\"myapp\",status=~\"5..\"}[5m]) / rate(http_requests_total{job=\"myapp\"}[5m])"
                }
              ]
            }
          ]
        }
      }
      
      # alerting-rules.yaml
      apiVersion: v1
      kind: ConfigMap
      metadata:
        name: alerting-rules
        namespace: monitoring
      data:
        alerts.yml: |
          groups:
          - name: myapp.rules
            rules:
            - alert: HighErrorRate
              expr: rate(http_requests_total{job="myapp",status=~"5.."}[5m]) / rate(http_requests_total{job="myapp"}[5m]) > 0.05
              for: 5m
              labels:
                severity: critical
              annotations:
                summary: "High error rate detected"
                description: "Error rate is {{ $value | humanizePercentage }} for the last 5 minutes"
            
            - alert: HighResponseTime
              expr: histogram_quantile(0.95, rate(http_request_duration_seconds_bucket{job="myapp"}[5m])) > 2
              for: 10m
              labels:
                severity: warning
              annotations:
                summary: "High response time detected"
                description: "95th percentile response time is {{ $value }}s"
            
            - alert: PodCrashLooping
              expr: rate(kube_pod_container_status_restarts_total{namespace="production",pod=~"myapp-.*"}[15m]) > 0
              for: 5m
              labels:
                severity: critical
              annotations:
                summary: "Pod is crash looping"
                description: "Pod {{ $labels.pod }} is restarting frequently"
      
      # jaeger-tracing.yaml
      apiVersion: jaegertracing.io/v1
      kind: Jaeger
      metadata:
        name: myapp-jaeger
        namespace: monitoring
      spec:
        strategy: production
        storage:
          type: elasticsearch
          elasticsearch:
            nodeCount: 3
            storage:
              size: 50Gi
        collector:
          maxReplicas: 5
          resources:
            limits:
              memory: 512Mi
        query:
          replicas: 2
      """
    那么应该提供全面的可观测性:
      """
      {
        "observability": {
          "metrics": {
            "prometheus": {
              "status": "active",
              "targets": 15,
              "scrapeInterval": "15s",
              "retention": "30d",
              "storageUsage": "12.5GB"
            },
            "grafana": {
              "dashboards": 8,
              "alerts": 12,
              "users": 25,
              "uptime": "99.9%"
            }
          },
          "logging": {
            "elasticsearch": {
              "indices": 45,
              "documents": 15000000,
              "storageUsage": "85GB"
            },
            "kibana": {
              "visualizations": 20,
              "searches": 35
            },
            "logstash": {
              "throughput": "50000 events/sec",
              "pipelines": 3
            }
          },
          "tracing": {
            "jaeger": {
              "traces": 2500000,
              "services": 12,
              "operations": 156,
              "retention": "7d"
            },
            "samplingRate": 0.1
          },
          "alerts": {
            "active": 2,
            "resolved": 45,
            "channels": ["slack", "email", "pagerduty"]
          }
        }
      }
      """
    并且应该支持分布式追踪
    并且应该提供实时告警
    并且应该支持日志聚合
    并且应该提供性能分析

  # US-065: 自动扩缩容
  场景: 自动扩缩容
    假设我需要实现自动扩缩容
    当我配置HPA和VPA时:
      """
      # hpa.yaml
      apiVersion: autoscaling/v2
      kind: HorizontalPodAutoscaler
      metadata:
        name: myapp-hpa
        namespace: production
      spec:
        scaleTargetRef:
          apiVersion: apps/v1
          kind: Deployment
          name: myapp
        minReplicas: 2
        maxReplicas: 20
        metrics:
        - type: Resource
          resource:
            name: cpu
            target:
              type: Utilization
              averageUtilization: 70
        - type: Resource
          resource:
            name: memory
            target:
              type: Utilization
              averageUtilization: 80
        - type: Pods
          pods:
            metric:
              name: http_requests_per_second
            target:
              type: AverageValue
              averageValue: "100"
        behavior:
          scaleUp:
            stabilizationWindowSeconds: 60
            policies:
            - type: Percent
              value: 100
              periodSeconds: 15
            - type: Pods
              value: 2
              periodSeconds: 60
          scaleDown:
            stabilizationWindowSeconds: 300
            policies:
            - type: Percent
              value: 10
              periodSeconds: 60
      
      # vpa.yaml
      apiVersion: autoscaling.k8s.io/v1
      kind: VerticalPodAutoscaler
      metadata:
        name: myapp-vpa
        namespace: production
      spec:
        targetRef:
          apiVersion: apps/v1
          kind: Deployment
          name: myapp
        updatePolicy:
          updateMode: "Auto"
        resourcePolicy:
          containerPolicies:
          - containerName: myapp
            minAllowed:
              cpu: 100m
              memory: 128Mi
            maxAllowed:
              cpu: 2
              memory: 2Gi
            controlledResources: ["cpu", "memory"]
      
      # cluster-autoscaler.yaml
      apiVersion: apps/v1
      kind: Deployment
      metadata:
        name: cluster-autoscaler
        namespace: kube-system
      spec:
        template:
          spec:
            containers:
            # k8s.gcr.io 镜像仓库已冻结弃用，改用 registry.k8s.io
            - image: registry.k8s.io/autoscaling/cluster-autoscaler:v1.21.0
              name: cluster-autoscaler
              command:
              - ./cluster-autoscaler
              - --v=4
              - --stderrthreshold=info
              - --cloud-provider=aws
              - --skip-nodes-with-local-storage=false
              - --expander=least-waste
              - --node-group-auto-discovery=asg:tag=k8s.io/cluster-autoscaler/enabled,k8s.io/cluster-autoscaler/myapp-cluster
              - --balance-similar-node-groups
              - --scale-down-enabled=true
              - --scale-down-delay-after-add=10m
              - --scale-down-unneeded-time=10m
      """
    那么应该智能调整资源:
      """
      {
        "autoscaling": {
          "horizontal": {
            "currentReplicas": 5,
            "desiredReplicas": 5,
            "minReplicas": 2,
            "maxReplicas": 20,
            "metrics": {
              "cpu": { "current": "65%", "target": "70%" },
              "memory": { "current": "72%", "target": "80%" },
              "rps": { "current": "85", "target": "100" }
            },
            "lastScaleTime": "2024-01-15T10:15:00Z",
            "scaleEvents": [
              {
                "time": "2024-01-15T10:15:00Z",
                "action": "scale_up",
                "from": 3,
                "to": 5,
                "reason": "cpu utilization above target"
              }
            ]
          },
          "vertical": {
            "recommendations": {
              "cpu": { "target": "350m", "lowerBound": "250m", "upperBound": "500m" },
              "memory": { "target": "384Mi", "lowerBound": "256Mi", "upperBound": "512Mi" }
            },
            "lastUpdate": "2024-01-15T09:30:00Z",
            "updateMode": "Auto"
          },
          "cluster": {
            "nodes": {
              "desired": 6,
              "ready": 6,
              "utilization": {
                "cpu": "68%",
                "memory": "74%"
              }
            },
            "lastScaleEvent": {
              "time": "2024-01-15T09:45:00Z",
              "action": "scale_up",
              "nodeGroup": "worker-nodes",
              "reason": "insufficient resources"
            }
          }
        }
      }
      """
    并且应该支持多维度扩缩容
    并且应该优化资源利用率
    并且应该支持集群级扩缩容
    并且应该提供扩缩容历史