1. Docker 镜像构建

1.1 多阶段构建策略

# Dockerfile.multi-stage
# Multi-stage build: GraalVM builder stage + minimal distroless runtime.

# ===== Build stage =====
FROM ghcr.io/graalvm/graalvm-ce:ol8-java17-22.3.0 AS builder

# Install required build tools and the native-image component
RUN microdnf install -y findutils
RUN gu install native-image

WORKDIR /app

# Copy only the Maven descriptors first so the dependency layer is cached
# until pom.xml changes
COPY pom.xml .
COPY .mvn .mvn
COPY mvnw .

# Pre-fetch dependencies (leverages Docker layer caching)
RUN ./mvnw dependency:go-offline -B

# Copy the sources
COPY src src

# Compile and package
RUN ./mvnw clean package -DskipTests -B

# Build the native executable
RUN ./mvnw -Pnative native:compile -DskipTests

# ===== Runtime stage =====
# distroless/base ships glibc, which a dynamically linked native image needs
FROM gcr.io/distroless/base-debian11:latest AS runtime

# Timezone for log timestamps
ENV TZ=Asia/Shanghai

# Switch to the pre-existing unprivileged user shipped with distroless
# (distroless has no shell, so no user can be *created* here)
USER nonroot:nonroot

WORKDIR /app

# Copy the native executable from the build stage
COPY --from=builder /app/target/spring-native-demo /app/spring-native-demo

EXPOSE 8080

# No HEALTHCHECK: distroless has no shell or curl, and the original check
# exec'd the application binary again — starting a *second* server instance
# (port clash) instead of probing the running one. Health checking is
# delegated to the orchestrator (e.g. Kubernetes probes).

# Launch the application
ENTRYPOINT ["./spring-native-demo"]

1.2 优化的 Dockerfile

# Dockerfile.optimized
# Production Dockerfile: cached dependency stage, tested build stage, and a
# minimal distroless runtime stage.

# ===== Base image =====
FROM ghcr.io/graalvm/graalvm-ce:ol8-java17-22.3.0 AS base

# Install system dependencies; clean the package cache to keep the layer small
RUN microdnf update -y && \
    microdnf install -y \
        findutils \
        gzip \
        tar \
        which && \
    microdnf clean all

# Install the GraalVM native-image component
RUN gu install native-image

# ===== Dependency cache stage =====
FROM base AS dependencies

WORKDIR /app

# Copy only the build descriptors so this layer is reused until pom.xml changes
COPY pom.xml .
COPY .mvn .mvn
COPY mvnw .

# Pre-fetch all dependencies
RUN ./mvnw dependency:go-offline -B

# ===== Build stage =====
FROM dependencies AS builder

# Copy the sources
COPY src src

# Compile
RUN ./mvnw clean compile -B

# Run the test suite before producing the artifact
RUN ./mvnw test -B

# Package (tests already ran above)
RUN ./mvnw package -DskipTests -B

# Build the native executable
# NOTE(review): the -Dspring.native.* properties belong to the legacy
# Spring Native project, and the build-image builder property does not
# affect the native:compile goal — confirm these are meaningful for your
# Spring Boot version.
RUN ./mvnw -Pnative native:compile \
    -Dspring-boot.build-image.builder=paketobuildpacks/builder:tiny \
    -Dspring.native.remove-unused-autoconfig=true \
    -Dspring.native.remove-yaml-support=false

# ===== Minimal runtime image =====
# NOTE(review): distroless/static contains no glibc. A default GraalVM
# native image is dynamically linked and will not start here — either build
# with --static or switch to gcr.io/distroless/base-debian11. Confirm how
# the binary is linked before shipping.
FROM gcr.io/distroless/static-debian11:latest AS runtime

# Image metadata
LABEL maintainer="your-email@example.com" \
      version="1.0.0" \
      description="Spring Native Demo Application" \
      org.opencontainers.image.source="https://github.com/your-org/spring-native-demo"

# Runtime environment
ENV SPRING_PROFILES_ACTIVE=prod \
    TZ=Asia/Shanghai \
    LANG=C.UTF-8 \
    LC_ALL=C.UTF-8

WORKDIR /app

# Copy the native executable from the build stage
COPY --from=builder /app/target/spring-native-demo /app/spring-native-demo

# Run as the unprivileged "nobody" uid/gid
USER 65534:65534

EXPOSE 8080

# No HEALTHCHECK: the original mixed exec-form JSON with `|| exit 1`, which
# silently degrades the whole CMD to shell form — distroless has no shell,
# so the check could never pass. It also re-launched the application binary
# instead of probing the running instance. Health checking is delegated to
# the orchestrator (Kubernetes probes).

# Launch the application; CMD supplies a default, overridable argument
ENTRYPOINT ["/app/spring-native-demo"]
CMD ["--server.port=8080"]

1.3 构建脚本

#!/bin/bash
# docker-build.sh - Docker image build script

set -e

# --- Build configuration (readonly: constants for the whole run) ---
readonly APP_NAME="spring-native-demo"    # kept for parity with k8s-deploy.sh (unused here)
readonly IMAGE_NAME="spring-native-demo"
readonly IMAGE_TAG="latest"
readonly REGISTRY="your-registry.com"
readonly DOCKERFILE="Dockerfile.optimized"

# --- ANSI color codes for log output ---
readonly RED='\033[0;31m'
readonly GREEN='\033[0;32m'
readonly YELLOW='\033[1;33m'
readonly BLUE='\033[0;34m'
readonly NC='\033[0m'   # reset / no color

# --- Logging helpers -------------------------------------------------------
# Each prints a colored "[LEVEL] message" line to stdout.

# $1 = color code, $2 = level label, $3 = message
_log() {
    echo -e "${1}[${2}]${NC} ${3}"
}

log_info()    { _log "${BLUE}" "INFO" "$1"; }
log_success() { _log "${GREEN}" "SUCCESS" "$1"; }
log_warning() { _log "${YELLOW}" "WARNING" "$1"; }
log_error()   { _log "${RED}" "ERROR" "$1"; }

# Verify that the Docker CLI is installed and the daemon is reachable;
# exits the script otherwise.
check_docker() {
    log_info "检查 Docker 环境..."

    command -v docker > /dev/null 2>&1 || {
        log_error "Docker 未安装或不在 PATH 中"
        exit 1
    }

    docker info > /dev/null 2>&1 || {
        log_error "Docker 守护进程未运行"
        exit 1
    }

    log_success "Docker 环境检查通过"
}

# Remove dangling images, then any $IMAGE_NAME tags other than $IMAGE_TAG.
clean_old_images() {
    log_info "清理旧镜像..."

    # Remove dangling (untagged) images
    docker image prune -f

    # Use a headerless format so no `tail -n +2` header-stripping is needed.
    # The original grepped *before* stripping the "table" header, which only
    # worked because the header happened not to contain the tag. The tag
    # match is also anchored (":tag$") so a tag merely containing the
    # current tag as a substring is not skipped by accident.
    OLD_IMAGES=$(docker images "$IMAGE_NAME" --format "{{.Repository}}:{{.Tag}}" | grep -v ":${IMAGE_TAG}\$" || true)
    if [ -n "$OLD_IMAGES" ]; then
        echo "$OLD_IMAGES" | xargs -r docker rmi
        log_success "已清理旧镜像"
    else
        log_info "没有需要清理的旧镜像"
    fi
}

# Build the image, tagging it with $IMAGE_TAG plus a timestamp tag, and
# report the wall-clock build duration.
build_image() {
    log_info "开始构建 Docker 镜像..."

    local start_ts
    start_ts=$(date +%s)

    # Collect the build arguments in an array so each option stays one word
    local build_args=(
        --file "$DOCKERFILE"
        --tag "$IMAGE_NAME:$IMAGE_TAG"
        --tag "$IMAGE_NAME:$(date +%Y%m%d-%H%M%S)"
        --build-arg BUILD_DATE="$(date -u +'%Y-%m-%dT%H:%M:%SZ')"
        --build-arg VCS_REF="$(git rev-parse --short HEAD 2>/dev/null || echo 'unknown')"
        --build-arg VERSION="$IMAGE_TAG"
        --progress=plain
    )
    docker build "${build_args[@]}" .

    local end_ts
    end_ts=$(date +%s)
    BUILD_TIME=$((end_ts - start_ts))

    log_success "镜像构建完成,耗时: ${BUILD_TIME}s"
}

# Confirm the freshly built image exists and print its basic metadata plus
# the top of its layer history.
verify_image() {
    log_info "验证镜像..."

    # `docker image inspect` exits non-zero when the image is absent —
    # more direct than grepping `docker images` output.
    if ! docker image inspect "$IMAGE_NAME:$IMAGE_TAG" > /dev/null 2>&1; then
        log_error "镜像构建失败"
        exit 1
    fi

    # Plain (non-"table") Go-template formats emit no header row, so the
    # original `tail -n +2` header-stripping is unnecessary.
    IMAGE_SIZE=$(docker images "$IMAGE_NAME:$IMAGE_TAG" --format "{{.Size}}")
    IMAGE_ID=$(docker images "$IMAGE_NAME:$IMAGE_TAG" --format "{{.ID}}")

    log_success "镜像验证通过"
    log_info "镜像 ID: $IMAGE_ID"
    log_info "镜像大小: $IMAGE_SIZE"

    # Show the first layers of the image history
    log_info "镜像层信息:"
    docker history "$IMAGE_NAME:$IMAGE_TAG" --format "table {{.CreatedBy}}\t{{.Size}}" | head -10
}

# Scan the built image with Trivy when available; otherwise warn and skip.
security_scan() {
    log_info "执行安全扫描..."

    if ! command -v trivy > /dev/null 2>&1; then
        log_warning "Trivy 未安装,跳过安全扫描"
        return 0
    fi

    trivy image "$IMAGE_NAME:$IMAGE_TAG"
}

# Smoke-test the image: run a container, poll the health endpoint until it
# responds (or times out), then clean the container up.
test_image() {
    log_info "测试镜像..."

    local container_id
    container_id=$(docker run -d -p 8080:8080 "$IMAGE_NAME:$IMAGE_TAG")

    # Poll instead of a fixed `sleep 10`: native images usually start in
    # well under a second, so this normally finishes much faster, while
    # still allowing up to 30s for slow hosts.
    log_info "等待应用启动..."
    local healthy=0 attempt
    for ((attempt = 1; attempt <= 30; attempt++)); do
        if curl -fs http://localhost:8080/actuator/health > /dev/null 2>&1; then
            healthy=1
            break
        fi
        sleep 1
    done

    if [ "$healthy" -eq 1 ]; then
        log_success "健康检查通过"
    else
        log_error "健康检查失败"
        docker logs "$container_id"
        docker stop "$container_id"
        docker rm "$container_id"
        exit 1
    fi

    # Clean up the test container
    docker stop "$container_id"
    docker rm "$container_id"

    log_success "镜像测试通过"
}

# Tag and push the image to $REGISTRY — but only when invoked with --push;
# any other invocation is a silent no-op.
push_image() {
    [ "${1:-}" = "--push" ] || return 0

    log_info "推送镜像到仓库..."

    docker tag "$IMAGE_NAME:$IMAGE_TAG" "$REGISTRY/$IMAGE_NAME:$IMAGE_TAG"
    docker push "$REGISTRY/$IMAGE_NAME:$IMAGE_TAG"

    log_success "镜像推送完成"
}

# Generate deployment manifests (docker-compose + Kubernetes) into deploy/.
# Both here-docs use an UNQUOTED EOF delimiter, so $IMAGE_NAME/$IMAGE_TAG
# expand at generation time; any literal `$` added to the templates would
# need escaping.
generate_manifests() {
    log_info "生成部署清单..."
    
    mkdir -p deploy
    
    # Generate docker-compose.yml
    # NOTE(review): the compose healthcheck below execs the app binary a
    # second time, which starts another server instance rather than probing
    # the running one — confirm the intended health command.
    cat > deploy/docker-compose.yml << EOF
version: '3.8'

services:
  spring-native-demo:
    image: $IMAGE_NAME:$IMAGE_TAG
    container_name: spring-native-demo
    ports:
      - "8080:8080"
    environment:
      - SPRING_PROFILES_ACTIVE=prod
      - TZ=Asia/Shanghai
    healthcheck:
      test: ["CMD", "./spring-native-demo", "--management.endpoint.health.enabled=true"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 30s
    restart: unless-stopped
    networks:
      - app-network
    logging:
      driver: "json-file"
      options:
        max-size: "10m"
        max-file: "3"

networks:
  app-network:
    driver: bridge
EOF
    
    # Generate the Kubernetes deployment manifest
    cat > deploy/k8s-deployment.yaml << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  name: spring-native-demo
  labels:
    app: spring-native-demo
spec:
  replicas: 3
  selector:
    matchLabels:
      app: spring-native-demo
  template:
    metadata:
      labels:
        app: spring-native-demo
    spec:
      containers:
      - name: spring-native-demo
        image: $IMAGE_NAME:$IMAGE_TAG
        ports:
        - containerPort: 8080
        env:
        - name: SPRING_PROFILES_ACTIVE
          value: "prod"
        - name: TZ
          value: "Asia/Shanghai"
        resources:
          requests:
            memory: "64Mi"
            cpu: "100m"
          limits:
            memory: "128Mi"
            cpu: "500m"
        livenessProbe:
          httpGet:
            path: /actuator/health
            port: 8080
          initialDelaySeconds: 30
          periodSeconds: 30
        readinessProbe:
          httpGet:
            path: /actuator/health
            port: 8080
          initialDelaySeconds: 5
          periodSeconds: 5
---
apiVersion: v1
kind: Service
metadata:
  name: spring-native-demo-service
spec:
  selector:
    app: spring-native-demo
  ports:
  - protocol: TCP
    port: 80
    targetPort: 8080
  type: LoadBalancer
EOF
    
    log_success "部署清单已生成到 deploy/ 目录"
}

# Write a timestamped build report into the current directory. The unquoted
# EOF here-doc expands every $(...) command substitution at report-generation
# time, so the docker/hostname/uname commands all run when this is called.
generate_report() {
    log_info "生成构建报告..."
    
    REPORT_FILE="build-report-$(date +%Y%m%d-%H%M%S).txt"
    
    cat > "$REPORT_FILE" << EOF
=== Docker 镜像构建报告 ===
构建时间: $(date)
镜像名称: $IMAGE_NAME:$IMAGE_TAG
镜像 ID: $(docker images "$IMAGE_NAME:$IMAGE_TAG" --format "{{.ID}}")
镜像大小: $(docker images "$IMAGE_NAME:$IMAGE_TAG" --format "{{.Size}}")

=== 构建配置 ===
Dockerfile: $DOCKERFILE
基础镜像: ghcr.io/graalvm/graalvm-ce:ol8-java17-22.3.0
运行时镜像: gcr.io/distroless/static-debian11:latest

=== 镜像层信息 ===
$(docker history "$IMAGE_NAME:$IMAGE_TAG" --format "table {{.CreatedBy}}\t{{.Size}}")

=== 系统信息 ===
Docker 版本: $(docker --version)
构建主机: $(hostname)
操作系统: $(uname -a)
EOF
    
    log_success "构建报告已生成: $REPORT_FILE"
}

# Entry point: run the whole build pipeline in order. CLI arguments are
# forwarded to push_image (pass --push to also publish the image).
main() {
    log_info "开始 Docker 镜像构建流程"

    local step
    for step in check_docker clean_old_images build_image verify_image security_scan test_image; do
        "$step"
    done

    push_image "$@"
    generate_manifests
    generate_report

    log_success "Docker 镜像构建完成!"
    log_info "镜像名称: $IMAGE_NAME:$IMAGE_TAG"
    log_info "运行命令: docker run -p 8080:8080 $IMAGE_NAME:$IMAGE_TAG"
}

# Error handling — `set -E` makes the ERR trap inherit into functions and
# subshells; without it the trap only fires for failures at the top level,
# so errors inside the functions above would go unreported.
set -E
trap 'log_error "构建过程中发生错误,退出码: $?"' ERR

# Run the main entry point
main "$@"

2. Kubernetes 部署

2.1 完整的 K8s 部署清单

# k8s-complete.yaml
# Complete Kubernetes deployment manifests for the Spring Native demo

# Namespace — every resource below lives here. The `name` label allows
# selection via namespaceSelector (the same convention other policies use).
apiVersion: v1
kind: Namespace
metadata:
  name: spring-native
  labels:
    name: spring-native
---
# ConfigMap — full Spring configuration, mounted read-only into the pod at
# /app/config. Everything under `application.yml: |` is literal file content
# (a YAML block scalar), not manifest structure — do not add YAML comments
# inside it.
apiVersion: v1
kind: ConfigMap
metadata:
  name: spring-native-config
  namespace: spring-native
data:
  application.yml: |
    spring:
      application:
        name: spring-native-demo
      profiles:
        active: prod
      datasource:
        url: jdbc:h2:mem:testdb
        driver-class-name: org.h2.Driver
        username: sa
        password: 
      jpa:
        hibernate:
          ddl-auto: create-drop
        show-sql: false
    
    management:
      endpoints:
        web:
          exposure:
            include: health,info,metrics,prometheus
      endpoint:
        health:
          show-details: always
    
    logging:
      level:
        com.example.demo: INFO
        org.springframework: WARN
      pattern:
        console: "%d{HH:mm:ss.SSS} [%thread] %-5level %logger{36} - %msg%n"

---
# Secret — demo credentials consumed via secretKeyRef in the Deployment.
# NOTE: base64 is encoding, not encryption — never commit real secrets in
# manifests; use a secret manager (sealed-secrets, external-secrets, vault).
apiVersion: v1
kind: Secret
metadata:
  name: spring-native-secret
  namespace: spring-native
type: Opaque
data:
  # base64 encoded values
  database-password: cGFzc3dvcmQ=  # password
  api-key: YWJjZGVmZ2hpams=  # abcdefghijk

---
# Deployment — 3 replicas behind a rolling update; pods are annotated for
# Prometheus scraping and locked down (non-root, read-only root FS).
apiVersion: apps/v1
kind: Deployment
metadata:
  name: spring-native-demo
  namespace: spring-native
  labels:
    app: spring-native-demo
    version: v1
spec:
  replicas: 3
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
  selector:
    matchLabels:
      app: spring-native-demo
  template:
    metadata:
      labels:
        app: spring-native-demo
        version: v1
      annotations:
        # Prometheus discovery hints
        prometheus.io/scrape: "true"
        prometheus.io/port: "8080"
        prometheus.io/path: "/actuator/prometheus"
    spec:
      serviceAccountName: spring-native-sa
      # Run as the "nobody" uid/gid
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
        fsGroup: 65534
      containers:
      - name: spring-native-demo
        # NOTE(review): `latest` + imagePullPolicy: Always requires a
        # registry the nodes can pull from; pin a real tag in production.
        image: spring-native-demo:latest
        imagePullPolicy: Always
        ports:
        - name: http
          containerPort: 8080
          protocol: TCP
        env:
        - name: SPRING_PROFILES_ACTIVE
          value: "prod"
        - name: TZ
          value: "Asia/Shanghai"
        # NOTE(review): a GraalVM native executable does not read JVM -XX
        # options; this env var is presumably ignored — confirm before
        # relying on it for memory tuning.
        - name: JAVA_OPTS
          value: "-XX:+UseContainerSupport -XX:MaxRAMPercentage=75.0"
        - name: DATABASE_PASSWORD
          valueFrom:
            secretKeyRef:
              name: spring-native-secret
              key: database-password
        - name: API_KEY
          valueFrom:
            secretKeyRef:
              name: spring-native-secret
              key: api-key
        resources:
          requests:
            memory: "64Mi"
            cpu: "100m"
            ephemeral-storage: "1Gi"
          limits:
            memory: "128Mi"
            cpu: "500m"
            ephemeral-storage: "2Gi"
        livenessProbe:
          httpGet:
            path: /actuator/health/liveness
            port: http
          initialDelaySeconds: 30
          periodSeconds: 30
          timeoutSeconds: 5
          failureThreshold: 3
        readinessProbe:
          httpGet:
            path: /actuator/health/readiness
            port: http
          initialDelaySeconds: 5
          periodSeconds: 5
          timeoutSeconds: 3
          failureThreshold: 3
        # Allows up to ~300s (30 x 10s) for first start before the
        # liveness probe takes over
        startupProbe:
          httpGet:
            path: /actuator/health
            port: http
          initialDelaySeconds: 10
          periodSeconds: 10
          timeoutSeconds: 3
          failureThreshold: 30
        volumeMounts:
        - name: config-volume
          mountPath: /app/config
          readOnly: true
        # Writable /tmp is required because the root FS is read-only below
        - name: tmp-volume
          mountPath: /tmp
        securityContext:
          allowPrivilegeEscalation: false
          readOnlyRootFilesystem: true
          capabilities:
            drop:
            - ALL
      volumes:
      - name: config-volume
        configMap:
          name: spring-native-config
      - name: tmp-volume
        emptyDir: {}
      restartPolicy: Always
      terminationGracePeriodSeconds: 30
      dnsPolicy: ClusterFirst
      # Prefer spreading replicas across nodes (soft anti-affinity)
      affinity:
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchExpressions:
                - key: app
                  operator: In
                  values:
                  - spring-native-demo
              topologyKey: kubernetes.io/hostname

---
# Service — cluster-internal virtual IP; port 80 routes to the pods' named
# container port `http` (8080). External traffic reaches it via the Ingress.
apiVersion: v1
kind: Service
metadata:
  name: spring-native-service
  namespace: spring-native
  labels:
    app: spring-native-demo
spec:
  type: ClusterIP
  ports:
  - port: 80
    targetPort: http
    protocol: TCP
    name: http
  selector:
    app: spring-native-demo

---
# Ingress — TLS-terminated external entry point with cert-manager-issued
# certificate and forced HTTPS redirect.
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: spring-native-ingress
  namespace: spring-native
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
    cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
  # networking.k8s.io/v1 selects the controller via spec.ingressClassName;
  # the old `kubernetes.io/ingress.class` annotation is deprecated.
  ingressClassName: nginx
  tls:
  - hosts:
    - spring-native.example.com
    secretName: spring-native-tls
  rules:
  - host: spring-native.example.com
    http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: spring-native-service
            port:
              number: 80

---
# ServiceAccount — dedicated identity for the app pods (referenced by the
# Deployment's serviceAccountName).
apiVersion: v1
kind: ServiceAccount
metadata:
  name: spring-native-sa
  namespace: spring-native

---
# HorizontalPodAutoscaler — scales the Deployment between 2 and 10 replicas
# on CPU (70%) and memory (80%) utilization; scale-down is deliberately
# conservative (max 10%/minute after a 5-minute stabilization window).
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
  name: spring-native-hpa
  namespace: spring-native
spec:
  scaleTargetRef:
    apiVersion: apps/v1
    kind: Deployment
    name: spring-native-demo
  minReplicas: 2
  maxReplicas: 10
  metrics:
  - type: Resource
    resource:
      name: cpu
      target:
        type: Utilization
        averageUtilization: 70
  - type: Resource
    resource:
      name: memory
      target:
        type: Utilization
        averageUtilization: 80
  behavior:
    scaleDown:
      stabilizationWindowSeconds: 300
      policies:
      - type: Percent
        value: 10
        periodSeconds: 60
    scaleUp:
      stabilizationWindowSeconds: 60
      policies:
      - type: Percent
        value: 50
        periodSeconds: 60
      - type: Pods
        value: 2
        periodSeconds: 60

---
# PodDisruptionBudget — keep at least one pod alive during voluntary
# disruptions (node drains, cluster upgrades).
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: spring-native-pdb
  namespace: spring-native
spec:
  minAvailable: 1
  selector:
    matchLabels:
      app: spring-native-demo

---
# NetworkPolicy — default-deny both directions for the app pods, then allow:
#   ingress: port 8080 from the ingress-nginx and monitoring namespaces
#   egress:  DNS (53 tcp+udp) anywhere, plus traffic to kube-system
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: spring-native-netpol
  namespace: spring-native
spec:
  podSelector:
    matchLabels:
      app: spring-native-demo
  policyTypes:
  - Ingress
  - Egress
  ingress:
  - from:
    - namespaceSelector:
        matchLabels:
          name: ingress-nginx
    - namespaceSelector:
        matchLabels:
          name: monitoring
    ports:
    - protocol: TCP
      port: 8080
  egress:
  # DNS resolution to any destination
  - to: []
    ports:
    - protocol: TCP
      port: 53
    - protocol: UDP
      port: 53
  # NOTE(review): this rule carries no port restriction — confirm the app
  # really needs unrestricted access to kube-system.
  - to:
    - namespaceSelector:
        matchLabels:
          name: kube-system
2.2 Helm Chart

# helm/Chart.yaml
# Chart metadata: `version` is the chart's own release version, while
# `appVersion` is the application image version it deploys by default
# (used as the image tag fallback in the deployment template).
apiVersion: v2
name: spring-native-demo
description: A Helm chart for Spring Native Demo application
type: application
version: 0.1.0
appVersion: "1.0.0"
keywords:
  - spring
  - native
  - graalvm
  - microservice
home: https://github.com/your-org/spring-native-demo
sources:
  - https://github.com/your-org/spring-native-demo
maintainers:
  - name: Your Name
    email: your-email@example.com
# helm/values.yaml
# Default values for spring-native-demo
# replicaCount only applies when autoscaling.enabled is false (see the
# deployment template, which omits spec.replicas under the HPA).
replicaCount: 3

image:
  repository: spring-native-demo
  pullPolicy: Always
  # Empty/omitted tag falls back to Chart.appVersion in the template
  tag: "latest"

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""

serviceAccount:
  create: true
  annotations: {}
  name: ""

# Prometheus discovery hints, rendered as pod annotations
podAnnotations:
  prometheus.io/scrape: "true"
  prometheus.io/port: "8080"
  prometheus.io/path: "/actuator/prometheus"

podSecurityContext:
  runAsNonRoot: true
  runAsUser: 65534
  fsGroup: 65534

securityContext:
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  capabilities:
    drop:
    - ALL

service:
  type: ClusterIP
  port: 80
  targetPort: 8080

ingress:
  enabled: true
  className: "nginx"
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
    nginx.ingress.kubernetes.io/ssl-redirect: "true"
    cert-manager.io/cluster-issuer: letsencrypt-prod
  hosts:
    - host: spring-native.example.com
      paths:
        - path: /
          pathType: Prefix
  tls:
    - secretName: spring-native-tls
      hosts:
        - spring-native.example.com

resources:
  limits:
    cpu: 500m
    memory: 128Mi
    ephemeral-storage: 2Gi
  requests:
    cpu: 100m
    memory: 64Mi
    ephemeral-storage: 1Gi

autoscaling:
  enabled: true
  minReplicas: 2
  maxReplicas: 10
  targetCPUUtilizationPercentage: 70
  targetMemoryUtilizationPercentage: 80

nodeSelector: {}

tolerations: []

# Soft anti-affinity: prefer spreading replicas across nodes
affinity:
  podAntiAffinity:
    preferredDuringSchedulingIgnoredDuringExecution:
    - weight: 100
      podAffinityTerm:
        labelSelector:
          matchExpressions:
          - key: app.kubernetes.io/name
            operator: In
            values:
            - spring-native-demo
        topologyKey: kubernetes.io/hostname

# Application configuration
config:
  spring:
    profiles:
      active: prod
    datasource:
      url: jdbc:h2:mem:testdb
      username: sa
      password: ""
  management:
    endpoints:
      web:
        exposure:
          include: health,info,metrics,prometheus
    endpoint:
      health:
        show-details: always
  logging:
    level:
      com.example.demo: INFO
      org.springframework: WARN

# Secrets
# NOTE(review): plaintext secrets in values files end up in VCS and in the
# rendered release manifest — prefer --set-file, an external secret store,
# or sealed secrets in production.
secrets:
  databasePassword: "password"
  apiKey: "abcdefghijk"

# Probes (rendered verbatim into the container spec via toYaml)
probes:
  liveness:
    httpGet:
      path: /actuator/health/liveness
      port: http
    initialDelaySeconds: 30
    periodSeconds: 30
    timeoutSeconds: 5
    failureThreshold: 3
  readiness:
    httpGet:
      path: /actuator/health/readiness
      port: http
    initialDelaySeconds: 5
    periodSeconds: 5
    timeoutSeconds: 3
    failureThreshold: 3
  startup:
    httpGet:
      path: /actuator/health
      port: http
    initialDelaySeconds: 10
    periodSeconds: 10
    timeoutSeconds: 3
    failureThreshold: 30

# Pod Disruption Budget
podDisruptionBudget:
  enabled: true
  minAvailable: 1

# Network Policy
networkPolicy:
  enabled: true
  ingress:
    - from:
      - namespaceSelector:
          matchLabels:
            name: ingress-nginx
      - namespaceSelector:
          matchLabels:
            name: monitoring
      ports:
      - protocol: TCP
        port: 8080
  egress:
    - to: []
      ports:
      - protocol: TCP
        port: 53
      - protocol: UDP
        port: 53
# helm/templates/deployment.yaml
# Renders the application Deployment. Identity, security contexts, probes
# and resources all come from values.yaml; config and /tmp are mounted as
# volumes so the container works with readOnlyRootFilesystem.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: {{ include "spring-native-demo.fullname" . }}
  labels:
    {{- include "spring-native-demo.labels" . | nindent 4 }}
spec:
  # spec.replicas is omitted when the HPA is enabled so the autoscaler
  # owns the replica count
  {{- if not .Values.autoscaling.enabled }}
  replicas: {{ .Values.replicaCount }}
  {{- end }}
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
  selector:
    matchLabels:
      {{- include "spring-native-demo.selectorLabels" . | nindent 6 }}
  template:
    metadata:
      {{- with .Values.podAnnotations }}
      annotations:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      labels:
        {{- include "spring-native-demo.selectorLabels" . | nindent 8 }}
    spec:
      {{- with .Values.imagePullSecrets }}
      imagePullSecrets:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      serviceAccountName: {{ include "spring-native-demo.serviceAccountName" . }}
      securityContext:
        {{- toYaml .Values.podSecurityContext | nindent 8 }}
      containers:
        - name: {{ .Chart.Name }}
          securityContext:
            {{- toYaml .Values.securityContext | nindent 12 }}
          # Falls back to Chart.appVersion when .Values.image.tag is empty
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          ports:
            - name: http
              containerPort: 8080
              protocol: TCP
          env:
            - name: SPRING_PROFILES_ACTIVE
              value: {{ .Values.config.spring.profiles.active }}
            - name: TZ
              value: "Asia/Shanghai"
            # Credentials come from the chart-managed Secret
            - name: DATABASE_PASSWORD
              valueFrom:
                secretKeyRef:
                  name: {{ include "spring-native-demo.fullname" . }}-secret
                  key: database-password
            - name: API_KEY
              valueFrom:
                secretKeyRef:
                  name: {{ include "spring-native-demo.fullname" . }}-secret
                  key: api-key
          livenessProbe:
            {{- toYaml .Values.probes.liveness | nindent 12 }}
          readinessProbe:
            {{- toYaml .Values.probes.readiness | nindent 12 }}
          startupProbe:
            {{- toYaml .Values.probes.startup | nindent 12 }}
          resources:
            {{- toYaml .Values.resources | nindent 12 }}
          volumeMounts:
            - name: config-volume
              mountPath: /app/config
              readOnly: true
            # Writable /tmp for the read-only root filesystem
            - name: tmp-volume
              mountPath: /tmp
      volumes:
        - name: config-volume
          configMap:
            name: {{ include "spring-native-demo.fullname" . }}-config
        - name: tmp-volume
          emptyDir: {}
      {{- with .Values.nodeSelector }}
      nodeSelector:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.affinity }}
      affinity:
        {{- toYaml . | nindent 8 }}
      {{- end }}
      {{- with .Values.tolerations }}
      tolerations:
        {{- toYaml . | nindent 8 }}
      {{- end }}

2.3 部署脚本

#!/bin/bash
# k8s-deploy.sh - Kubernetes deployment script

set -e

# --- Deployment configuration ---
NAMESPACE="spring-native"
APP_NAME="spring-native-demo"
IMAGE_TAG="latest"
HELM_RELEASE="spring-native-demo"
# Tilde does NOT expand inside quotes, so the original "~/.kube/config" was
# a literal path kubectl could never find. Use $HOME, keep any value already
# set in the environment, and export it so kubectl/helm child processes
# actually see it (an unexported variable has no effect on them).
export KUBECONFIG="${KUBECONFIG:-$HOME/.kube/config}"

# --- ANSI color codes for log output ---
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m'

# --- Logging helpers -------------------------------------------------------
# Each prints a colored "[LEVEL] message" line to stdout.

# $1 = color code, $2 = level label, $3 = message
_log() {
    echo -e "${1}[${2}]${NC} ${3}"
}

log_info()    { _log "${BLUE}" "INFO" "$1"; }
log_success() { _log "${GREEN}" "SUCCESS" "$1"; }
log_warning() { _log "${YELLOW}" "WARNING" "$1"; }
log_error()   { _log "${RED}" "ERROR" "$1"; }

# Verify that kubectl and helm are on PATH and that the cluster is
# reachable; exits the script otherwise.
check_environment() {
    log_info "检查部署环境..."

    command -v kubectl > /dev/null 2>&1 || {
        log_error "kubectl 未安装或不在 PATH 中"
        exit 1
    }

    command -v helm > /dev/null 2>&1 || {
        log_error "helm 未安装或不在 PATH 中"
        exit 1
    }

    kubectl cluster-info > /dev/null 2>&1 || {
        log_error "无法连接到 Kubernetes 集群"
        exit 1
    }

    log_success "环境检查通过"
}

# Create the target namespace (idempotent) and label it so
# namespaceSelector-based policies can match it.
create_namespace() {
    log_info "创建命名空间..."

    if kubectl get namespace "$NAMESPACE" > /dev/null 2>&1; then
        log_info "命名空间 $NAMESPACE 已存在"
        return 0
    fi

    kubectl create namespace "$NAMESPACE"
    kubectl label namespace "$NAMESPACE" name="$NAMESPACE"
    log_success "命名空间 $NAMESPACE 创建成功"
}

# Deploy via Helm when a chart directory exists; otherwise fall back to
# applying the raw manifest file with kubectl.
deploy_application() {
    log_info "部署应用..."

    if [ -d "helm" ]; then
        log_info "使用 Helm 部署应用..."

        # Quote the entire --set value: the unquoted [0] brackets form a
        # glob pattern that the shell could expand if a matching file
        # happened to exist in the working directory.
        helm upgrade --install "$HELM_RELEASE" ./helm \
            --namespace "$NAMESPACE" \
            --set image.tag="$IMAGE_TAG" \
            --set "ingress.hosts[0].host=spring-native.example.com" \
            --wait --timeout=300s

        log_success "Helm 部署完成"
    else
        log_info "使用 kubectl 部署应用..."

        if [ ! -f "k8s-complete.yaml" ]; then
            log_error "未找到部署清单文件"
            exit 1
        fi

        kubectl apply -f k8s-complete.yaml
        log_success "kubectl 部署完成"
    fi
}

# Block until the Deployment reports the Available condition (5-minute cap).
wait_for_deployment() {
    log_info "等待部署完成..."

    kubectl wait \
        --for=condition=available \
        --timeout=300s \
        deployment/"$APP_NAME" \
        -n "$NAMESPACE"

    log_success "部署已就绪"
}

# Verify the rollout: show pod/service/ingress status, then probe the
# health endpoint through a temporary port-forward.
verify_deployment() {
    log_info "验证部署..."

    # Make sure at least one app pod exists
    PODS=$(kubectl get pods -n "$NAMESPACE" -l app="$APP_NAME" --no-headers)
    if [ -z "$PODS" ]; then
        log_error "未找到应用 Pod"
        exit 1
    fi

    echo "Pod 状态:"
    kubectl get pods -n "$NAMESPACE" -l app="$APP_NAME"

    # printf instead of echo: plain `echo "\n..."` printed a literal
    # backslash-n (echo does not interpret escapes without -e)
    printf '\n%s\n' "服务状态:"
    kubectl get services -n "$NAMESPACE"

    printf '\n%s\n' "Ingress 状态:"
    kubectl get ingress -n "$NAMESPACE"

    log_info "执行健康检查..."

    # Temporary port-forward for an end-to-end probe
    kubectl port-forward -n "$NAMESPACE" service/spring-native-service 8080:80 &
    PORT_FORWARD_PID=$!

    sleep 5

    if curl -f http://localhost:8080/actuator/health > /dev/null 2>&1; then
        log_success "健康检查通过"
    else
        log_error "健康检查失败"
        kill "$PORT_FORWARD_PID" 2>/dev/null || true
        exit 1
    fi

    # Always reap the forwarder on the success path too
    kill "$PORT_FORWARD_PID" 2>/dev/null || true

    log_success "部署验证完成"
}

# Print a summary of the deployment plus copy/paste-ready helper commands.
show_deployment_info() {
    log_info "部署信息:"

    echo "命名空间: $NAMESPACE"
    echo "应用名称: $APP_NAME"
    echo "镜像标签: $IMAGE_TAG"

    # External endpoint (falls back to a placeholder when the ingress has
    # no address/host yet)
    INGRESS_IP=$(kubectl get ingress -n "$NAMESPACE" -o jsonpath='{.items[0].status.loadBalancer.ingress[0].ip}' 2>/dev/null || echo "未配置")
    INGRESS_HOST=$(kubectl get ingress -n "$NAMESPACE" -o jsonpath='{.items[0].spec.rules[0].host}' 2>/dev/null || echo "未配置")

    echo "访问地址: http://$INGRESS_HOST (IP: $INGRESS_IP)"

    # printf instead of echo: plain `echo "\n..."` printed a literal
    # backslash-n (echo does not interpret escapes without -e)
    printf '\n%s\n' "有用的命令:"
    echo "查看 Pod 日志: kubectl logs -f deployment/$APP_NAME -n $NAMESPACE"
    echo "查看 Pod 状态: kubectl get pods -n $NAMESPACE -l app=$APP_NAME"
    echo "端口转发: kubectl port-forward -n $NAMESPACE service/spring-native-service 8080:80"
    echo "删除部署: kubectl delete namespace $NAMESPACE"
}

# Roll back to the previous release — only when invoked with --rollback.
# Uses Helm history when a chart directory exists, kubectl rollout undo
# otherwise.
rollback_deployment() {
    [ "${1:-}" = "--rollback" ] || return 0

    log_info "回滚部署..."

    if [ -d "helm" ]; then
        helm rollback "$HELM_RELEASE" -n "$NAMESPACE"
    else
        kubectl rollout undo deployment/"$APP_NAME" -n "$NAMESPACE"
    fi

    log_success "回滚完成"
}

# Tear down the deployment — only when invoked with --cleanup. Uninstalls
# the Helm release (when the chart exists) and removes the namespace.
cleanup_deployment() {
    [ "${1:-}" = "--cleanup" ] || return 0

    log_info "清理部署..."

    if [ -d "helm" ]; then
        helm uninstall "$HELM_RELEASE" -n "$NAMESPACE"
    fi

    kubectl delete namespace "$NAMESPACE" --ignore-not-found=true

    log_success "清理完成"
}

# Entry point: dispatch the special flags (--rollback / --cleanup) or run
# the full deploy pipeline.
main() {
    log_info "开始 Kubernetes 部署流程"

    case "${1:-}" in
        --rollback)
            rollback_deployment "$1"
            return
            ;;
        --cleanup)
            cleanup_deployment "$1"
            return
            ;;
    esac

    check_environment
    create_namespace
    deploy_application
    wait_for_deployment
    verify_deployment
    show_deployment_info

    log_success "Kubernetes 部署完成!"
}

# Error handling — `set -E` makes the ERR trap inherit into functions and
# subshells; without it the trap only fires for failures at the top level,
# so errors inside the functions above would go unreported.
set -E
trap 'log_error "部署过程中发生错误,退出码: $?"' ERR

# Run the main entry point
main "$@"

通过本章的学习,你已经掌握了 Spring Native 应用的容器化部署方法。接下来我们将学习生产环境的最佳实践和故障排除。