Kubernetes with Ansible
Overview
Ansible provides comprehensive Kubernetes automation including cluster provisioning, application deployment, Helm chart management, Operators, and GitOps workflows.
Prerequisites
pip install kubernetes
ansible-galaxy collection install kubernetes.core
Note: the `openshift` Python package and the `community.kubernetes` collection are deprecated; the `kubernetes` package and the `kubernetes.core` collection replace them.
Complete Application Deployment
Deployment with Service and Ingress
---
# Deploys a full application stack: Namespace, ConfigMap, Secret,
# Deployment (with probes and resource limits), Service, and TLS Ingress.
# Secrets are pulled from Ansible Vault variables (vault_db_password,
# vault_api_key) — never hard-code them here.
- name: Deploy complete application stack
  hosts: localhost
  vars:
    app_name: myapp
    namespace: production
    replicas: 3
    # Parameterized so CI can override; "latest" is mutable — pin a
    # specific tag in production.
    image_tag: latest
    # Required by the Ingress/TLS tasks below; override per environment.
    app_domain: myapp.example.com
  tasks:
    - name: Create namespace
      kubernetes.core.k8s:
        name: "{{ namespace }}"
        api_version: v1
        kind: Namespace
        state: present

    - name: Create ConfigMap
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: ConfigMap
          metadata:
            name: "{{ app_name }}-config"
            namespace: "{{ namespace }}"
          data:
            APP_ENV: production
            LOG_LEVEL: info
            DATABASE_HOST: postgres.database.svc.cluster.local

    - name: Create Secret
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: Secret
          metadata:
            name: "{{ app_name }}-secret"
            namespace: "{{ namespace }}"
          type: Opaque
          # stringData lets us supply plain values; the API server
          # base64-encodes them into .data on write.
          stringData:
            DATABASE_PASSWORD: "{{ vault_db_password }}"
            API_KEY: "{{ vault_api_key }}"

    - name: Create Deployment
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: "{{ app_name }}"
            namespace: "{{ namespace }}"
          spec:
            # Cast to int — without jinja2_native the templated value is
            # the string "3", which the API server rejects for replicas.
            replicas: "{{ replicas | int }}"
            selector:
              matchLabels:
                app: "{{ app_name }}"
            template:
              metadata:
                labels:
                  app: "{{ app_name }}"
                  version: v1
              spec:
                containers:
                  - name: "{{ app_name }}"
                    image: "registry.example.com/{{ app_name }}:{{ image_tag }}"
                    ports:
                      - containerPort: 8080
                        name: http
                    envFrom:
                      - configMapRef:
                          name: "{{ app_name }}-config"
                      - secretRef:
                          name: "{{ app_name }}-secret"
                    resources:
                      requests:
                        memory: "128Mi"
                        cpu: "100m"
                      limits:
                        memory: "256Mi"
                        cpu: "500m"
                    livenessProbe:
                      httpGet:
                        path: /health
                        port: 8080
                      initialDelaySeconds: 30
                      periodSeconds: 10
                    readinessProbe:
                      httpGet:
                        path: /ready
                        port: 8080
                      initialDelaySeconds: 5
                      periodSeconds: 5

    - name: Create Service
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: Service
          metadata:
            name: "{{ app_name }}"
            namespace: "{{ namespace }}"
          spec:
            selector:
              app: "{{ app_name }}"
            ports:
              - protocol: TCP
                port: 80
                targetPort: 8080
            type: ClusterIP

    - name: Create Ingress
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: networking.k8s.io/v1
          kind: Ingress
          metadata:
            name: "{{ app_name }}"
            namespace: "{{ namespace }}"
            annotations:
              cert-manager.io/cluster-issuer: letsencrypt-prod
          spec:
            # spec.ingressClassName replaces the deprecated
            # kubernetes.io/ingress.class annotation in networking.k8s.io/v1.
            ingressClassName: nginx
            tls:
              - hosts:
                  - "{{ app_domain }}"
                secretName: "{{ app_name }}-tls"
            rules:
              - host: "{{ app_domain }}"
                http:
                  paths:
                    - path: /
                      pathType: Prefix
                      backend:
                        service:
                          name: "{{ app_name }}"
                          port:
                            number: 80
Helm Chart Management
Installing and Managing Helm Charts
---
# Manages Helm releases: repo registration, install, upgrade, inspect,
# and rollback for PostgreSQL and Redis (Bitnami charts).
- name: Manage Helm deployments
  hosts: localhost
  tasks:
    - name: Add Helm repository
      kubernetes.core.helm_repository:
        name: bitnami
        repo_url: https://charts.bitnami.com/bitnami

    - name: Deploy PostgreSQL with Helm
      kubernetes.core.helm:
        name: postgres
        chart_ref: bitnami/postgresql
        release_namespace: database
        create_namespace: true
        values:
          auth:
            postgresPassword: "{{ vault_postgres_password }}"
            database: myapp_db
          primary:
            persistence:
              enabled: true
              size: 10Gi
          metrics:
            enabled: true

    - name: Deploy Redis with Helm
      kubernetes.core.helm:
        name: redis
        chart_ref: bitnami/redis
        release_namespace: cache
        create_namespace: true
        values:
          architecture: replication
          auth:
            password: "{{ vault_redis_password }}"
          master:
            persistence:
              size: 8Gi
          replica:
            replicaCount: 2

    # Re-running the helm module with changed values performs an upgrade.
    - name: Upgrade Helm release
      kubernetes.core.helm:
        name: postgres
        chart_ref: bitnami/postgresql
        release_namespace: database
        state: present
        update_repo_cache: true
        values:
          primary:
            persistence:
              size: 20Gi

    - name: Get Helm release info
      kubernetes.core.helm_info:
        name: postgres
        release_namespace: database
      register: helm_status

    # kubernetes.core.helm has no rollback option; shell out to the
    # helm CLI for an explicit rollback to a known revision.
    - name: Rollback Helm release to revision 2
      ansible.builtin.command:
        cmd: helm rollback postgres 2 --namespace database
      changed_when: true
StatefulSets and DaemonSets
StatefulSet for Databases
---
# Deploys a 3-replica MongoDB StatefulSet with per-pod persistent storage
# plus the headless Service that gives each pod a stable DNS identity
# (mongodb-0.mongodb.database.svc, ...).
- name: Deploy StatefulSet
  hosts: localhost
  tasks:
    - name: Create StatefulSet for MongoDB
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: apps/v1
          kind: StatefulSet
          metadata:
            name: mongodb
            namespace: database
          spec:
            # Must match the headless Service name below.
            serviceName: mongodb
            replicas: 3
            selector:
              matchLabels:
                app: mongodb
            template:
              metadata:
                labels:
                  app: mongodb
              spec:
                containers:
                  - name: mongodb
                    image: mongo:6.0
                    ports:
                      - containerPort: 27017
                    volumeMounts:
                      - name: mongo-data
                        mountPath: /data/db
                    env:
                      - name: MONGO_INITDB_ROOT_USERNAME
                        value: admin
                      # Root password comes from a pre-existing Secret,
                      # never from a literal in the manifest.
                      - name: MONGO_INITDB_ROOT_PASSWORD
                        valueFrom:
                          secretKeyRef:
                            name: mongodb-secret
                            key: password
            # One PVC per replica, retained across pod rescheduling.
            volumeClaimTemplates:
              - metadata:
                  name: mongo-data
                spec:
                  accessModes: ["ReadWriteOnce"]
                  resources:
                    requests:
                      storage: 10Gi

    - name: Create Headless Service
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: Service
          metadata:
            name: mongodb
            namespace: database
          spec:
            # clusterIP: None makes this headless — DNS resolves to the
            # individual pod IPs rather than a virtual IP.
            clusterIP: None
            selector:
              app: mongodb
            ports:
              - port: 27017
                targetPort: 27017
DaemonSet for Monitoring
---
# Runs prometheus node-exporter on every node via a DaemonSet, with
# read-only host /proc and /sys mounts so it can report host metrics.
- name: Deploy monitoring agent on all nodes
  hosts: localhost
  tasks:
    - name: Create DaemonSet
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: apps/v1
          kind: DaemonSet
          metadata:
            name: node-exporter
            namespace: monitoring
          spec:
            selector:
              matchLabels:
                app: node-exporter
            template:
              metadata:
                labels:
                  app: node-exporter
              spec:
                # Host network/PID access is required to observe the node
                # itself rather than just the pod sandbox.
                hostNetwork: true
                hostPID: true
                containers:
                  - name: node-exporter
                    # NOTE(review): "latest" is mutable — pin a release
                    # tag (e.g. v1.8.x) for reproducible rollouts.
                    image: prom/node-exporter:latest
                    ports:
                      - containerPort: 9100
                    volumeMounts:
                      - name: proc
                        mountPath: /host/proc
                        readOnly: true
                      - name: sys
                        mountPath: /host/sys
                        readOnly: true
                volumes:
                  - name: proc
                    hostPath:
                      path: /proc
                  - name: sys
                    hostPath:
                      path: /sys
Kubernetes Operators
Deploying Operators
---
# Installs the kube-prometheus-stack (Prometheus Operator, Prometheus,
# Grafana) via Helm and registers a ServiceMonitor for the app.
- name: Deploy Prometheus Operator
  hosts: localhost
  tasks:
    - name: Create Operator namespace
      kubernetes.core.k8s:
        name: operators
        api_version: v1
        kind: Namespace
        state: present

    # The chart_ref below requires this repo to be registered first.
    - name: Add prometheus-community Helm repository
      kubernetes.core.helm_repository:
        name: prometheus-community
        repo_url: https://prometheus-community.github.io/helm-charts

    - name: Deploy Prometheus Operator with Helm
      kubernetes.core.helm:
        name: prometheus-operator
        chart_ref: prometheus-community/kube-prometheus-stack
        release_namespace: operators
        values:
          prometheus:
            prometheusSpec:
              retention: 30d
              storageSpec:
                volumeClaimTemplate:
                  spec:
                    accessModes: ["ReadWriteOnce"]
                    resources:
                      requests:
                        storage: 50Gi
          grafana:
            adminPassword: "{{ vault_grafana_password }}"
            ingress:
              enabled: true
              hosts:
                - grafana.example.com

    # Tells Prometheus to scrape the app's "metrics" port every 30s.
    - name: Create ServiceMonitor
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: monitoring.coreos.com/v1
          kind: ServiceMonitor
          metadata:
            name: myapp-monitor
            namespace: production
          spec:
            selector:
              matchLabels:
                app: myapp
            endpoints:
              - port: metrics
                interval: 30s
Custom Resource Definitions (CRDs)
---
# Defines a namespaced CRD (applications.example.com) with a minimal
# OpenAPI schema, then creates an instance of the new Application kind.
- name: Work with Custom Resources
  hosts: localhost
  tasks:
    - name: Create CRD
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: apiextensions.k8s.io/v1
          kind: CustomResourceDefinition
          metadata:
            # CRD name must be <plural>.<group>.
            name: applications.example.com
          spec:
            group: example.com
            versions:
              - name: v1
                served: true
                # Exactly one version may have storage: true.
                storage: true
                schema:
                  openAPIV3Schema:
                    type: object
                    properties:
                      spec:
                        type: object
                        properties:
                          replicas:
                            type: integer
                          image:
                            type: string
            scope: Namespaced
            names:
              plural: applications
              singular: application
              kind: Application

    - name: Create Custom Resource
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: example.com/v1
          kind: Application
          metadata:
            name: my-custom-app
            namespace: production
          spec:
            replicas: 3
            image: myapp:v1.0.0
RBAC and Security
Role-Based Access Control
---
# Least-privilege RBAC: a namespaced deploy-bot (write access to workloads
# in production) and a cluster-wide read-only role for Prometheus.
- name: Configure RBAC
  hosts: localhost
  tasks:
    - name: Create ServiceAccount
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: v1
          kind: ServiceAccount
          metadata:
            name: deploy-bot
            namespace: production

    - name: Create Role
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: Role
          metadata:
            name: deployment-manager
            namespace: production
          rules:
            # Write access to workloads; no delete verb by design.
            - apiGroups: ["apps"]
              resources: ["deployments", "replicasets"]
              verbs: ["get", "list", "watch", "create", "update", "patch"]
            # Read-only on core resources ("" is the core API group).
            - apiGroups: [""]
              resources: ["pods", "services"]
              verbs: ["get", "list", "watch"]

    - name: Create RoleBinding
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: RoleBinding
          metadata:
            name: deploy-bot-binding
            namespace: production
          subjects:
            - kind: ServiceAccount
              name: deploy-bot
              namespace: production
          roleRef:
            kind: Role
            name: deployment-manager
            apiGroup: rbac.authorization.k8s.io

    - name: Create ClusterRole for monitoring
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: ClusterRole
          metadata:
            name: monitoring-reader
          rules:
            - apiGroups: [""]
              resources: ["pods", "services", "endpoints"]
              verbs: ["get", "list", "watch"]
            - apiGroups: ["apps"]
              resources: ["deployments", "statefulsets"]
              verbs: ["get", "list", "watch"]

    - name: Create ClusterRoleBinding
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: rbac.authorization.k8s.io/v1
          kind: ClusterRoleBinding
          metadata:
            name: monitoring-reader-binding
          subjects:
            # Assumes the prometheus ServiceAccount already exists in the
            # monitoring namespace (created by the Prometheus install).
            - kind: ServiceAccount
              name: prometheus
              namespace: monitoring
          roleRef:
            kind: ClusterRole
            name: monitoring-reader
            apiGroup: rbac.authorization.k8s.io
GitOps with ArgoCD
Deploying ArgoCD
---
# Installs ArgoCD from the upstream manifest, exposes its UI via Ingress,
# and registers a self-healing Application that syncs from Git.
- name: Setup ArgoCD for GitOps
  hosts: localhost
  tasks:
    - name: Create ArgoCD namespace
      kubernetes.core.k8s:
        name: argocd
        api_version: v1
        kind: Namespace
        state: present

    # The k8s module's `src` option only accepts local file paths, so the
    # upstream multi-document manifest is fetched with the url lookup and
    # parsed with from_yaml_all instead.
    - name: Deploy ArgoCD
      kubernetes.core.k8s:
        state: present
        namespace: argocd
        definition: "{{ lookup('ansible.builtin.url',
          'https://raw.githubusercontent.com/argoproj/argo-cd/stable/manifests/install.yaml',
          split_lines=False) | from_yaml_all }}"

    - name: Create Ingress for ArgoCD
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: networking.k8s.io/v1
          kind: Ingress
          metadata:
            name: argocd-server
            namespace: argocd
            annotations:
              nginx.ingress.kubernetes.io/ssl-redirect: "true"
              # argocd-server terminates TLS itself, so nginx must speak
              # HTTPS to the backend.
              nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
          spec:
            ingressClassName: nginx
            rules:
              - host: argocd.example.com
                http:
                  paths:
                    - path: /
                      pathType: Prefix
                      backend:
                        service:
                          name: argocd-server
                          port:
                            number: 443

    - name: Create ArgoCD Application
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: argoproj.io/v1alpha1
          kind: Application
          metadata:
            name: myapp
            namespace: argocd
          spec:
            project: default
            source:
              repoURL: https://github.com/myorg/myapp-manifests
              targetRevision: HEAD
              path: kubernetes
            destination:
              server: https://kubernetes.default.svc
              namespace: production
            syncPolicy:
              # prune removes resources deleted from Git; selfHeal reverts
              # manual drift in the cluster.
              automated:
                prune: true
                selfHeal: true
              syncOptions:
                - CreateNamespace=true
CI/CD Pipeline Integration
Jenkins Pipeline for Kubernetes Deployment
---
# CI/CD deploy: build/push an image tagged with the CI build number,
# roll it out, wait for the rollout, then smoke-test the health endpoint.
# Expects BUILD_NUMBER in the environment and app_domain defined by the
# caller (e.g. --extra-vars).
- name: Deploy application via CI/CD
  hosts: localhost
  vars:
    image_tag: "{{ lookup('env', 'BUILD_NUMBER') }}"
    namespace: production
  tasks:
    - name: Build and push Docker image
      community.docker.docker_image:
        name: "registry.example.com/myapp"
        tag: "{{ image_tag }}"
        push: true
        source: build
        build:
          path: ./app
      delegate_to: localhost

    # Server-side strategic merge: only the container image is patched.
    - name: Update deployment image
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: myapp
            namespace: "{{ namespace }}"
          spec:
            template:
              spec:
                containers:
                  - name: myapp
                    image: "registry.example.com/myapp:{{ image_tag }}"

    - name: Wait for rollout to complete
      kubernetes.core.k8s_info:
        api_version: apps/v1
        kind: Deployment
        name: myapp
        namespace: "{{ namespace }}"
      register: deployment
      # default(0) guards early polls where status.updatedReplicas is not
      # yet populated and would otherwise break the until expression.
      until: >-
        (deployment.resources[0].status.updatedReplicas | default(0))
        == deployment.resources[0].spec.replicas
      retries: 10
      delay: 30

    - name: Run smoke tests
      ansible.builtin.uri:
        url: "https://{{ app_domain }}/health"
        status_code: 200
      register: health_check
      # retries has no effect without an explicit until condition.
      until: health_check.status == 200
      retries: 5
      delay: 10
Cluster Management
Scaling and Updates
---
# Day-2 operations: scaling, rollout strategy tuning, and node
# drain/cordon/uncordon for maintenance windows.
- name: Manage cluster operations
  hosts: localhost
  tasks:
    - name: Scale deployment
      kubernetes.core.k8s_scale:
        api_version: apps/v1
        kind: Deployment
        name: myapp
        namespace: production
        replicas: 5

    # maxUnavailable: 0 keeps full capacity during the rollout; new pods
    # surge one at a time.
    - name: Patch deployment with rolling update
      kubernetes.core.k8s:
        state: present
        definition:
          apiVersion: apps/v1
          kind: Deployment
          metadata:
            name: myapp
            namespace: production
          spec:
            strategy:
              type: RollingUpdate
              rollingUpdate:
                maxSurge: 1
                maxUnavailable: 0

    # k8s_drain nests force/ignore_daemonsets/delete_emptydir_data under
    # delete_options; the module does not accept them as top-level options.
    - name: Drain node for maintenance
      kubernetes.core.k8s_drain:
        state: drain
        name: worker-node-01
        delete_options:
          force: true
          ignore_daemonsets: true
          delete_emptydir_data: true

    # Cordon/uncordon via k8s_drain states instead of hand-patching the
    # Node object's spec.unschedulable field.
    - name: Cordon node
      kubernetes.core.k8s_drain:
        state: cordon
        name: worker-node-01

    - name: Uncordon node
      kubernetes.core.k8s_drain:
        state: uncordon
        name: worker-node-01
Best Practices Summary
Kubernetes Automation Best Practices
- Namespaces: Use namespaces for logical separation and RBAC
- Resource Limits: Always define resource requests and limits
- Health Checks: Implement liveness and readiness probes
- Secrets: Use Kubernetes Secrets or external secret managers
- Helm: Leverage Helm for complex application deployments
- GitOps: Implement GitOps with ArgoCD or Flux for declarative deployments
- RBAC: Implement least-privilege access with proper RBAC
- Monitoring: Use Prometheus Operator for comprehensive monitoring
- Operators: Leverage Operators for stateful application management
- CI/CD: Integrate with Jenkins, GitLab CI, or GitHub Actions
- Rolling Updates: Use rolling updates with proper health checks
- Backup: Implement regular backups with Velero or similar tools