Cloud Automation (AWS, Azure, GCP)
Overview
Ansible provides comprehensive modules for managing resources across major cloud providers including AWS, Microsoft Azure, and Google Cloud Platform.
AWS Automation
Prerequisites
# Install the AWS SDK for Python (boto3/botocore) and the Ansible AWS collection.
pip install boto3 botocore
ansible-galaxy collection install amazon.aws
AWS Credentials
# Method 1: Environment variables
export AWS_ACCESS_KEY_ID=your_key
export AWS_SECRET_ACCESS_KEY=your_secret
export AWS_REGION=us-east-1
# Method 2: In playbook variables (use Vault!)
# Never commit plaintext keys — keep the vault_* values in an encrypted vars file.
aws_access_key: "{{ vault_aws_access_key }}"
aws_secret_key: "{{ vault_aws_secret_key }}"
EC2 Instance Management
---
# Launch a single EC2 instance and add it to the in-memory inventory so
# later plays can configure it.
- name: Manage EC2 instances
  hosts: localhost
  gather_facts: false
  tasks:
    - name: Launch EC2 instance
      amazon.aws.ec2_instance:
        name: web-server-01
        key_name: mykey
        instance_type: t3.micro
        image_id: ami-0c55b159cbfafe1f0  # NOTE(review): confirm this AMI matches the intended Ubuntu release in us-east-1
        region: us-east-1
        vpc_subnet_id: subnet-12345678
        security_group: web-sg
        network:
          assign_public_ip: true
        tags:
          Environment: production
          Role: webserver
        wait: true  # block until the instance reaches the requested state
        state: running
      register: ec2

    - name: Add instance to inventory
      ansible.builtin.add_host:
        name: "{{ ec2.instances[0].public_ip_address }}"
        groups: launched
S3 Bucket Management
# Task list — include these inside a localhost play with the amazon.aws
# collection installed.
- name: Create S3 bucket
  amazon.aws.s3_bucket:
    name: my-app-bucket-12345
    region: us-east-1
    versioning: true
    encryption: AES256
    public_access:
      block_public_acls: true
      block_public_policy: true
    tags:
      Environment: production

- name: Upload files to S3
  # amazon.aws.aws_s3 was renamed; s3_object is the current module name.
  amazon.aws.s3_object:
    bucket: my-app-bucket-12345
    object: /backups/db-backup.sql
    src: /tmp/db-backup.sql
    mode: put
    encrypt: true
RDS Database
# Provision a small PostgreSQL RDS instance (task — runs inside a play).
- name: Create RDS instance
  amazon.aws.rds_instance:
    id: my-database
    state: present
    engine: postgres
    engine_version: "14.7"
    instance_type: db.t3.micro
    storage_size: 20  # GiB
    master_username: admin  # NOTE(review): 'admin' is a reserved word on some engines — verify for postgres
    master_user_password: "{{ db_password }}"  # supply via Ansible Vault
    db_name: myapp
    vpc_security_group_ids:
      - sg-12345678
    publicly_accessible: false
    backup_retention_period: 7
    tags:
      Environment: production
Azure Automation
Prerequisites
# Install the Azure CLI, the Azure collection, and the collection's Python deps.
pip install azure-cli
ansible-galaxy collection install azure.azcollection
pip install -r ~/.ansible/collections/ansible_collections/azure/azcollection/requirements-azure.txt
Azure Authentication
# Login via CLI
az login
# Or use service principal in playbook
# Keep all four values in Ansible Vault — together they grant full API access.
azure_subscription_id: "{{ vault_azure_subscription_id }}"
azure_client_id: "{{ vault_azure_client_id }}"
azure_secret: "{{ vault_azure_secret }}"
azure_tenant: "{{ vault_azure_tenant }}"
Virtual Machine
---
# Build resource group, network, public IP and VM on Azure.
- name: Create Azure VM
  hosts: localhost
  tasks:
    - name: Create resource group
      azure.azcollection.azure_rm_resourcegroup:
        name: myResourceGroup
        location: eastus

    - name: Create virtual network
      azure.azcollection.azure_rm_virtualnetwork:
        resource_group: myResourceGroup
        name: myVNet
        address_prefixes: "10.0.0.0/16"

    - name: Create subnet
      azure.azcollection.azure_rm_subnet:
        resource_group: myResourceGroup
        name: mySubnet
        address_prefix: "10.0.1.0/24"
        virtual_network: myVNet

    - name: Create public IP
      azure.azcollection.azure_rm_publicipaddress:
        resource_group: myResourceGroup
        name: myPublicIP
        allocation_method: Static

    - name: Create VM
      azure.azcollection.azure_rm_virtualmachine:
        resource_group: myResourceGroup
        name: myVM
        vm_size: Standard_B1s
        admin_username: azureuser
        ssh_password_enabled: false
        ssh_public_keys:
          - path: /home/azureuser/.ssh/authorized_keys
            key_data: "{{ ssh_public_key }}"
        # NOTE(review): myNIC is never created in this play — add an
        # azure_rm_networkinterface task or confirm the NIC already exists.
        network_interfaces: myNIC
        image:
          # NOTE(review): newer Ubuntu LTS images use a different offer/sku
          # (e.g. 0001-com-ubuntu-server-focal / 20_04-lts) — confirm.
          offer: UbuntuServer
          publisher: Canonical
          sku: '20.04-LTS'
          version: latest
Google Cloud Platform
Prerequisites
# Install the Google auth library and the Ansible GCP collection.
pip install google-auth
ansible-galaxy collection install google.cloud
GCP Compute Instance
---
# Launch a single Compute Engine instance with an external IP.
- name: Create GCP instance
  hosts: localhost
  tasks:
    - name: Create instance
      google.cloud.gcp_compute_instance:
        name: web-server
        machine_type: n1-standard-1
        zone: us-central1-a
        project: my-project-id
        auth_kind: serviceaccount
        service_account_file: /path/to/credentials.json
        disks:
          - auto_delete: true
            boot: true
            initialize_params:
              source_image: projects/ubuntu-os-cloud/global/images/family/ubuntu-2004-lts
        network_interfaces:
          - network: default
            access_configs:
              - name: External NAT
                type: ONE_TO_ONE_NAT  # grants an ephemeral external IP
        tags:
          items:
            - http-server
            - https-server
        state: present
Multi-Cloud Deployment Example
---
# Conditionally deploy one instance per cloud; each provider is gated by a
# deploy_<cloud> flag that defaults to true.
- name: Deploy application across clouds
  hosts: localhost
  vars:
    app_name: myapp
    deploy_env: production  # renamed from 'environment', which clashes with the Ansible play keyword
  tasks:
    # AWS
    - name: Deploy to AWS
      when: deploy_aws | default(true)
      block:
        - name: Launch EC2 instance
          amazon.aws.ec2_instance:
            name: "{{ app_name }}-aws"
            instance_type: t3.micro
            image_id: ami-0c55b159cbfafe1f0
            region: us-east-1
            tags:
              App: "{{ app_name }}"
              Cloud: AWS
          register: aws_instance  # 'register' is a task keyword — it is invalid on a block

    # Azure
    - name: Deploy to Azure
      when: deploy_azure | default(true)
      block:
        - name: Create Azure VM
          azure.azcollection.azure_rm_virtualmachine:
            name: "{{ app_name }}-azure"
            resource_group: myResourceGroup
            vm_size: Standard_B1s
            tags:
              App: "{{ app_name }}"
              Cloud: Azure
          register: azure_instance

    # GCP
    - name: Deploy to GCP
      when: deploy_gcp | default(true)
      block:
        - name: Create GCP instance
          google.cloud.gcp_compute_instance:
            name: "{{ app_name }}-gcp"
            machine_type: n1-standard-1
            zone: us-central1-a
            tags:
              items:
                - "{{ app_name }}"
                - gcp
          register: gcp_instance
Advanced AWS Automation
VPC and Networking
---
# Build a complete VPC: internet gateway, public/private subnets, NAT
# gateway, and route tables (public via IGW, private via NAT).
- name: Create complete AWS VPC infrastructure
  hosts: localhost
  gather_facts: false
  vars:
    vpc_cidr: 10.0.0.0/16
    region: us-east-1
  tasks:
    - name: Create VPC
      amazon.aws.ec2_vpc_net:
        name: production-vpc
        cidr_block: "{{ vpc_cidr }}"
        region: "{{ region }}"
        dns_hostnames: true
        dns_support: true
        tags:
          Environment: production
      register: vpc

    - name: Create Internet Gateway
      amazon.aws.ec2_vpc_igw:
        vpc_id: "{{ vpc.vpc.id }}"
        region: "{{ region }}"
        tags:
          Name: production-igw
      register: igw

    - name: Create public subnet
      amazon.aws.ec2_vpc_subnet:
        vpc_id: "{{ vpc.vpc.id }}"
        cidr: 10.0.1.0/24
        az: "{{ region }}a"
        tags:
          Name: public-subnet-a
          Type: public
      register: public_subnet

    - name: Create private subnet
      amazon.aws.ec2_vpc_subnet:
        vpc_id: "{{ vpc.vpc.id }}"
        cidr: 10.0.10.0/24
        az: "{{ region }}a"
        tags:
          Name: private-subnet-a
          Type: private
      register: private_subnet

    - name: Create NAT Gateway EIP
      amazon.aws.ec2_eip:
        region: "{{ region }}"
        in_vpc: true  # VPC-scoped allocation so allocation_id is returned
        tags:
          Name: nat-gateway-eip
      register: nat_eip

    - name: Create NAT Gateway
      amazon.aws.ec2_vpc_nat_gateway:
        subnet_id: "{{ public_subnet.subnet.id }}"
        allocation_id: "{{ nat_eip.allocation_id }}"
        region: "{{ region }}"
        wait: true  # the private route below needs the gateway to exist
      register: nat_gateway

    - name: Create public route table
      amazon.aws.ec2_vpc_route_table:
        vpc_id: "{{ vpc.vpc.id }}"
        region: "{{ region }}"
        tags:
          Name: public-rt
        subnets:
          - "{{ public_subnet.subnet.id }}"
        routes:
          - dest: 0.0.0.0/0
            gateway_id: "{{ igw.gateway_id }}"

    - name: Create private route table
      amazon.aws.ec2_vpc_route_table:
        vpc_id: "{{ vpc.vpc.id }}"
        region: "{{ region }}"
        tags:
          Name: private-rt
        subnets:
          - "{{ private_subnet.subnet.id }}"
        routes:
          - dest: 0.0.0.0/0
            # NAT gateways are referenced via nat_gateway_id; gateway_id is
            # for internet/virtual-private gateways and would be rejected here.
            nat_gateway_id: "{{ nat_gateway.nat_gateway_id }}"
Auto Scaling and Load Balancing
---
# ALB + target group + launch template + auto-scaling group with a
# CPU-based target-tracking policy.
- name: Setup Auto Scaling with Application Load Balancer
  hosts: localhost
  vars:
    region: us-east-1
    ami_id: ami-0c55b159cbfafe1f0
  tasks:
    - name: Create security group for ALB
      amazon.aws.ec2_group:
        name: alb-sg
        description: Security group for ALB
        vpc_id: "{{ vpc_id }}"
        region: "{{ region }}"
        rules:
          - proto: tcp
            ports: [80, 443]
            cidr_ip: 0.0.0.0/0
            rule_desc: Allow HTTP/HTTPS from anywhere
      register: alb_sg  # referenced below as alb_sg.group_id — was never registered

    - name: Create security group for instances
      amazon.aws.ec2_group:
        name: web-sg
        description: Security group for web servers
        vpc_id: "{{ vpc_id }}"
        region: "{{ region }}"
        rules:
          - proto: tcp
            ports: [80]
            group_name: alb-sg  # only the ALB may reach the instances
            rule_desc: Allow HTTP from ALB
      register: web_sg  # referenced below as web_sg.group_id — was never registered

    - name: Create target group
      community.aws.elb_target_group:
        name: web-tg
        protocol: http
        port: 80
        vpc_id: "{{ vpc_id }}"
        health_check_path: /health
        health_check_interval: 30
        health_check_timeout: 5
        healthy_threshold_count: 2
        unhealthy_threshold_count: 3
        region: "{{ region }}"
        state: present
      register: target_group

    - name: Create Application Load Balancer
      community.aws.elb_application_lb:
        name: web-alb
        region: "{{ region }}"
        security_groups:
          - "{{ alb_sg.group_id }}"
        subnets:
          - "{{ public_subnet_a }}"
          - "{{ public_subnet_b }}"
        listeners:
          - Protocol: HTTP
            Port: 80
            DefaultActions:
              - Type: forward
                TargetGroupArn: "{{ target_group.target_group_arn }}"
        tags:
          Environment: production

    - name: Create Launch Template
      community.aws.ec2_launch_template:
        name: web-template
        image_id: "{{ ami_id }}"
        instance_type: t3.micro
        key_name: mykey
        security_group_ids:
          - "{{ web_sg.group_id }}"
        user_data: "{{ lookup('file', 'userdata.sh') | b64encode }}"
        tag_specifications:
          - resource_type: instance
            tags:
              Name: web-server
              Environment: production

    - name: Create Auto Scaling Group
      community.aws.autoscaling_group:
        name: web-asg
        launch_template:
          launch_template_name: web-template
        min_size: 2
        max_size: 10
        desired_capacity: 3
        vpc_zone_identifier:
          - "{{ private_subnet_a }}"
          - "{{ private_subnet_b }}"
        target_group_arns:
          - "{{ target_group.target_group_arn }}"
        health_check_type: ELB
        health_check_period: 300
        # autoscaling_group expects tags as a list of {TagName: value,
        # propagate_at_launch: bool} dicts, not key/value pairs.
        tags:
          - Environment: production
            propagate_at_launch: true

    - name: Create scaling policy
      community.aws.autoscaling_policy:
        name: scale-on-cpu
        asg_name: web-asg
        policy_type: TargetTrackingScaling
        target_tracking_config:
          predefined_metric_spec:
            predefined_metric_type: ASGAverageCPUUtilization
          target_value: 70.0  # target_value is a sibling of the metric spec
AWS Lambda Functions
---
# IAM role + zip package + Lambda deploy + scheduled trigger.
- name: Deploy Lambda function
  hosts: localhost
  tasks:
    - name: Create IAM role for Lambda
      community.aws.iam_role:
        name: lambda-execution-role
        assume_role_policy_document: |
          {
            "Version": "2012-10-17",
            "Statement": [{
              "Effect": "Allow",
              "Principal": {"Service": "lambda.amazonaws.com"},
              "Action": "sts:AssumeRole"
            }]
          }
        managed_policy:
          - arn:aws:iam::aws:policy/service-role/AWSLambdaBasicExecutionRole
          - arn:aws:iam::aws:policy/AmazonS3ReadOnlyAccess

    - name: Package Lambda function
      community.general.archive:  # FQCN — 'archive' lives in community.general
        path: ./lambda_function/
        dest: /tmp/lambda_function.zip
        format: zip

    - name: Deploy Lambda function
      community.aws.lambda:
        name: data-processor
        runtime: python3.9
        role: "arn:aws:iam::{{ account_id }}:role/lambda-execution-role"
        handler: lambda_function.lambda_handler
        zip_file: /tmp/lambda_function.zip
        timeout: 60
        memory_size: 256
        environment_variables:
          ENVIRONMENT: production
          S3_BUCKET: "{{ data_bucket }}"
        tags:
          Application: DataProcessing
          Environment: production
      register: lambda_fn  # provides the function ARN for the event rule below

    - name: Create CloudWatch Events rule
      community.aws.cloudwatchevent_rule:
        name: daily-data-processing
        schedule_expression: "cron(0 2 * * ? *)"  # daily at 02:00 UTC
        description: Trigger data processing Lambda daily
        targets:
          - id: "1"
            # was an undefined 'lambda_arn' variable — use the registered result
            arn: "{{ lambda_fn.configuration.function_arn }}"
CloudFormation Integration
---
# Deploy a CloudFormation stack and read its outputs back.
- name: Deploy infrastructure with CloudFormation
  hosts: localhost
  vars:
    # Single source of truth — the debug task below referenced an
    # undefined 'stack_name' variable in the original.
    stack_name: app-infrastructure
  tasks:
    - name: Deploy CloudFormation stack
      amazon.aws.cloudformation:
        stack_name: "{{ stack_name }}"
        region: us-east-1
        state: present
        template: files/infrastructure.yaml
        template_parameters:
          EnvironmentName: production
          VpcCIDR: 10.0.0.0/16
          InstanceType: t3.micro
        tags:
          Environment: production
          ManagedBy: Ansible

    - name: Get stack outputs
      amazon.aws.cloudformation_info:
        stack_name: "{{ stack_name }}"
        region: us-east-1
      register: stack_info

    - name: Display stack outputs
      ansible.builtin.debug:
        msg: "VPC ID: {{ stack_info.cloudformation[stack_name].stack_outputs.VpcId }}"
Advanced Azure Automation
Azure ARM Template Deployment
---
# Deploy a quickstart ARM template into a dedicated resource group.
- name: Deploy Azure infrastructure with ARM templates
  hosts: localhost
  tasks:
    - name: Create resource group
      azure.azcollection.azure_rm_resourcegroup:
        name: production-rg
        location: eastus
        tags:
          Environment: production
          ManagedBy: Ansible

    - name: Deploy ARM template
      azure.azcollection.azure_rm_deployment:
        resource_group: production-rg
        name: infrastructure-deployment
        template_link: 'https://raw.githubusercontent.com/Azure/azure-quickstart-templates/master/quickstarts/microsoft.web/web-app-sql-database/azuredeploy.json'
        parameters:
          skuName:
            value: F1
          skuCapacity:
            value: 1
          sqlAdministratorLogin:
            value: sqladmin
          sqlAdministratorLoginPassword:
            value: "{{ vault_sql_password }}"  # from Ansible Vault
      register: arm_output

    - name: Get deployment outputs
      ansible.builtin.debug:
        msg: "Web App URL: {{ arm_output.deployment.outputs.webAppUrl.value }}"
Azure Kubernetes Service (AKS)
---
# Create an AKS cluster with autoscaling system pool and save kubeconfig.
- name: Create and configure AKS cluster
  hosts: localhost
  vars:
    resource_group: aks-rg
    cluster_name: production-aks
    location: eastus
  tasks:
    - name: Create resource group
      azure.azcollection.azure_rm_resourcegroup:
        name: "{{ resource_group }}"
        location: "{{ location }}"

    - name: Create AKS cluster
      azure.azcollection.azure_rm_aks:
        name: "{{ cluster_name }}"
        resource_group: "{{ resource_group }}"
        location: "{{ location }}"
        dns_prefix: "{{ cluster_name }}"
        kubernetes_version: "1.27.3"
        agent_pool_profiles:
          - name: default
            count: 3
            vm_size: Standard_DS2_v2
            mode: System
            enable_auto_scaling: true
            min_count: 2
            max_count: 5
        service_principal:
          client_id: "{{ azure_client_id }}"
          client_secret: "{{ azure_client_secret }}"  # store in Vault
        network_profile:
          network_plugin: azure
          service_cidr: 10.0.0.0/16
          dns_service_ip: 10.0.0.10
        addon:
          monitoring:
            enabled: true
          # NOTE(review): http_application_routing is not recommended for
          # production clusters — confirm this is intentional.
          http_application_routing:
            enabled: true
        tags:
          Environment: production

    - name: Get AKS credentials
      azure.azcollection.azure_rm_aks_info:
        name: "{{ cluster_name }}"
        resource_group: "{{ resource_group }}"
      register: aks_info

    - name: Save kubeconfig
      ansible.builtin.copy:
        content: "{{ aks_info.aks[0].kube_config }}"
        dest: ~/.kube/config-aks
        mode: '0600'  # keep cluster credentials private
Azure App Service
---
# App Service plan + Linux web app + staging deployment slot.
- name: Deploy web application to Azure App Service
  hosts: localhost
  vars:
    resource_group: webapp-rg
    app_service_plan: production-plan
    webapp_name: myapp-prod-web
  tasks:
    - name: Create App Service Plan
      azure.azcollection.azure_rm_appserviceplan:
        resource_group: "{{ resource_group }}"
        name: "{{ app_service_plan }}"
        location: eastus
        sku:
          name: P1v2
          tier: PremiumV2
          size: P1v2
          family: Pv2
          capacity: 2
        is_linux: true

    - name: Create Web App
      azure.azcollection.azure_rm_webapp:
        resource_group: "{{ resource_group }}"
        name: "{{ webapp_name }}"
        plan:
          resource_group: "{{ resource_group }}"
          name: "{{ app_service_plan }}"
        frameworks:
          - name: "python"
            version: "3.9"
        app_settings:
          ENVIRONMENT: production
          DATABASE_URL: "{{ vault_database_url }}"  # from Ansible Vault
        # NOTE(review): verify azure_rm_webapp in the installed collection
        # version accepts a 'site_config' block; older versions expose only
        # a subset of these options.
        site_config:
          always_on: true
          http20_enabled: true
          min_tls_version: "1.2"

    - name: Configure deployment slot
      azure.azcollection.azure_rm_webappslot:
        resource_group: "{{ resource_group }}"
        webapp_name: "{{ webapp_name }}"
        name: staging
        configuration_source: "{{ webapp_name }}"  # clone settings from production
        app_settings:
          ENVIRONMENT: staging
Advanced GCP Automation
Google Kubernetes Engine (GKE)
---
# GKE cluster plus an autoscaling preemptible worker node pool.
- name: Create GKE cluster
  hosts: localhost
  vars:
    project: my-project-id
    cluster_name: production-gke
    zone: us-central1-a
  tasks:
    - name: Create GKE cluster
      google.cloud.gcp_container_cluster:
        name: "{{ cluster_name }}"
        initial_node_count: 3
        node_config:
          machine_type: n1-standard-2
          disk_size_gb: 100
          oauth_scopes:
            - https://www.googleapis.com/auth/devstorage.read_only
            - https://www.googleapis.com/auth/logging.write
            - https://www.googleapis.com/auth/monitoring
            - https://www.googleapis.com/auth/servicecontrol
            - https://www.googleapis.com/auth/service.management.readonly
            - https://www.googleapis.com/auth/trace.append
          preemptible: false
        master_auth:
          client_certificate_config:
            issue_client_certificate: false
        addons_config:
          http_load_balancing:
            disabled: false
          horizontal_pod_autoscaling:
            disabled: false
          network_policy_config:
            disabled: false
        network_policy:
          enabled: true
        ip_allocation_policy:
          use_ip_aliases: true  # VPC-native cluster
          cluster_ipv4_cidr_block: 10.0.0.0/14
          services_ipv4_cidr_block: 10.4.0.0/19
        location: "{{ zone }}"
        project: "{{ project }}"
        auth_kind: serviceaccount
        service_account_file: "{{ gcp_cred_file }}"
        state: present
      register: gke_cluster

    - name: Create node pool
      google.cloud.gcp_container_node_pool:
        name: worker-pool
        initial_node_count: 2
        # NOTE(review): this module expects a cluster resource reference;
        # passing the registered cluster result is the documented pattern.
        cluster: "{{ gke_cluster }}"
        config:
          machine_type: n1-standard-4
          disk_size_gb: 100
          preemptible: true  # cheaper, but nodes can be reclaimed at any time
        autoscaling:
          enabled: true
          min_node_count: 2
          max_node_count: 10
        management:
          auto_repair: true
          auto_upgrade: true
        location: "{{ zone }}"
        project: "{{ project }}"
        auth_kind: serviceaccount
        service_account_file: "{{ gcp_cred_file }}"
        state: present
Cloud SQL and Cloud Storage
---
# Cloud SQL (Postgres) instance + database + backup bucket with lifecycle.
- name: Setup GCP database and storage
  hosts: localhost
  tasks:
    - name: Create Cloud SQL instance
      google.cloud.gcp_sql_instance:
        name: production-db
        database_version: POSTGRES_14
        region: us-central1
        settings:
          tier: db-n1-standard-2
          ip_configuration:
            ipv4_enabled: true
            authorized_networks:
              # SECURITY: 0.0.0.0/0 exposes the instance to the entire
              # internet — restrict to known CIDRs or use private IP /
              # the Cloud SQL Auth Proxy instead.
              - value: 0.0.0.0/0
                name: allow-all
          backup_configuration:
            enabled: true
            start_time: "03:00"
          database_flags:
            - name: max_connections
              value: "100"
        project: "{{ project }}"
        auth_kind: serviceaccount
        service_account_file: "{{ gcp_cred_file }}"
        state: present

    - name: Create database
      google.cloud.gcp_sql_database:
        name: appdb
        instance: production-db
        project: "{{ project }}"
        auth_kind: serviceaccount
        service_account_file: "{{ gcp_cred_file }}"
        state: present

    - name: Create Cloud Storage bucket
      google.cloud.gcp_storage_bucket:
        name: "{{ project }}-backups"
        location: US
        storage_class: STANDARD
        versioning:
          enabled: true
        lifecycle:
          rule:
            - action:
                type: Delete
              condition:
                age: 90  # delete backup objects older than 90 days
        project: "{{ project }}"
        auth_kind: serviceaccount
        service_account_file: "{{ gcp_cred_file }}"
        state: present
Multi-Cloud and Hybrid Strategies
Multi-Cloud Disaster Recovery
---
# Primary stack in AWS, DR stack in Azure, cross-cloud replication and
# Route53 health-check-driven DNS failover.
- name: Multi-cloud disaster recovery setup
  hosts: localhost
  vars:
    primary_cloud: aws
    dr_cloud: azure
  tasks:
    # Deploy to primary cloud (AWS)
    - name: Deploy to AWS (primary)
      ansible.builtin.include_role:
        name: aws_infrastructure
      vars:
        deployment_type: primary
        region: us-east-1

    # Setup replication to DR site (Azure)
    - name: Deploy to Azure (DR)
      ansible.builtin.include_role:
        name: azure_infrastructure
      vars:
        deployment_type: dr
        location: westus2

    # Configure cross-cloud replication
    - name: Setup data replication
      block:
        - name: Configure S3 to Azure Blob replication
          ansible.builtin.include_tasks: setup_replication.yml
          vars:
            source_cloud: aws
            dest_cloud: azure

        - name: Setup database replication
          ansible.builtin.include_tasks: setup_db_replication.yml
          vars:
            primary_db: "{{ aws_rds_endpoint }}"
            dr_db: "{{ azure_sql_endpoint }}"

    # Configure DNS failover
    - name: Setup Route53 health checks and failover
      amazon.aws.route53_health_check:
        state: present
        fqdn: "{{ primary_endpoint }}"
        type: HTTPS
        port: 443
        resource_path: /health
        request_interval: 30
        failure_threshold: 3
      register: primary_health_check  # id needed by the failover record below

    - name: Configure DNS failover to Azure
      amazon.aws.route53:
        state: present
        zone: example.com
        record: app.example.com
        type: A
        ttl: 60
        value: "{{ azure_public_ip }}"
        failover: SECONDARY
        identifier: app-dr  # failover records require a set identifier
        # was an undefined 'health_check_id' variable — use the registered
        # result. NOTE(review): health checks are usually attached to the
        # PRIMARY record; confirm the intended failover design.
        health_check: "{{ primary_health_check.health_check.id }}"
Hybrid Cloud with VPN Connection
---
# AWS Site-to-Site VPN to an on-prem gateway with a static route back.
- name: Setup hybrid cloud VPN
  hosts: localhost
  vars:
    on_prem_gateway: 203.0.113.1
    on_prem_cidr: 192.168.0.0/16
  tasks:
    # AWS Site-to-Site VPN
    - name: Create Customer Gateway (on-prem)
      amazon.aws.ec2_customer_gateway:
        ip_address: "{{ on_prem_gateway }}"
        bgp_asn: 65000
        region: us-east-1
        name: on-prem-gateway  # ec2_customer_gateway takes 'name', not 'tags'
      register: cgw

    - name: Create Virtual Private Gateway
      amazon.aws.ec2_vpc_vgw:
        vpc_id: "{{ vpc_id }}"
        region: us-east-1
        name: aws-vpn-gateway
      register: vgw

    - name: Create VPN Connection
      amazon.aws.ec2_vpc_vpn:
        customer_gateway_id: "{{ cgw.gateway.customer_gateway.customer_gateway_id }}"
        # NOTE(review): ec2_vpc_vgw returns the gateway id as vgw.id —
        # confirm against the installed collection version.
        vpn_gateway_id: "{{ vgw.vgw.id }}"
        type: ipsec.1
        static_routes:
          - "{{ on_prem_cidr }}"
        region: us-east-1
      register: vpn

    - name: Add route to on-prem network
      amazon.aws.ec2_vpc_route_table:
        vpc_id: "{{ vpc_id }}"
        region: us-east-1
        tags:
          Name: private-to-onprem
        routes:
          - dest: "{{ on_prem_cidr }}"
            gateway_id: "{{ vgw.vgw.id }}"
Cloud Security and Compliance
AWS Security Baseline
---
# Account-level security baseline: CloudTrail, GuardDuty, Security Hub,
# S3 encryption/public-access blocks, IAM password policy.
- name: Implement AWS security best practices
  hosts: localhost
  tasks:
    # Enable CloudTrail
    - name: Enable CloudTrail logging
      community.aws.cloudtrail:
        state: present
        name: organization-trail
        s3_bucket_name: cloudtrail-logs-{{ account_id }}
        include_global_events: true
        is_multi_region_trail: true
        enable_log_file_validation: true
        tags:
          Security: enabled

    # Enable GuardDuty
    - name: Enable GuardDuty
      community.aws.guardduty_detector:
        state: present
        enable: true
        finding_publishing_frequency: FIFTEEN_MINUTES

    # Configure Security Hub
    # NOTE(review): confirm the module name — some collection versions do
    # not ship a securityhub_hub module.
    - name: Enable Security Hub
      community.aws.securityhub_hub:
        state: present
        enable_default_standards: true

    # S3 bucket encryption
    - name: Enforce S3 bucket encryption
      amazon.aws.s3_bucket:
        name: "{{ item }}"
        encryption: AES256
        public_access:
          block_public_acls: true
          block_public_policy: true
          ignore_public_acls: true
          restrict_public_buckets: true
      loop: "{{ s3_buckets }}"

    # IAM password policy
    - name: Set IAM password policy
      community.aws.iam_password_policy:
        min_pw_length: 14
        require_symbols: true
        require_numbers: true
        require_uppercase: true
        require_lowercase: true
        allow_pw_change: true
        pw_max_age: 90
        pw_reuse_prevention: 24
        pw_expire: true
Cost Optimization
AWS Cost Management
---
# Snapshot and delete orphaned EBS volumes, release idle EIPs, and flag
# under-utilized instances via CloudWatch alarms.
- name: Implement cost optimization strategies
  hosts: localhost
  tasks:
    # Identify unattached EBS volumes
    - name: Find unattached EBS volumes
      amazon.aws.ec2_vol_info:
        region: "{{ aws_region }}"
        filters:
          status: available
      register: unattached_volumes

    - name: Create snapshot before deletion
      amazon.aws.ec2_snapshot:
        volume_id: "{{ item.id }}"
        description: "Snapshot before deletion - {{ ansible_date_time.date }}"
        wait: true  # ensure the snapshot completes before the volume is deleted
      loop: "{{ unattached_volumes.volumes }}"
      # looping over an empty list is a no-op, so no length guard is needed

    - name: Delete unattached volumes
      amazon.aws.ec2_vol:
        id: "{{ item.id }}"
        state: absent
      loop: "{{ unattached_volumes.volumes }}"

    # Identify unused Elastic IPs
    - name: Find unassociated Elastic IPs
      amazon.aws.ec2_eip_info:
        region: "{{ aws_region }}"
      register: eips

    - name: Release unassociated EIPs
      amazon.aws.ec2_eip:
        region: "{{ aws_region }}"
        public_ip: "{{ item.public_ip }}"
        state: absent
      loop: "{{ eips.addresses }}"
      # NOTE(review): verify the returned address dicts — checking
      # association_id may be the more reliable "unassociated" test.
      when: item.instance_id is not defined

    # Right-size instances based on CloudWatch metrics
    - name: Get instance CPU utilization
      community.aws.cloudwatch_metric_alarm:
        state: present
        name: "low-cpu-{{ item }}"
        metric: CPUUtilization
        namespace: AWS/EC2
        statistic: Average
        comparison: LessThanThreshold
        threshold: 10.0
        period: 3600
        evaluation_periods: 24
        dimensions:
          InstanceId: "{{ item }}"
      loop: "{{ instance_ids }}"
Resource Tagging Strategy
---
# Apply a mandatory tag set to EC2/RDS resources and set up a CUR report.
- name: Implement comprehensive tagging strategy
  hosts: localhost
  vars:
    required_tags:
      Environment: "{{ environment }}"
      Project: "{{ project_name }}"
      Owner: "{{ team_email }}"
      CostCenter: "{{ cost_center }}"
      ManagedBy: Ansible
      CreatedDate: "{{ ansible_date_time.date }}"
  tasks:
    - name: Tag all EC2 instances
      amazon.aws.ec2_tag:
        region: "{{ aws_region }}"
        resource: "{{ item }}"
        tags: "{{ required_tags }}"
      loop: "{{ ec2_instance_ids }}"

    - name: Tag all RDS instances
      community.aws.rds_instance:
        id: "{{ item }}"
        tags: "{{ required_tags }}"
        apply_immediately: true
      loop: "{{ rds_instance_ids }}"

    # NOTE(review): confirm an 'aws_cur' module exists in the installed
    # community.aws version; otherwise create the report via the CLI/API.
    - name: Create cost allocation report
      community.aws.aws_cur:
        report_name: monthly-cost-report
        s3_bucket: cost-reports-{{ account_id }}
        s3_prefix: reports/
        time_unit: MONTHLY
        format: textORcsv  # literal AWS CUR API enum value
        compression: GZIP
        additional_schema_elements:
          - RESOURCES
        additional_artifacts:
          - ATHENA
        refresh_closed_reports: true
        report_versioning: OVERWRITE_REPORT
Terraform Integration with Ansible
Using Ansible with Terraform
---
# Provision with Terraform, then hand the hosts to Ansible for configuration.
- name: Provision infrastructure with Terraform, configure with Ansible
  hosts: localhost
  tasks:
    - name: Run Terraform to provision infrastructure
      community.general.terraform:
        project_path: ./terraform/
        state: present
        force_init: true
        variables:
          environment: production
          instance_count: 3
      register: terraform_output

    - name: Extract instance IPs from Terraform output
      ansible.builtin.set_fact:
        instance_ips: "{{ terraform_output.outputs.instance_ips.value }}"

    - name: Wait for instances to be ready
      ansible.builtin.wait_for:
        host: "{{ item }}"
        port: 22
        timeout: 300
      loop: "{{ instance_ips }}"

    - name: Add instances to inventory
      ansible.builtin.add_host:
        name: "{{ item }}"
        groups: terraform_instances
        ansible_user: ubuntu
      loop: "{{ instance_ips }}"

# This is a second play — it must sit at play level, not inside the
# previous play's task list (the original nested it under 'tasks').
- name: Configure instances with Ansible
  hosts: terraform_instances
  become: true
  roles:
    - common
    - webserver
    - monitoring
Cloud-Native Application Patterns
Serverless Deployment Pattern
---
# API Gateway + Lambda handlers + DynamoDB table for a serverless CRUD API.
- name: Deploy serverless application
  hosts: localhost
  vars:
    app_name: serverless-api
    stage: production
  tasks:
    # API Gateway
    # NOTE(review): confirm the api_gateway* module names/parameters in the
    # installed community.aws version — some of these are CLI-only operations.
    - name: Create API Gateway
      community.aws.api_gateway:
        name: "{{ app_name }}-api"
        description: Serverless API
        endpoint_configuration:
          types:
            - REGIONAL
      register: api_gateway

    # Lambda functions for different endpoints
    - name: Deploy Lambda functions
      community.aws.lambda:
        name: "{{ app_name }}-{{ item.name }}"
        runtime: python3.9
        role: "{{ lambda_execution_role_arn }}"
        handler: "handlers.{{ item.handler }}"
        zip_file: /tmp/lambda_package.zip
        environment_variables:
          STAGE: "{{ stage }}"
          TABLE_NAME: "{{ dynamodb_table }}"
      loop:
        - { name: 'get-items', handler: 'get_items' }
        - { name: 'create-item', handler: 'create_item' }
        - { name: 'update-item', handler: 'update_item' }
        - { name: 'delete-item', handler: 'delete_item' }
      register: lambda_functions

    # DynamoDB table
    - name: Create DynamoDB table
      community.aws.dynamodb_table:
        name: "{{ app_name }}-data"
        hash_key_name: id
        hash_key_type: STRING
        read_capacity: 5
        write_capacity: 5
        indexes:
          - name: timestamp-index
            type: global_secondary_index
            hash_key_name: timestamp
            hash_key_type: NUMBER
        tags:
          Application: "{{ app_name }}"

    # API Gateway integrations
    - name: Configure API Gateway routes
      community.aws.api_gateway_resource:
        rest_api_id: "{{ api_gateway.rest_api_id }}"
        parent_id: "{{ api_gateway.root_resource_id }}"
        path_part: items
      register: items_resource

    - name: Deploy API Gateway stage
      community.aws.api_gateway_deployment:
        rest_api_id: "{{ api_gateway.rest_api_id }}"
        stage_name: "{{ stage }}"
      register: api_deployment

    - name: Output API endpoint
      ansible.builtin.debug:
        msg: "API Endpoint: https://{{ api_gateway.rest_api_id }}.execute-api.{{ aws_region }}.amazonaws.com/{{ stage }}"
Monitoring and Observability
CloudWatch Monitoring Setup
---
# Log group + dashboard + a loop of metric alarms wired to an SNS topic.
- name: Setup comprehensive CloudWatch monitoring
  hosts: localhost
  tasks:
    - name: Create CloudWatch Log Group
      community.aws.cloudwatch_log_group:
        log_group_name: "/aws/application/{{ app_name }}"
        retention_in_days: 30
        tags:
          Application: "{{ app_name }}"

    - name: Create CloudWatch Dashboard
      community.aws.cloudwatch_dashboard:
        dashboard_name: "{{ app_name }}-dashboard"
        dashboard_body: |
          {
            "widgets": [
              {
                "type": "metric",
                "properties": {
                  "metrics": [
                    ["AWS/EC2", "CPUUtilization", {"stat": "Average"}],
                    ["AWS/ApplicationELB", "TargetResponseTime", {"stat": "Average"}]
                  ],
                  "period": 300,
                  "stat": "Average",
                  "region": "{{ aws_region }}",
                  "title": "Application Performance"
                }
              }
            ]
          }

    - name: Create CloudWatch Alarms
      community.aws.cloudwatch_metric_alarm:
        state: present
        name: "{{ item.name }}"
        metric: "{{ item.metric }}"
        namespace: "{{ item.namespace }}"
        statistic: Average
        comparison: "{{ item.comparison }}"
        threshold: "{{ item.threshold }}"
        period: 300
        evaluation_periods: 2
        alarm_actions:
          - "{{ sns_topic_arn }}"
      loop:
        - name: high-cpu-alarm
          metric: CPUUtilization
          namespace: AWS/EC2
          comparison: GreaterThanThreshold
          threshold: 80
        # MemoryUtilization (CWAgent namespace) requires the CloudWatch
        # agent to be installed on the instances.
        - name: high-memory-alarm
          metric: MemoryUtilization
          namespace: CWAgent
          comparison: GreaterThanThreshold
          threshold: 85
Best Practices Summary
Cloud Automation Best Practices
- Security: Always use Ansible Vault for cloud credentials and secrets
- Inventory: Use dynamic inventory plugins for cloud resources
- Tagging: Implement comprehensive tagging strategy for cost allocation and management
- Idempotency: Ensure playbooks are idempotent for safe re-execution
- Error Handling: Implement proper retry logic and error handling
- Modules: Use cloud-specific modules instead of CLI commands
- Cost Optimization: Regularly audit unused resources and right-size instances
- High Availability: Deploy across multiple availability zones/regions
- Monitoring: Implement comprehensive monitoring and alerting
- Compliance: Enable security services (CloudTrail, GuardDuty, Security Hub)
- Automation: Integrate with CI/CD pipelines for infrastructure as code
- Disaster Recovery: Implement multi-region or multi-cloud DR strategies