# Terraform Sample Configuration on AWS

Here is what this configuration builds — every resource falls under AWS's always-free tier, wired together as a real event-driven pipeline:

SNS Topic → SQS Queue → Lambda Function → DynamoDB Table
                                        → CloudWatch Logs

Supporting resources: S3 Bucket (storage), SSM Parameter Store (config),
and a VPC + Subnet + IGW + Route Table + Security Group (networking).

| Component | AWS Service / Terminology | Description |
|---|---|---|
| SNS Topic | Amazon Simple Notification Service (SNS) Topic | Pub/Sub messaging service to send notifications |
| SQS Queue | Amazon Simple Queue Service (SQS) Queue | Message queue service to decouple and buffer messages |
| Lambda Function | AWS Lambda Function | Serverless compute service to run code in response to events |
| DynamoDB Table | Amazon DynamoDB Table | NoSQL managed database service |
| CloudWatch Logs | Amazon CloudWatch Logs | Log storage and monitoring service |
| S3 Bucket | Amazon Simple Storage Service (S3) Bucket | Object storage for files and backups |
| SSM Parameter Store | AWS Systems Manager (SSM) Parameter Store | Secure storage for configuration and secrets |
| VPC | Virtual Private Cloud (VPC) | Virtual network for AWS resources |
| Subnet | Subnet within a VPC | Subdivision of a VPC's IP address range |
| IGW | Internet Gateway | Enables internet access for resources in a VPC |
| Route Table | Route Table | Network routing rules for subnets |
| Security Group | Security Group | Virtual firewall controlling inbound/outbound traffic |

provider.tf

# Pin provider sources and major versions: the AWS provider for all cloud
# resources, and the archive provider for zipping the inline Lambda
# source in lambda.tf at plan time.
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
    archive = {
      source  = "hashicorp/archive"
      version = "~> 2.4"
    }
  }
}

# AWS provider configured from the variables in variables.tf.
# NOTE(review): feeding credentials in through Terraform variables means
# they can end up in plan files and shell history; prefer the standard
# AWS_ACCESS_KEY_ID / AWS_SECRET_ACCESS_KEY environment variables or a
# shared-credentials profile for anything beyond a throwaway sandbox.
provider "aws" {
  region     = var.aws_region
  access_key = var.aws_access_key
  secret_key = var.aws_secret_key
}

variables.tf

# Input variables. Each declares an explicit type so `terraform validate`
# rejects mis-typed values from terraform.tfvars before a plan runs.

variable "aws_region" {
  description = "AWS region to deploy resources."
  type        = string
  default     = "us-east-1"
}

variable "aws_access_key" {
  description = "AWS access key ID."
  type        = string
  sensitive   = true
}

variable "aws_secret_key" {
  description = "AWS secret access key."
  type        = string
  sensitive   = true
}

variable "project_name" {
  description = "Project name prefix for all resources."
  type        = string
  default     = "tf-free"
}

data.tf

# Account ID — used to build a globally unique S3 bucket name (storage.tf)
# and to scope the SSM parameter ARN in the Lambda IAM policy (iam.tf).
data "aws_caller_identity" "current" {}

# Availability zones of the chosen region, discovered at plan time so the
# subnet in network.tf never hardcodes an AZ name.
data "aws_availability_zones" "available" {
  state = "available"
}

network.tf

# VPC with a /16 address space. DNS support and hostnames are enabled so
# resources inside the VPC can resolve AWS service endpoints by name.
resource "aws_vpc" "main" {
  cidr_block           = "10.0.0.0/16"
  enable_dns_hostnames = true
  enable_dns_support   = true

  tags = { Name = "${var.project_name}-vpc" }
}

# Single public subnet placed in the first available AZ; anything
# launched here automatically receives a public IP.
resource "aws_subnet" "public" {
  vpc_id                  = aws_vpc.main.id
  cidr_block              = "10.0.1.0/24"
  availability_zone       = data.aws_availability_zones.available.names[0]
  map_public_ip_on_launch = true

  tags = { Name = "${var.project_name}-public-subnet" }
}

# Internet gateway giving the VPC a route to the public internet.
resource "aws_internet_gateway" "igw" {
  vpc_id = aws_vpc.main.id

  tags = { Name = "${var.project_name}-igw" }
}

# Public route table: send all non-local traffic (0.0.0.0/0) out through
# the internet gateway.
resource "aws_route_table" "public" {
  vpc_id = aws_vpc.main.id

  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.igw.id
  }

  tags = { Name = "${var.project_name}-public-rt" }
}

# Attach the public route table to the public subnet.
resource "aws_route_table_association" "public" {
  subnet_id      = aws_subnet.public.id
  route_table_id = aws_route_table.public.id
}

# Security group with no ingress rules — nothing in this stack accepts
# inbound traffic — and unrestricted egress for outbound API calls.
resource "aws_security_group" "main" {
  name        = "${var.project_name}-sg"
  description = "Main security group for ${var.project_name}"
  vpc_id      = aws_vpc.main.id

  egress {
    description = "Allow all outbound"
    from_port   = 0
    to_port     = 0
    protocol    = "-1"
    cidr_blocks = ["0.0.0.0/0"]
  }

  tags = { Name = "${var.project_name}-sg" }
}

iam.tf

# IAM execution role the Lambda function assumes at invocation time.
# The trust policy restricts assumption to the Lambda service principal.
resource "aws_iam_role" "lambda_role" {
  name = "${var.project_name}-lambda-role"

  assume_role_policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Action    = "sts:AssumeRole"
      Effect    = "Allow"
      Principal = { Service = "lambda.amazonaws.com" }
    }]
  })

  tags = { Name = "${var.project_name}-lambda-role" }
}

# Inline policy for the Lambda role, scoped to this project's resources:
# write logs, CRUD on the project DynamoDB table, consume from the project
# SQS queue (required by the event source mapping), read/write the project
# S3 bucket, and read only SSM parameters under /${project_name}/.
resource "aws_iam_role_policy" "lambda_policy" {
  name = "${var.project_name}-lambda-policy"
  role = aws_iam_role.lambda_role.id

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Sid    = "CloudWatchLogs"
        Effect = "Allow"
        Action = [
          "logs:CreateLogGroup",
          "logs:CreateLogStream",
          "logs:PutLogEvents"
        ]
        Resource = "arn:aws:logs:*:*:*"
      },
      {
        Sid    = "DynamoDBAccess"
        Effect = "Allow"
        Action = [
          "dynamodb:PutItem",
          "dynamodb:GetItem",
          "dynamodb:UpdateItem",
          "dynamodb:DeleteItem",
          "dynamodb:Query",
          "dynamodb:Scan"
        ]
        Resource = aws_dynamodb_table.main.arn
      },
      {
        Sid    = "SQSAccess"
        Effect = "Allow"
        Action = [
          "sqs:ReceiveMessage",
          "sqs:DeleteMessage",
          "sqs:GetQueueAttributes"
        ]
        Resource = aws_sqs_queue.main.arn
      },
      {
        Sid    = "S3Access"
        Effect = "Allow"
        Action = [
          "s3:PutObject",
          "s3:GetObject",
          "s3:ListBucket"
        ]
        Resource = [
          aws_s3_bucket.main.arn,
          "${aws_s3_bucket.main.arn}/*"
        ]
      },
      {
        Sid    = "SSMAccess"
        Effect = "Allow"
        Action = [
          "ssm:GetParameter",
          "ssm:GetParameters"
        ]
        Resource = "arn:aws:ssm:${var.aws_region}:${data.aws_caller_identity.current.account_id}:parameter/${var.project_name}/*"
      }
    ]
  })
}

storage.tf

# ── S3 Bucket ─────────────────────────────────────────────
# Always free: 5 GB storage, 20K GET, 2K PUT requests/month
# Bucket names are global, so the account ID suffix guarantees uniqueness.
resource "aws_s3_bucket" "main" {
  bucket = "${var.project_name}-${data.aws_caller_identity.current.account_id}"

  tags = { Name = "${var.project_name}-bucket" }
}

# Keep prior object versions on overwrite/delete.
# NOTE(review): old versions also count toward the 5 GB free-tier quota.
resource "aws_s3_bucket_versioning" "main" {
  bucket = aws_s3_bucket.main.id
  versioning_configuration {
    status = "Enabled"
  }
}

# Encrypt every object at rest with S3-managed keys (SSE-S3 / AES-256).
resource "aws_s3_bucket_server_side_encryption_configuration" "main" {
  bucket = aws_s3_bucket.main.id
  rule {
    apply_server_side_encryption_by_default {
      sse_algorithm = "AES256"
    }
  }
}

# Block every form of public access to the bucket (ACLs and policies).
resource "aws_s3_bucket_public_access_block" "main" {
  bucket                  = aws_s3_bucket.main.id
  block_public_acls       = true
  block_public_policy     = true
  ignore_public_acls      = true
  restrict_public_buckets = true
}

# ── DynamoDB Table ────────────────────────────────────────
# Always free: 25 GB storage, 25 RCU + 25 WCU/month
# PROVISIONED mode at 1/1 keeps usage well within free tier
resource "aws_dynamodb_table" "main" {
  name           = "${var.project_name}-table"
  billing_mode   = "PROVISIONED"
  read_capacity  = 1
  write_capacity = 1
  hash_key       = "id"

  # Only key attributes are declared; DynamoDB is schemaless for the rest
  # of each item (the Lambda also writes 'body' and 'source' fields).
  attribute {
    name = "id"
    type = "S"
  }

  tags = { Name = "${var.project_name}-table" }
}

messaging.tf

# ── SNS Topic ─────────────────────────────────────────────
# Always free: 1M publish requests/month
# Publishing here is the entry point of the whole pipeline
# (SNS → SQS → Lambda → DynamoDB).
resource "aws_sns_topic" "main" {
  name = "${var.project_name}-topic"

  tags = { Name = "${var.project_name}-topic" }
}

# ── SQS Queue ─────────────────────────────────────────────
# Always free: 1M requests/month
resource "aws_sqs_queue" "main" {
  name                      = "${var.project_name}-queue"
  delay_seconds             = 0
  max_message_size          = 262144 # 256 KB max
  message_retention_seconds = 86400  # 1 day

  # AWS recommends a visibility timeout of at least 6x the consuming
  # Lambda's timeout (30s in lambda.tf). The previous value of 30s could
  # make in-flight messages reappear — and be processed twice — while a
  # batch was still being worked on or retried.
  visibility_timeout_seconds = 180

  tags = { Name = "${var.project_name}-queue" }
}

# Allow SNS to send messages to SQS. The ArnEquals condition restricts
# the permission to this stack's topic only — no other SNS topic can
# deliver into the queue.
resource "aws_sqs_queue_policy" "main" {
  queue_url = aws_sqs_queue.main.id

  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [{
      Effect    = "Allow"
      Principal = { Service = "sns.amazonaws.com" }
      Action    = "sqs:SendMessage"
      Resource  = aws_sqs_queue.main.arn
      Condition = {
        ArnEquals = { "aws:SourceArn" = aws_sns_topic.main.arn }
      }
    }]
  })
}

# Wire SNS → SQS. raw_message_delivery strips the SNS JSON envelope so
# the SQS message body is exactly the published payload — which is what
# the Lambda handler expects when it calls json.loads(record['body']).
# Without it, the stored item would be the SNS Notification wrapper.
resource "aws_sns_topic_subscription" "sqs" {
  topic_arn            = aws_sns_topic.main.arn
  protocol             = "sqs"
  endpoint             = aws_sqs_queue.main.arn
  raw_message_delivery = true
}

lambda.tf

# Inline Python code — packaged into a zip at plan time
data "archive_file" "lambda_zip" {
  type        = "zip"
  output_path = "${path.module}/lambda_function.zip"

  source {
    filename = "lambda_function.py"
    content  = <<-EOF
      import json
      import boto3
      import os
      import logging

      logger = logging.getLogger()
      logger.setLevel(logging.INFO)

      dynamodb = boto3.resource('dynamodb')
      table    = dynamodb.Table(os.environ['DYNAMODB_TABLE'])

      def handler(event, context):
          """Persist each SQS record into DynamoDB, keyed by messageId."""
          for record in event.get('Records', []):
              raw = record['body']
              # Bodies are normally JSON, but tolerate plain text: an
              # unparseable message must not fail the whole batch and be
              # redelivered forever as a poison message.
              try:
                  payload = json.loads(raw)
              except (ValueError, TypeError):
                  payload = raw
              # If SNS delivered with its envelope intact (raw message
              # delivery disabled), unwrap the actual published message.
              if isinstance(payload, dict) and payload.get('Type') == 'Notification' and 'Message' in payload:
                  payload = payload['Message']
              logger.info(f"Processing: {payload}")

              table.put_item(Item={
                  'id':     record['messageId'],
                  'body':   json.dumps(payload),
                  'source': 'sqs'
              })

          return {'statusCode': 200, 'body': 'OK'}
    EOF
  }
}

# Always free: 1M requests/month + 400K GB-seconds compute/month
# source_code_hash ties deployments to the zip content, so editing the
# inline Python above triggers a redeploy on the next apply.
resource "aws_lambda_function" "main" {
  function_name    = "${var.project_name}-function"
  role             = aws_iam_role.lambda_role.arn
  handler          = "lambda_function.handler"
  runtime          = "python3.12"
  filename         = data.archive_file.lambda_zip.output_path
  source_code_hash = data.archive_file.lambda_zip.output_base64sha256
  timeout          = 30
  memory_size      = 128 # minimum memory, within free tier

  # Runtime configuration passed to the handler; DYNAMODB_TABLE is the
  # only one the inline code reads today.
  environment {
    variables = {
      DYNAMODB_TABLE = aws_dynamodb_table.main.name
      S3_BUCKET      = aws_s3_bucket.main.bucket
      PROJECT_NAME   = var.project_name
    }
  }

  tags = { Name = "${var.project_name}-function" }
}

# Wire SQS → Lambda: Lambda polls the queue and invokes the function
# with up to 10 messages per batch.
resource "aws_lambda_event_source_mapping" "sqs_trigger" {
  event_source_arn = aws_sqs_queue.main.arn
  function_name    = aws_lambda_function.main.arn
  batch_size       = 10
  enabled          = true
}

monitoring.tf

# CloudWatch Log Group for Lambda
# Always free: 5 GB log ingestion/month, 10 alarms
# Named statically (the function_name in lambda.tf is
# "${var.project_name}-function") instead of referencing
# aws_lambda_function.main: that reference forced the function to be
# created BEFORE its log group, so an early invocation could auto-create
# the group (the role has logs:CreateLogGroup) and make this resource's
# create call fail with ResourceAlreadyExists.
resource "aws_cloudwatch_log_group" "lambda" {
  name              = "/aws/lambda/${var.project_name}-function"
  retention_in_days = 7 # auto-expire logs to stay within free tier

  tags = { Name = "${var.project_name}-lambda-logs" }
}

# Alarm fires when Lambda errors > 0 in any 60-second period.
# NOTE(review): no alarm_actions are configured — the alarm changes state
# but notifies no one; wire it to an SNS topic to actually get alerted.
resource "aws_cloudwatch_metric_alarm" "lambda_errors" {
  alarm_name          = "${var.project_name}-lambda-errors"
  comparison_operator = "GreaterThanThreshold"
  evaluation_periods  = 1
  metric_name         = "Errors"
  namespace           = "AWS/Lambda"
  period              = 60
  statistic           = "Sum"
  threshold           = 0
  alarm_description   = "Fires when Lambda function errors are detected"
  treat_missing_data  = "notBreaching"

  dimensions = {
    FunctionName = aws_lambda_function.main.function_name
  }

  tags = { Name = "${var.project_name}-lambda-error-alarm" }
}

ssm.tf

# SSM Parameter Store — Standard tier is always free
# Stores runtime config so Lambda (or anything else) can look up
# resource names/URLs without hardcoding them. All parameters live under
# the /${project_name}/ prefix, matching the SSMAccess IAM statement.

# DynamoDB table name.
resource "aws_ssm_parameter" "dynamodb_table" {
  name  = "/${var.project_name}/dynamodb_table"
  type  = "String"
  value = aws_dynamodb_table.main.name
  tags  = { Name = "${var.project_name}-param-dynamodb" }
}

# S3 bucket name (includes the account-ID suffix from storage.tf).
resource "aws_ssm_parameter" "s3_bucket" {
  name  = "/${var.project_name}/s3_bucket"
  type  = "String"
  value = aws_s3_bucket.main.bucket
  tags  = { Name = "${var.project_name}-param-s3" }
}

# SQS queue URL (the form the SQS APIs take, as opposed to the ARN).
resource "aws_ssm_parameter" "sqs_queue_url" {
  name  = "/${var.project_name}/sqs_queue_url"
  type  = "String"
  value = aws_sqs_queue.main.url
  tags  = { Name = "${var.project_name}-param-sqs" }
}

# SNS topic ARN — what publishers need to inject messages into the pipeline.
resource "aws_ssm_parameter" "sns_topic_arn" {
  name  = "/${var.project_name}/sns_topic_arn"
  type  = "String"
  value = aws_sns_topic.main.arn
  tags  = { Name = "${var.project_name}-param-sns" }
}

outputs.tf

# Convenience outputs — used by the end-to-end CLI test below
# (`terraform output -raw sns_topic_arn`) and for ad-hoc inspection.

output "account_id" {
  description = "AWS Account ID"
  value       = data.aws_caller_identity.current.account_id
}

output "vpc_id" {
  description = "ID of the VPC"
  value       = aws_vpc.main.id
}

output "s3_bucket_name" {
  description = "Name of the S3 bucket"
  value       = aws_s3_bucket.main.bucket
}

output "dynamodb_table_name" {
  description = "Name of the DynamoDB table"
  value       = aws_dynamodb_table.main.name
}

output "lambda_function_name" {
  description = "Name of the Lambda function"
  value       = aws_lambda_function.main.function_name
}

output "sqs_queue_url" {
  description = "URL of the SQS queue"
  value       = aws_sqs_queue.main.url
}

output "sns_topic_arn" {
  description = "ARN of the SNS topic"
  value       = aws_sns_topic.main.arn
}

output "cloudwatch_log_group" {
  description = "CloudWatch log group for Lambda"
  value       = aws_cloudwatch_log_group.lambda.name
}

Setup & Deploy

1. Create your credentials file (never commit `terraform.tfvars` to version control — it contains secrets):

mkdir ~/terraform_aws && cd ~/terraform_aws
cat > terraform.tfvars <<EOF
aws_access_key = "<YOUR_ACCESS_KEY_ID>"
aws_secret_key = "<YOUR_SECRET_ACCESS_KEY>"
project_name   = "tf-free"
EOF

2. Init, plan, apply:

terraform init
terraform plan -out=tfplan.out
terraform apply tfplan.out

3. Test the pipeline end-to-end:

# Publish a message to SNS → flows through to SQS → triggers Lambda → writes to DynamoDB
aws sns publish \
  --region us-east-1 \
  --topic-arn $(terraform output -raw sns_topic_arn) \
  --message '{"hello": "from terraform"}' \
  --profile <YOUR_AWS_CLI_PROFILE>

Zero cost. Every resource in this setup falls under AWS's always-free tier. No VMs, no charges.