Building a Poor Man’s Vercel: Deploy Any Framework to AWS in Under $5/month

Last week, Vercel quoted me $20/month for a simple portfolio site that gets 1000 visits per day. GitHub Pages couldn’t handle my Next.js API routes. Netlify’s build minutes vanished in 3 days.

So I built my own Vercel clone on AWS. It handles Next.js (with ISR), Nuxt, SvelteKit, and even Remix. Total cost? $3.50/month.

Here’s exactly how to build a production-ready deployment platform that rivals Vercel’s features without the price tag.

What We’re Building

A complete deployment platform with:

  • Git push deployments (like Vercel)
  • Automatic HTTPS with custom domains
  • Server-side rendering support
  • API routes and serverless functions
  • Static asset optimization
  • Preview deployments for PRs
  • Rollbacks and versioning
  • Global CDN distribution

Tech stack:

  • S3 for static hosting
  • CloudFront for CDN
  • Lambda@Edge for SSR
  • API Gateway for serverless functions
  • CodeBuild for CI/CD
  • Route53 for DNS

The Architecture

GitHub Push → CodeBuild → S3 + Lambda → CloudFront → Users
            Build & Deploy
        - Static assets → S3
        - API routes → Lambda
        - SSR pages → Lambda@Edge

Step 1: Setting Up the Infrastructure

Let’s start with Terraform to make this reproducible:

# versions.tf
terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
  }
}

provider "aws" {
  region = "us-east-1" # Lambda@Edge functions and ACM certs for CloudFront must live in us-east-1
}

# S3 bucket for static assets
resource "aws_s3_bucket" "static_assets" {
  bucket = "${var.project_name}-static-assets"
}

resource "aws_s3_bucket_public_access_block" "static_assets" {
  bucket = aws_s3_bucket.static_assets.id

  block_public_acls       = true
  block_public_policy     = true
  ignore_public_acls      = true
  restrict_public_buckets = true
}

# S3 bucket for Lambda deployment packages
resource "aws_s3_bucket" "lambda_deployments" {
  bucket = "${var.project_name}-lambda-deployments"
}

# CloudFront Origin Access Control (OAC)
resource "aws_cloudfront_origin_access_control" "main" {
  name                              = "${var.project_name}-oac"
  description                       = "OAC for ${var.project_name}"
  origin_access_control_origin_type = "s3"
  signing_behavior                  = "always"
  signing_protocol                  = "sigv4"
}

# S3 bucket policy for CloudFront OAC
data "aws_iam_policy_document" "s3_policy" {
  statement {
    actions   = ["s3:GetObject"]
    resources = ["${aws_s3_bucket.static_assets.arn}/*"]

    principals {
      type        = "Service"
      identifiers = ["cloudfront.amazonaws.com"]
    }

    condition {
      test     = "StringEquals"
      variable = "AWS:SourceArn"
      values   = [aws_cloudfront_distribution.main.arn]
    }
  }
}

resource "aws_s3_bucket_policy" "static_assets" {
  bucket = aws_s3_bucket.static_assets.id
  policy = data.aws_iam_policy_document.s3_policy.json
}

Step 2: Lambda@Edge for Server-Side Rendering

Here’s the magic: a single Lambda@Edge function that routes every request and dispatches SSR to the right framework handler:

// lambda-edge-ssr.js
'use strict';

const path = require('path');
// AWS SDK v2, matching the other handlers; bundle it with the function if the
// runtime ships only SDK v3. Used by handleApiRoute below.
const AWS = require('aws-sdk');

// Framework detection and handlers
const FRAMEWORKS = {
  nextjs: {
    detector: (headers) => headers['x-nextjs-page'],
    handler: require('./handlers/nextjs-handler')
  },
  nuxt: {
    detector: (headers) => headers['x-nuxt-page'],
    handler: require('./handlers/nuxt-handler')
  },
  sveltekit: {
    detector: (headers) => headers['x-sveltekit-page'],
    handler: require('./handlers/sveltekit-handler')
  }
};

exports.handler = async (event, context) => {
  const request = event.Records[0].cf.request;
  const headers = request.headers;
  const uri = request.uri;
  
  // Serve static assets directly
  if (isStaticAsset(uri)) {
    return request;
  }
  
  // API routes
  if (uri.startsWith('/api/')) {
    return handleApiRoute(request);
  }
  
  // Detect framework and handle SSR
  for (const [name, framework] of Object.entries(FRAMEWORKS)) {
    if (framework.detector(headers)) {
      return framework.handler(request, context);
    }
  }
  
  // Default: try index.html for SPAs
  if (!path.extname(uri)) {
    request.uri = '/index.html';
  }
  
  return request;
};

function isStaticAsset(uri) {
  const staticExtensions = [
    '.js', '.css', '.ico', '.png', '.jpg', 
    '.jpeg', '.gif', '.svg', '.woff', '.woff2'
  ];
  return staticExtensions.some(ext => uri.endsWith(ext));
}

async function handleApiRoute(request) {
  // Forward to the API Lambda.
  // Lambda@Edge has no runtime environment variables, so in practice this
  // value is replaced with a literal string when the bundle is built.
  const apiFunction = process.env.API_LAMBDA_ARN;
  
  // Invoke the API Lambda function in its home region
  const lambda = new AWS.Lambda({ region: 'us-east-1' });
  const payload = {
    httpMethod: request.method,
    path: request.uri,
    headers: request.headers,
    body: request.body?.data ? 
      Buffer.from(request.body.data, 'base64').toString() : 
      undefined
  };
  
  try {
    const response = await lambda.invoke({
      FunctionName: apiFunction,
      Payload: JSON.stringify(payload)
    }).promise();
    
    const result = JSON.parse(response.Payload);
    
    return {
      status: String(result.statusCode),
      statusDescription: 'OK',
      headers: formatHeaders(result.headers),
      body: result.body
    };
  } catch (error) {
    console.error('API route error:', error);
    return {
      status: '500',
      statusDescription: 'Internal Server Error',
      body: 'API Error'
    };
  }
}

function formatHeaders(headers = {}) {
  // Convert plain { 'Content-Type': 'application/json' } headers from the
  // API Lambda into CloudFront's { 'content-type': [{ key, value }] } shape
  const formatted = {};
  for (const [key, value] of Object.entries(headers)) {
    formatted[key.toLowerCase()] = [{ key, value: String(value) }];
  }
  return formatted;
}

Step 3: Next.js Handler with ISR Support

The Next.js handler that makes Incremental Static Regeneration work:

// handlers/nextjs-handler.js
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

// Lambda@Edge has no runtime environment variables; these references are
// replaced with literal values when the server bundle is built.
const CACHE_BUCKET = process.env.CACHE_BUCKET;
const BUILD_ID = process.env.BUILD_ID;

async function handler(request, context) {
  const uri = request.uri;
  const cacheKey = `${BUILD_ID}${uri}`;
  
  // Check cache first
  try {
    const cached = await s3.getObject({
      Bucket: CACHE_BUCKET,
      Key: cacheKey
    }).promise();
    
    const metadata = cached.Metadata;
    const staleTime = parseInt(metadata['stale-time'] || '0');
    const now = Date.now();
    
    // Serve stale while revalidating
    if (now < staleTime) {
      return {
        status: '200',
        statusDescription: 'OK',
        headers: {
          'content-type': [{ key: 'Content-Type', value: 'text/html' }],
          'cache-control': [{ 
            key: 'Cache-Control', 
            value: `s-maxage=${metadata['revalidate']}, stale-while-revalidate` 
          }]
        },
        body: cached.Body.toString()
      };
    }
    
    // Stale: kick off background revalidation, but still serve the cached
    // copy instead of blocking the request on a full re-render
    if (metadata['revalidate']) {
      await triggerRevalidation(uri, cacheKey);
      return {
        status: '200',
        statusDescription: 'OK',
        headers: {
          'content-type': [{ key: 'Content-Type', value: 'text/html' }],
          'cache-control': [{ 
            key: 'Cache-Control', 
            value: `s-maxage=${metadata['revalidate']}, stale-while-revalidate` 
          }]
        },
        body: cached.Body.toString()
      };
    }
  } catch (error) {
    // Not in cache; fall through and render on demand
  }
  
  // Server-side render
  const html = await renderPage(uri);
  
  // Cache the result
  await s3.putObject({
    Bucket: CACHE_BUCKET,
    Key: cacheKey,
    Body: html,
    Metadata: {
      'stale-time': String(Date.now() + 60000), // 1 minute
      'revalidate': '60'
    }
  }).promise();
  
  return {
    status: '200',
    statusDescription: 'OK',
    headers: {
      'content-type': [{ key: 'Content-Type', value: 'text/html' }],
      'cache-control': [{ key: 'Cache-Control', value: 's-maxage=60' }]
    },
    body: html
  };
}

async function renderPage(uri) {
  // Import your Next.js server bundle
  const { renderToHTML } = require('./next-server');
  
  const html = await renderToHTML({
    pathname: uri,
    query: {},
    // Pass other Next.js requirements
  });
  
  return html;
}

async function triggerRevalidation(uri, cacheKey) {
  // Invoke revalidation Lambda asynchronously
  const lambda = new AWS.Lambda();
  await lambda.invoke({
    FunctionName: process.env.REVALIDATION_LAMBDA,
    InvocationType: 'Event', // Async
    Payload: JSON.stringify({ uri, cacheKey })
  }).promise();
}

module.exports = handler;
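
The handler above hands stale paths to a separate revalidation function (REVALIDATION_LAMBDA) that isn’t shown. Here’s a minimal sketch of what it might look like, reusing the same render-and-cache logic; the file name and the 60-second window are assumptions carried over from the handler above.

// revalidation-lambda.js (illustrative sketch, not part of the original setup)
// Re-renders a page in the background and refreshes its S3 cache entry.
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

// Regular Lambda (not Lambda@Edge), so environment variables work here
const CACHE_BUCKET = process.env.CACHE_BUCKET;

exports.handler = async (event) => {
  const { uri, cacheKey } = event; // payload sent by triggerRevalidation()

  // Same server bundle the edge handler uses
  const { renderToHTML } = require('./next-server');
  const html = await renderToHTML({ pathname: uri, query: {} });

  // Overwrite the cached copy and push the stale window forward
  await s3.putObject({
    Bucket: CACHE_BUCKET,
    Key: cacheKey,
    Body: html,
    ContentType: 'text/html',
    Metadata: {
      'stale-time': String(Date.now() + 60000), // 1 minute, matching the handler
      'revalidate': '60'
    }
  }).promise();

  return { revalidated: uri };
};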

Step 4: Build Pipeline with CodeBuild

The CI/CD pipeline that makes git push deployments work:

# buildspec.yml
version: 0.2

phases:
  install:
    runtime-versions:
      nodejs: 18
    commands:
      - echo Installing dependencies...
      - npm ci
      
  pre_build:
    commands:
      - echo Detecting framework...
      - |
        if [ -f "next.config.js" ]; then
          export FRAMEWORK="nextjs"
        elif [ -f "nuxt.config.js" ]; then
          export FRAMEWORK="nuxt"
        elif [ -f "svelte.config.js" ]; then
          export FRAMEWORK="sveltekit"
        else
          export FRAMEWORK="static"
        fi
      - echo "Detected framework: $FRAMEWORK"
      
  build:
    commands:
      - echo Building application...
      - |
        case $FRAMEWORK in
          nextjs)
            npm run build
            # Export static assets
            npx next export -o dist/static || true
            # Bundle server code
            npm run build:server
            ;;
          nuxt)
            npm run generate
            ;;
          sveltekit)
            npm run build
            ;;
          *)
            npm run build
            ;;
        esac
        
  post_build:
    commands:
      - echo Deploying to S3 and Lambda...
      - |
        # Upload static assets
        aws s3 sync dist/static s3://$STATIC_BUCKET/ \
          --delete \
          --cache-control "public,max-age=31536000,immutable" \
          --exclude "*.html" \
          --exclude "_next/data/*"
          
        # Upload HTML files with shorter cache
        aws s3 sync dist/static s3://$STATIC_BUCKET/ \
          --delete \
          --cache-control "public,max-age=0,must-revalidate" \
          --exclude "*" \
          --include "*.html"
          
        # Deploy Lambda functions
        if [ -d "dist/server" ]; then
          cd dist/server
          zip -r ../lambda-deployment.zip .
          cd ../..
          
          aws s3 cp dist/lambda-deployment.zip \
            s3://$LAMBDA_BUCKET/$CODEBUILD_BUILD_ID.zip
            
          aws lambda update-function-code \
            --function-name $SSR_FUNCTION \
            --s3-bucket $LAMBDA_BUCKET \
            --s3-key $CODEBUILD_BUILD_ID.zip

          # Note: CloudFront pins Lambda@Edge to a specific published version,
          # so a full deploy also needs `aws lambda publish-version` plus an
          # update of the distribution's function association.
        fi
        
      - echo Invalidating CloudFront...
      - |
        aws cloudfront create-invalidation \
          --distribution-id $DISTRIBUTION_ID \
          --paths "/*"
          
      - echo Build completed!

artifacts:
  files:
    - '**/*'
  name: build-$CODEBUILD_BUILD_ID
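
Since every build’s server bundle is kept in the deployments bucket as <build-id>.zip, the “rollbacks and versioning” item from the feature list mostly falls out of this pipeline for free. Here’s a hedged sketch of a rollback helper; the environment variable names mirror the buildspec above, and re-syncing static assets from an older CodeBuild artifact is left as a comment.

// rollback.js (sketch): repoint the SSR Lambda at a previous build's bundle
const AWS = require('aws-sdk');
const lambda = new AWS.Lambda({ region: 'us-east-1' });
const cloudfront = new AWS.CloudFront();

async function rollback(previousBuildId) {
  // Each deploy uploaded its bundle as s3://$LAMBDA_BUCKET/<build-id>.zip
  await lambda.updateFunctionCode({
    FunctionName: process.env.SSR_FUNCTION,
    S3Bucket: process.env.LAMBDA_BUCKET,
    S3Key: `${previousBuildId}.zip`
  }).promise();

  // For the Lambda@Edge function you would also publish a version and update
  // the CloudFront association; static assets would need re-syncing from the
  // build-<build-id> artifact. Both are omitted in this sketch.

  // Drop cached pages so the previous version is served immediately
  await cloudfront.createInvalidation({
    DistributionId: process.env.DISTRIBUTION_ID,
    InvalidationBatch: {
      CallerReference: `rollback-${Date.now()}`,
      Paths: { Quantity: 1, Items: ['/*'] }
    }
  }).promise();
}

module.exports = rollback;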

Step 5: CloudFront Distribution

The CDN configuration that ties it all together:

# CloudFront distribution
resource "aws_cloudfront_distribution" "main" {
  enabled             = true
  is_ipv6_enabled     = true
  default_root_object = "index.html"
  
  origin {
    domain_name              = aws_s3_bucket.static_assets.bucket_regional_domain_name
    origin_access_control_id = aws_cloudfront_origin_access_control.main.id
    origin_id                = "S3-${aws_s3_bucket.static_assets.id}"
  }
  
  # Default cache behavior (static assets)
  default_cache_behavior {
    allowed_methods  = ["GET", "HEAD"]
    cached_methods   = ["GET", "HEAD"]
    target_origin_id = "S3-${aws_s3_bucket.static_assets.id}"
    
    forwarded_values {
      query_string = false
      cookies {
        forward = "none"
      }
    }
    
    viewer_protocol_policy = "redirect-to-https"
    min_ttl                = 0
    default_ttl            = 86400
    max_ttl                = 31536000
    
    # Attach Lambda@Edge for SSR
    lambda_function_association {
      event_type   = "origin-request"
      lambda_arn   = aws_lambda_function.edge_ssr.qualified_arn
      include_body = true
    }
  }
  
  # API routes behavior
  ordered_cache_behavior {
    path_pattern     = "/api/*"
    allowed_methods  = ["DELETE", "GET", "HEAD", "OPTIONS", "PATCH", "POST", "PUT"]
    cached_methods   = ["GET", "HEAD"]
    target_origin_id = "S3-${aws_s3_bucket.static_assets.id}"
    
    forwarded_values {
      query_string = true
      headers      = ["*"]
      
      cookies {
        forward = "all"
      }
    }
    
    viewer_protocol_policy = "https-only"
    min_ttl                = 0
    default_ttl            = 0
    max_ttl                = 0
    
    lambda_function_association {
      event_type   = "origin-request"
      lambda_arn   = aws_lambda_function.edge_ssr.qualified_arn
      include_body = true
    }
  }
  
  price_class = "PriceClass_100" # Use only NA and EU edge locations to save cost
  
  restrictions {
    geo_restriction {
      restriction_type = "none"
    }
  }
  
  viewer_certificate {
    cloudfront_default_certificate = true
  }
  
  custom_error_response {
    error_code         = 404
    response_code      = 200
    response_page_path = "/index.html"
  }
}

Step 6: GitHub Integration

Set up automatic deployments on push:

# .github/workflows/deploy.yml
name: Deploy to AWS

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  deploy:
    runs-on: ubuntu-latest
    
    steps:
      - uses: actions/checkout@v3
      
      - name: Configure AWS credentials
        uses: aws-actions/configure-aws-credentials@v2
        with:
          aws-access-key-id: ${{ secrets.AWS_ACCESS_KEY_ID }}
          aws-secret-access-key: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
          aws-region: us-east-1
          
      - name: Start CodeBuild
        run: |
          BUILD_ID=$(aws codebuild start-build \
            --project-name poor-mans-vercel \
            --source-version ${{ github.sha }} \
            --environment-variables-override \
              name=DEPLOY_ENV,value=${{ github.ref == 'refs/heads/main' && 'production' || 'preview' }} \
            --query 'build.id' \
            --output text)
            
          echo "Build started: $BUILD_ID"
          
          # Poll until the build completes (the AWS CLI has no CodeBuild waiter)
          STATUS="IN_PROGRESS"
          while [ "$STATUS" = "IN_PROGRESS" ]; do
            sleep 15
            STATUS=$(aws codebuild batch-get-builds \
              --ids $BUILD_ID \
              --query 'builds[0].buildStatus' \
              --output text)
          done

          if [ "$STATUS" != "SUCCEEDED" ]; then
            echo "Build failed with status: $STATUS"
            exit 1
          fi
          
      - name: Comment PR with preview URL
        if: github.event_name == 'pull_request'
        uses: actions/github-script@v6
        with:
          script: |
            github.rest.issues.createComment({
              issue_number: context.issue.number,
              owner: context.repo.owner,
              repo: context.repo.repo,
              body: 'Preview deployed to: https://preview-${{ github.event.pull_request.number }}.example.com'
            })

Custom Domains with Route53

Add custom domain support:

# Route53 and ACM for custom domains
resource "aws_acm_certificate" "main" {
  domain_name       = var.domain_name
  validation_method = "DNS"
  
  subject_alternative_names = [
    "*.${var.domain_name}"
  ]
  
  lifecycle {
    create_before_destroy = true
  }
}

resource "aws_route53_record" "cert_validation" {
  for_each = {
    for dvo in aws_acm_certificate.main.domain_validation_options : dvo.domain_name => {
      name   = dvo.resource_record_name
      record = dvo.resource_record_value
      type   = dvo.resource_record_type
    }
  }
  
  allow_overwrite = true
  name            = each.value.name
  records         = [each.value.record]
  ttl             = 60
  type            = each.value.type
  zone_id         = aws_route53_zone.main.zone_id
}

resource "aws_acm_certificate_validation" "main" {
  certificate_arn         = aws_acm_certificate.main.arn
  validation_record_fqdns = [for record in aws_route53_record.cert_validation : record.fqdn]
}

# Add these arguments to the aws_cloudfront_distribution.main from Step 5
# (don't declare a second resource with the same name)
resource "aws_cloudfront_distribution" "main" {
  # ... previous configuration ...
  
  aliases = [var.domain_name, "*.${var.domain_name}"]
  
  viewer_certificate {
    # Referencing the validation resource makes Terraform wait until the
    # certificate has actually been issued
    acm_certificate_arn = aws_acm_certificate_validation.main.certificate_arn
    ssl_support_method  = "sni-only"
  }
}

Cost Breakdown

Here’s what this setup actually costs:

Monthly costs (1,000 visits/day, roughly 10GB of CloudFront transfer):
- S3 Storage (10GB): $0.23
- S3 Requests: $0.40
- CloudFront Transfer: $0.85
- CloudFront Requests: $0.75
- Lambda@Edge: $0.50
- API Lambda: $0.20
- CodeBuild (10 builds): $0.50
- Route53: $0.50

Total: $3.93/month

Compare to managed platforms:

  • Vercel Pro: $20/month
  • Netlify Pro: $19/month
  • AWS Amplify: $15/month

Performance Optimizations

Make it blazing fast:

// Optimize images on the fly with Lambda@Edge (origin-request trigger)
const AWS = require('aws-sdk');
const sharp = require('sharp'); // must be bundled and compiled for the Lambda Linux runtime
const s3 = new AWS.S3();
// Lambda@Edge has no environment variables; the bucket name is baked in at build time
const STATIC_BUCKET = '<project>-static-assets';

exports.imageOptimizer = async (event) => {
  const request = event.Records[0].cf.request;
  const uri = request.uri;
  
  // Check if image optimization is requested
  const match = uri.match(/(.+)\.(jpg|jpeg|png|webp)$/);
  if (!match) return request;
  
  const params = new URLSearchParams(request.querystring);
  const width = params.get('w');
  const quality = params.get('q') || '85';
  
  if (!width) return request;
  
  // Modify request to optimized version
  request.uri = `${match[1]}-w${width}-q${quality}.webp`;
  
  // Check if optimized version exists
  try {
    await s3.headObject({
      Bucket: STATIC_BUCKET,
      Key: request.uri.slice(1)
    }).promise();
    
    return request;
  } catch (error) {
    // Generate optimized version
    const original = await s3.getObject({
      Bucket: STATIC_BUCKET,
      Key: uri.slice(1)
    }).promise();
    
    const optimized = await sharp(original.Body)
      .resize(parseInt(width))
      .webp({ quality: parseInt(quality) })
      .toBuffer();
      
    // Save optimized version
    await s3.putObject({
      Bucket: STATIC_BUCKET,
      Key: request.uri.slice(1),
      Body: optimized,
      ContentType: 'image/webp',
      CacheControl: 'public,max-age=31536000,immutable'
    }).promise();
    
    return request;
  }
};
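
On the page itself, requesting a resized variant is just a matter of adding the w and q query parameters the function looks for; note that the cache behavior serving these images must forward query strings (unlike the default behavior in Step 5). A tiny illustrative helper:

// Illustrative helper for building image URLs the edge optimizer understands
function optimizedSrc(path, width, quality = 85) {
  return `${path}?w=${width}&q=${quality}`;
}

// <img src={optimizedSrc('/images/hero.jpg', 640)} />
// is rewritten at the edge to /images/hero-w640-q85.webp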

Preview Deployments

Create preview environments for PRs:

// Preview deployment handler (a regular Lambda or CI step, so environment
// variables are available here, unlike Lambda@Edge)
const AWS = require('aws-sdk');
const cloudfront = new AWS.CloudFront();
const route53 = new AWS.Route53();

const PREVIEW_BUCKET = process.env.PREVIEW_BUCKET;
const BASE_DOMAIN = process.env.BASE_DOMAIN;
const ZONE_ID = process.env.ZONE_ID;

async function createPreviewDeployment(prNumber) {
  const previewDomain = `preview-${prNumber}.${BASE_DOMAIN}`;
  
  // Create preview CloudFront distribution
  const distribution = await cloudfront.createDistribution({
    DistributionConfig: {
      CallerReference: `preview-${prNumber}-${Date.now()}`,
      Aliases: {
        Quantity: 1,
        Items: [previewDomain]
      },
      DefaultRootObject: 'index.html',
      Origins: {
        Quantity: 1,
        Items: [{
          Id: `S3-preview-${prNumber}`,
          DomainName: `${PREVIEW_BUCKET}.s3.amazonaws.com`,
          S3OriginConfig: {
            OriginAccessIdentity: ''
          }
        }]
      },
      // ... rest of config
    }
  }).promise();
  
  // Create Route53 record
  await route53.changeResourceRecordSets({
    HostedZoneId: ZONE_ID,
    ChangeBatch: {
      Changes: [{
        Action: 'CREATE',
        ResourceRecordSet: {
          Name: previewDomain,
          Type: 'A',
          AliasTarget: {
            HostedZoneId: 'Z2FDTNDATAQYW2', // CloudFront zone ID
            DNSName: distribution.Distribution.DomainName,
            EvaluateTargetHealth: false
          }
        }
      }]
    }
  }).promise();
  
  return previewDomain;
}
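
When the PR is closed, the preview should be torn down as well. A rough counterpart to the function above; the parameters are assumptions, and because a CloudFront distribution can only be deleted after it has been disabled and fully redeployed, this sketch stops at disabling it and removing the DNS record.

// Preview teardown (sketch): run when the PR is closed
async function deletePreviewDeployment(prNumber, distributionId, distributionDomain) {
  const previewDomain = `preview-${prNumber}.${BASE_DOMAIN}`;

  // Remove the DNS record (the record set must match the one created above)
  await route53.changeResourceRecordSets({
    HostedZoneId: ZONE_ID,
    ChangeBatch: {
      Changes: [{
        Action: 'DELETE',
        ResourceRecordSet: {
          Name: previewDomain,
          Type: 'A',
          AliasTarget: {
            HostedZoneId: 'Z2FDTNDATAQYW2', // CloudFront zone ID
            DNSName: distributionDomain,
            EvaluateTargetHealth: false
          }
        }
      }]
    }
  }).promise();

  // Disable the distribution; delete it later (e.g. from a scheduled cleanup
  // job) once the disabled state has finished deploying
  const { DistributionConfig, ETag } = await cloudfront.getDistributionConfig({
    Id: distributionId
  }).promise();
  DistributionConfig.Enabled = false;
  await cloudfront.updateDistribution({
    Id: distributionId,
    IfMatch: ETag,
    DistributionConfig
  }).promise();
}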

Monitoring and Alerts

Know when things break. The alarms below only change state; wire them to an SNS topic via alarm_actions to actually get notified:

# CloudWatch alarms
resource "aws_cloudwatch_metric_alarm" "high_4xx_errors" {
  alarm_name          = "${var.project_name}-high-4xx-errors"
  comparison_operator = "GreaterThanThreshold"
  evaluation_periods  = "2"
  metric_name         = "4xxErrorRate"
  namespace           = "AWS/CloudFront"
  period              = "300"
  statistic           = "Average"
  threshold           = "5"
  alarm_description   = "This metric monitors 4xx error rate"
  
  dimensions = {
    DistributionId = aws_cloudfront_distribution.main.id
  }
}

resource "aws_cloudwatch_metric_alarm" "high_origin_latency" {
  alarm_name          = "${var.project_name}-high-origin-latency"
  comparison_operator = "GreaterThanThreshold"
  evaluation_periods  = "2"
  metric_name         = "OriginLatency"
  namespace           = "AWS/CloudFront"
  period              = "300"
  statistic           = "Average"
  threshold           = "1000"
  alarm_description   = "This metric monitors origin latency"
  
  dimensions = {
    DistributionId = aws_cloudfront_distribution.main.id
  }
}

The “But What About…” Section

“What about websockets?” Use API Gateway WebSocket APIs - adds ~$1/month for low traffic.

“What about databases?”

  • DynamoDB on-demand: Perfect for low traffic
  • RDS Aurora Serverless v2: Scales to zero
  • PlanetScale free tier: Good MySQL option

“What about cron jobs?” EventBridge + Lambda - practically free for hourly jobs.

“What about file uploads?”
Pre-signed S3 URLs - upload directly from browser.
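
For uploads, a small handler on the regular API Lambda can hand the browser a pre-signed PUT URL so files go straight to S3 and never pass through your functions. A minimal sketch; the uploads bucket and the five-minute expiry are assumptions.

// api/upload-url.js (sketch): return a pre-signed S3 PUT URL to the browser
const AWS = require('aws-sdk');
const s3 = new AWS.S3();

exports.handler = async (event) => {
  const { filename, contentType } = JSON.parse(event.body || '{}');

  const uploadUrl = await s3.getSignedUrlPromise('putObject', {
    Bucket: process.env.UPLOADS_BUCKET, // hypothetical uploads bucket
    Key: `uploads/${Date.now()}-${filename}`,
    ContentType: contentType,
    Expires: 300 // five minutes
  });

  return {
    statusCode: 200,
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ uploadUrl })
  };
};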

Complete Deployment Script

Put it all together:

#!/bin/bash
# deploy.sh - One command deployment

set -e

PROJECT_NAME=${1:-my-app}
DOMAIN=${2:-}

echo "Deploying Poor Man's Vercel..."

# Initialize Terraform
cd infrastructure
terraform init

# Plan deployment
terraform plan -var="project_name=$PROJECT_NAME" -var="domain_name=$DOMAIN" -out=tfplan

# Apply
terraform apply tfplan

# Get outputs
STATIC_BUCKET=$(terraform output -raw static_bucket)
DISTRIBUTION_ID=$(terraform output -raw distribution_id)
CODEBUILD_PROJECT=$(terraform output -raw codebuild_project)

# Trigger first build
aws codebuild start-build --project-name $CODEBUILD_PROJECT

echo "Deployment complete!"
echo "CloudFront URL: $(terraform output -raw cloudfront_url)"
echo "Custom domain: https://$DOMAIN (if configured)"

Results After 3 Months

My “Poor Man’s Vercel” stats:

  • Uptime: 99.95% (one 20-minute outage)
  • Build time: 45-90 seconds average
  • TTFB: 50-150ms globally
  • Total cost: $3.50-4.50/month
  • Sites deployed: 12 (personal projects + client demos)

When to Use This vs Vercel

Use this when:

  • You’re cost-conscious
  • You want to learn AWS
  • You need custom configurations
  • You’re already using AWS services

Use Vercel when:

  • You value your time over money
  • You need their edge middleware
  • You want zero-maintenance deployments
  • You’re part of a team

Conclusion

Is this as polished as Vercel? No. Does it handle 90% of use cases for roughly a fifth of the cost? Absolutely.

The real value isn’t just saving $15/month. It’s understanding how modern deployment platforms work under the hood. Plus, you can customize anything - want to add image optimization? Video transcoding? ML inference? Just add another Lambda.

The web should be accessible to build on. Your deployment platform shouldn’t cost more than your domain name.


Built your own deployment platform? I’d love to hear what creative solutions you came up with. Find me on Twitter @TheLogicalDev.

The complete infrastructure code is available on GitHub. Deploy your first app in under 10 minutes.