diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..03e5e1d71bd20e52b66524709d2b4d65ba2814fd
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,16 @@
+name: gcp-cloud-run-app-testing
+
+services:
+  test:
+    image: registry.gitlab.developers.cam.ac.uk/uis/devops/infra/dockerimages/logan-terraform:1.6
+    entrypoint: ["bash", "-c"]
+    environment:
+      # This unsets GOOGLE_APPLICATION_CREDENTIALS, which is not required here but is set by the logan-terraform image.
+      - GOOGLE_APPLICATION_CREDENTIALS=
+      - GOOGLE_IMPERSONATE_SERVICE_ACCOUNT=terraform-deploy@infra-testing-int-e2395220.iam.gserviceaccount.com
+    volumes:
+      - .:/workdir:rw
+      - ~/.config/gcloud/application_default_credentials.json:/root/.config/gcloud/application_default_credentials.json:ro
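+
+# Example invocation (a sketch of what run_tests.sh runs; assumes gcloud application-default credentials exist on the host):
+#   docker compose run --rm test "terraform init && terraform test --var-file=tests/tests.tfvars"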
diff --git a/outputs.tf b/outputs.tf
index 91d01d1cf963e7057efe8e7fc3d309190cbeb7c1..3cce8b2b54bb5cb5bc2027897185f1c9ec06cbd4 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -1,5 +1,3 @@
-# outputs.tf defines outputs for the module.
-
 output "service" {
   description = "Webapp Cloud Run service resource"
   value       = google_cloud_run_v2_service.webapp
@@ -10,9 +8,20 @@ output "service_account" {
   value       = google_service_account.webapp
 }
 
+output "network_endpoint_group" {
+  description = "Network endpoint group for the load balancer."
+  value       = try(google_compute_region_network_endpoint_group.webapp[0], null)
+}
+
+output "ssl_policy" {
+  description = "The ssl_policy object, if one is being created."
+  value       = try(google_compute_ssl_policy.default[0], null)
+}
+
 output "load_balancer" {
   description = "Load balancer for the webapp"
   value       = try(module.webapp_http_load_balancer[0], null)
+  sensitive   = true
 }
 
 output "static_egress_ip" {
diff --git a/run_tests.sh b/run_tests.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0bdc75a69bb3537cfb7b6cab36c8710782719421
--- /dev/null
+++ b/run_tests.sh
@@ -0,0 +1,95 @@
+#! /usr/bin/env bash
+
+set -e
+shopt -s expand_aliases
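+# expand_aliases is required so that the sed -> gsed alias defined below takes effect in this non-interactive script.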
+
+usage() {
+    cat << EOF
+Usage:
+    ./run_tests.sh
+    ./run_tests.sh -t tests/default.tftest.hcl
+    ./run_tests.sh -t tests/default.tftest.hcl -t tests/cloudsql.tftest.hcl
+
+Options:
+    -c                      Run in CI/CD job mode, meaning that terraform is executed directly rather than via
+                            docker compose.
+    -t <test file path>     Execute specific test files only (all tests are executed by default).
+                            This option can be specified multiple times.
+    -v                      Add the verbose switch to the terraform test command.
+EOF
+}
+
+# The sed commands in this script must use GNU sed on macOS, as the BSD sed shipped with macOS handles in-place edits differently.
+if [[ "$(uname)" == "Darwin" ]]; then
+    if [ ! "$(command -v gsed)" ]; then
+        echo -e "This script requires GNU sed on macOS. Install it with 'brew install gnu-sed'"
+        exit 1
+    fi
+    alias sed="gsed"
+fi
+
+cleanup() {
+    # Re-enable the prevent_destroy lifecycle argument if it was commented out by the sed command later in this
+    # script.
+    sed -i 's/^    # prevent_destroy = true/    prevent_destroy = true/g' static_egress_ip.tf
+
+    # Remove the lock file as it is only needed to enable the test run.
+    if [[ -f .terraform.lock.hcl ]]; then
+        rm .terraform.lock.hcl
+    fi
+
+    # Ensure the original versions.tf.json is restored.
+    if [[ -f versions.tf.json.bak ]]; then
+        mv versions.tf.json.bak versions.tf.json
+    fi
+}
+
+trap 'cleanup' EXIT INT TERM
+
+while getopts 'cht:v' option; do
+    case $option in
+        c) ci_run=1;;
+        h) usage; exit;;
+        t) tests+=("$OPTARG");;
+        v) verbose=1;;
+        *) usage; exit 1;;
+    esac
+done
+shift $((OPTIND-1))
+
+test_args=("--var-file=tests/tests.tfvars")
+
+if [[ "${#tests[@]}" -gt 0 ]]; then
+    for test in "${tests[@]}"; do
+        test_args+=("--filter=$test")
+    done
+fi
+
+if [[ -n $verbose ]]; then
+    test_args+=(--verbose)
+fi
+
+# Comment out the prevent_destroy lifecycle argument otherwise the tests will fail as they cannot tear down the
+# resource.
+sed -i 's/^    prevent_destroy = true/    # prevent_destroy = true/g' static_egress_ip.tf
+
+# The terraform testing framework doesn't yet have a nice way to target specific versions of a provider. Therefore, we
+# have to programmatically change the Google provider version constraint in the versions.tf.json file for the time being.
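+# For example (illustrative constraint value): GOOGLE_PROVIDER_VERSION_CONSTRAINT="~> 5.0" ./run_tests.sh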
+if [[ -n $GOOGLE_PROVIDER_VERSION_CONSTRAINT ]]; then
+    mv versions.tf.json versions.tf.json.bak
+    jq ".terraform.required_providers.google.version |= \"$GOOGLE_PROVIDER_VERSION_CONSTRAINT\"" versions.tf.json.bak \
+        > versions.tf.json
+fi
+
+if [[ -n $ci_run ]]; then
+    terraform init
+    terraform test "${test_args[@]}"
+else
+    docker compose run --rm test "$(cat << EOF
+terraform init
+terraform test ${test_args[@]}
+EOF
+    )"
+fi
diff --git a/tests/cloud_run_service.tftest.hcl b/tests/cloud_run_service.tftest.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..5cab38f235bd360dbb406590599007ddf72742c4
--- /dev/null
+++ b/tests/cloud_run_service.tftest.hcl
@@ -0,0 +1,320 @@
+run "setup" {
+  variables {
+    create_test_secrets = true
+  }
+
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_service_with_default_variable_values" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.ingress == "INGRESS_TRAFFIC_ALL"
+    error_message = "Ingress should be 'INGRESS_TRAFFIC_ALL'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.launch_stage == "GA"
+    error_message = "Launch stage should be 'GA'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.location == "europe-west2"
+    error_message = "Location should be 'europe-west2'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].execution_environment == "EXECUTION_ENVIRONMENT_GEN1"
+    error_message = "Execution environment should be 'EXECUTION_ENVIRONMENT_GEN1'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].timeout == "300s"
+    error_message = "Timeout should be '300s'."
+  }
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.template[0].containers) == 1
+    error_message = "A single container block should be defined."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service_iam_member.webapp_all_users_invoker[0].role == "roles/run.invoker"
+    error_message = "google_cloud_run_v2_service_iam_member.webapp_all_users_invoker.role should be 'roles/run.invoker'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service_iam_member.webapp_all_users_invoker[0].member == "allUsers"
+    error_message = "google_cloud_run_v2_service_iam_member.webapp_all_users_invoker.member should be 'allUsers'."
+  }
+  assert {
+    condition     = google_service_account.webapp.name != null
+    error_message = "A dedicated service account should be created for the Cloud Run service."
+  }
+}
+
+run "test_container_resources_block" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+        resources = {
+          startup_cpu_boost = true
+          cpu_idle          = true
+          limits = {
+            cpu = 2
+          }
+        }
+      }
+    }
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].resources[0].startup_cpu_boost == true
+    error_message = "startup_cpu_boost should be 'true'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].resources[0].cpu_idle == true
+    error_message = "cpu_idle should be 'true'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].resources[0].limits.cpu == "2"
+    error_message = "limits.cpu should be '2'."
+  }
+}
+
+run "setup_secret_iam" {
+  variables {
+    create_test_secrets     = true
+    create_test_secrets_iam = true
+  }
+
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_env_vars_and_secrets" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+        env = [
+          {
+            name  = "HELLO",
+            value = "WORLD!"
+          },
+          {
+            name = "SECRET",
+            value_source = {
+              secret_key_ref = {
+                secret = run.setup.secret_ids[0]
+              }
+            }
+          }
+        ]
+        volume_mounts = [
+          {
+            name       = "secret-volume",
+            mount_path = "/secrets"
+          }
+        ]
+      }
+    }
+    volumes = [
+      {
+        name = "secret-volume",
+        secret = {
+          secret = run.setup.secret_ids[1]
+          items = [
+            {
+              version = "1",
+              path    = "my-secret"
+            }
+          ]
+        }
+      }
+    ]
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].env[0].name == "HELLO"
+    error_message = "A standard environment variable with the name 'HELLO' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].env[0].value == "WORLD!"
+    error_message = "A standard environment variable with the value 'WORLD!' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].env[1].name == "SECRET"
+    error_message = "A secret-backed environment variable with the name 'SECRET' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].env[1].value_source[0].secret_key_ref[0].secret == run.setup.secret_ids[0]
+    error_message = "A secret-backed environment variable referencing the 'run.setup.secret_ids[0]' secret should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].name == "secret-volume"
+    error_message = "A volume mount with the name 'secret-volume' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].mount_path == "/secrets"
+    error_message = "A volume mount with the mount path '/secrets' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].volumes[0].name == "secret-volume"
+    error_message = "A volume with the name 'secret-volume' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].volumes[0].secret[0].secret == run.setup.secret_ids[1]
+    error_message = "A secret-backed volume referencing the 'run.setup.secret_ids[0]' secret should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].volumes[0].secret[0].items[0].version == "1"
+    error_message = "Secret version should be '1'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].volumes[0].secret[0].items[0].path == "my-secret"
+    error_message = "Secret path should be 'my-secret'."
+  }
+}
+
+run "test_service_with_multiple_containers" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp1 = {
+        name  = "webapp-1"
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+        ports = [
+          {
+            container_port = 8080
+          }
+        ]
+      }
+      webapp2 = {
+        name  = "webapp-2"
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+  }
+
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.template[0].containers) == 2
+    error_message = "The Cloud Run service should have two containers defined."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].name == "webapp-1"
+    error_message = "The first container definition should be configured with the name 'webapp-1'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].ports[0].container_port == 8080
+    error_message = "The first container definition should be configured with a single container_port of 8080."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[1].name == "webapp-2"
+    error_message = "The second container definition should be configured with the name 'webapp-2'."
+  }
+}
+
+run "test_traffic_distribution_across_multiple_revisions_initial_deploy" {
+  variables {
+    name     = run.setup.random_name
+    revision = "${run.setup.random_name}-v1-0-0"
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    traffic = [
+      {
+        type     = "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"
+        revision = "${run.setup.random_name}-v1-0-0"
+        percent  = 100
+      }
+    ]
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].revision == "${run.setup.random_name}-v1-0-0"
+    error_message = "The service revision name should be '${run.setup.random_name}-v1-0-0'."
+  }
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.traffic) == 1
+    error_message = "There should be a single traffic block configured."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[0].type == "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"
+    error_message = "There should be a single traffic block with the type 'TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[0].revision == "${run.setup.random_name}-v1-0-0"
+    error_message = "There should be a single traffic block with the revision '${run.setup.random_name}-v1-0-0'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[0].percent == 100
+    error_message = "There should be a single traffic block with percent set to 100."
+  }
+}
+
+run "test_traffic_distribution_across_multiple_revisions_split_50_50" {
+  variables {
+    name     = run.setup.random_name
+    revision = "${run.setup.random_name}-v1-1-0"
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    traffic = [
+      {
+        type     = "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"
+        revision = "${run.setup.random_name}-v1-0-0"
+        percent  = 50
+      },
+      {
+        type     = "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"
+        revision = "${run.setup.random_name}-v1-1-0"
+        percent  = 50
+      }
+    ]
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].revision == "${run.setup.random_name}-v1-1-0"
+    error_message = "The service revision name should be '${run.setup.random_name}-v1-1-0'."
+  }
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.traffic) == 2
+    error_message = "There should be two traffic blocks configured."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[0].type == "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"
+    error_message = "The first traffic block should be configured with the type 'TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[0].revision == "${run.setup.random_name}-v1-0-0"
+    error_message = "The first traffic block should be configured with the revision '${run.setup.random_name}-v1-0-0'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[0].percent == 50
+    error_message = "The first traffic block should be configured with percent set to 50."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[1].type == "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"
+    error_message = "The second traffic block should be configured with the type 'TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[1].revision == "${run.setup.random_name}-v1-1-0"
+    error_message = "The second traffic block should be configured with the revision '${run.setup.random_name}-v1-1-0'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[1].percent == 50
+    error_message = "The second traffic block should be configured with percent set to 50."
+  }
+}
diff --git a/tests/cloudsql.tftest.hcl b/tests/cloudsql.tftest.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..8cede82a3e327e86967393d990cdead49b33a7cd
--- /dev/null
+++ b/tests/cloudsql.tftest.hcl
@@ -0,0 +1,229 @@
+run "setup" {
+  variables {
+    create_test_sql_instances = true
+  }
+
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_cloudsql_mount_using_helper_variable" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    mount_cloudsql_instance = run.setup.instance_connection_names[0]
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].volumes[0].name == "cloudsql"
+    error_message = "A single volume should be configured named 'cloudsql'."
+  }
+  assert {
+    condition = [
+      for instance in google_cloud_run_v2_service.webapp.template[0].volumes[0].cloud_sql_instance[0].instances : instance
+    ][0] == run.setup.instance_connection_names[0]
+    error_message = "The cloudsql volume should be configured to use the run.setup.instance_connection_names[0] instance connection name."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].name == "cloudsql"
+    error_message = "A single volume mount should be configured with the name 'cloudsql'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].mount_path == "/cloudsql"
+    error_message = "A single volume mount should be configured with the mount path '/cloudsql'."
+  }
+}
+
+run "test_cloudsql_mount_to_single_container" {
+  variables {
+    name = run.setup.random_name
+    volumes = [
+      {
+        name = "cloudsql"
+        cloud_sql_instance = {
+          instances = [run.setup.instance_connection_names[0]]
+        }
+      }
+    ]
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+        volume_mounts = [
+          {
+            name       = "cloudsql"
+            mount_path = "/cloudsql"
+          }
+        ]
+      }
+    }
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].volumes[0].name == "cloudsql"
+    error_message = "A single volume should be configured named 'cloudsql'."
+  }
+  assert {
+    condition = (
+      [for instance in google_cloud_run_v2_service.webapp.template[0].volumes[0].cloud_sql_instance[0].instances : instance][0]
+      == run.setup.instance_connection_names[0]
+    )
+    error_message = "The cloudsql volume should be configured to use the run.setup.instance_connection_names[0] instance connection name."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].name == "cloudsql"
+    error_message = "A single volume mount should be configured with the name 'cloudsql'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].mount_path == "/cloudsql"
+    error_message = "A single volume mount should be configured with the mount path '/cloudsql'."
+  }
+}
+
+run "test_cloudsql_mount_multiple_cloudsql_instances" {
+  variables {
+    name = run.setup.random_name
+    volumes = [
+      {
+        name = "cloudsql"
+        cloud_sql_instance = {
+          instances = run.setup.instance_connection_names
+        }
+      }
+    ]
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+        volume_mounts = [
+          {
+            name       = "cloudsql"
+            mount_path = "/cloudsql"
+          }
+        ]
+      }
+    }
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].volumes[0].name == "cloudsql"
+    error_message = "A single volume should be configured named 'cloudsql'."
+  }
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.template[0].volumes[0].cloud_sql_instance[0].instances) == 2
+    error_message = "The cloudsql volume should be configured with exactly 2 instances."
+  }
+  assert {
+    condition = (
+      contains(
+        google_cloud_run_v2_service.webapp.template[0].volumes[0].cloud_sql_instance[0].instances,
+        run.setup.instance_connection_names[0]
+      )
+      && contains(
+        google_cloud_run_v2_service.webapp.template[0].volumes[0].cloud_sql_instance[0].instances,
+        run.setup.instance_connection_names[1]
+      )
+    )
+    error_message = "The cloudsql volume should be configured with the two instance connection names defined in var.volumes."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].name == "cloudsql"
+    error_message = "A single volume mount should be configured with the name 'cloudsql'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].mount_path == "/cloudsql"
+    error_message = "A single volume mount should be configured with the mount path '/cloudsql'."
+  }
+}
+
+run "test_cloudsql_mount_to_pre_deploy_job_using_helper_variable" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_pre_deploy_job = true
+    pre_deploy_job_container = {
+      image   = "golang:latest"
+      command = ["go"]
+      args    = ["version"]
+    }
+    pre_deploy_job_mount_cloudsql_instance = run.setup.instance_connection_names[0]
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].name == "cloudsql"
+    error_message = "A single volume should be configured named 'cloudsql'."
+  }
+  assert {
+    condition = (
+      [for instance in google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].cloud_sql_instance[0].instances : instance][0]
+      == run.setup.instance_connection_names[0]
+    )
+    error_message = "The cloudsql volume should be configured to use the run.setup.instance_connection_names[0] instance connection name."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].volume_mounts[0].name == "cloudsql"
+    error_message = "A single volume mount should be configured with the name 'cloudsql'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].volume_mounts[0].mount_path == "/cloudsql"
+    error_message = "A single volume mount should be configured with the mount path '/cloudsql'."
+  }
+}
+
+run "test_cloudsql_mount_to_pre_deploy_job" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_pre_deploy_job = true
+    pre_deploy_job_volumes = [
+      {
+        name = "cloudsql"
+        cloud_sql_instance = {
+          instances = [run.setup.instance_connection_names[0]]
+        }
+      }
+    ]
+    pre_deploy_job_container = {
+      image   = "golang:latest"
+      command = ["go"]
+      args    = ["version"]
+      volume_mounts = [
+        {
+          name       = "cloudsql"
+          mount_path = "/cloudsql"
+        }
+      ]
+    }
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].name == "cloudsql"
+    error_message = "A single volume should be configured named 'cloudsql'."
+  }
+  assert {
+    condition = (
+      [for instance in google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].cloud_sql_instance[0].instances : instance][0]
+      == run.setup.instance_connection_names[0]
+    )
+    error_message = "The cloudsql volume should be configured to use the run.setup.instance_connection_names[0] instance connection name."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].volume_mounts[0].name == "cloudsql"
+    error_message = "A single volume mount should be configured with the name 'cloudsql'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].volume_mounts[0].mount_path == "/cloudsql"
+    error_message = "A single volume mount should be configured with the mount path '/cloudsql'."
+  }
+}
diff --git a/tests/load_balancer.tftest.hcl b/tests/load_balancer.tftest.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..ed47d2f59ce53ea6c089b64f969ceb45ebd585af
--- /dev/null
+++ b/tests/load_balancer.tftest.hcl
@@ -0,0 +1,103 @@
+run "setup" {
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_service_with_default_variable_values_and_load_balancer_enabled" {
+  variables {
+    name                 = run.setup.random_name
+    enable_load_balancer = true
+    dns_names = {
+      webapp = "${run.setup.random_name}.test.example.gcp.uis.cam.ac.uk"
+    }
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.ingress == "INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER"
+    error_message = "Ingress should be 'INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.launch_stage == "GA"
+    error_message = "Launch stage should be 'GA'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.location == "europe-west2"
+    error_message = "Location should be 'europe-west2'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].execution_environment == "EXECUTION_ENVIRONMENT_GEN1"
+    error_message = "Execution environment should be 'EXECUTION_ENVIRONMENT_GEN1'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].timeout == "300s"
+    error_message = "Timeout should be '300s'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service_iam_member.webapp_all_users_invoker[0].role == "roles/run.invoker"
+    error_message = "google_cloud_run_v2_service_iam_member.webapp_all_users_invoker.role should be 'roles/run.invoker'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service_iam_member.webapp_all_users_invoker[0].member == "allUsers"
+    error_message = "google_cloud_run_v2_service_iam_member.webapp_all_users_invoker.member should be 'allUsers'."
+  }
+  assert {
+    condition     = google_service_account.webapp.name != null
+    error_message = "A dedicated service account should be created for the Cloud Run service."
+  }
+  assert {
+    condition     = google_compute_region_network_endpoint_group.webapp[0].network_endpoint_type == "SERVERLESS"
+    error_message = "Network endpoint group type should be 'SERVERLESS'."
+  }
+  assert {
+    condition     = google_compute_region_network_endpoint_group.webapp[0].cloud_run[0].service == google_cloud_run_v2_service.webapp.name
+    error_message = "Network endpoint group must contain the created Cloud Run service in its definition."
+  }
+  assert {
+    condition     = google_compute_ssl_policy.default[0].min_tls_version == "TLS_1_2"
+    error_message = "Minimum TLS version should be 'TLS_1_2'."
+  }
+  assert {
+    condition     = google_compute_ssl_policy.default[0].profile == "MODERN"
+    error_message = "SSL profile should be 'MODERN'."
+  }
+  assert {
+    condition     = module.webapp_http_load_balancer[0].backend_services["default"].protocol == "HTTP"
+    error_message = "The default backend service protocol should be 'HTTP'."
+  }
+  assert {
+    condition     = module.webapp_http_load_balancer[0].backend_services["default"].enable_cdn == false
+    error_message = "The default backend service should not enable CDN."
+  }
+  assert {
+    condition     = module.webapp_http_load_balancer[0].backend_services["default"].log_config[0].enable == true
+    error_message = "The default backend service log config should be enabled."
+  }
+  assert {
+    condition     = module.webapp_http_load_balancer[0].backend_services["default"].log_config[0].sample_rate == 1.0
+    error_message = "The default backend service log sample rate should be '1.0'."
+  }
+  assert {
+    condition     = length(module.webapp_http_load_balancer[0].backend_services["default"].backend) == 1
+    error_message = "The default backend service should be configured with a single backend block."
+  }
+  assert {
+    condition = contains([
+      for backend in module.webapp_http_load_balancer[0].backend_services["default"].backend : backend.group
+    ], google_compute_region_network_endpoint_group.webapp[0].self_link)
+    error_message = "The default backend service group should be configured to use the created network_endpoint_group."
+  }
+  assert {
+    condition     = module.webapp_http_load_balancer[0].http_proxy != null
+    error_message = "A http proxy resource should be created by the load balancer module."
+  }
+  assert {
+    condition     = module.webapp_http_load_balancer[0].https_proxy != null
+    error_message = "A https proxy resource should be created by the load balancer module."
+  }
+}
diff --git a/tests/monitoring.tftest.hcl b/tests/monitoring.tftest.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..73a3219fb5c30b539cf6479427de6a74eaca096e
--- /dev/null
+++ b/tests/monitoring.tftest.hcl
@@ -0,0 +1,315 @@
+run "setup" {
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_monitoring_with_alert_policies_created_in_default_project" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_monitoring = true
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.ingress == "INGRESS_TRAFFIC_ALL"
+    error_message = "Ingress should be 'INGRESS_TRAFFIC_ALL'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].ssl_cert_expiry_alert_policy.enabled == true
+    error_message = "SSL cert expiry alert policy should be enabled."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].ssl_cert_expiry_alert_policy.project == var.project
+    error_message = "SSL cert expiry alert policy should be created in the project specified in var.project."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].uptime_check_alert_policy.enabled == true
+    error_message = "Uptime alert policy should be enabled."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].uptime_check_alert_policy.project == var.project
+    error_message = "Uptime alert policy should be created in the project specified in var.project."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.period == "300s"
+    error_message = "Uptime check should be configured with period == '300s'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.timeout == "30s"
+    error_message = "Uptime check should be configured with timeout == '30s'."
+  }
+  assert {
+    condition     = length(module.uptime_monitoring["default"].https_uptime_check_config.http_check) == 1
+    error_message = "A single HTTP check should be configured."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].path == "/"
+    error_message = "The HTTP check path should be '/'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].port == 443
+    error_message = "The HTTP check port should be 443."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].request_method == "GET"
+    error_message = "The HTTP check request method should be 'GET'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].use_ssl == true
+    error_message = "The HTTP check should be configured to use SSL."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].validate_ssl == true
+    error_message = "The HTTP check should be configured to validate SSL."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.monitored_resource[0].type == "uptime_url"
+    )
+    error_message = "The uptime check should be configured to monitor a URL resource."
+  }
+}
+
+run "test_monitoring_with_alert_policies_created_in_scoping_project" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_monitoring          = true
+    monitoring_scoping_project = "infra-testing-meta-21f09a44"
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.ingress == "INGRESS_TRAFFIC_ALL"
+    error_message = "Ingress should be 'INGRESS_TRAFFIC_ALL'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].ssl_cert_expiry_alert_policy.enabled == true
+    error_message = "SSL cert expiry alert policy should be enabled."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].ssl_cert_expiry_alert_policy.project == var.monitoring_scoping_project
+    )
+    error_message = (
+      "SSL cert expiry alert policy should be created in the project specified in var.monitoring_scoping_project."
+    )
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].uptime_check_alert_policy.enabled == true
+    error_message = "Uptime alert policy should be enabled."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].uptime_check_alert_policy.project == var.monitoring_scoping_project
+    )
+    error_message = "Uptime alert policy should be created in the project specified in var.monitoring_scoping_project."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.period == "300s"
+    error_message = "Uptime check should be configured with period == '300s'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.timeout == "30s"
+    error_message = "Uptime check should be configured with timeout == '30s'."
+  }
+  assert {
+    condition     = length(module.uptime_monitoring["default"].https_uptime_check_config.http_check) == 1
+    error_message = "A single HTTP check should be configured."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].path == "/"
+    error_message = "The HTTP check path should be '/'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].port == 443
+    error_message = "The HTTP check port should be 443."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].request_method == "GET"
+    error_message = "The HTTP check request method should be 'GET'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].use_ssl == true
+    error_message = "The HTTP check should be configured to use SSL."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].validate_ssl == true
+    error_message = "The HTTP check should be configured to validate SSL."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.monitored_resource[0].type == "uptime_url"
+    )
+    error_message = "The uptime check should be configured to monitor a URL resource."
+  }
+}
+
+run "test_monitoring_with_auth_proxy" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_monitoring                 = true
+    allow_unauthenticated_invocations = false
+  }
+
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.monitored_resource[0].labels.host
+      == module.uptime_monitoring["default"].auth_proxy_host
+    )
+    error_message = "HTTP check path should be configured to use the authentication proxy function host."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].path
+      == module.uptime_monitoring["default"].auth_proxy_path
+    )
+    error_message = "HTTP check path should be configured to use the authentication proxy function path."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].auth_proxy_function.ingress_settings == "ALLOW_ALL"
+    error_message = "The authentication proxy function should be configured to allow all ingress."
+  }
+}
+
+run "test_monitoring_with_auth_proxy_and_vpc_access_connector" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_monitoring                 = true
+    allow_unauthenticated_invocations = false
+    ingress                           = "INGRESS_TRAFFIC_INTERNAL_ONLY"
+  }
+
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.monitored_resource[0].labels.host
+      == module.uptime_monitoring["default"].auth_proxy_host
+    )
+    error_message = "HTTP check path should be configured to use the authentication proxy function host."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].path
+      == module.uptime_monitoring["default"].auth_proxy_path
+    )
+    error_message = "HTTP check path should be configured to use the authentication proxy function path."
+  }
+  assert {
+    condition = (
+      google_compute_subnetwork.vpc_connector[0].network
+      == "https://www.googleapis.com/compute/v1/projects/${var.project}/global/networks/default"
+    )
+    error_message = "A subnet for the VPC connector should be created in the 'default' network."
+  }
+  assert {
+    condition     = google_vpc_access_connector.main[0].project == var.project
+    error_message = "A VPC connector should be created in the specified project."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].auth_proxy_function.ingress_settings == "ALLOW_ALL"
+    error_message = "The authentication proxy function should be configured to allow all ingress."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].auth_proxy_function.vpc_connector
+      == "projects/${var.project}/locations/${var.region}/connectors/${var.name}-conn"
+    )
+    error_message = "The authentication proxy function should be configured to use the created VPC connector."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].auth_proxy_function.vpc_connector_egress_settings == "ALL_TRAFFIC"
+    )
+    error_message = (
+      "The authentication proxy function should be configured to route all egress traffic via the VPC connector."
+    )
+  }
+}
+
+run "setup_vpc_connector" {
+  variables {
+    create_vpc_connector = true
+  }
+
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_monitoring_with_auth_proxy_and_an_existing_vpc_access_connector" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_monitoring                 = true
+    allow_unauthenticated_invocations = false
+    ingress                           = "INGRESS_TRAFFIC_INTERNAL_ONLY"
+    vpc_access = {
+      connector = run.setup_vpc_connector.vpc_connector_id
+      egress    = "ALL_TRAFFIC"
+    }
+  }
+
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.monitored_resource[0].labels.host
+      == module.uptime_monitoring["default"].auth_proxy_host
+    )
+    error_message = "HTTP check path should be configured to use the authentication proxy function host."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].path
+      == module.uptime_monitoring["default"].auth_proxy_path
+    )
+    error_message = "HTTP check path should be configured to use the authentication proxy function path."
+  }
+  assert {
+    condition     = try(google_compute_subnetwork.vpc_connector[0], null) == null
+    error_message = "A subnet should not be created."
+  }
+  assert {
+    condition     = try(google_vpc_access_connector.main[0], null) == null
+    error_message = "A VPC connector should not be created."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].auth_proxy_function.ingress_settings == "ALLOW_ALL"
+    error_message = "The authentication proxy function should be configured to allow all ingress."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].auth_proxy_function.vpc_connector
+      == "projects/${var.project}/locations/${var.region}/connectors/${var.name}-setup"
+    )
+    error_message = "The authentication proxy function should be configured to use the provided VPC connector."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].auth_proxy_function.vpc_connector_egress_settings == "ALL_TRAFFIC"
+    )
+    error_message = (
+      "The authentication proxy function should be configured to route all egress traffic via the VPC connector."
+    )
+  }
+}
diff --git a/tests/pre_deploy_job.tftest.hcl b/tests/pre_deploy_job.tftest.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..92bca7f3315dc036b347385cde0d36c053ec7d91
--- /dev/null
+++ b/tests/pre_deploy_job.tftest.hcl
@@ -0,0 +1,129 @@
+run "setup" {
+  variables {
+    create_test_secrets = true
+  }
+
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_pre_deploy_job" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_pre_deploy_job = true
+    pre_deploy_job_container = {
+      image   = "golang:latest"
+      command = ["go"]
+      args    = ["version"]
+    }
+  }
+}
+
+run "setup_secret_iam" {
+  variables {
+    create_test_secrets     = true
+    create_test_secrets_iam = true
+  }
+
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_pre_deploy_job_env_vars_and_secrets" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_pre_deploy_job = true
+    pre_deploy_job_container = {
+      image   = "golang:latest"
+      command = ["go"]
+      args    = ["version"]
+      env = [
+        {
+          name  = "HELLO",
+          value = "WORLD!"
+        },
+        {
+          name = "SECRET",
+          value_source = {
+            secret_key_ref = {
+              secret = run.setup.secret_ids[0]
+            }
+          }
+        }
+      ]
+      volume_mounts = [
+        {
+          name       = "secret-volume",
+          mount_path = "/secrets"
+        }
+      ]
+    }
+    pre_deploy_job_volumes = [
+      {
+        name = "secret-volume",
+        secret = {
+          secret = run.setup.secret_ids[1]
+          items = [
+            {
+              version = "1",
+              path    = "my-secret"
+            }
+          ]
+        }
+      }
+    ]
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].env[0].name == "HELLO"
+    error_message = "A standard environment variable with the name 'HELLO' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].env[0].value == "WORLD!"
+    error_message = "A standard environment variable with the value 'WORLD!' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].env[1].name == "SECRET"
+    error_message = "A secret-backed environment variable with the name 'SECRET' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].env[1].value_source[0].secret_key_ref[0].secret == run.setup.secret_ids[0]
+    error_message = "A secret-backed environment variable referencing the 'run.setup.secret_ids[0]' secret should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].volume_mounts[0].name == "secret-volume"
+    error_message = "A volume mount with the name 'secret-volume' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].volume_mounts[0].mount_path == "/secrets"
+    error_message = "A volume mount with the mount path '/secrets' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].name == "secret-volume"
+    error_message = "A volume with the name 'secret-volume' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].secret[0].secret == run.setup.secret_ids[1]
+    error_message = "A secret-backed volume referencing the 'run.setup.secret_ids[0]' secret should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].secret[0].items[0].version == "1"
+    error_message = "Secret version should be '1'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].secret[0].items[0].path == "my-secret"
+    error_message = "Secret path should be 'my-secret'."
+  }
+}
diff --git a/tests/setup/main.tf b/tests/setup/main.tf
new file mode 100644
index 0000000000000000000000000000000000000000..c20b8a03703a26a92c90da6918facd9b6a9271b6
--- /dev/null
+++ b/tests/setup/main.tf
@@ -0,0 +1,74 @@
+resource "random_id" "name" {
+  byte_length = 2
+  # "rapp" represents Cloud Run App and is required to ensure any resources created by this repo's tests are easily
+  # identifiable by the cleanup.sh script. We only have 4 characters to play with given some of Google's naming
+  # restrictions.
+  prefix = "test-rapp"
+}
+
+module "sql" {
+  count = var.create_test_sql_instances ? 2 : 0
+
+  source                      = "GoogleCloudPlatform/sql-db/google//modules/postgresql"
+  version                     = "~> 17.0"
+  database_version            = "POSTGRES_15"
+  name                        = "${random_id.name.hex}-${count.index}"
+  project_id                  = var.project
+  tier                        = "db-f1-micro"
+  availability_type           = "ZONAL"
+  region                      = var.region
+  zone                        = "${var.region}-a"
+  deletion_protection         = false
+  deletion_protection_enabled = false
+}
+
+resource "google_secret_manager_secret" "test" {
+  count = var.create_test_secrets ? 2 : 0
+
+  secret_id = "${random_id.name.hex}-secret-version-${count.index}"
+  project   = var.project
+
+  replication {
+    auto {}
+  }
+}
+
+resource "google_secret_manager_secret_version" "test" {
+  count = var.create_test_secrets ? 2 : 0
+
+  secret      = google_secret_manager_secret.test[count.index].id
+  secret_data = "secret-data"
+}
+
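+# Pre-grants secret access to the service account that the module under test is expected to create for the Cloud Run
+# service (assumed here to use the "<name>-run" account id).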
+resource "google_secret_manager_secret_iam_member" "test" {
+  count = var.create_test_secrets_iam ? 2 : 0
+
+  project   = var.project
+  secret_id = google_secret_manager_secret.test[count.index].id
+  role      = "roles/secretmanager.secretAccessor"
+  member    = "serviceAccount:${random_id.name.hex}-run@${var.project}.iam.gserviceaccount.com"
+}
+
+resource "google_compute_subnetwork" "test" {
+  count = var.create_vpc_connector ? 1 : 0
+
+  name          = "${random_id.name.hex}-setup"
+  project       = var.project
+  ip_cidr_range = var.test_ip_cidr_range
+  network       = "default"
+  region        = var.region
+}
+
+resource "google_vpc_access_connector" "test" {
+  count = var.create_vpc_connector ? 1 : 0
+
+  name    = "${random_id.name.hex}-setup"
+  project = var.project
+  region  = var.region
+
+  subnet {
+    name = google_compute_subnetwork.test[0].name
+  }
+}
diff --git a/tests/setup/outputs.tf b/tests/setup/outputs.tf
new file mode 100644
index 0000000000000000000000000000000000000000..de4e19dfffbcdcf971a6db66c80b6b4d84c09689
--- /dev/null
+++ b/tests/setup/outputs.tf
@@ -0,0 +1,24 @@
+output "random_name" {
+  description = "A random name to use for the test run."
+  value       = random_id.name.hex
+}
+
+output "instance_connection_names" {
+  description = "A list of created SQL instance connection names."
+  value       = [for k, v in module.sql : v.instance_connection_name]
+}
+
+output "secret_ids" {
+  description = "A list of created secret object IDs."
+  value       = [for k, v in google_secret_manager_secret.test : v.id]
+}
+
+output "subnetwork_id" {
+  description = "The ID of the subnetwork created to test VPC access for the Cloud Run service."
+  value       = var.create_vpc_connector ? google_compute_subnetwork.test[0].id : null
+}
+
+output "vpc_connector_id" {
+  description = "The ID of the VPC Access Connector object created to test VPC access for the Cloud Run service."
+  value       = var.create_vpc_connector ? google_vpc_access_connector.test[0].id : null
+}
diff --git a/tests/setup/variables.tf b/tests/setup/variables.tf
new file mode 100644
index 0000000000000000000000000000000000000000..1cc1db59d328d1d249d870ed0f5c4a3c515a8e0b
--- /dev/null
+++ b/tests/setup/variables.tf
@@ -0,0 +1,41 @@
+variable "project" {
+  description = "The ID of a project to deploy the testing resources to."
+  type        = string
+}
+
+variable "region" {
+  description = "The region/location to deploy test resources to."
+  type        = string
+}
+
+variable "create_test_sql_instances" {
+  description = "If true, two SQL instances will be deployed for testing."
+  type        = bool
+  default     = false
+}
+
+variable "create_test_secrets" {
+  description = "If true, two secret objects will be deployed for testing."
+  type        = bool
+  default     = false
+}
+
+variable "create_test_secrets_iam" {
+  description = <<EOI
+If true, IAM bindings will be created to allow the Cloud Run service to access the test secret objects.
+EOI
+  type        = bool
+  default     = false
+}
+
+variable "create_vpc_connector" {
+  description = "If true, create a VPC Access Connector and associated subnet for testing."
+  type        = bool
+  default     = false
+}
+
+variable "test_ip_cidr_range" {
+  description = "CIDR range for the subnet which is created for the VPC Access Connector."
+  type        = string
+  default     = "10.0.0.16/28"
+}
diff --git a/tests/tests.tfvars b/tests/tests.tfvars
new file mode 100644
index 0000000000000000000000000000000000000000..f23df58d2f01daf729ab9a773cfe4ecd26cb0080
--- /dev/null
+++ b/tests/tests.tfvars
@@ -0,0 +1,3 @@
+project = "infra-testing-int-e2395220"
+region  = "europe-west2"
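+# Shared values for all test runs; run_tests.sh passes this file to terraform test via --var-file.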
diff --git a/tests/vpc_access.tftest.hcl b/tests/vpc_access.tftest.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..b3214b4e6d483cca1a2566ef8a29f3380dd4551a
--- /dev/null
+++ b/tests/vpc_access.tftest.hcl
@@ -0,0 +1,181 @@
+run "setup" {
+  variables {
+    create_vpc_connector = true
+  }
+
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_service_vpc_access" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    vpc_access = {
+      connector = run.setup.vpc_connector_id
+      egress    = "PRIVATE_RANGES_ONLY"
+    }
+  }
+
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.template[0].vpc_access) == 1
+    error_message = "The Cloud Run service should be configured with a single VPC Access Connector."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].vpc_access[0].connector == run.setup.vpc_connector_id
+    error_message = "The Cloud Run service should be configured to use the run.setup.vpc_connector_id VPC Access Connector."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].vpc_access[0].egress == "PRIVATE_RANGES_ONLY"
+    error_message = "The Cloud Run service should be configured to route private ranges only to the VPC Access Connector."
+  }
+}
+
+run "test_pre_deploy_job_vpc_access" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_pre_deploy_job = true
+    pre_deploy_job_container = {
+      image   = "golang:latest"
+      command = ["go"]
+      args    = ["version"]
+    }
+    pre_deploy_job_vpc_access = {
+      connector = run.setup.vpc_connector_id
+      egress    = "PRIVATE_RANGES_ONLY"
+    }
+  }
+
+  assert {
+    condition     = length(google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].vpc_access) == 1
+    error_message = "The Cloud Run service should be configured with a single VPC Access Connector."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].vpc_access[0].connector == run.setup.vpc_connector_id
+    error_message = "The Cloud Run service should be configured to use the run.setup.vpc_connector_id VPC Access Connector."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].vpc_access[0].egress == "PRIVATE_RANGES_ONLY"
+    error_message = "The Cloud Run service should be configured to route private ranges only to the VPC Access Connector."
+  }
+}
+
+run "test_static_egress_ip_config_with_existing_access_controller" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    vpc_access = {
+      connector = run.setup.vpc_connector_id
+    }
+    enable_static_egress_ip        = true
+    static_egress_ip_subnetwork_id = run.setup.subnetwork_id
+  }
+
+  assert {
+    condition     = length(google_compute_router.static_ip) == 1
+    error_message = "A single compute router resource should be created."
+  }
+  assert {
+    condition     = length(google_compute_address.static_ip) == 1
+    error_message = "A single compute address resource should be created."
+  }
+  assert {
+    condition     = length(google_compute_router_nat.static_ip) == 1
+    error_message = "A single compute router NAT resource should be created."
+  }
+  assert {
+    condition     = google_compute_router_nat.static_ip[0].router == google_compute_router.static_ip[0].name
+    error_message = "The NAT resource should be configured to use the google_compute_router.static_ip[0] router resource."
+  }
+  assert {
+    condition     = length(google_compute_router_nat.static_ip[0].subnetwork) == 1
+    error_message = "The NAT resource should be configured with a single subnetwork."
+  }
+  assert {
+    condition     = [for subnet in google_compute_router_nat.static_ip[0].subnetwork : subnet.name][0] == run.setup.subnetwork_id
+    error_message = "The NAT resource should be configured to use the run.setup.subnetwork_id subnetwork resource."
+  }
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.template[0].vpc_access) == 1
+    error_message = "The Cloud Run service should be configured with a single VPC Access Connector."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].vpc_access[0].connector == run.setup.vpc_connector_id
+    error_message = "The Cloud Run service should be configured to use the run.setup.vpc_connector_id VPC Access Connector."
+  }
+}
+
+run "test_static_egress_ip_config_with_defaults" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_static_egress_ip = true
+  }
+
+  assert {
+    condition     = length(google_compute_subnetwork.vpc_connector) == 1
+    error_message = "A single subnetwork should be created for the VPC Access Connector to utilise."
+  }
+  assert {
+    condition     = length(google_vpc_access_connector.main) == 1
+    error_message = "A single VPC Access Connector resource should be created."
+  }
+  assert {
+    condition     = google_vpc_access_connector.main[0].subnet[0].name == google_compute_subnetwork.vpc_connector[0].name
+    error_message = "The VPC Access Connector should be configured to use the google_compute_subnetwork.vpc_connector[0] subnetwork."
+  }
+  assert {
+    condition     = length(google_compute_router.static_ip) == 1
+    error_message = "A single compute router resource should be created."
+  }
+  assert {
+    condition     = length(google_compute_address.static_ip) == 1
+    error_message = "A single compute address resource should be created."
+  }
+  assert {
+    condition     = length(google_compute_router_nat.static_ip) == 1
+    error_message = "A single compute router NAT resource should be created."
+  }
+  assert {
+    condition     = google_compute_router_nat.static_ip[0].router == google_compute_router.static_ip[0].name
+    error_message = "The NAT resource should be configured to use the google_compute_router.static_ip[0] router resource."
+  }
+  assert {
+    condition     = length(google_compute_router_nat.static_ip[0].subnetwork) == 1
+    error_message = "The NAT resource should be configured with a single subnetwork."
+  }
+  assert {
+    condition     = [for subnet in google_compute_router_nat.static_ip[0].subnetwork : subnet.name][0] == google_compute_subnetwork.vpc_connector[0].id
+    error_message = "The NAT resource should be configured to use the google_compute_subnetwork.vpc_connector[0] subnetwork resource."
+  }
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.template[0].vpc_access) == 1
+    error_message = "The Cloud Run service should be configured with a single VPC Access Connector."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].vpc_access[0].connector == google_vpc_access_connector.main[0].id
+    error_message = "The Cloud Run service should be configured to use the google_vpc_access_connector.main[0].id resource."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].vpc_access[0].egress == "ALL_TRAFFIC"
+    error_message = "The Cloud Run service should be configured to route all egress traffic to the VPC Access Connector."
+  }
+}