diff --git a/.gitignore b/.gitignore
index 4e48cd591dea1402bffc821783cba6121e448ed5..0dff9e5191a7ba97e49f8d6aaf5a68d0abc3b9f1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,5 @@
 .idea
 .DS_Store
-.terraform/*
+.terraform/
+terraform.*
+.terraform.lock.hcl
diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 6416c68b9dce8975ab9990b1917928a1ed9b8465..def6a3eb9b29c4ca801a50a62378afa7ccf1389b 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,3 +1,96 @@
 include:
-  - project: 'uis/devops/continuous-delivery/ci-templates'
-    file: '/terraform-module.yml'
+  - project: "uis/devops/continuous-delivery/ci-templates"
+    file: "/terraform-module.yml"
+    ref: "v3.7.0"
+  - project: "uis/devops/continuous-delivery/ci-templates"
+    file: "/pre-commit.yml"
+    ref: "v3.7.0"
+  - project: "uis/devops/continuous-delivery/ci-templates"
+    file: "/auto-devops/release-it.yml"
+    ref: "v3.7.0"
+  - project: "uis/devops/continuous-delivery/ci-templates"
+    file: "/commitlint.yml"
+    ref: "v3.7.0"
+
+variables:
+  LOGAN_IMAGE: registry.gitlab.developers.cam.ac.uk/uis/devops/infra/dockerimages/logan-terraform:1.6
+
+  # Disable the changelog check as it doesn't behave well with pre/beta releases. Also, the check is not required as
+  # we're using release-it for release automation which handles changelog auto-generation.
+  CHANGELOG_CHECK_DISABLED: 1
+
+# This is a workaround to allow the "tests" job matrix below to be manually triggered. Without this job, and the
+# associated "needs" dependency in the "tests" job, all tests would run on every push, which is undesirable given the
+# number of resources that they create. Instead, developers should manually trigger this job from the pipeline UI when
+# they require the test suite to run, for example as part of the MR review process.
+run_tests:
+  stage: test
+  when: manual
+  allow_failure: false
+  script: echo "Triggering test jobs..."
+
+.cleanup:
+  image: $LOGAN_IMAGE
+  script: ./tests/cleanup.sh
+  when: always
+  tags:
+    - $GKE_RUNNER_TAG
+
+pre-cleanup:
+  extends: .cleanup
+  stage: test
+  needs:
+    - run_tests
+
+tests:
+  stage: test
+  image: $LOGAN_IMAGE
+  variables:
+    GOOGLE_IMPERSONATE_SERVICE_ACCOUNT: "terraform-deploy@infra-testing-int-e2395220.iam.gserviceaccount.com"
+  script: |
+    # This unsets the GOOGLE_APPLICATION_CREDENTIALS as it is not required but the logan-terraform images sets it.
+    unset GOOGLE_APPLICATION_CREDENTIALS
+
+    ./run_tests.sh -c -t "tests/$TEST_FILE"
+  needs:
+    - run_tests
+    - pre-cleanup
+  tags:
+    - $GKE_RUNNER_TAG
+  parallel:
+    # This matrix runs each of our test files in parallel targeting v4.x and v5.x of the Google Terraform provider
+    # separately as we support both. It also ensures that subnet CIDR ranges do not clash when testing the VPC
+    # Access/static egress IP configurations.
+    matrix:
+      - TEST_FILE:
+          - cloud_run_service.tftest.hcl
+          - cloudsql.tftest.hcl
+          - load_balancer.tftest.hcl
+          - pre_deploy_job.tftest.hcl
+        GOOGLE_PROVIDER_VERSION_CONSTRAINT:
+          - "> 4, < 5"
+          - "> 5, < 6"
+      - TEST_FILE:
+          - monitoring.tftest.hcl
+        GOOGLE_PROVIDER_VERSION_CONSTRAINT: "> 4, < 5"
+        TF_VAR_static_egress_ip_cidr_range: "10.0.0.0/28"
+        TF_VAR_test_ip_cidr_range: "10.0.0.16/28"
+      - TEST_FILE:
+          - monitoring.tftest.hcl
+        GOOGLE_PROVIDER_VERSION_CONSTRAINT: "> 5, < 6"
+        TF_VAR_static_egress_ip_cidr_range: "10.0.0.32/28"
+        TF_VAR_test_ip_cidr_range: "10.0.0.48/28"
+      - TEST_FILE:
+          - vpc_access.tftest.hcl
+        GOOGLE_PROVIDER_VERSION_CONSTRAINT: "> 4, < 5"
+        TF_VAR_static_egress_ip_cidr_range: "10.0.0.64/28"
+        TF_VAR_test_ip_cidr_range: "10.0.0.80/28"
+      - TEST_FILE:
+          - vpc_access.tftest.hcl
+        GOOGLE_PROVIDER_VERSION_CONSTRAINT: "> 5, < 6"
+        TF_VAR_static_egress_ip_cidr_range: "10.0.0.96/28"
+        TF_VAR_test_ip_cidr_range: "10.0.0.112/28"
+
+post-cleanup:
+  extends: .cleanup
+  stage: review
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..841f7d888c789b20d395a5c218c333661a1fa7b9
--- /dev/null
+++ b/.pre-commit-config.yaml
@@ -0,0 +1,57 @@
+# See https://pre-commit.com/hooks.html for more hooks
+repos:
+  - repo: https://github.com/pre-commit/pre-commit-hooks
+    rev: v4.4.0
+    hooks:
+      - id: trailing-whitespace
+      - id: end-of-file-fixer
+      - id: check-yaml
+        args:
+          - --unsafe
+      - id: check-json
+      - id: check-toml
+      - id: check-xml
+      - id: check-added-large-files
+      - id: check-executables-have-shebangs
+      - id: check-merge-conflict
+      - id: check-symlinks
+      - id: detect-private-key
+      - id: mixed-line-ending
+      - id: pretty-format-json
+        args:
+          - --autofix
+          - --no-sort-keys
+      - id: debug-statements
+  - repo: https://github.com/DavidAnson/markdownlint-cli2
+    rev: v0.12.1
+    hooks:
+      # For rule definitions see https://github.com/markdownlint/markdownlint/blob/main/docs/RULES.md
+      - id: markdownlint-cli2
+        # Exclude README.md and CHANGELOG.md as they are auto-generated and contain a bunch of illegal HTML etc. We lint
+        # the source *.md files in ./docs though so at least our pure markdown is being checked.
+        exclude: (README|CHANGELOG)\.md
+  - repo: local
+    hooks:
+      - id: terraform-fmt
+        name: terraform-fmt
+        language: docker_image
+        pass_filenames: false
+        entry: registry.gitlab.developers.cam.ac.uk/uis/devops/infra/dockerimages/logan-terraform:1.6 terraform fmt
+      - id: tflint
+        name: tflint
+        language: docker_image
+        pass_filenames: false
+        entry: ghcr.io/terraform-linters/tflint:latest
+        args: []
+      - id: trivy
+        name: trivy
+        language: docker_image
+        pass_filenames: false
+        entry: aquasec/trivy:latest
+        args: ["--cache-dir", "/tmp/.trivy-cache", "--skip-dirs", "tests", "config", ".", "--exit-code", "1"]
+      - id: terraform-docs
+        name: terraform-docs
+        language: docker_image
+        pass_filenames: false
+        entry: quay.io/terraform-docs/terraform-docs:0.17.0
+        args: ["."]
diff --git a/.release-it.json b/.release-it.json
new file mode 100644
index 0000000000000000000000000000000000000000..6b36e796b9db3662849b770d5cf7bd0faf55023e
--- /dev/null
+++ b/.release-it.json
@@ -0,0 +1,18 @@
+{
+  "git": {
+    "commitMessage": "chore(release): ${version}"
+  },
+  "gitlab": {
+    "release": true,
+    "releaseName": "${version}"
+  },
+  "plugins": {
+    "@release-it/conventional-changelog": {
+      "infile": "CHANGELOG.md",
+      "header": "# Changelog",
+      "preset": {
+        "name": "conventionalcommits"
+      }
+    }
+  }
+}
diff --git a/.terraform-docs.yml b/.terraform-docs.yml
new file mode 100644
index 0000000000000000000000000000000000000000..de042c53024afb6d4c0fca5e00ffb121f378ae51
--- /dev/null
+++ b/.terraform-docs.yml
@@ -0,0 +1,13 @@
+formatter: markdown table
+version: "~> 0.17.0"
+header-from: docs/templates/header.md
+footer-from: docs/templates/footer.md
+output:
+  file: README.md
+sections:
+  show:
+    - header
+    - requirements
+    - inputs
+    - outputs
+    - footer
diff --git a/.tflint.hcl b/.tflint.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..b8edc994b7eea4e70abf8f7675811c57fa42ab3d
--- /dev/null
+++ b/.tflint.hcl
@@ -0,0 +1,4 @@
+plugin "terraform" {
+  enabled = true
+  preset  = "all"
+}
diff --git a/CHANGELOG b/CHANGELOG.md
similarity index 95%
rename from CHANGELOG
rename to CHANGELOG.md
index fc9be9c977e27303930506f0b8c2b89531bf66ee..8086045780fadea57d63714e027952e4af719411 100644
--- a/CHANGELOG
+++ b/CHANGELOG.md
@@ -1,10 +1,5 @@
 # Changelog
 
-All notable changes to this project will be documented in this file.
-
-The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
-and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
-
 ## [8.4.0] - 2023-09-11
 
 ### Added
diff --git a/LICENSE.txt b/LICENSE.txt
index be1ef151e6a7432cd033a67ce552ce9c46d05539..2046fa2e765cac2c524cbf9aa3840233277c6ca8 100644
--- a/LICENSE.txt
+++ b/LICENSE.txt
@@ -1,4 +1,4 @@
-Copyright (c) 2020, University of Cambridge Information Services
+Copyright (c) University of Cambridge Information Services
 
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
index 8b8e58ae8b96a91377bbc110b8f9003089495bfd..d7773a5a4db9a62050fd8d93b5e03f8f81ead19d 100644
--- a/README.md
+++ b/README.md
@@ -1,206 +1,129 @@
-# GCP Cloud Run manager terraform module
-
-This module manages a Cloud Run-hosted container. It takes care of making sure the container is
-connected to a Cloud SQL instance and sets environment variables on the application.
-
-Specify the project to deploy into on the command line. So, for example, to deploy to the project
-`my-project`:
-
-```console
-$ terraform init
-$ terraform apply -var project=my-project
-```
-
-In this example, terraform is configured to use default application credentials. For Google APIs and
-these credentials should correspond to a user with owner or editor access to the target project. You
-can use the `gcloud` command line tool to set your personal credentials as application default
-credentials. See the `gcloud auth application-default` command output for more information.
-
-## Versioning
-
-The `master` branch contains the tip of development and corresponds to the `v8` branch. The `v1`,
-`v2`, `v3` etc. branches will maintain source compatibility with the initial release.
-
-## Ingress style
-
-There are two supported ingress styles depending on `var.ingress_style` variable.
-
-`var.ingress_style` can be:
-
-- `domain-mapping` (default): passing DNS domains as `var.dns_names` or `var.dns_name`, which takes
-  precedence over `var.dns_names`, will create domain mappings to the Cloud Run service. Before
-  setting this, you _must_ have verified ownership of the provided domains with Google. [Instructions
-  on how to do
-  this](https://guidebook.devops.uis.cam.ac.uk/en/latest/notes/google-domain-verification/) can be
-  found in the DevOps division guidebook.
-
-- `load-balancer`: a load balancer will be configured instead of a domain mapping. The DNS domains
-  in `var.dns_names` or `var.dns_name`, which takes precedence over `var.dns_names`, will get
-  Google-managed or custom TLS certificates depending on `var.use_ssl_certificates` and
-  `var.ssl_certificates`. An IPv6 address can also be allocated to the load balancer if
-  `var.enable_ipv6` is `true`.
-
-## Pre-deploy Cloud Run job
-
-The `v8` release introduced an `enable_pre_deploy_job` variable. When set to `true` a [Cloud
-Run job](https://cloud.google.com/run/docs/create-jobs) is created to execute a configurable command
-_before_ the main Cloud Run service is deployed. The initial use case for this is to run database
-migrations, however in the future we're sure there'll be more.
-
-The pre-deploy job uses the image specified in `var.pre_deploy_job_image_name` if set, otherwise it
-falls back to the same `var.image_name` that the main service uses. The command and arguments that
-the job executes are configurable via the `pre_deploy_job_command` and `pre_deploy_job_args`
-variables.
-
-A `null_resource` is also configured to execute the pre-deploy job whenever it detects that the
-value of `var.image_name` has changed (or at every `apply` if `var.force_pre_deploy_job` is set to
-`true`). This uses the `gcloud run jobs execute` command and is run in the context of the
-`terraform-deploy` service account via an access token. Using `null_resource` is never ideal.
-However, in this situation it provides a very handy way to trigger this simple job so it has been
-accepted.
-
-To ensure that the pre-deploy job always runs _before_ a new revision of the Cloud Run webapp
-service is deployed, the resources in question are explicitly configured with `depends_on`
-relationships, as follows.
-
-1. The `google_cloud_run_v2_job.pre_deploy` Cloud Run job has no `depends_on` relationships defined
-   and is therefore deployed first.
-2. The `null_resource.pre_deploy_job_trigger` resource depends on
-   `google_cloud_run_v2_job.pre_deploy` and therefore won't be deployed until the Cloud Run job is
-   deployed successfully.
-3. Finally, the `google_cloud_run_service.webapp` Cloud Run service depends on
-   `null_resource.pre_deploy_job_trigger`, meaning it is only deployed once the
-   `null_resource.pre_deploy_job_trigger` has executed successfully.
-
-## Monitoring and Alerting
-
-If the variable [alerting_email_address](variables.tf) is set, the module adds basic uptime
-_alerting_ via email for failing http polling.
-
-If the variable [disable_monitoring](variables.tf) is true, the module will disable _monitoring_.
-This is different from disabling alerting; if no alerting email addresses are provided, the uptime
-checks will still be configured, there just won't be any alerts sent if they fail. Disabling
-monitoring will also disable alerting as without any monitoring there is nothing to alert(!)
-
-See [variables.tf](variables.tf) for how to configure alerting and monitoring.
-
-Note that the project containing resources to be monitored must be in a Stackdriver monitoring
-workspace and this must be configured manually. At the time of writing there is no terraform support
-for this. This module will error when applying if this is not so.
-
-Stackdriver distinguishes between workspaces and projects within those workspaces. Each workspace
-must have a host project and that project _must_ be the default project of the `google.stackdriver`
-provider used by this module. The `google.stackdriver` must be configured with credentials allowing
-monitoring resources to be created in the _host_ project.
-
-If the workspace host project differs from the project which contains the resources to be monitored,
-you can use a provider alias:
-
-```tf
-provider "google" {
-  project = "my-project"
-
-  # ... some credentials for the *project* admin ...
-}
-
-provider "google" {
-  project = "stackdriver-host-project"
-  alias   = "host"
-
-  # ... some credentials for the *product* admin ...
-}
-
-module "cloud_run_service" {
-  # ... other parameters ...
-
-  providers = {
-    google.stackdriver = google.host
-  }
-}
-```
-
-### Monitoring instances which require service account authentication
-
-If `allow_unauthenticated_invocations` is not true, a Cloud Function will be created which
-authenticates via a service account, allowing the StackDriver monitoring to call the Cloud Function,
-with the Cloud Function authentication and proxying the request to the Cloud Run instance.
-
-Because this requires a Cloud Function to be created, the `cloudfunctions.googleapis.com` service
-should be enabled on the project that houses the Cloud Run instance.
-
-## Static Egress IP
-
-A static egress IP can be allocated for the cloud run instance, using the variable
-`enable_static_egress_ip`. This will configure the necessary resources to route outbound traffic
-from the cloud run through a static ip.
-
-**Important!**
-
-The static ip is configured with `prevent_destroy = true`, meaning that it cannot be destroyed
-without removing it from terraform state using `terraform state rm` and then manually destroying the
-resource within the GCP console. This is to prevent accidental destruction of an IP which is likely
-to be whitelisted within firewall configuration that lives outside of our deployments.
-
-## Secrets as Volumes and Env Vars
-
-Secret Manager secrets can be as environment variables or volume mounts (files) in the running
-container.
-
-The service account that Cloud Run runs as needs access to the secrets for this feature to work.
-Thus, this module gives `secretAccessor` role to that service account for the secrets passed on
-`secrets_volume` and `secrets_envars`.
-
-Any number of items in the list is supported and not defining these variables when calling this
-module is acceptable. The path of the mounted file will be based on `path/name`.
-
-For the example configuration below the files will be `/secrets-1/foobarfile1` and
-`/secrets-2/foobarfile2`. A common `path` for multiple secrets is not supported, they must be
-unique.
-
-> Note: `name` should only have alphanumeric characters, hyphens and underscores.
-
-Setting `version = ""` is equivalent to `version = "latest"` but the variable is not optional.
-
-```tf
-module "webapp" {
-  source = "git::https://gitlab.developers.cam.ac.uk/uis/devops/infra/terraform/gcp-cloud-run-app.git?ref=v3"
-
-...
-
-  secrets_volume = [
-    {
-      name    = "foobarfile1"
-      path    = "/secret-1"
-      id      = google_secret_manager_secret.secret-a.secret_id
-      version = "latest"
-    },
-    {
-      name    = "foobarfile2"
-      path    = "/secret-2"
-      id      = google_secret_manager_secret.secret-b.secret_id
-      version = "2"
-    }
-  ]
-
-  secrets_envars = [
-      {
-          name    = "FOOBAR1"
-          id      = google_secret_manager_secret.secret-c.secret_id
-          version = "latest"
-      },
-      {
-          name    = "FOOBAR2"
-          id      = google_secret_manager_secret.secret-d.secret_id
-          version = ""
-      }
-  ]
-
-...
-```
-
-## Passing Image Names to the Module
-
-Originally, the module did not deploy images except on the very first use (using
-`gcr.io/cloudrun/hello:latest`).
-
-Currently, the module deploys the image from the mandatory variable `image_name`.
+<!-- BEGIN_TF_DOCS -->
+# GCP Cloud Run Terraform Module
+
+This module manages the deployment of containerised applications on Cloud Run.
+It includes the following functionality:
+
+- Creation of the main Cloud Run service.
+- Creation of a dedicated service account with required IAM bindings.
+- Deployment of an optional load balancer configuration.
+- Deployment of an optional "pre-deployment" Cloud Run job to handle tasks such
+  as database migrations.
+- Deployment of an optional static egress IP address for the Cloud Run service.
+- Configuration of simple uptime checks and SSL certificate expiry alerts.
+- Convenience variables to configure certain aspects of the Cloud Run service
+  which would otherwise be complex, such as mounting Cloud SQL instances.
+
+## Cloud Run v2 API
+
+Following release `9.0.0`, this module has been refactored to use the Cloud Run
+`v2` API resources exclusively (i.e. `google_cloud_run_v2_service`). This means
+that many input variables are now different and updating from a previous version
+will require some changes to your module definition.
+
+## Examples
+
+See the [docs/examples.md](docs/examples.md) page for a full list of detailed
+usage examples.
+
+## Pre-deploy Cloud Run Job
+
+The `8.0.0` release introduced the `enable_pre_deploy_job` variable which, when
+set to `true`, creates a Cloud Run job to execute a configurable command before
+new Cloud Run service revisions are deployed. This is a useful way to run
+database migrations and other commands which are tightly coupled to the release
+cadence of the main Cloud Run service.
+
+The pre-deploy job is configured via the `pre_deploy_job_*` variables which can
+be found in `variables.tf`.
+
+For more information on how the pre-deploy Cloud Run job works see the
+[pre-deploy-job.md](docs/pre-deploy-job.md) page.
+
+## Requirements
+
+| Name | Version |
+|------|---------|
+| <a name="requirement_terraform"></a> [terraform](#requirement\_terraform) | ~> 1.3 |
+| <a name="requirement_google"></a> [google](#requirement\_google) | >= 4.0 |
+| <a name="requirement_null"></a> [null](#requirement\_null) | ~> 3.0 |
+
+## Inputs
+
+| Name | Description | Type | Default | Required |
+|------|-------------|------|---------|:--------:|
+| <a name="input_alerting_notification_channels"></a> [alerting\_notification\_channels](#input\_alerting\_notification\_channels) | A list of notification channel IDs to send uptime alerts to. The format for<br>the channel IDs should be<br>"projects/[PROJECT\_ID]/notificationChannels/[CHANNEL\_ID]". | `list(string)` | `[]` | no |
+| <a name="input_alerting_success_threshold_percent"></a> [alerting\_success\_threshold\_percent](#input\_alerting\_success\_threshold\_percent) | If the percentage of successful uptime checks within the given uptime period<br>falls below this, an alert will be triggered. Set to 100 to trigger an alert<br>if any uptime check fails, set to a lower number to tolerate failures without<br>alerting.<br><br>Experience has taught us that uptime checks can fail semi-regularly due to<br>transient problems outside our control, therefore we allow some leeway before<br>triggering an alert. | `number` | `75` | no |
+| <a name="input_alerting_uptime_period"></a> [alerting\_uptime\_period](#input\_alerting\_uptime\_period) | Frequency of uptime checks | `string` | `"300s"` | no |
+| <a name="input_alerting_uptime_timeout"></a> [alerting\_uptime\_timeout](#input\_alerting\_uptime\_timeout) | Timeout for http polling. | `string` | `"30s"` | no |
+| <a name="input_allow_unauthenticated_invocations"></a> [allow\_unauthenticated\_invocations](#input\_allow\_unauthenticated\_invocations) | If true, the webapp will allow unauthenticated invocations. If false, the<br>webapp requires authentication as a Google user with the Cloud Run invoker<br>permission on the deployment. | `bool` | `true` | no |
+| <a name="input_containers"></a> [containers](#input\_containers) | Configure one or more container instances for the service. See<br>https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/cloud_run_v2_service#nested_containers<br>for information on the available arguments. | <pre>map(object({<br>    name    = optional(string)<br>    image   = string<br>    command = optional(list(string))<br>    args    = optional(list(string))<br>    env = optional(list(object({<br>      name  = string<br>      value = optional(string)<br>      value_source = optional(object({<br>        secret_key_ref = optional(object({<br>          secret  = string<br>          version = optional(string, "latest")<br>        }))<br>      }))<br>    })), [])<br>    resources = optional(object({<br>      limits            = optional(map(string))<br>      cpu_idle          = optional(bool)<br>      startup_cpu_boost = optional(bool)<br>    }))<br>    ports = optional(list(object({<br>      name           = optional(string)<br>      container_port = optional(number)<br>    })), [])<br>    volume_mounts = optional(list(object({<br>      name       = string<br>      mount_path = string<br>    })), [])<br>    working_dir = optional(string)<br>    liveness_probe = optional(object({<br>      initial_delay_seconds = optional(number)<br>      timeout_seconds       = optional(number)<br>      period_seconds        = optional(number)<br>      failure_threshold     = optional(number)<br>      http_get = optional(object({<br>        path = optional(string)<br>        port = optional(number)<br>        http_headers = optional(list(object({<br>          name  = string<br>          value = optional(string)<br>        })), [])<br>      }))<br>      grpc = optional(object({<br>        port    = optional(number)<br>        service = optional(string)<br>      }))<br>    }))<br>    startup_probe = optional(object({<br>      initial_delay_seconds = optional(number)<br>      timeout_seconds       = optional(number)<br>      period_seconds        = optional(number)<br>      failure_threshold     = optional(number)<br>      http_get = optional(object({<br>        path = optional(string)<br>        port = optional(number)<br>        http_headers = optional(list(object({<br>          name  = string<br>          value = optional(string)<br>        })), [])<br>      }))<br>      tcp_socket = optional(object({<br>        port = number<br>      }))<br>      grpc = optional(object({<br>        port    = optional(number)<br>        service = optional(string)<br>      }))<br>    }))<br>  }))</pre> | n/a | yes |
+| <a name="input_create_ipv6_address"></a> [create\_ipv6\_address](#input\_create\_ipv6\_address) | Allocate an IPv6 address to the load balancer if var.enable\_ipv6 is true. | `bool` | `false` | no |
+| <a name="input_description"></a> [description](#input\_description) | A description for the Cloud Run service. | `string` | `null` | no |
+| <a name="input_dns_names"></a> [dns\_names](#input\_dns\_names) | DNS names to configure for the web application. Note that DNS records are<br>\_NOT\_ created, they are used in the load balancer module to ensure the SSL<br>certificate is generated with the required SANs. The map's keys are arbitrary<br>and are only required to avoid errors when the DNS name is a value which<br>Terraform does not know until after the apply operation.<br><br>For example:<br><br>{ my\_awesome\_dns\_name = "awesome.example.com" } | `map(string)` | `{}` | no |
+| <a name="input_enable_alerting"></a> [enable\_alerting](#input\_enable\_alerting) | Enable alerting policies. | `bool` | `true` | no |
+| <a name="input_enable_ipv6"></a> [enable\_ipv6](#input\_enable\_ipv6) | Whether to enable IPv6 address on the CDN load-balancer. | `bool` | `false` | no |
+| <a name="input_enable_load_balancer"></a> [enable\_load\_balancer](#input\_enable\_load\_balancer) | Whether to configure a load balancer or use the default run.app generated<br>hostname. | `bool` | `false` | no |
+| <a name="input_enable_monitoring"></a> [enable\_monitoring](#input\_enable\_monitoring) | Optional. If true, create uptime and SSL expiry checks.<br><br>Note that this is different from not specifying an alerting email address. If<br>no alerting email address is specified the uptime checks are still created,<br>they just don't alert if they fail. | `bool` | `false` | no |
+| <a name="input_enable_pre_deploy_job"></a> [enable\_pre\_deploy\_job](#input\_enable\_pre\_deploy\_job) | Configure a Cloud Run Job to be executed *before* the main Cloud Run service<br>is deployed. This is useful for running database migrations for example. | `bool` | `false` | no |
+| <a name="input_enable_static_egress_ip"></a> [enable\_static\_egress\_ip](#input\_enable\_static\_egress\_ip) | Whether to assign a static ip for egress from this cloud run instance. If<br>enabled, the "vpcaccess.googleapis.com" API must also be enabled on the<br>project. | `bool` | `false` | no |
+| <a name="input_encryption_key"></a> [encryption\_key](#input\_encryption\_key) | The ID of a customer managed encryption key (CMEK) to use to encrypt this<br>container image. | `string` | `null` | no |
+| <a name="input_execution_environment"></a> [execution\_environment](#input\_execution\_environment) | The sandbox environment to host this revision. Possible values are<br>EXECUTION\_ENVIRONMENT\_GEN1, and EXECUTION\_ENVIRONMENT\_GEN2. | `string` | `"EXECUTION_ENVIRONMENT_GEN1"` | no |
+| <a name="input_grant_sql_client_role_to_webapp_sa"></a> [grant\_sql\_client\_role\_to\_webapp\_sa](#input\_grant\_sql\_client\_role\_to\_webapp\_sa) | When set to true the roles/cloudsql.client role will be granted to the webapp<br>service account at the project level to allow it to connect to Cloud SQL. | `bool` | `false` | no |
+| <a name="input_ingress"></a> [ingress](#input\_ingress) | The ingress setting for the Cloud Run service. Possible values are<br>INGRESS\_TRAFFIC\_ALL, INGRESS\_TRAFFIC\_INTERNAL\_ONLY, and<br>INGRESS\_TRAFFIC\_INTERNAL\_LOAD\_BALANCER.<br><br>If var.use\_load\_balancer == true, the provided var.ingress will be ignored and<br>the ingress will be set automatically to<br>"INGRESS\_TRAFFIC\_INTERNAL\_LOAD\_BALANCER". | `string` | `"INGRESS_TRAFFIC_ALL"` | no |
+| <a name="input_launch_stage"></a> [launch\_stage](#input\_launch\_stage) | The launch stage for the Cloud Run service. Possible values are UNIMPLEMENTED,<br>PRELAUNCH, EARLY\_ACCESS, ALPHA, BETA, GA, and DEPRECATED. | `string` | `"GA"` | no |
+| <a name="input_max_instance_request_concurrency"></a> [max\_instance\_request\_concurrency](#input\_max\_instance\_request\_concurrency) | Sets the maximum number of requests that each serving instance can receive. | `number` | `null` | no |
+| <a name="input_min_ports_per_vm"></a> [min\_ports\_per\_vm](#input\_min\_ports\_per\_vm) | When using Cloud NAT to provide an egress route, Cloud NAT's minimum ports per<br>VM can be configured to determine how many concurrent connections can be<br>established to the same destination IP address and port. | `number` | `64` | no |
+| <a name="input_monitoring_path"></a> [monitoring\_path](#input\_monitoring\_path) | Path component of url to be monitored. | `string` | `"/"` | no |
+| <a name="input_monitoring_scoping_project"></a> [monitoring\_scoping\_project](#input\_monitoring\_scoping\_project) | The ID of a Cloud Monitoring scoping project to create monitoring resources<br>in. If omitted, var.project will be used instead. | `string` | `null` | no |
+| <a name="input_mount_cloudsql_instance"></a> [mount\_cloudsql\_instance](#input\_mount\_cloudsql\_instance) | Mount a single CloudSQL instance in each container for the service. This value<br>should be the Cloud SQL instance connection name, for example<br>"example-devel-e662dd2b:europe-west2:sql-6e1dd60b". This is a convenience<br>variable to simplify mounting a single Cloud SQL instance. If you require more<br>control you can define one or more Cloud SQL mounts directly using<br>var.containers and var.volumes instead. | `string` | `null` | no |
+| <a name="input_name"></a> [name](#input\_name) | Prefix used to form resource names. | `string` | `"webapp"` | no |
+| <a name="input_pre_deploy_job_annotations"></a> [pre\_deploy\_job\_annotations](#input\_pre\_deploy\_job\_annotations) | Map of key/value pairs containing annotations to assign to the pre-deploy Cloud<br>Run job. | `map(string)` | `null` | no |
+| <a name="input_pre_deploy_job_container"></a> [pre\_deploy\_job\_container](#input\_pre\_deploy\_job\_container) | Configure the container instance for the pre-deploy job. See<br>https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/cloud_run_v2_job#nested_containers<br>for more information on these options. | <pre>object({<br>    name    = optional(string)<br>    image   = optional(string)<br>    command = optional(list(string))<br>    args    = optional(list(string))<br>    env = optional(list(object({<br>      name  = string<br>      value = optional(string)<br>      value_source = optional(object({<br>        secret_key_ref = optional(object({<br>          secret  = string<br>          version = optional(string, "latest")<br>        }))<br>      }))<br>    })), [])<br>    resources = optional(object({<br>      limits = optional(map(string))<br>    }))<br>    ports = optional(list(object({<br>      name           = optional(string)<br>      container_port = optional(number)<br>    })), [])<br>    volume_mounts = optional(list(object({<br>      name       = string<br>      mount_path = string<br>    })), [])<br>    working_dir = optional(string)<br>  })</pre> | `null` | no |
+| <a name="input_pre_deploy_job_encryption_key"></a> [pre\_deploy\_job\_encryption\_key](#input\_pre\_deploy\_job\_encryption\_key) | The ID of a customer managed encryption key (CMEK) to use to encrypt this<br>container image. | `string` | `null` | no |
+| <a name="input_pre_deploy_job_execution_environment"></a> [pre\_deploy\_job\_execution\_environment](#input\_pre\_deploy\_job\_execution\_environment) | The execution environment to host this task. Possible values are<br>EXECUTION\_ENVIRONMENT\_GEN1, and EXECUTION\_ENVIRONMENT\_GEN2 | `string` | `"EXECUTION_ENVIRONMENT_GEN2"` | no |
+| <a name="input_pre_deploy_job_force"></a> [pre\_deploy\_job\_force](#input\_pre\_deploy\_job\_force) | When true, and only when used in addition to var.pre\_deploy\_job\_trigger, the<br>pre-deploy Cloud Run job is executed at every terraform apply, regardless of<br>the status of var.pre\_deploy\_job\_container.image. | `bool` | `false` | no |
+| <a name="input_pre_deploy_job_labels"></a> [pre\_deploy\_job\_labels](#input\_pre\_deploy\_job\_labels) | Map of key/value pairs containing labels to assign to the pre-deploy Cloud Run<br>job. | `map(string)` | `null` | no |
+| <a name="input_pre_deploy_job_launch_stage"></a> [pre\_deploy\_job\_launch\_stage](#input\_pre\_deploy\_job\_launch\_stage) | The launch stage for the pre-deploy Cloud Run job. Possible values are UNIMPLEMENTED,<br>PRELAUNCH, EARLY\_ACCESS, ALPHA, BETA, GA, and DEPRECATED. | `string` | `"GA"` | no |
+| <a name="input_pre_deploy_job_max_retries"></a> [pre\_deploy\_job\_max\_retries](#input\_pre\_deploy\_job\_max\_retries) | Configure the maximum number of retries for the pre-deploy job. | `number` | `null` | no |
+| <a name="input_pre_deploy_job_mount_cloudsql_instance"></a> [pre\_deploy\_job\_mount\_cloudsql\_instance](#input\_pre\_deploy\_job\_mount\_cloudsql\_instance) | Mount a CloudSQL instance in the pre-deploy job container. This is a<br>convenience variable to simplify mounting a Cloud SQL instance. However, if<br>you require more control over this you should define it directly in<br>var.pre\_deploy\_job\_container instead. | `string` | `null` | no |
+| <a name="input_pre_deploy_job_parallelism"></a> [pre\_deploy\_job\_parallelism](#input\_pre\_deploy\_job\_parallelism) | Specifies the maximum desired number of tasks the execution should run at<br>given time. | `number` | `null` | no |
+| <a name="input_pre_deploy_job_task_count"></a> [pre\_deploy\_job\_task\_count](#input\_pre\_deploy\_job\_task\_count) | Specifies the desired number of tasks the execution should run. | `number` | `null` | no |
+| <a name="input_pre_deploy_job_timeout"></a> [pre\_deploy\_job\_timeout](#input\_pre\_deploy\_job\_timeout) | Configure a timeout, in seconds, for the pre-deploy job. | `string` | `null` | no |
+| <a name="input_pre_deploy_job_trigger"></a> [pre\_deploy\_job\_trigger](#input\_pre\_deploy\_job\_trigger) | When true, the pre-deploy Cloud Run job is executed via a<br>null\_resource-triggered gcloud command whenever Terraform detects that<br>var.pre\_deploy\_job\_container.image has changed. | `bool` | `true` | no |
+| <a name="input_pre_deploy_job_volumes"></a> [pre\_deploy\_job\_volumes](#input\_pre\_deploy\_job\_volumes) | Configure one or more volumes for the pre-deploy job. See<br>https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/cloud_run_v2_job#nested_volumes<br>for more information on these options. | <pre>list(object({<br>    name = string<br>    secret = optional(object({<br>      secret       = string<br>      default_mode = optional(number)<br>      items = optional(list(object({<br>        path    = string<br>        version = optional(string)<br>        mode    = optional(number)<br>      })), [])<br>    }))<br>    cloud_sql_instance = optional(object({<br>      instances = optional(list(string))<br>    }))<br>  }))</pre> | `[]` | no |
+| <a name="input_pre_deploy_job_vpc_access"></a> [pre\_deploy\_job\_vpc\_access](#input\_pre\_deploy\_job\_vpc\_access) | Configure VPC access for the pre-deploy job. See<br>https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/cloud_run_v2_job#nested_vpc_access<br>for more information on these options. | <pre>object({<br>    connector = optional(string)<br>    egress    = optional(string)<br>    network_interfaces = optional(object({<br>      network    = optional(string)<br>      subnetwork = optional(string)<br>      tags       = optional(string)<br>    }))<br>  })</pre> | `null` | no |
+| <a name="input_project"></a> [project](#input\_project) | Project containing the webapp. | `string` | n/a | yes |
+| <a name="input_region"></a> [region](#input\_region) | Location used to create Cloud Run service and other resources. | `string` | n/a | yes |
+| <a name="input_revision"></a> [revision](#input\_revision) | The unique name for the revision. If this field is omitted, it will be<br>automatically generated based on the Service name. | `string` | `null` | no |
+| <a name="input_scaling"></a> [scaling](#input\_scaling) | The minimum number of auto-scaled instances defaults to 0, thus, the container<br>will stop if it doesn't receive requests for a period of time and the<br>following request will make the container start from cold. This should be<br>carefully considered for containers that take a significant amount of time<br>starting from cold.<br><br>For a container with N-workers, the maximum number of auto-scaled instances<br>should be less than 1/N of the maximum connection count for the Cloud SQL<br>instance. | <pre>object({<br>    min_instance_count = optional(number)<br>    max_instance_count = optional(number)<br>  })</pre> | `null` | no |
+| <a name="input_service_account_display_name"></a> [service\_account\_display\_name](#input\_service\_account\_display\_name) | If non-empty, override the default display name of the webapp service account. | `string` | `""` | no |
+| <a name="input_service_account_id"></a> [service\_account\_id](#input\_service\_account\_id) | A service account is always created for the web application. If non-empty this<br>variable overrides the default service account id. The default id is formed<br>from the "name" variable value with "-run" appended. | `string` | `""` | no |
+| <a name="input_service_annotations"></a> [service\_annotations](#input\_service\_annotations) | Map containing additional annotations to be added to the Cloud Run service<br>itself. | `map(string)` | `{}` | no |
+| <a name="input_service_labels"></a> [service\_labels](#input\_service\_labels) | A set of key/value label pairs to assign to the Cloud Run service. | `map(string)` | `{}` | no |
+| <a name="input_session_affinity"></a> [session\_affinity](#input\_session\_affinity) | Enables session affinity. For more information, go to<br>https://cloud.google.com/run/docs/configuring/session-affinity. | `bool` | `null` | no |
+| <a name="input_sql_instance_project"></a> [sql\_instance\_project](#input\_sql\_instance\_project) | Project containing SQL instance. Defaults to var.project. | `string` | `null` | no |
+| <a name="input_ssl_certificates"></a> [ssl\_certificates](#input\_ssl\_certificates) | A list of self-links to any custom TLS certificates to add to the load<br>balancer. Requires that var.use\_load\_balancer be "true". The self-link is<br>available as the "self\_link" attribute of "google\_compute\_ssl\_certificate"<br>resources. | `list(any)` | `[]` | no |
+| <a name="input_ssl_policy"></a> [ssl\_policy](#input\_ssl\_policy) | By default, the google\_compute\_ssl\_policy.default SSL policy is applied to the<br>load balancer in load\_balancer.tf. This sets the SSL profile to MODERN and<br>restricts TLS to >= 1.2. If a different SSL policy is required, it should be<br>created outside of this module and its ID passed through using this variable. | `string` | `null` | no |
+| <a name="input_static_egress_ip_cidr_range"></a> [static\_egress\_ip\_cidr\_range](#input\_static\_egress\_ip\_cidr\_range) | The CIDR range used to create a subnet that this Cloud Run service will use if<br>assigned a static IP | `string` | `"10.124.0.0/28"` | no |
+| <a name="input_static_egress_ip_subnetwork_id"></a> [static\_egress\_ip\_subnetwork\_id](#input\_static\_egress\_ip\_subnetwork\_id) | When using an existing VPC Access Connector with the static egress IP<br>configuration an existing subnetwork must be provided. | `string` | `null` | no |
+| <a name="input_template_annotations"></a> [template\_annotations](#input\_template\_annotations) | Map containing additional annotations to be added to the Cloud Run service<br>template. | `map(string)` | `{}` | no |
+| <a name="input_template_labels"></a> [template\_labels](#input\_template\_labels) | A set of key/value label pairs to assign to the Cloud Run service revision. | `map(string)` | `{}` | no |
+| <a name="input_timeout_seconds"></a> [timeout\_seconds](#input\_timeout\_seconds) | The maximum duration, in seconds, the instance is allowed for responding to a<br>request. Maximum is 900s. | `string` | `"300s"` | no |
+| <a name="input_traffic"></a> [traffic](#input\_traffic) | Configure traffic allocation between one or more service revisions. | <pre>list(object({<br>    type     = optional(string)<br>    revision = optional(string)<br>    percent  = optional(number)<br>    tag      = optional(string)<br>  }))</pre> | `[]` | no |
+| <a name="input_use_ssl_certificates"></a> [use\_ssl\_certificates](#input\_use\_ssl\_certificates) | Whether to use the custom TLS certs in var.ssl\_certificates for the load<br>balancer or the Google-managed certs for the specified var.dns\_names. | `bool` | `false` | no |
+| <a name="input_volumes"></a> [volumes](#input\_volumes) | Configure one or more volumes for the service. See<br>https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/cloud_run_v2_service#nested_volumes<br>for more information on these options. | <pre>list(object({<br>    name = string<br>    secret = optional(object({<br>      secret       = string<br>      default_mode = optional(number)<br>      items = optional(list(object({<br>        path    = string<br>        version = optional(string)<br>        mode    = optional(number)<br>      })), [])<br>    }))<br>    cloud_sql_instance = optional(object({<br>      instances = optional(list(string))<br>    }))<br>  }))</pre> | `[]` | no |
+| <a name="input_vpc_access"></a> [vpc\_access](#input\_vpc\_access) | Configure VPC access for the Cloud Run service. For more information on these<br>options see<br>https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/cloud_run_v2_service#nested_vpc_access | <pre>object({<br>    connector = optional(string)<br>    egress    = optional(string)<br>    network_interfaces = optional(object({<br>      network    = optional(string)<br>      subnetwork = optional(string)<br>      tags       = optional(string)<br>    }))<br>  })</pre> | `null` | no |
+| <a name="input_vpc_access_connector_max_throughput"></a> [vpc\_access\_connector\_max\_throughput](#input\_vpc\_access\_connector\_max\_throughput) | Optional. The maximum throughput of the connector in megabytes per second.<br>Defaults to 300. | `number` | `300` | no |
+
+## Outputs
+
+| Name | Description |
+|------|-------------|
+| <a name="output_load_balancer"></a> [load\_balancer](#output\_load\_balancer) | Load balancer for the webapp |
+| <a name="output_network_endpoint_group"></a> [network\_endpoint\_group](#output\_network\_endpoint\_group) | Network endpoint group for the load balancer. |
+| <a name="output_service"></a> [service](#output\_service) | Webapp Cloud Run service resource |
+| <a name="output_service_account"></a> [service\_account](#output\_service\_account) | Service account which service runs as |
+| <a name="output_ssl_policy"></a> [ssl\_policy](#output\_ssl\_policy) | The ssl\_policy object, if one is being created. |
+| <a name="output_static_egress_ip"></a> [static\_egress\_ip](#output\_static\_egress\_ip) | The static egress IP assigned to this cloud run instance. Only populated<br>if the variable `enable_static_egress_ip` is true. |
+<!-- END_TF_DOCS -->
diff --git a/dashboard.json b/dashboard.json
deleted file mode 100644
index a470ecad228ad9e14590ebcdb172ad05a611b8fd..0000000000000000000000000000000000000000
--- a/dashboard.json
+++ /dev/null
@@ -1,334 +0,0 @@
-{
-  "displayName": "Cloud Run service: ${service_name}",
-  "mosaicLayout": {
-    "columns": 12,
-    "tiles": [
-      {
-        "width": 6,
-        "height": 4,
-        "widget": {
-          "title": "Request Counts",
-          "xyChart": {
-            "dataSets": [
-              {
-                "timeSeriesQuery": {
-                  "timeSeriesFilter": {
-                    "filter": "metric.type=\"run.googleapis.com/request_count\" resource.type=\"cloud_run_revision\" resource.label.\"service_name\"=\"${service_name}\" metric.label.\"response_code\"!=\"404\"",
-                    "aggregation": {
-                      "alignmentPeriod": "60s",
-                      "perSeriesAligner": "ALIGN_RATE",
-                      "crossSeriesReducer": "REDUCE_SUM",
-                      "groupByFields": [
-                        "metric.label.\"response_code_class\""
-                      ]
-                    }
-                  }
-                },
-                "plotType": "LINE",
-                "minAlignmentPeriod": "60s",
-                "targetAxis": "Y1"
-              }
-            ],
-            "timeshiftDuration": "0s",
-            "yAxis": {
-              "label": "y1Axis",
-              "scale": "LINEAR"
-            },
-            "chartOptions": {
-              "mode": "COLOR"
-            }
-          }
-        }
-      },
-      {
-        "xPos": 6,
-        "width": 6,
-        "height": 4,
-        "widget": {
-          "title": "Request Latencies",
-          "xyChart": {
-            "dataSets": [
-              {
-                "timeSeriesQuery": {
-                  "timeSeriesFilter": {
-                    "filter": "metric.type=\"run.googleapis.com/request_latencies\" resource.type=\"cloud_run_revision\" resource.label.\"service_name\"=\"${service_name}\" resource.label.\"location\"=\"${region}\"",
-                    "aggregation": {
-                      "alignmentPeriod": "60s",
-                      "perSeriesAligner": "ALIGN_DELTA",
-                      "crossSeriesReducer": "REDUCE_PERCENTILE_99",
-                      "groupByFields": [
-                        "resource.label.\"service_name\""
-                      ]
-                    }
-                  }
-                },
-                "plotType": "LINE",
-                "minAlignmentPeriod": "60s",
-                "targetAxis": "Y1"
-              },
-              {
-                "timeSeriesQuery": {
-                  "timeSeriesFilter": {
-                    "filter": "metric.type=\"run.googleapis.com/request_latencies\" resource.type=\"cloud_run_revision\" resource.label.\"service_name\"=\"${service_name}\" resource.label.\"location\"=\"${region}\"",
-                    "aggregation": {
-                      "alignmentPeriod": "60s",
-                      "perSeriesAligner": "ALIGN_DELTA",
-                      "crossSeriesReducer": "REDUCE_PERCENTILE_95",
-                      "groupByFields": [
-                        "resource.label.\"service_name\""
-                      ]
-                    }
-                  }
-                },
-                "plotType": "LINE",
-                "minAlignmentPeriod": "60s",
-                "targetAxis": "Y1"
-              },
-              {
-                "timeSeriesQuery": {
-                  "timeSeriesFilter": {
-                    "filter": "metric.type=\"run.googleapis.com/request_latencies\" resource.type=\"cloud_run_revision\" resource.label.\"service_name\"=\"${service_name}\" resource.label.\"location\"=\"${region}\"",
-                    "aggregation": {
-                      "alignmentPeriod": "60s",
-                      "perSeriesAligner": "ALIGN_DELTA",
-                      "crossSeriesReducer": "REDUCE_PERCENTILE_50",
-                      "groupByFields": [
-                        "resource.label.\"service_name\""
-                      ]
-                    }
-                  }
-                },
-                "plotType": "LINE",
-                "minAlignmentPeriod": "60s",
-                "targetAxis": "Y1"
-              }
-            ],
-            "timeshiftDuration": "0s",
-            "yAxis": {
-              "label": "y1Axis",
-              "scale": "LINEAR"
-            },
-            "chartOptions": {
-              "mode": "COLOR"
-            }
-          }
-        }
-      },
-      {
-        "yPos": 4,
-        "width": 6,
-        "height": 3,
-        "widget": {
-          "title": "Container CPU Utilisation",
-          "xyChart": {
-            "dataSets": [
-              {
-                "timeSeriesQuery": {
-                  "timeSeriesFilter": {
-                    "filter": "metric.type=\"run.googleapis.com/container/cpu/utilizations\" resource.type=\"cloud_run_revision\" resource.label.\"service_name\"=\"${service_name}\" resource.label.\"location\"=\"${region}\"",
-                    "aggregation": {
-                      "alignmentPeriod": "60s",
-                      "perSeriesAligner": "ALIGN_SUM",
-                      "crossSeriesReducer": "REDUCE_PERCENTILE_99"
-                    }
-                  }
-                },
-                "plotType": "LINE",
-                "minAlignmentPeriod": "60s",
-                "targetAxis": "Y1"
-              },
-              {
-                "timeSeriesQuery": {
-                  "timeSeriesFilter": {
-                    "filter": "metric.type=\"run.googleapis.com/container/cpu/utilizations\" resource.type=\"cloud_run_revision\" resource.label.\"service_name\"=\"${service_name}\" resource.label.\"location\"=\"${region}\"",
-                    "aggregation": {
-                      "alignmentPeriod": "60s",
-                      "perSeriesAligner": "ALIGN_SUM",
-                      "crossSeriesReducer": "REDUCE_PERCENTILE_95"
-                    }
-                  }
-                },
-                "plotType": "LINE",
-                "minAlignmentPeriod": "60s",
-                "targetAxis": "Y1"
-              },
-              {
-                "timeSeriesQuery": {
-                  "timeSeriesFilter": {
-                    "filter": "metric.type=\"run.googleapis.com/container/cpu/utilizations\" resource.type=\"cloud_run_revision\" resource.label.\"service_name\"=\"${service_name}\" resource.label.\"location\"=\"${region}\"",
-                    "aggregation": {
-                      "alignmentPeriod": "60s",
-                      "perSeriesAligner": "ALIGN_SUM",
-                      "crossSeriesReducer": "REDUCE_PERCENTILE_50"
-                    }
-                  }
-                },
-                "plotType": "LINE",
-                "minAlignmentPeriod": "60s",
-                "targetAxis": "Y1"
-              }
-            ],
-            "timeshiftDuration": "0s",
-            "yAxis": {
-              "label": "y1Axis",
-              "scale": "LINEAR"
-            },
-            "chartOptions": {
-              "mode": "COLOR"
-            }
-          }
-        }
-      },
-      {
-        "xPos": 6,
-        "yPos": 4,
-        "width": 6,
-        "height": 3,
-        "widget": {
-          "title": "Container Memory Utiliation",
-          "xyChart": {
-            "dataSets": [
-              {
-                "timeSeriesQuery": {
-                  "timeSeriesFilter": {
-                    "filter": "metric.type=\"run.googleapis.com/container/memory/utilizations\" resource.type=\"cloud_run_revision\" resource.label.\"service_name\"=\"${service_name}\" resource.label.\"location\"=\"${region}\"",
-                    "aggregation": {
-                      "alignmentPeriod": "60s",
-                      "perSeriesAligner": "ALIGN_SUM",
-                      "crossSeriesReducer": "REDUCE_PERCENTILE_99",
-                      "groupByFields": [
-                        "resource.label.\"service_name\""
-                      ]
-                    }
-                  }
-                },
-                "plotType": "LINE",
-                "minAlignmentPeriod": "60s",
-                "targetAxis": "Y1"
-              },
-              {
-                "timeSeriesQuery": {
-                  "timeSeriesFilter": {
-                    "filter": "metric.type=\"run.googleapis.com/container/memory/utilizations\" resource.type=\"cloud_run_revision\" resource.label.\"service_name\"=\"${service_name}\" resource.label.\"location\"=\"${region}\"",
-                    "aggregation": {
-                      "alignmentPeriod": "60s",
-                      "perSeriesAligner": "ALIGN_SUM",
-                      "crossSeriesReducer": "REDUCE_PERCENTILE_95",
-                      "groupByFields": [
-                        "resource.label.\"service_name\""
-                      ]
-                    }
-                  }
-                },
-                "plotType": "LINE",
-                "minAlignmentPeriod": "60s",
-                "targetAxis": "Y1"
-              },
-              {
-                "timeSeriesQuery": {
-                  "timeSeriesFilter": {
-                    "filter": "metric.type=\"run.googleapis.com/container/memory/utilizations\" resource.type=\"cloud_run_revision\" resource.label.\"service_name\"=\"${service_name}\" resource.label.\"location\"=\"${region}\"",
-                    "aggregation": {
-                      "alignmentPeriod": "60s",
-                      "perSeriesAligner": "ALIGN_SUM",
-                      "crossSeriesReducer": "REDUCE_PERCENTILE_50",
-                      "groupByFields": [
-                        "resource.label.\"service_name\""
-                      ]
-                    }
-                  }
-                },
-                "plotType": "LINE",
-                "minAlignmentPeriod": "60s",
-                "targetAxis": "Y1"
-              }
-            ],
-            "timeshiftDuration": "0s",
-            "yAxis": {
-              "label": "y1Axis",
-              "scale": "LINEAR"
-            },
-            "chartOptions": {
-              "mode": "COLOR"
-            }
-          }
-        }
-      },
-      {
-        "yPos": 7,
-        "width": 6,
-        "height": 3,
-        "widget": {
-          "title": "Instance Count",
-          "xyChart": {
-            "dataSets": [
-              {
-                "timeSeriesQuery": {
-                  "timeSeriesFilter": {
-                    "filter": "metric.type=\"run.googleapis.com/container/instance_count\" resource.type=\"cloud_run_revision\" resource.labels.service_name=\"${service_name}\" resource.labels.location=\"${region}\"",
-                    "aggregation": {
-                      "alignmentPeriod": "60s",
-                      "perSeriesAligner": "ALIGN_MAX",
-                      "crossSeriesReducer": "REDUCE_SUM",
-                      "groupByFields": [
-                        "resource.label.service_name",
-                        "metric.label.state"
-                      ]
-                    }
-                  },
-                  "unitOverride": "count"
-                },
-                "plotType": "LINE",
-                "minAlignmentPeriod": "60s",
-                "targetAxis": "Y1"
-              }
-            ],
-            "xAxis": {
-              "scale": "LINEAR"
-            },
-            "chartOptions": {
-              "mode": "COLOR"
-            }
-          }
-        }
-      },
-      {
-        "xPos": 6,
-        "yPos": 7,
-        "width": 6,
-        "height": 3,
-        "widget": {
-          "title": "Billable instance time",
-          "xyChart": {
-            "dataSets": [
-              {
-                "timeSeriesQuery": {
-                  "timeSeriesFilter": {
-                    "filter": "metric.type=\"run.googleapis.com/container/billable_instance_time\" resource.type=\"cloud_run_revision\" resource.labels.service_name=\"${service_name}\" resource.labels.location=\"${region}\"",
-                    "aggregation": {
-                      "alignmentPeriod": "60s",
-                      "perSeriesAligner": "ALIGN_RATE",
-                      "crossSeriesReducer": "REDUCE_SUM",
-                      "groupByFields": [
-                        "resource.label.service_name"
-                      ]
-                    }
-                  }
-                },
-                "plotType": "LINE",
-                "minAlignmentPeriod": "60s",
-                "targetAxis": "Y1"
-              }
-            ],
-            "xAxis": {
-              "scale": "LINEAR"
-            },
-            "chartOptions": {
-              "mode": "COLOR"
-            }
-          }
-        }
-      }
-    ]
-  }
-}
diff --git a/dashboard.tf b/dashboard.tf
deleted file mode 100644
index dcf1d6555da35588d3ed69b5199519ac13796af0..0000000000000000000000000000000000000000
--- a/dashboard.tf
+++ /dev/null
@@ -1,19 +0,0 @@
-# Create a monitoring dashboard for the provisioned Cloud Run service.
-#
-# The dashboard, defined in ./dashboard.json, contains:
-# - Request counts broken down by response code class (ignoring 404s).
-# - Request latencies for 50th, 95th and 99th percentile.
-# - Container CPU and memory utilisations for 50th, 95th and 99th percentile.
-# - Container instance count and billable instance time.
-locals {
-  dashboard_template = templatefile("${path.module}/dashboard.json", {
-    service_name = var.name
-    region       = var.cloud_run_region
-  })
-}
-
-resource "google_monitoring_dashboard" "dashboard" {
-  count = var.create_monitoring_dashboard ? 1 : 0
-
-  dashboard_json = local.dashboard_template
-}
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8c2784bd4e3987b82b1ffce52d86a3fc53b1d449
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,27 @@
+# This docker-compose file is only used for running integration tests and generating docs. As such we're making some
+# allowances with regards to ignoring kics-scan checks.
+# kics-scan ignore
+
+name: gcp-cloud-run-app-testing
+
+services:
+  test:
+    image: registry.gitlab.developers.cam.ac.uk/uis/devops/infra/dockerimages/logan-terraform:1.6
+    entrypoint: ["bash", "-c"]
+    environment:
+      # This unsets the GOOGLE_APPLICATION_CREDENTIALS as it is not required but the logan-terraform images sets it.
+      - GOOGLE_APPLICATION_CREDENTIALS=
+      - GOOGLE_IMPERSONATE_SERVICE_ACCOUNT=terraform-deploy@infra-testing-int-e2395220.iam.gserviceaccount.com
+    volumes:
+      - .:/workdir:rw
+      - ~/.config/gcloud/application_default_credentials.json:/root/.config/gcloud/application_default_credentials.json:ro
+    cap_drop:
+      - "ALL"
+  terraform-docs:
+    image: quay.io/terraform-docs/terraform-docs:0.17.0
+    entrypoint: ["."]
+    working_dir: /workdir
+    volumes:
+      - .:/workdir:rw
+    cap_drop:
+      - "ALL"
diff --git a/docs/examples.md b/docs/examples.md
new file mode 100644
index 0000000000000000000000000000000000000000..60e75de4afe38456870fbccbc7fb60e9fee3dcf9
--- /dev/null
+++ b/docs/examples.md
@@ -0,0 +1,345 @@
+# Example Usage
+
+This page contains some examples of the different ways this module can be
+configured.
+
+## Basic
+
+A basic Cloud Run service with a single container definition.
+
+```hcl
+module "webapp" {
+  source  = "gitlab.developers.cam.ac.uk/uis/gcp-cloud-run-app/devops"
+  version = "~> 9.0"
+
+  region  = "europe-west2"
+  project = "example-project-id-1234"
+
+  containers = {
+    webapp = {
+      image = "us-docker.pkg.dev/cloudrun/container/hello"
+    }
+  }
+}
+```
+
+## Load balancer
+
+A basic Cloud Run service configured to use a load balancer for ingress.
+
+```hcl
+module "webapp" {
+  source  = "gitlab.developers.cam.ac.uk/uis/gcp-cloud-run-app/devops"
+  version = "~> 9.0"
+
+  region  = "europe-west2"
+  project = "example-project-id-1234"
+
+  containers = {
+    webapp = {
+      image = "us-docker.pkg.dev/cloudrun/container/hello"
+    }
+  }
+
+  enable_load_balancer = true
+
+  dns_names = {
+    webapp = "webapp.test.example.com"
+  }
+}
+
+resource "google_dns_record_set" "load_balancer_webapp" {
+  name         = "webapp.test.example.com."
+  type         = "A"
+  ttl          = 300
+  managed_zone = "example-zone"
+  project      = "example-project-id-1234"
+
+  rrdatas = [
+    module.webapp.load_balancer.external_ip
+  ]
+}
+```
+
+## Secret environment variables and volumes
+
+A Cloud Run service configured to load environment variables and mount volumes
+via Google Secret Manager secret objects.
+
+Note that you need to grant the created service account identity the
+ability to access the secret objects _outside_ of this module call.
+
+```hcl
+resource "google_secret_manager_secret" "main" {
+  secret_id = "my-secret"
+  project   = "example-project-id-1234"
+
+  replication {
+    auto {}
+  }
+}
+
+resource "google_secret_manager_secret_version" "main" {
+  secret      = google_secret_manager_secret.main.id
+  secret_data = "my-secret-data"
+}
+
+resource "google_secret_manager_secret_iam_member" "main" {
+  project   = "example-project-id-1234"
+  secret_id = google_secret_manager_secret.main.id
+  role      = "roles/secretmanager.secretAccessor"
+  member    = "serviceAccount:${module.webapp.service_account.email}"
+}
+
+module "webapp" {
+  source  = "gitlab.developers.cam.ac.uk/uis/gcp-cloud-run-app/devops"
+  version = "~> 9.0"
+
+  region  = "europe-west2"
+  project = "example-project-id-1234"
+
+  containers = {
+    webapp = {
+      image = "us-docker.pkg.dev/cloudrun/container/hello"
+      env = [
+        {
+          name = "SECRET",
+          value_source = {
+            secret_key_ref = {
+              secret  = google_secret_manager_secret.main.id
+              version = "latest"
+            }
+          }
+        }
+      ]
+      volume_mounts = [
+        {
+          name       = "secret-volume",
+          mount_path = "/secrets"
+        }
+      ]
+    }
+  }
+  volumes = [
+    {
+      name = "secret-volume",
+      secret = {
+        secret = google_secret_manager_secret.main.id
+        items = [
+          {
+            version = "latest",
+            path    = "my-secret"
+          }
+        ]
+      }
+    }
+  ]
+}
+```
+
+## Mounting CloudSQL instances
+
+A Cloud Run service which mounts an existing CloudSQL instance using the
+`mount_cloudsql_instance` helper variable.
+
+```hcl
+module "webapp" {
+  source  = "gitlab.developers.cam.ac.uk/uis/gcp-cloud-run-app/devops"
+  version = "~> 9.0"
+
+  region  = "europe-west2"
+  project = "example-project-id-1234"
+
+  containers = {
+    webapp = {
+      image = "us-docker.pkg.dev/cloudrun/container/hello"
+    }
+  }
+
+  mount_cloudsql_instance = module.sql.instance_connection_name
+}
+
+module "sql" {
+  source  = "GoogleCloudPlatform/sql-db/google//modules/postgresql"
+  version = "~> 17.0"
+
+  database_version            = "POSTGRES_15"
+  name                        = "test-sql-1234"
+  project_id                  = "example-project-id-1234"
+  tier                        = "db-f1-micro"
+  availability_type           = "ZONAL"
+  region                      = "europe-west2"
+  zone                        = "europe-west2-a"
+  deletion_protection         = false
+  deletion_protection_enabled = false
+}
+```
+
+## Pre-deploy job
+
+A Cloud Run service with a corresponding "pre-deploy" Cloud Run job. See the
+[Pre-deploy Cloud Run Job](../README.md#pre-deploy-cloud-run-job) section in the
+README.md for more information.
+
+```hcl
+module "webapp" {
+  source  = "gitlab.developers.cam.ac.uk/uis/gcp-cloud-run-app/devops"
+  version = "~> 9.0"
+
+  region  = "europe-west2"
+  project = "example-project-id-1234"
+
+  containers = {
+    webapp = {
+      image = "registry.gitlab.developers.cam.ac.uk/uis/devops/infra/dockerimages/django:5.0-py3.12"
+    }
+  }
+
+  mount_cloudsql_instance = module.sql.instance_connection_name
+
+  enable_pre_deploy_job = true
+
+  pre_deploy_job_container = {
+    image   = "registry.gitlab.developers.cam.ac.uk/uis/devops/infra/dockerimages/django:5.0-py3.12"
+    command = ["python3"]
+    args    = ["/usr/src/app/manage.py", "migrate"]
+  }
+
+  pre_deploy_job_mount_cloudsql_instance = module.sql.instance_connection_name
+}
+
+module "sql" {
+  source  = "GoogleCloudPlatform/sql-db/google//modules/postgresql"
+  version = "~> 17.0"
+
+  database_version            = "POSTGRES_15"
+  name                        = "test-sql-1234"
+  project_id                  = "example-project-id-1234"
+  tier                        = "db-f1-micro"
+  availability_type           = "ZONAL"
+  region                      = "europe-west2"
+  zone                        = "europe-west2-a"
+  deletion_protection         = false
+  deletion_protection_enabled = false
+}
+```
+
+## Multi-container deployment
+
+A Cloud Run service which defines multiple containers (sidecars). For more
+information see the [Cloud Run
+documentation](https://cloud.google.com/run/docs/deploying#sidecars).
+
+```hcl
+module "webapp" {
+  source  = "gitlab.developers.cam.ac.uk/uis/gcp-cloud-run-app/devops"
+  version = "~> 9.0"
+
+  region  = "europe-west2"
+  project = "example-test-b99f7ad6"
+
+  containers = {
+    webapp1 = {
+      name  = "webapp-1"
+      image = "us-docker.pkg.dev/cloudrun/container/hello"
+      ports = [
+        {
+          container_port = 8080
+        }
+      ]
+    }
+    webapp2 = {
+      name  = "webapp-2"
+      image = "us-docker.pkg.dev/cloudrun/container/hello"
+    }
+  }
+}
+```
+
+## Canary release traffic distribution
+
+A Cloud Run service which allocates incoming traffic equally between two
+revisions.
+
+This example uses the `revision` variable to deploy named revisions of the Cloud
+Run service. This allows you to target these named revisions specifically to
+split traffic between one or more revisions via the `traffic` variable.
+
+```hcl
+module "webapp" {
+  source  = "gitlab.developers.cam.ac.uk/uis/gcp-cloud-run-app/devops"
+  version = "~> 9.0"
+
+  region  = "europe-west2"
+  project = "example-project-id-1234"
+
+  revision = "v1-1-0"
+
+  containers = {
+    webapp = {
+      image = "us-docker.pkg.dev/cloudrun/container/hello"
+    }
+  }
+
+  traffic = [
+    {
+      type     = "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"
+      revision = "v1-0-0"
+      percent  = 50
+    },
+    {
+      type     = "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"
+      revision = "v1-1-0"
+      percent  = 50
+    }
+  ]
+}
+```
+
+## Static egress IP configuration
+
+A Cloud Run service configured with a static IP address for egress. See the
+[Static Outbound IP
+Address](https://cloud.google.com/run/docs/configuring/static-outbound-ip) page
+in the Cloud Run documentation for details of this implementation.
+
+The address is available in the `static_egress_ip` output of this module.
+
+```hcl
+module "webapp" {
+  source  = "gitlab.developers.cam.ac.uk/uis/gcp-cloud-run-app/devops"
+  version = "~> 9.0"
+
+  enable_static_egress_ip = true
+  region                  = "europe-west2"
+  project                 = "example-test-b99f7ad6"
+
+  containers = {
+    webapp = {
+      image = "us-docker.pkg.dev/cloudrun/container/hello"
+    }
+  }
+}
+```
+
+## Uptime and SSL Monitoring
+
+A basic Cloud Run service with default monitoring enabled.
+
+```hcl
+module "webapp" {
+  source  = "gitlab.developers.cam.ac.uk/uis/gcp-cloud-run-app/devops"
+  version = "~> 9.0"
+
+  region  = "europe-west2"
+  project = "example-project-id-1234"
+
+  enable_monitoring = true
+
+  containers = {
+    webapp = {
+      image = "us-docker.pkg.dev/cloudrun/container/hello"
+    }
+  }
+}
+```
diff --git a/docs/pre-deploy-job.md b/docs/pre-deploy-job.md
new file mode 100644
index 0000000000000000000000000000000000000000..9e618883c6b7c359fc974d46430457a509d2f32e
--- /dev/null
+++ b/docs/pre-deploy-job.md
@@ -0,0 +1,32 @@
+# Pre-deploy Cloud Run job
+
+The `8.0.0` release introduced the `enable_pre_deploy_job` variable which, when
+set to `true`, creates a Cloud Run job to execute a configurable command before
+new Cloud Run service revisions are deployed. This is a useful way to run
+database migrations and other commands which are tightly coupled to the release
+cadence of the main Cloud Run service.
+
+The pre-deploy job is configured via the `pre_deploy_job_*` variables which can
+be found in `variables.tf`.
+
+## Triggering the Cloud Run job
+
+The Cloud Run job is executed by a `null_resource` resource which simply runs
+the `gcloud run jobs execute` command. The `null_resource` is triggered each
+time the `var.pre_deploy_job_container.image` value changes by default, although
+you can force it to run via the `pre_deploy_job_force` variable.
+
+## Order of operations
+
+To ensure that the pre-deploy job runs before a new revision of the Cloud Run
+service is deployed, the resources in question are explicitly configured with
+`depends_on` relationships as follows.
+
+- The `google_cloud_run_v2_job.pre_deploy` Cloud Run job has no `depends_on`
+  relationships defined and is therefore deployed first.
+- The `null_resource.pre_deploy_job_trigger` resource depends on
+  `google_cloud_run_v2_job.pre_deploy` and therefore won't be deployed until the
+  Cloud Run job is deployed successfully.
+- Finally, the `google_cloud_run_service.webapp` Cloud Run service depends on
+  `null_resource.pre_deploy_job_trigger`, meaning it is only deployed once the
+  `null_resource.pre_deploy_job_trigger` has executed successfully.
diff --git a/docs/templates/footer.md b/docs/templates/footer.md
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/docs/templates/header.md b/docs/templates/header.md
new file mode 100644
index 0000000000000000000000000000000000000000..13f31184f5ff33fb4f39baae522865ff1fab1d4c
--- /dev/null
+++ b/docs/templates/header.md
@@ -0,0 +1,40 @@
+# GCP Cloud Run Terraform Module
+
+This module manages the deployment of containerised applications on Cloud Run.
+It includes the following functionality:
+
+- Creation of the main Cloud Run service.
+- Creation of a dedicated service account with required IAM bindings.
+- Deployment of an optional load balancer configuration.
+- Deployment of an optional "pre-deployment" Cloud Run job to handle tasks such
+  as database migrations.
+- Deployment of an optional static egress IP address for the Cloud Run service.
+- Configuration of simple uptime checks and SSL certificate expiry alerts.
+- Convenience variables to configure certain aspects of the Cloud Run service
+  which would otherwise be complex, such as mounting Cloud SQL instances.
+
+## Cloud Run v2 API
+
+Following release `9.0.0`, this module has been refactored to use the Cloud Run
+`v2` API resources exclusively (i.e. `google_cloud_run_v2_service`). This means
+that many input variables are now different and updating from a previous version
+will require some changes to your module definition.
+
+## Examples
+
+See the [docs/examples.md](docs/examples.md) page for a full list of detailed
+usage examples.
+
+## Pre-deploy Cloud Run Job
+
+The `8.0.0` release introduced the `enable_pre_deploy_job` variable which, when
+set to `true`, creates a Cloud Run job to execute a configurable command before
+new Cloud Run service revisions are deployed. This is a useful way to run
+database migrations and other commands which are tightly coupled to the release
+cadence of the main Cloud Run service.
+
+The pre-deploy job is configured via the `pre_deploy_job_*` variables which can
+be found in `variables.tf`.
+
+For more information on how the pre-deploy Cloud Run job works see the
+[pre-deploy-job.md](docs/pre-deploy-job.md) page.
diff --git a/docs/testing.md b/docs/testing.md
new file mode 100644
index 0000000000000000000000000000000000000000..a0b865dbd096b3851490e189978d215f46fbb9c0
--- /dev/null
+++ b/docs/testing.md
@@ -0,0 +1,117 @@
+# Testing
+
+This project makes use of Terraform's built-in `test` command to run integration
+tests. The tests are configured in the `tests` directory and deploy resources to
+our dedicated Infra Testing GCP project.
+
+## Running tests locally
+
+To run tests locally you can use the `run_tests.sh` helper script. By default
+the script will execute _all_ tests, however, it is often useful to target a
+specific test file using the `-t` option. For example, to run the
+`cloud_run_service.tftest.hcl` test you would use the following command.
+
+```bash
+# You must have authenticated and set the application-default credentials.
+gcloud auth application-default login
+
+./run_tests.sh -t tests/cloud_run_service.tftest.hcl
+```
+
+Note that the `test` service defined in `docker-compose.yml` sets the
+`GOOGLE_IMPERSONATE_SERVICE_ACCOUNT` variable. You must have permission to
+impersonate this service account to be able to run these tests.
+
+## GitLab CI/CD test jobs
+
+The `tests` job in the `.gitlab-ci.yml` file is configured to run all of the
+test files in the `tests` directory. The tests are run in parallel using a
+`matrix` job. Variables are used to allow us to specify version constraints to
+test against multiple supported major versions of the Google Terraform provider.
+
+The `tests` job is configured to require a manual trigger. This is due to the
+number of resources that the jobs will deploy and the length of time the jobs
+take to complete. With this in mind, you should generally only need to run the
+tests job at the point you open a merge request, ensuring the job is passing
+before requesting a review.
+
+## Resource teardown and cleanup
+
+If a test job fails Terraform attempts to teardown any resources it has already
+created. This seems to work well the majority of the time. However, to protect
+against resources not being destroyed, and potentially costing £££, there are
+two `cleanup` jobs configured in the `.gitlab-ci.yml` file, `pre-cleanup` and
+`post-cleanup`. These jobs both run the `tests/cleanup.sh` script which is
+configured to check for any resources that _could_ have been created and delete
+any that it finds. We run the `pre-cleanup` job to ensure that there is a clean
+environment prior to the current test run, avoiding any subnet clashes etc.
+
+It's also fine to run the `tests/cleanup.sh` script from your local machine to
+perform an ad-hoc cleanup. First authenticate your `gcloud` session and then
+simply run the script, for example:
+
+```bash
+gcloud auth login
+
+./tests/cleanup.sh
+```
+
+## Troubleshooting
+
+### Google's eventually consistent APIs
+
+Many of Google's APIs are eventually consistent. This often causes issues as IAM
+bindings and API enablement can be delayed causing our Terraform to fail.
+Unfortunately, this is simply unavoidable and the only workaround is to rerun
+the failed job.
+
+#### Error 403: Permission 'iam.serviceaccounts.actAs' denied on service account
+
+The following error is an example of the eventual consistency issue. If you're
+unlucky enough to see this failure you should simply retry the job as often it
+just works the second time.
+
+```bash
+tests/cloud_run_service.tftest.hcl... in progress
+  run "setup"... pass
+  run "test_service_with_default_variable_values"... fail
+╷
+│ Error: Error creating Service: googleapi: Error 403: Permission
+│ 'iam.serviceaccounts.actAs' denied on service account
+│ test-fab59940-run@infra-testing-int-e2395220.iam.gserviceaccount.com
+│ (or it may not exist).
+│
+│   with google_cloud_run_v2_service.webapp,
+│   on main.tf line 23, in resource "google_cloud_run_v2_service" "webapp":
+│   23: resource "google_cloud_run_v2_service" "webapp" {
+│
+╵
+```
+
+### Invalid IPCidrRange: 10.124.0.0/28 conflicts with existing subnetwork
+
+This error usually means a previous test run failed to tear down its resources
+correctly so the subnet range is already in use. You should investigate the
+previous test runs and destroy all orphaned resources before rerunning the
+failed job.
+
+```bash
+tests/monitoring.tftest.hcl... in progress
+  run "setup"... pass
+  run "test_monitoring_with_alert_policies_created_in_default_project"... pass
+  run "test_monitoring_with_alert_policies_created_in_scoping_project"... pass
+  run "test_monitoring_with_auth_proxy"... pass
+  run "test_monitoring_with_auth_proxy_and_vpc_access_connector"... fail
+╷
+│ Error: Error waiting to create Subnetwork: Error waiting for Creating
+│ Subnetwork: Invalid IPCidrRange: 10.124.0.0/28 conflicts with existing
+│ subnetwork 'test-209abb96-vpc-connector' in region 'europe-west2'.
+│
+│
+│   with google_compute_subnetwork.vpc_connector[0],
+│   on static_egress_ip.tf line 6, in resource "google_compute_subnetwork"
+│ "vpc_connector":
+│    6: resource "google_compute_subnetwork" "vpc_connector" {
+│
+╵
+```
diff --git a/examples/.gitignore b/examples/.gitignore
deleted file mode 100644
index 11d931940f81da56486fa8dbb5840e12ac427080..0000000000000000000000000000000000000000
--- a/examples/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-# Ignore files created when running examples
-*.tfstate.*
-*.tfstate
-.terraform
diff --git a/examples/root-example/README.md b/examples/root-example/README.md
deleted file mode 100644
index 1df4581ec3fb8b62767aa5bc8dd6bf43a161e590..0000000000000000000000000000000000000000
--- a/examples/root-example/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-# Simple example
-
-This is a simple example of deploying a Cloud Run hosted container into a
-GCP project. Specify the project to deploy into and the sql instance to connect to
-on the command line. The sql instance connection is optional, use an empty string
-if you do not want to connect Cloud Run to a SQL instance.
-So, for example, to deploy to the project ``my-project``:
-
-```console
-$ terraform init
-$ terraform apply -var project=my-project -var sql_instance_connection_name=example
-```
-
-In this example, terraform is configured to use default application credentials.
-For Google APIs and these credentials should correspond to a user with owner or
-editor access to the target project. You can use the ``gcloud`` command line
-tool to set your personal credentials as application default credentials. See
-the ``gcloud auth application-default`` command output for more information.
diff --git a/examples/root-example/main.tf b/examples/root-example/main.tf
deleted file mode 100644
index 797076166eeb741e9a280691bed13a9a80d42547..0000000000000000000000000000000000000000
--- a/examples/root-example/main.tf
+++ /dev/null
@@ -1,11 +0,0 @@
-# A Secret Manager secret which holds the secret "test_scret"
-module "webapp" {
-  source  = "git::https://gitlab.developers.cam.ac.uk/uis/devops/infra/terraform/gcp-cloud-run-app.git"
-  project = var.project
-
-  cloud_run_region = "europe-west2"
-  max_scale        = 20
-  min_scale        = 2
-
-  sql_instance_connection_name = var.sql_instance_connection_name
-}
diff --git a/examples/root-example/providers.tf b/examples/root-example/providers.tf
deleted file mode 100644
index ee647e5139e8a6c66028f89e5f3cd382ae2a72bf..0000000000000000000000000000000000000000
--- a/examples/root-example/providers.tf
+++ /dev/null
@@ -1,9 +0,0 @@
-# Both the google and google-beta provider are configured identically to use
-# default application credentials.
-provider "google" {
-  version = "~> 3.13"
-}
-
-provider "google-beta" {
-  version = "~> 3.13"
-}
diff --git a/examples/root-example/variables.tf b/examples/root-example/variables.tf
deleted file mode 100644
index 63a4e92c63df040852ce273d4d7c379bc3350b22..0000000000000000000000000000000000000000
--- a/examples/root-example/variables.tf
+++ /dev/null
@@ -1,7 +0,0 @@
-variable "project" {
-  description = "GCP project id to create cluster in"
-}
-
-variable "sql_instance_connection_name" {
-  description = "SQL instance connection name. Assumes the instance is in the same project."
-}
diff --git a/load_balancer.tf b/load_balancer.tf
index 8092e96639f8e12e17b32937ab3b00a7f44960ce..68008af5aeb7daecef5a4eb9cc7c8453cc9da43a 100644
--- a/load_balancer.tf
+++ b/load_balancer.tf
@@ -1,24 +1,24 @@
-# load_balancer.tf configures Cloud Load Balancer resources for the Cloud Run
-# service if var.ingress_style == "load-balancer".
+# load_balancer.tf configures Cloud Load Balancer resources for the Cloud Run service if
+# var.enable_load_balancer == true.
 
 # A network endpoint group for the "webapp" application.
 resource "google_compute_region_network_endpoint_group" "webapp" {
-  count = var.ingress_style == "load-balancer" ? 1 : 0
+  count = var.enable_load_balancer ? 1 : 0
 
   name                  = var.name
+  project               = var.project
   network_endpoint_type = "SERVERLESS"
-  region                = var.cloud_run_region
+  region                = var.region
   cloud_run {
-    service = google_cloud_run_service.webapp.name
+    service = google_cloud_run_v2_service.webapp.name
   }
-
-  provider = google-beta
 }
 
 resource "google_compute_ssl_policy" "default" {
-  count = var.ingress_style == "load-balancer" && var.ssl_policy == null ? 1 : 0
+  count = var.enable_load_balancer && var.ssl_policy == null ? 1 : 0
 
   name            = "${var.name}-modern"
+  project         = var.project
   profile         = "MODERN"
   min_tls_version = "TLS_1_2"
 }
@@ -28,23 +28,23 @@ resource "google_compute_ssl_policy" "default" {
 #
 # [1] https://registry.terraform.io/modules/GoogleCloudPlatform/lb-http/google/latest/submodules/serverless_negs
 module "webapp_http_load_balancer" {
-  count = var.ingress_style == "load-balancer" ? 1 : 0
+  count = var.enable_load_balancer ? 1 : 0
 
   # The double slash is important(!)
   source  = "GoogleCloudPlatform/lb-http/google//modules/serverless_negs"
-  version = ">= 6.2.0, < 7.0.0"
+  version = "~> 9.0"
 
   project = var.project
   name    = var.name
 
-  ssl            = true
-  https_redirect = true
+  ssl                             = true
+  managed_ssl_certificate_domains = [for k, v in var.dns_names : v]
+  ssl_policy                      = var.ssl_policy == null ? google_compute_ssl_policy.default[0].id : var.ssl_policy
+  https_redirect                  = true
 
   # Use custom TLS certs if var.use_ssl_certificates is true, otherwise, use the Google-managed certs.
-  use_ssl_certificates            = var.use_ssl_certificates
-  ssl_certificates                = var.ssl_certificates
-  managed_ssl_certificate_domains = local.dns_names
-  ssl_policy                      = var.ssl_policy == null ? google_compute_ssl_policy.default[0].id : var.ssl_policy
+  use_ssl_certificates = var.use_ssl_certificates
+  ssl_certificates     = var.ssl_certificates
 
   # Whether to create an IPv6 address to the load balancer.
   enable_ipv6         = var.enable_ipv6
@@ -53,10 +53,12 @@ module "webapp_http_load_balancer" {
   backends = {
     default = {
       description             = null
+      protocol                = "HTTP"
       enable_cdn              = false
       custom_request_headers  = null
       custom_response_headers = null
       security_policy         = null
+      compression_mode        = null
 
       log_config = {
         enable      = true
diff --git a/locals.tf b/locals.tf
index 3ca469d0bfc3da460523931c1dc3195a17b9a106..868e37a2354bc4d2bb456eca2daf32a2e73cc648 100644
--- a/locals.tf
+++ b/locals.tf
@@ -4,69 +4,44 @@ locals {
   # Project containing existing Cloud SQL instance.
   sql_instance_project = coalesce(var.sql_instance_project, var.project)
 
-  # Should a DNS domain mapping be created?
-  domain_mapping_present = anytrue([for dm in google_cloud_run_domain_mapping.webapp : true])
+  ingress = var.enable_load_balancer ? "INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER" : var.ingress
 
-  # DNS names for web app
-  dns_names = var.dns_name != "" ? [var.dns_name] : var.dns_names
+  # Whether we should monitor the custom domain - only possible if there are dns names set and unauthenticated
+  # invocation is enabled.
+  can_monitor_custom_dns = length(var.dns_names) > 0 && var.allow_unauthenticated_invocations
 
-  # DNS records for webapp. Merge records from any domain mappings or load balancers.
-  dns_records = flatten(concat(
-    [
-      for domain_mapping in google_cloud_run_domain_mapping.webapp : [
-        {
-          type   = domain_mapping.status[0].resource_records[0].type
-          rrdata = domain_mapping.status[0].resource_records[0].rrdata
-        }
-      ]
-    ],
-    [
-      for load_balancer in module.webapp_http_load_balancer : [
-        {
-          type   = "A"
-          rrdata = load_balancer.external_ip
-        },
-        {
-          type   = "AAAA"
-          rrdata = load_balancer.external_ipv6_address
-        }
-      ]
-    ]
-  ))
-
-  pre_deploy_job_image_name = var.pre_deploy_job_image_name == null ? var.image_name : var.pre_deploy_job_image_name
-
-  pre_deploy_job_environment_variables = var.pre_deploy_job_environment_variables == null ? var.environment_variables : var.pre_deploy_job_environment_variables
-
-  # Certain ingress styles imply that we disallow external access to the base Cloud Run service.
-  webapp_allowed_ingress = lookup({
-    load-balancer = "internal-and-cloud-load-balancing"
-  }, var.ingress_style, var.allowed_ingress)
-
-  # Whether we should monitor the custom domain - only possible if there are a dns names
-  # set and unauthenticated invocation is enabled.
-  can_monitor_custom_dns = length(local.dns_names) > 0 && var.allow_unauthenticated_invocations
-
-  # Holds which VPC connector can be used for the auth proxy Cloud Function egress settings
-  auth_proxy_egress_connector = var.enable_static_egress_ip ? google_vpc_access_connector.static-ip-connector[0].id : var.auth_proxy_egress_connector
+  create_vpc_connector = var.vpc_access == null && (
+    var.enable_static_egress_ip || (var.enable_monitoring && local.ingress != "INGRESS_TRAFFIC_ALL")
+  )
 
-  # Map containing the hosts to monitor and whether an auth proxy and egress connector
-  # should be configured.
-  monitor_hosts = var.disable_monitoring ? {} : merge(
-    {
-      webapp = {
-        host                    = trimsuffix(trimprefix(google_cloud_run_service.webapp.status[0].url, "https://"), "/"),
-        enable_auth_proxy       = !var.allow_unauthenticated_invocations || local.webapp_allowed_ingress != "all",
-        enable_egress_connector = local.webapp_allowed_ingress != "all"
-      },
+  # Determines which VPC connector should be used for the Cloud Run service.
+  vpc_access = local.create_vpc_connector ? {
+    connector          = google_vpc_access_connector.main[0].id
+    egress             = "ALL_TRAFFIC"
+    network_interfaces = null
+  } : var.vpc_access
+
+  auth_proxy_vpc_access = local.create_vpc_connector ? {
+    connector          = google_vpc_access_connector.main[0].id
+    egress             = "ALL_TRAFFIC"
+    network_interfaces = null
+  } : var.vpc_access
+
+  # Map containing the hosts to monitor and whether an auth proxy and egress vpc access connector should be configured.
+  monitor_hosts = var.enable_monitoring ? merge({
+    default = {
+      host                    = trimsuffix(trimprefix(google_cloud_run_v2_service.webapp.uri, "https://"), "/"),
+      enable_auth_proxy       = var.allow_unauthenticated_invocations == false || local.ingress != "INGRESS_TRAFFIC_ALL",
+      enable_egress_connector = local.ingress != "INGRESS_TRAFFIC_ALL"
+    },
     },
     local.can_monitor_custom_dns ? {
-      for dns_name in local.dns_names :
-      (dns_name) => {
-        host                    = dns_name
-        enable_auth_proxy       = local.webapp_allowed_ingress == "internal",
-        enable_egress_connector = local.webapp_allowed_ingress == "internal"
+      for k, v in var.dns_names :
+      k => {
+        host                    = v
+        enable_auth_proxy       = local.ingress == "INGRESS_TRAFFIC_INTERNAL_ONLY",
+        enable_egress_connector = local.ingress == "INGRESS_TRAFFIC_INTERNAL_ONLY"
       }
     } : {}
-  )
+  ) : {}
 }
diff --git a/main.tf b/main.tf
index b0eadf31294ef2543eb76bc0bd2e0ba4fff128e1..037bc9aa1952b05ed4976ccfcd7980dae383c8f4 100644
--- a/main.tf
+++ b/main.tf
@@ -20,235 +20,256 @@ resource "google_project_iam_member" "webapp_sql_client" {
   member  = "serviceAccount:${google_service_account.webapp.email}"
 }
 
-# A Cloud Run service which hosts the webapp
-
-resource "google_cloud_run_service" "webapp" {
-  name                       = var.name
-  location                   = var.cloud_run_region
-  project                    = var.project
-  autogenerate_revision_name = true
-
-  metadata {
-    annotations = merge(
-      {
-        "serving.knative.dev/creator" : "placeholder",
-        "serving.knative.dev/lastModifier" : "placeholder",
-
-        # As mentioned at https://www.terraform.io/docs/configuration/resources.html#ignore_changes
-        # placeholders need to be created as the adding the key to the map is
-        # considered a change and not ignored by ignore_changes. This needs to
-        # *always* be present in the config in order for it to appear in
-        # ignore_changes.
-        "run.googleapis.com/ingress-status" : "placeholder",
-      },
-
-      # Add the beta launch stage if required.
-      var.enable_beta_launch_stage ? {
-        # Required to be able to set ingress type and secrets volume mounts
-        "run.googleapis.com/launch-stage" : "BETA",
-      } : {},
-
-      # Specify the allowable ingress types.
-      {
-        "run.googleapis.com/ingress" : local.webapp_allowed_ingress,
-      },
-
-      var.service_annotations,
-    )
-  }
+resource "google_cloud_run_v2_service" "webapp" {
+  name         = var.name
+  location     = var.region
+  description  = var.description
+  project      = var.project
+  labels       = var.service_labels
+  client       = "terraform"
+  ingress      = local.ingress
+  launch_stage = var.launch_stage
+  annotations  = var.service_annotations
 
   template {
-    metadata {
-      annotations = merge(
-        # Annotations which are always set:
-        {
-          # Maximum number of auto-scaled instances.  For a container with
-          # N-workers, maxScale should be less than 1/N of the maximum connection
-          # count for the Cloud SQL instance.
-          "autoscaling.knative.dev/maxScale" = var.max_scale
-
-          # Minim number of instances.
-          "autoscaling.knative.dev/minScale" = var.min_scale
-
-          # As mentioned at https://www.terraform.io/docs/configuration/resources.html#ignore_changes
-          # placeholders need to be created as the adding the key to the map is
-          # considered a change and not ignored by ignore_changes
-          "client.knative.dev/user-image"     = "placeholder"
-          "run.googleapis.com/client-name"    = "placeholder"
-          "run.googleapis.com/client-version" = "placeholder"
-          "run.googleapis.com/sandbox"        = "gvisor"
-        },
-
-        # Annotations which are only set if there is a Cloud SQL instance:
-        (var.sql_instance_connection_name != "") ? {
-          # Cloud SQL instances to auto-magically make appear in the container as
-          # Unix sockets.
-          "run.googleapis.com/cloudsql-instances" = var.sql_instance_connection_name
-        } : {},
-
-        # Annocations which are only set if we are allocating a static egress ip:
-        var.enable_static_egress_ip ? {
-          # Assign the vpc connector and indicate that it should be used for all traffic
-          "run.googleapis.com/vpc-access-egress"    = "all"
-          "run.googleapis.com/vpc-access-connector" = google_vpc_access_connector.static-ip-connector[0].id
-        } : {},
-
-        # Additional template annotations passed as a variable.
-        var.template_annotations,
-      )
+    revision                         = var.revision
+    labels                           = var.template_labels
+    annotations                      = var.template_annotations
+    timeout                          = var.timeout_seconds
+    service_account                  = google_service_account.webapp.email
+    execution_environment            = var.execution_environment
+    encryption_key                   = var.encryption_key
+    max_instance_request_concurrency = var.max_instance_request_concurrency
+    session_affinity                 = var.session_affinity
+
+    dynamic "scaling" {
+      for_each = var.scaling != null ? [var.scaling] : []
+      content {
+        min_instance_count = scaling.value["min_instance_count"]
+        max_instance_count = scaling.value["max_instance_count"]
+      }
     }
-
-    spec {
-      # Maximum number of concurrent requests to an instance before it is
-      # auto-scaled. For webapps which use connection pooling, it should be safe
-      # to set this number without regard to the connection limit of the Cloud
-      # SQL instance. This can be no greater than 80.
-      #
-      # See https://cloud.google.com/run/docs/about-concurrency.
-      container_concurrency = var.container_concurrency
-
-      service_account_name = google_service_account.webapp.email
-
-      containers {
-        image = var.image_name
-
-        resources {
-          limits = {
-            cpu    = var.cpu_limit
-            memory = var.memory_limit
+    dynamic "vpc_access" {
+      for_each = local.vpc_access != null ? [local.vpc_access] : []
+      content {
+        connector = vpc_access.value["connector"]
+        egress    = vpc_access.value["egress"]
+        dynamic "network_interfaces" {
+          for_each = vpc_access.value["network_interfaces"] != null ? [vpc_access.value["network_interfaces"]] : []
+          iterator = network_interface
+          content {
+            network    = network_interface.value["network"]
+            subnetwork = network_interface.value["subnetwork"]
           }
         }
-
+      }
+    }
+    dynamic "containers" {
+      for_each = var.containers
+      iterator = container
+      content {
+        name        = container.value["name"]
+        image       = container.value["image"]
+        command     = container.value["command"]
+        args        = container.value["args"]
+        working_dir = container.value["working_dir"]
         dynamic "env" {
-          for_each = var.environment_variables
+          for_each = container.value["env"]
           content {
-            name  = env.key
-            value = env.value
+            name  = env.value["name"]
+            value = env.value["value"]
+            dynamic "value_source" {
+              for_each = env.value["value_source"] != null ? [env.value["value_source"]] : []
+              content {
+                dynamic "secret_key_ref" {
+                  for_each = value_source.value["secret_key_ref"] != null ? [value_source.value["secret_key_ref"]] : []
+                  content {
+                    secret  = secret_key_ref.value["secret"]
+                    version = secret_key_ref.value["version"]
+                  }
+                }
+              }
+            }
           }
         }
-
-        dynamic "env" {
-          for_each = var.secrets_envars
+        dynamic "resources" {
+          for_each = container.value["resources"] != null ? [container.value["resources"]] : []
+          iterator = resource
+          content {
+            limits            = resource.value["limits"]
+            cpu_idle          = resource.value["cpu_idle"]
+            startup_cpu_boost = resource.value["startup_cpu_boost"]
+          }
+        }
+        dynamic "ports" {
+          for_each = container.value["ports"]
+          iterator = port
+          content {
+            name           = port.value["name"]
+            container_port = port.value["container_port"]
+          }
+        }
+        dynamic "volume_mounts" {
+          for_each = container.value["volume_mounts"]
+          iterator = volume_mount
+          content {
+            name       = volume_mount.value["name"]
+            mount_path = volume_mount.value["mount_path"]
+          }
+        }
+        dynamic "volume_mounts" {
+          for_each = var.mount_cloudsql_instance != null ? [1] : []
+          iterator = instance
           content {
-            name = env.value["name"]
-            value_from {
-              secret_key_ref {
-                name = env.value["id"]
-                key  = coalesce(env.value["version"], "latest")
+            name       = "cloudsql"
+            mount_path = "/cloudsql"
+          }
+        }
+        dynamic "liveness_probe" {
+          for_each = container.value["liveness_probe"] != null ? [container.value["liveness_probe"]] : []
+          content {
+            initial_delay_seconds = liveness_probe.value["initial_delay_seconds"]
+            timeout_seconds       = liveness_probe.value["timeout_seconds"]
+            period_seconds        = liveness_probe.value["period_seconds"]
+            failure_threshold     = liveness_probe.value["failure_threshold"]
+            dynamic "http_get" {
+              for_each = liveness_probe.value["http_get"] != null ? [liveness_probe.value["http_get"]] : []
+              content {
+                path = http_get.value["path"]
+                port = http_get.value["port"]
+                dynamic "http_headers" {
+                  for_each = http_get.value["http_headers"]
+                  iterator = http_header
+                  content {
+                    name  = http_header.value["name"]
+                    value = http_header.value["value"]
+                  }
+                }
+              }
+            }
+            dynamic "grpc" {
+              for_each = liveness_probe.value["grpc"] != null ? [liveness_probe.value["grpc"]] : []
+              content {
+                port    = grpc.value["port"]
+                service = grpc.value["service"]
               }
             }
           }
         }
-
-        dynamic "volume_mounts" {
-          for_each = var.secrets_volume
+        dynamic "startup_probe" {
+          for_each = container.value["startup_probe"] != null ? [container.value["startup_probe"]] : []
           content {
-            name       = volume_mounts.value["name"]
-            mount_path = volume_mounts.value["path"]
+            initial_delay_seconds = startup_probe.value["initial_delay_seconds"]
+            timeout_seconds       = startup_probe.value["timeout_seconds"]
+            period_seconds        = startup_probe.value["period_seconds"]
+            failure_threshold     = startup_probe.value["failure_threshold"]
+            dynamic "http_get" {
+              for_each = startup_probe.value["http_get"] != null ? [startup_probe.value["http_get"]] : []
+              content {
+                path = http_get.value["path"]
+                port = http_get.value["port"]
+                dynamic "http_headers" {
+                  for_each = http_get.value["http_headers"]
+                  iterator = http_header
+                  content {
+                    name  = http_header.value["name"]
+                    value = http_header.value["value"]
+                  }
+                }
+              }
+            }
+            dynamic "tcp_socket" {
+              for_each = startup_probe.value["tcp_socket"] != null ? [startup_probe.value["tcp_socket"]] : []
+              content {
+                port = tcp_socket.value["port"]
+              }
+            }
+            dynamic "grpc" {
+              for_each = startup_probe.value["grpc"] != null ? [startup_probe.value["grpc"]] : []
+              content {
+                port    = grpc.value["port"]
+                service = grpc.value["service"]
+              }
+            }
           }
         }
       }
-
-      dynamic "volumes" {
-        for_each = var.secrets_volume
-        content {
-          name = volumes.value["name"]
-          secret {
-            secret_name = volumes.value["id"]
-            items {
-              key  = coalesce(volumes.value["version"], "latest")
-              path = volumes.value["name"]
+    }
+    dynamic "volumes" {
+      for_each = var.volumes
+      iterator = volume
+      content {
+        name = volume.value["name"]
+        dynamic "secret" {
+          for_each = volume.value["secret"] != null ? [volume.value["secret"]] : []
+          content {
+            secret       = secret.value["secret"]
+            default_mode = secret.value["default_mode"]
+            dynamic "items" {
+              for_each = length(secret.value["items"]) >= 1 ? secret.value["items"] : []
+              iterator = item
+              content {
+                path    = item.value["path"]
+                version = item.value["version"]
+                mode    = item.value["mode"]
+              }
             }
           }
         }
+        dynamic "cloud_sql_instance" {
+          for_each = volume.value["cloud_sql_instance"] != null ? [volume.value["cloud_sql_instance"]] : []
+          content {
+            instances = cloud_sql_instance.value["instances"]
+          }
+        }
+      }
+    }
+    dynamic "volumes" {
+      for_each = var.mount_cloudsql_instance != null ? [1] : []
+      iterator = instance
+      content {
+        name = "cloudsql"
+        cloud_sql_instance {
+          instances = [var.mount_cloudsql_instance]
+        }
       }
-
-      timeout_seconds = var.timeout_seconds
     }
   }
-
-  traffic {
-    percent         = 100
-    latest_revision = true
-  }
-
-  lifecycle {
-    ignore_changes = [
-      # Some common annotations which we don't care about.
-      template[0].metadata[0].annotations["client.knative.dev/user-image"],
-      template[0].metadata[0].annotations["run.googleapis.com/client-name"],
-      template[0].metadata[0].annotations["run.googleapis.com/client-version"],
-      template[0].metadata[0].annotations["run.googleapis.com/operation-id"],
-      template[0].metadata[0].labels["run.googleapis.com/startupProbeType"],
-      template[0].metadata[0].annotations["run.googleapis.com/sandbox"],
-      metadata[0].annotations["run.googleapis.com/client-name"],
-      metadata[0].annotations["run.googleapis.com/client-version"],
-      metadata[0].annotations["run.googleapis.com/operation-id"],
-      metadata[0].labels["run.googleapis.com/startupProbeType"],
-
-      # These are only changed when "run.googleapis.com/launch-stage" is "BETA".
-      # It's non-trivial to make ignore_changes dependent on input variables so
-      # we always ignore these annotations even if, strictly speaking, we only
-      # need to do so is var.enable_beta_launch_stage is true.
-      metadata[0].annotations["serving.knative.dev/creator"],
-      metadata[0].annotations["serving.knative.dev/lastModifier"],
-
-      # If the allowed ingress variable is specified, ignore feedback about
-      # its status. We cannot make the presence of this ignore be dependent on
-      # "allowed_ingress" since ignore_changes needs to be a static list.
-      metadata[0].annotations["run.googleapis.com/ingress-status"],
-    ]
+  dynamic "traffic" {
+    for_each = var.traffic
+    content {
+      type     = traffic.value["type"]
+      revision = traffic.value["revision"]
+      percent  = traffic.value["percent"]
+      tag      = traffic.value["tag"]
+    }
   }
-
   depends_on = [
-    google_secret_manager_secret_iam_member.secrets_access,
     null_resource.pre_deploy_job_trigger
   ]
-
-  # Google Beta provider is required for mounting secrets AToW
-  provider = google-beta
 }
 
 # Allow unauthenticated invocations for the webapp.
-resource "google_cloud_run_service_iam_member" "webapp_all_users_invoker" {
+resource "google_cloud_run_v2_service_iam_member" "webapp_all_users_invoker" {
   count = var.allow_unauthenticated_invocations ? 1 : 0
 
-  location = google_cloud_run_service.webapp.location
-  project  = google_cloud_run_service.webapp.project
-  service  = google_cloud_run_service.webapp.name
+  location = google_cloud_run_v2_service.webapp.location
+  project  = google_cloud_run_v2_service.webapp.project
+  name     = google_cloud_run_v2_service.webapp.name
   role     = "roles/run.invoker"
   member   = "allUsers"
 }
 
-# Domain mapping for default web-application. Only present if the domain is
-# verified. We use the custom DNS name of the webapp if provided but otherwise
-# the webapp is hosted at [SERVICE NAME].[PROJECT DNS ZONE]. We can't create
-# the domain mapping if the domain is *not* verified because Google won't let
-# us.
-resource "google_cloud_run_domain_mapping" "webapp" {
-  for_each = toset(var.ingress_style == "domain-mapping" ? local.dns_names : [])
-
-  location = var.cloud_run_region
-  name     = each.key
-
-  metadata {
-    # For managed Cloud Run, the namespace *must* be the project name.
-    namespace = var.project
-  }
-
-  spec {
-    route_name = google_cloud_run_service.webapp.name
-  }
-}
-
 module "uptime_monitoring" {
   for_each = local.monitor_hosts
 
-  source                      = "git::https://gitlab.developers.cam.ac.uk/uis/devops/infra/terraform/gcp-site-monitoring.git?ref=v3"
+  source  = "gitlab.developers.cam.ac.uk/uis/gcp-site-monitoring/devops"
+  version = "~> 3.2"
+
   host                        = each.value.host
   project                     = var.project
-  alert_email_addresses       = var.alerting_email_address != "" ? [var.alerting_email_address] : []
-  alert_notification_channels = var.alert_notification_channels
+  monitoring_scoping_project  = coalesce(var.monitoring_scoping_project, var.project)
+  alert_notification_channels = var.alerting_notification_channels
 
   uptime_check = {
     # Accept either e.g. "60s" or 60 for timeout and periods for compatibility
@@ -258,117 +279,156 @@ module "uptime_monitoring" {
     path                      = var.monitoring_path
     success_threshold_percent = var.alerting_success_threshold_percent
 
-    alert_enabled = var.alerting_enabled
+    alert_enabled = var.enable_alerting
   }
 
   tls_check = {
-    alert_enabled = var.alerting_enabled
+    alert_enabled = var.enable_alerting
   }
 
   # If required, configure the monitoring to use an authentication proxy, allowing
   # the monitoring checks to invoke the cloud run instance.
   authentication_proxy = {
-    enabled                   = each.value.enable_auth_proxy
-    cloud_run_project         = google_cloud_run_service.webapp.project
-    cloud_run_service_name    = google_cloud_run_service.webapp.name
-    cloud_run_region          = var.cloud_run_region
-    egress_connector          = each.value.enable_egress_connector ? local.auth_proxy_egress_connector : ""
-    egress_connector_settings = each.value.enable_egress_connector && local.auth_proxy_egress_connector != "" ? "ALL_TRAFFIC" : null
-  }
-
-  providers = {
-    google = google.stackdriver
+    enabled                     = each.value.enable_auth_proxy
+    cloud_run_project           = google_cloud_run_v2_service.webapp.project
+    cloud_run_service_name      = google_cloud_run_v2_service.webapp.name
+    cloud_run_region            = var.region
+    egress_connector            = each.value.enable_egress_connector && local.auth_proxy_vpc_access != null ? local.auth_proxy_vpc_access.connector : null
+    egress_connector_settings   = each.value.enable_egress_connector && local.auth_proxy_vpc_access != null ? local.auth_proxy_vpc_access.egress : null
+    source_bucket_force_destroy = true
   }
 }
 
-# This extracts information about any currently running Cloud Run revision before
-# starting the plan walk. This is current behaviour, but may change in future see
-# https://github.com/hashicorp/terraform/issues/17034.
-data "google_cloud_run_service" "webapp" {
-  name     = var.name
-  location = var.cloud_run_region
-}
-
-# Configure a Cloud Run Job which will be executed before the deployment of the google_cloud_run_service.webapp
+# Configure a Cloud Run Job which will be executed before the deployment of the google_cloud_run_v2_service.webapp
 # resource. This is primarily useful to run database migrations, however other use cases may exist.
 resource "google_cloud_run_v2_job" "pre_deploy" {
   count = var.enable_pre_deploy_job ? 1 : 0
 
-  name     = "${var.name}-pre-deploy"
-  location = var.cloud_run_region
-  project  = var.project
+  name         = "${var.name}-pre-deploy"
+  location     = var.region
+  project      = var.project
+  launch_stage = var.pre_deploy_job_launch_stage
 
   template {
+    labels      = var.pre_deploy_job_labels
+    annotations = var.pre_deploy_job_annotations
+    parallelism = var.pre_deploy_job_parallelism
+    task_count  = var.pre_deploy_job_task_count
     template {
-      service_account = google_service_account.webapp.email
+      service_account       = google_service_account.webapp.email
+      timeout               = var.pre_deploy_job_timeout
+      execution_environment = var.pre_deploy_job_execution_environment
+      encryption_key        = var.pre_deploy_job_encryption_key
+      max_retries           = var.pre_deploy_job_max_retries
 
-      dynamic "volumes" {
-        for_each = var.sql_instance_connection_name != "" ? [1] : []
-
-        content {
-          name = "cloudsql"
-          cloud_sql_instance {
-            instances = [var.sql_instance_connection_name]
+      containers {
+        name        = var.pre_deploy_job_container.name
+        image       = var.pre_deploy_job_container.image
+        command     = var.pre_deploy_job_container.command
+        args        = var.pre_deploy_job_container.args
+        working_dir = var.pre_deploy_job_container.working_dir
+        dynamic "env" {
+          for_each = var.pre_deploy_job_container.env
+          content {
+            name  = env.value["name"]
+            value = env.value["value"]
+            dynamic "value_source" {
+              for_each = env.value["value_source"] != null ? [env.value["value_source"]] : []
+              content {
+                dynamic "secret_key_ref" {
+                  for_each = value_source.value["secret_key_ref"] != null ? [value_source.value["secret_key_ref"]] : []
+                  content {
+                    secret  = secret_key_ref.value["secret"]
+                    version = secret_key_ref.value["version"]
+                  }
+                }
+              }
+            }
           }
         }
-      }
-      dynamic "volumes" {
-        for_each = var.secrets_volume
-
-        content {
-          name = volumes.value["name"]
-          secret {
-            secret = volumes.value["id"]
-            items {
-              version = coalesce(volumes.value["version"], "latest")
-              path    = volumes.value["name"]
-              mode    = 0
-            }
+        dynamic "resources" {
+          for_each = var.pre_deploy_job_container.resources != null ? [var.pre_deploy_job_container.resources] : []
+          iterator = resource
+          content {
+            limits = resource.value["limits"]
           }
         }
-      }
-
-      containers {
-        image   = local.pre_deploy_job_image_name
-        command = var.pre_deploy_job_command
-        args    = var.pre_deploy_job_args
-
-        dynamic "env" {
-          for_each = local.pre_deploy_job_environment_variables
-
+        dynamic "ports" {
+          for_each = var.pre_deploy_job_container.ports
+          iterator = port
           content {
-            name  = env.key
-            value = env.value
+            name           = port.value["name"]
+            container_port = port.value["container_port"]
           }
         }
-        dynamic "env" {
-          for_each = var.secrets_envars
-
+        dynamic "volume_mounts" {
+          for_each = var.pre_deploy_job_container.volume_mounts
+          iterator = volume_mount
           content {
-            name = env.value["name"]
-            value_source {
-              secret_key_ref {
-                secret  = env.value["id"]
-                version = coalesce(env.value["version"], "latest")
-              }
-            }
+            name       = volume_mount.value["name"]
+            mount_path = volume_mount.value["mount_path"]
           }
         }
-
         dynamic "volume_mounts" {
-          for_each = var.sql_instance_connection_name != "" ? [1] : []
-
+          for_each = var.pre_deploy_job_mount_cloudsql_instance != null ? [1] : []
+          iterator = instance
           content {
             name       = "cloudsql"
             mount_path = "/cloudsql"
           }
         }
-        dynamic "volume_mounts" {
-          for_each = var.secrets_volume
-
-          content {
-            name       = volume_mounts.value["name"]
-            mount_path = volume_mounts.value["path"]
+      }
+      dynamic "volumes" {
+        for_each = var.pre_deploy_job_volumes
+        iterator = volume
+        content {
+          name = volume.value["name"]
+          dynamic "secret" {
+            for_each = volume.value["secret"] != null ? [volume.value["secret"]] : []
+            content {
+              secret       = secret.value["secret"]
+              default_mode = secret.value["default_mode"]
+              dynamic "items" {
+                for_each = secret.value["items"]
+                iterator = item
+                content {
+                  path    = item.value["path"]
+                  version = item.value["version"]
+                  mode    = item.value["mode"]
+                }
+              }
+            }
+          }
+          dynamic "cloud_sql_instance" {
+            for_each = volume.value["cloud_sql_instance"] != null ? [volume.value["cloud_sql_instance"]] : []
+            content {
+              instances = cloud_sql_instance.value["instances"]
+            }
+          }
+        }
+      }
+      dynamic "volumes" {
+        for_each = var.pre_deploy_job_mount_cloudsql_instance != null ? [1] : []
+        iterator = instance
+        content {
+          name = "cloudsql"
+          cloud_sql_instance {
+            instances = [var.pre_deploy_job_mount_cloudsql_instance]
+          }
+        }
+      }
+      dynamic "vpc_access" {
+        for_each = var.pre_deploy_job_vpc_access != null ? [var.pre_deploy_job_vpc_access] : []
+        content {
+          connector = vpc_access.value["connector"]
+          egress    = vpc_access.value["egress"]
+          dynamic "network_interfaces" {
+            for_each = vpc_access.value["network_interfaces"] != null ? [vpc_access.value["network_interfaces"]] : []
+            iterator = network_interface
+            content {
+              network    = network_interface.value["network"]
+              subnetwork = network_interface.value["subnetwork"]
+            }
           }
         }
       }
@@ -376,20 +436,20 @@ resource "google_cloud_run_v2_job" "pre_deploy" {
   }
 }
 
-# Trigger the pre-deploy job using the gcloud CLI whenever the var.image_name value changes.
+# Trigger the pre-deploy job using the gcloud CLI whenever the var.pre_deploy_job_container.image value changes.
 resource "null_resource" "pre_deploy_job_trigger" {
-  count = var.enable_pre_deploy_job && var.trigger_pre_deploy_job ? 1 : 0
+  count = var.enable_pre_deploy_job && var.pre_deploy_job_trigger ? 1 : 0
 
   triggers = merge({
-    image_name = var.image_name
-    }, var.force_pre_deploy_job ? {
+    image_name = var.pre_deploy_job_container.image
+    }, var.pre_deploy_job_force ? {
     timestamp = timestamp()
   } : {})
 
   provisioner "local-exec" {
     command = <<EOI
 gcloud --project ${var.project} run jobs execute \
-  --region ${var.cloud_run_region} --wait ${google_cloud_run_v2_job.pre_deploy[0].name}
+  --region ${var.region} --wait ${google_cloud_run_v2_job.pre_deploy[0].name}
 EOI
 
     environment = {
@@ -403,4 +463,3 @@ EOI
     google_cloud_run_v2_job.pre_deploy
   ]
 }
-
diff --git a/outputs.tf b/outputs.tf
index 30279816fc485375e946fe2f62ac3599cb7aee5e..3cce8b2b54bb5cb5bc2027897185f1c9ec06cbd4 100644
--- a/outputs.tf
+++ b/outputs.tf
@@ -1,8 +1,6 @@
-# outputs.tf defines outputs for the module.
-
 output "service" {
   description = "Webapp Cloud Run service resource"
-  value       = google_cloud_run_service.webapp
+  value       = google_cloud_run_v2_service.webapp
 }
 
 output "service_account" {
@@ -10,41 +8,20 @@ output "service_account" {
   value       = google_service_account.webapp
 }
 
-output "load_balancer" {
-  description = "Load balancer for the webapp"
-  value       = module.webapp_http_load_balancer
-}
-
-output "domain_mapping_present" {
-  description = "Flag indicating if a domain mapping is present for the webapp"
-  value       = local.domain_mapping_present
-}
-
-output "domain_mapping_resource_record" {
-  value       = try(local.dns_records[0], {})
-  description = <<EOI
-    Deprecated. Use dns_resource_records output instead.
-
-    Resource record for DNS hostnames. If a domain mapping or load balancing is configured
-    the following keys will be set: type and rrdata. If no mapping is configured, the
-    map will be empty.
-  EOI
+output "network_endpoint_group" {
+  description = "Network endpoint group for the load balancer."
+  value       = try(google_compute_region_network_endpoint_group.webapp[0], null)
 }
 
-output "dns_resource_records" {
-  value       = local.dns_records
-  description = <<EOI
-    List of DNS records for web application. Each element is an object with "type" and "rrdata"
-    keys.
-  EOI
+output "ssl_policy" {
+  description = "The ssl_policy object, if one is being created."
+  value       = try(google_compute_ssl_policy.default[0], null)
 }
 
-output "domain_mapping_dns_name" {
-  description = <<EOI
-DNS name (minus trailing dot) of webapp. Will be blank if no DNS name
-configured.
-EOI
-  value       = var.dns_name
+output "load_balancer" {
+  description = "Load balancer for the webapp"
+  value       = try(module.webapp_http_load_balancer[0], null)
+  sensitive   = true
 }
 
 output "static_egress_ip" {
@@ -52,5 +29,5 @@ output "static_egress_ip" {
 The static egress IP assigned to this cloud run instance. Only populated
 if the variable `enable_static_egress_ip` is true.
 EOI
-  value       = var.enable_static_egress_ip ? google_compute_address.static-ip[0].address : ""
+  value       = var.enable_static_egress_ip ? google_compute_address.static_ip[0].address : ""
 }
diff --git a/run_tests.sh b/run_tests.sh
new file mode 100755
index 0000000000000000000000000000000000000000..0bdc75a69bb3537cfb7b6cab36c8710782719421
--- /dev/null
+++ b/run_tests.sh
@@ -0,0 +1,99 @@
+#! /usr/bin/env bash
+
+set -e
+shopt -s expand_aliases
+
+usage() {
+    cat << EOF
+Usage:
+    ./run_tests.sh
+    ./run_tests.sh -t tests/default.tftest.hcl
+    ./run_tests.sh -t tests/default.tftest.hcl -t tests/cloudsql.tftest.hcl
+
+Options:
+    -c                      Run in CI/CD job mode meaning that terraform is executed directly, rather than via
+                            docker compose.
+    -t <test file path>     Execute specific test files only (all tests are executed by default).
+                            This option can be specified multiple times.
+    -v                      Add the verbose switch to the terraform test command.
+EOF
+}
+
+# The sed commands in this script must use gnu-sed on mac as the default sed on MacOS is a steaming pile of...
+if [[ "$(uname)" == "Darwin" ]]; then
+    if [ ! "$(command -v gsed)" ]; then
+        echo -e "This script requires gnu-sed on Mac OS. Install it with 'brew install gnu-sed'"
+        exit 1
+    fi
+    alias sed="gsed"
+fi
+
+cleanup() {
+    # Uncomment the prevent_destroy lifecycle argument if it has been commented out by the command further in the
+    # script.
+    sed -i 's/^    # prevent_destroy = true/    prevent_destroy = true/g' static_egress_ip.tf
+
+    # Remove the lockfile file as it is only necessary to enable the test run.
+    if [[ -f .terraform.lock.hcl ]]; then
+        rm .terraform.lock.hcl
+    fi
+
+    # Ensure the original versions.tf.json is restored.
+    if [[ -f versions.tf.json.bak ]]; then
+        mv versions.tf.json.bak versions.tf.json
+    fi
+}
+
+trap 'cleanup' EXIT INT TERM
+
+while getopts 'cht:v' option; do
+    case $option in
+        c) ci_run=1;;
+        h) usage; exit;;
+        t) tests+=("$OPTARG");;
+        v) verbose=1;;
+        *) usage; exit 1;;
+    esac
+done
+shift $((OPTIND-1))
+
+if [[ ${#tests[@]} -gt 0 && -n $all_tests ]]; then
+    echo -e "Error: Cannot specify both -a and -t together.\n"
+    usage
+    exit 1
+fi
+
+test_args=("--var-file=tests/tests.tfvars")
+
+if [[ "${#tests[@]}" -gt 0 ]]; then
+    for test in "${tests[@]}"; do
+        test_args+=("--filter=$test")
+    done
+fi
+
+if [[ -n $verbose ]]; then
+    test_args+=(--verbose)
+fi
+
+# Comment out the prevent_destroy lifecycle argument otherwise the tests will fail as they cannot tear down the
+# resource.
+sed -i 's/^    prevent_destroy = true/    # prevent_destroy = true/g' static_egress_ip.tf
+
+# The terraform testing framework doesn't yet have a nice way to target specific versions of a provider. Therefore, we
+# are having to programmatically change the Google provider versions in the versions.tf.json file for the time being.
+if [[ -n $GOOGLE_PROVIDER_VERSION_CONSTRAINT ]]; then
+    mv versions.tf.json versions.tf.json.bak
+    jq ".terraform.required_providers.google.version |= \"$GOOGLE_PROVIDER_VERSION_CONSTRAINT\"" versions.tf.json.bak \
+        > versions.tf.json
+fi
+
+if [[ -n $ci_run ]]; then
+    terraform init
+    terraform test "${test_args[@]}"
+else
+    docker compose run --rm test "$(cat << EOF
+terraform init
+terraform test ${test_args[@]}
+EOF
+    )"
+fi
diff --git a/secrets.tf b/secrets.tf
deleted file mode 100644
index bf1da4fa796bb1bad7e0ef4c5ed5ab72d6f5450f..0000000000000000000000000000000000000000
--- a/secrets.tf
+++ /dev/null
@@ -1,22 +0,0 @@
-# secrets.tf 
-# The Cloud Run service accounts need access to the Secret Manager secrets
-# to be able to mount them as volumes or envars.
-
-locals {
-  # List containing Secret Manager secrets IDs used in `var.secrets_volume`
-  # and `var.secrets_envars`
-  secret_ids = toset([
-    for secret in concat(var.secrets_volume, var.secrets_envars) : secret.id
-  ])
-}
-
-# Grant the Cloud Run service account "secretmanager.secretAccessor" role
-# to be able to access Secret Manager secrets to be mounted as volumes 
-# or environment variables.
-resource "google_secret_manager_secret_iam_member" "secrets_access" {
-  for_each  = local.secret_ids
-  project   = var.project
-  secret_id = each.key
-  role      = "roles/secretmanager.secretAccessor"
-  member    = "serviceAccount:${google_service_account.webapp.email}"
-}
diff --git a/static_egress_ip.tf b/static_egress_ip.tf
index b3538218edef82ea869b153d248865d4341cf170..a150db50ba825db32a8d43c97028debc9a6a5a7e 100644
--- a/static_egress_ip.tf
+++ b/static_egress_ip.tf
@@ -1,46 +1,48 @@
-# Create a subnet, vpc access connector, router and nat to allow a cloud run instance
-# to be assigned a static ip.
-# The VPC connector is assigned to the cloud run's annotations.
-# This follows the steps detailed here: https://cloud.google.com/run/docs/configuring/static-outbound-ip
+# Create a subnet, vpc access connector, router, and nat to allow a cloud run instance to be assigned a static ip. The
+# VPC connector is assigned to the cloud run's annotations. This follows the steps detailed here:
+# https://cloud.google.com/run/docs/configuring/static-outbound-ip
 
-resource "google_compute_subnetwork" "isolated-subnet" {
-  count         = var.enable_static_egress_ip ? 1 : 0
-  name          = "${var.name}-isolated-subnet-for-static-ip"
-  ip_cidr_range = var.static_egress_ip_cidr_range
-  network       = "default"
-  region        = var.cloud_run_region
+# trivy:ignore:AVD-GCP-0029
+# kics-scan disable=40430747-442d-450a-a34f-dc57149f4609
+resource "google_compute_subnetwork" "vpc_connector" {
+  count = local.create_vpc_connector ? 1 : 0
+
+  name                     = "${var.name}-vpc-connector"
+  project                  = var.project
+  ip_cidr_range            = var.static_egress_ip_cidr_range
+  network                  = "default"
+  region                   = var.region
+  private_ip_google_access = true
 }
 
-resource "google_vpc_access_connector" "static-ip-connector" {
-  count = var.enable_static_egress_ip ? 1 : 0
-  name  = "${var.name}-connector"
-  subnet {
-    name = google_compute_subnetwork.isolated-subnet[0].name
-  }
-  region = var.cloud_run_region
+resource "google_vpc_access_connector" "main" {
+  count = local.create_vpc_connector ? 1 : 0
 
+  name           = "${var.name}-conn"
+  project        = var.project
+  region         = var.region
   max_throughput = var.vpc_access_connector_max_throughput
 
-  lifecycle {
-    ignore_changes = [
-      network,
-    ]
+  subnet {
+    name = google_compute_subnetwork.vpc_connector[0].name
   }
-
-  provider = google-beta
 }
 
-resource "google_compute_router" "static-ip-router" {
-  count   = var.enable_static_egress_ip ? 1 : 0
+resource "google_compute_router" "static_ip" {
+  count = var.enable_static_egress_ip ? 1 : 0
+
   name    = "${var.name}-ip-router"
+  project = var.project
   network = "default"
-  region  = var.cloud_run_region
+  region  = var.region
 }
 
-resource "google_compute_address" "static-ip" {
-  count  = var.enable_static_egress_ip ? 1 : 0
-  name   = "${var.name}-static-ip"
-  region = var.cloud_run_region
+resource "google_compute_address" "static_ip" {
+  count = var.enable_static_egress_ip ? 1 : 0
+
+  name    = "${var.name}-static-ip"
+  project = var.project
+  region  = var.region
 
   # We do not expect that static IPs should be regularly removed and recreated,
   # as it's likely that they will be used within firewall configuration outside
@@ -51,18 +53,43 @@ resource "google_compute_address" "static-ip" {
   }
 }
 
-resource "google_compute_router_nat" "static-ip-nat" {
-  count                  = var.enable_static_egress_ip ? 1 : 0
-  name                   = "${var.name}-static-ip-nat"
-  router                 = google_compute_router.static-ip-router[0].name
-  nat_ips                = [google_compute_address.static-ip[0].self_link]
-  nat_ip_allocate_option = "MANUAL_ONLY"
-  region                 = var.cloud_run_region
-  min_ports_per_vm       = var.min_ports_per_vm
+resource "google_compute_router_nat" "static_ip" {
+  count = var.enable_static_egress_ip ? 1 : 0
 
+  name                               = "${var.name}-static-ip-nat"
+  project                            = var.project
+  router                             = google_compute_router.static_ip[0].name
+  nat_ips                            = [google_compute_address.static_ip[0].self_link]
+  nat_ip_allocate_option             = "MANUAL_ONLY"
+  region                             = var.region
+  min_ports_per_vm                   = var.min_ports_per_vm
   source_subnetwork_ip_ranges_to_nat = "LIST_OF_SUBNETWORKS"
+
   subnetwork {
-    name                    = google_compute_subnetwork.isolated-subnet[0].id
+    name = (
+      local.create_vpc_connector ? google_compute_subnetwork.vpc_connector[0].id : var.static_egress_ip_subnetwork_id
+    )
     source_ip_ranges_to_nat = ["ALL_IP_RANGES"]
   }
-}
\ No newline at end of file
+}
+
+moved {
+  from = google_compute_subnetwork.isolated-subnet
+  to   = google_compute_subnetwork.vpc_connector
+}
+moved {
+  from = google_vpc_access_connector.static-ip-connector
+  to   = google_vpc_access_connector.main
+}
+moved {
+  from = google_compute_router.static-ip-router
+  to   = google_compute_router.static_ip
+}
+moved {
+  from = google_compute_address.static-ip
+  to   = google_compute_address.static_ip
+}
+moved {
+  from = google_compute_router_nat.static-ip-nat
+  to   = google_compute_router_nat.static_ip
+}
diff --git a/tests/cleanup.sh b/tests/cleanup.sh
new file mode 100755
index 0000000000000000000000000000000000000000..a7ba70850ba12f1868a6eccf4c3f5e88d9e74c4b
--- /dev/null
+++ b/tests/cleanup.sh
@@ -0,0 +1,221 @@
+#! /usr/bin/env bash
+
+set -e
+
+current_verbosity=$(gcloud config get core/verbosity)
+gcloud config set core/verbosity error
+
+cleanup() {
+    gcloud config unset auth/impersonate_service_account
+    gcloud config set core/verbosity "$current_verbosity"
+}
+
+trap 'cleanup' EXIT INT TERM
+
+TEST_PREFIX="test-rapp"
+GCP_PROJECT="infra-testing-int-e2395220"
+GCP_PROJECT_META="infra-testing-meta-21f09a44"
+GCP_REGION="europe-west2"
+GCP_SERVICE_ACCOUNT="terraform-deploy@infra-testing-int-e2395220.iam.gserviceaccount.com"
+
+gcloud config set auth/impersonate_service_account "$GCP_SERVICE_ACCOUNT"
+
+echo "Cleaning up Cloud Run services..."
+mapfile -t services < <(
+    gcloud --project="$GCP_PROJECT" run services --region="$GCP_REGION" list \
+        --filter="metadata.name ~ ${TEST_PREFIX}.*" --format="value(metadata.name)"
+)
+
+for service in "${services[@]}"; do
+    echo "Removing Cloud Run service '${service}'"
+    gcloud --project="$GCP_PROJECT" run services --region="$GCP_REGION" delete "$service" --quiet
+done
+
+echo "Cleaning up IAM service accounts..."
+mapfile -t service_accounts < <(
+    gcloud --project="$GCP_PROJECT" iam service-accounts list \
+        --filter="email ~ ${TEST_PREFIX}[0-9a-fA-F]+?-run|${TEST_PREFIX}[0-9a-fA-F]+?-uptime" \
+        --format="value(email)"
+)
+
+for account in "${service_accounts[@]}"; do
+    gcloud --project="$GCP_PROJECT" iam service-accounts delete "$account" --quiet
+done
+
+echo "Cleaning up Cloud Functions..."
+mapfile -t functions < <(
+    gcloud --project="$GCP_PROJECT" functions list \
+        --filter="name ~ .*${TEST_PREFIX}.*" --format="value(name)"
+)
+
+for function in "${functions[@]}"; do
+    gcloud --project="$GCP_PROJECT" functions delete --region="$GCP_REGION" "$function" --quiet
+done
+
+echo "Cleaning up Cloud Storage buckets..."
+mapfile -t buckets < <(
+    gcloud --project="$GCP_PROJECT" storage buckets list \
+        --filter="name ~ ${TEST_PREFIX::8}-uptime" --format="value(storage_url)"
+)
+
+for bucket in "${buckets[@]}"; do
+    gcloud --project="$GCP_PROJECT" storage rm -r "$bucket" --quiet
+done
+
+echo "Cleaning up Cloud Monitoring resources..."
+mapfile -t alert_policies < <(
+    gcloud alpha --project="$GCP_PROJECT" monitoring policies list \
+        --filter="displayName ~ Uptime\scheck\sfor\s${TEST_PREFIX}[0-9a-fA-F]+?-.*|SSL\sexpiry\scheck\sfor\s${TEST_PREFIX}[0-9a-fA-F]+?-.*" \
+        --format="value(name)"
+)
+
+for policy in "${alert_policies[@]}"; do
+    gcloud alpha monitoring policies delete "$policy" --quiet
+done
+
+mapfile -t alert_policies_meta < <(
+    gcloud alpha --project="$GCP_PROJECT_META" monitoring policies list \
+        --filter="displayName ~ Uptime\scheck\sfor\s${TEST_PREFIX}[0-9a-fA-F]+?-.*|SSL\sexpiry\scheck\sfor\s${TEST_PREFIX}[0-9a-fA-F]+?-.*" \
+        --format="value(name)"
+)
+
+for policy_meta in "${alert_policies_meta[@]}"; do
+    gcloud alpha monitoring policies delete "$policy_meta" --quiet
+done
+
+mapfile -t uptime_checks < <(
+    gcloud --project="$GCP_PROJECT" monitoring uptime list-configs \
+        --filter="displayName ~ ${TEST_PREFIX}[0-9a-fA-F]+?-.*" --format="value(name)"
+)
+
+for check in "${uptime_checks[@]}"; do
+    gcloud monitoring uptime delete "$check" --quiet
+done
+
+echo "Cleaning up Cloud Run jobs..."
+mapfile -t jobs < <(
+    gcloud --project="$GCP_PROJECT" run jobs list \
+        --filter="metadata.name ~ ${TEST_PREFIX}.*" --format="value(metadata.name)"
+)
+
+for job in "${jobs[@]}"; do
+    gcloud --project="$GCP_PROJECT" run jobs --region="$GCP_REGION" delete "$job" --quiet
+done
+
+echo "Cleaning up load balancer resources..."
+mapfile -t http_proxies < <(
+    gcloud --project="$GCP_PROJECT" compute target-http-proxies list \
+        --filter="name ~ ${TEST_PREFIX}.*" --format="value(name)"
+)
+
+for http_proxy in "${http_proxies[@]}"; do
+    gcloud --project="$GCP_PROJECT" compute target-http-proxies delete "$http_proxy" --global --quiet
+done
+
+mapfile -t https_proxies < <(
+    gcloud --project="$GCP_PROJECT" compute target-https-proxies list \
+        --filter="name ~ ${TEST_PREFIX}.*" --format="value(name)"
+)
+
+for https_proxy in "${https_proxies[@]}"; do
+    gcloud --project="$GCP_PROJECT" compute target-https-proxies delete "$https_proxy" --global --quiet
+done
+
+mapfile -t ssl_certs < <(
+    gcloud --project="$GCP_PROJECT" compute ssl-certificates list \
+        --filter="name ~ ${TEST_PREFIX}.*" --format="value(name)"
+)
+
+for cert in "${ssl_certs[@]}"; do
+    gcloud --project="$GCP_PROJECT" compute ssl-certificates delete "$cert" --global --quiet
+done
+
+mapfile -t url_maps < <(
+    gcloud --project="$GCP_PROJECT" compute url-maps list \
+        --filter="name ~ ${TEST_PREFIX}.*" --format="value(name)"
+)
+
+for url_map in "${url_maps[@]}"; do
+    gcloud --project="$GCP_PROJECT" compute url-maps delete "$url_map" --global --quiet
+done
+
+mapfile -t backend_services < <(
+    gcloud --project="$GCP_PROJECT" compute backend-services list \
+        --filter="name ~ ${TEST_PREFIX}.*" --format="value(name)"
+)
+
+for service in "${backend_services[@]}"; do
+    gcloud --project="$GCP_PROJECT" compute backend-services delete "$service" --global --quiet
+done
+
+mapfile -t serverless_negs < <(
+    gcloud --project="$GCP_PROJECT" compute network-endpoint-groups list \
+        --filter="name ~ ${TEST_PREFIX}.*" --format="value(name)"
+)
+
+for neg in "${serverless_negs[@]}"; do
+    gcloud --project="$GCP_PROJECT" compute network-endpoint-groups delete "$neg" --region="$GCP_REGION" --quiet
+done
+
+mapfile -t ssl_policies < <(
+    gcloud --project="$GCP_PROJECT" compute ssl-policies list \
+        --filter="name ~ ${TEST_PREFIX}.*" --format="value(name)"
+)
+
+for ssl_policy in "${ssl_policies[@]}"; do
+    gcloud --project="$GCP_PROJECT" compute ssl-policies delete "$ssl_policy" --global --quiet
+done
+
+echo "Cleaning up network resources..."
+mapfile -t connectors < <(
+    gcloud --project="$GCP_PROJECT" compute networks vpc-access connectors list --region="$GCP_REGION" \
+        --filter="name ~ ${TEST_PREFIX}.*" --format="value(name)"
+)
+
+for conn in "${connectors[@]}"; do
+    gcloud --project="$GCP_PROJECT" compute networks vpc-access connectors delete "$conn" \
+        --region="$GCP_REGION" --quiet
+done
+
+mapfile -t routers < <(
+    gcloud --project="$GCP_PROJECT" compute routers list --filter="name ~ ${TEST_PREFIX}.*" --format="value(name)"
+)
+
+for router in "${routers[@]}"; do
+    gcloud --project="$GCP_PROJECT" compute routers delete "$router" --region="$GCP_REGION" --quiet
+done
+
+mapfile -t addresses < <(
+    gcloud --project="$GCP_PROJECT" compute addresses list --filter="name ~ ${TEST_PREFIX}.*" --format="value(name)" \
+        --global
+)
+
+for address in "${addresses[@]}"; do
+    gcloud --project="$GCP_PROJECT" compute addresses delete "$address" --global --quiet
+done
+
+mapfile -t subnets < <(
+    gcloud --project="$GCP_PROJECT" compute networks subnets list \
+        --filter="name ~ ${TEST_PREFIX}.*" --format="value(name)"
+)
+
+for subnet in "${subnets[@]}"; do
+    gcloud --project="$GCP_PROJECT" compute networks subnets delete "$subnet" --region="$GCP_REGION" --quiet
+done
+
+echo "Cleaning up test setup resources..."
+mapfile -t instances < <(
+    gcloud --project="$GCP_PROJECT" sql instances list --filter="name ~ ${TEST_PREFIX}.*" --format="value(name)"
+)
+
+for instance in "${instances[@]}"; do
+    gcloud --project="$GCP_PROJECT" sql instances delete "$instance" --quiet
+done
+
+mapfile -t secrets < <(
+    gcloud --project="$GCP_PROJECT" secrets list --filter="name ~ ${TEST_PREFIX}.*" --format="value(name)"
+)
+
+for secret in "${secrets[@]}"; do
+    gcloud --project="$GCP_PROJECT" secrets delete "$secret" --quiet
+done
diff --git a/tests/cloud_run_service.tftest.hcl b/tests/cloud_run_service.tftest.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..5cab38f235bd360dbb406590599007ddf72742c4
--- /dev/null
+++ b/tests/cloud_run_service.tftest.hcl
@@ -0,0 +1,320 @@
+run "setup" {
+  variables {
+    create_test_secrets = true
+  }
+
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_service_with_default_variable_values" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.ingress == "INGRESS_TRAFFIC_ALL"
+    error_message = "Ingress should be 'INGRESS_TRAFFIC_ALL'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.launch_stage == "GA"
+    error_message = "Launch stage should be 'GA'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.location == "europe-west2"
+    error_message = "Location should be 'europe-west2'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].execution_environment == "EXECUTION_ENVIRONMENT_GEN1"
+    error_message = "Execution environment should be 'EXECUTION_ENVIRONMENT_GEN1'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].timeout == "300s"
+    error_message = "Timeout should be '300s'."
+  }
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.template[0].containers) == 1
+    error_message = "A single container block should be defined."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service_iam_member.webapp_all_users_invoker[0].role == "roles/run.invoker"
+    error_message = "google_cloud_run_v2_service_iam_member.webapp_all_users_invoker.role should be 'roles/run.invoker'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service_iam_member.webapp_all_users_invoker[0].member == "allUsers"
+    error_message = "google_cloud_run_v2_service_iam_member.webapp_all_users_invoker.member should be 'allUsers'."
+  }
+  assert {
+    condition     = google_service_account.webapp.name != null
+    error_message = "A dedicated service account should be created for the Cloud Run service."
+  }
+}
+
+run "test_container_resources_block" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+        resources = {
+          startup_cpu_boost = true
+          cpu_idle          = true
+          limits = {
+            cpu = 2
+          }
+        }
+      }
+    }
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].resources[0].startup_cpu_boost == true
+    error_message = "startup_cpu_boost should be 'true'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].resources[0].cpu_idle == true
+    error_message = "cpu_idle should be 'true'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].resources[0].limits.cpu == "2"
+    error_message = "limits.cpu should be '2'."
+  }
+}
+
+run "setup_secret_iam" {
+  variables {
+    create_test_secrets     = true
+    create_test_secrets_iam = true
+  }
+
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_env_vars_and_secrets" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+        env = [
+          {
+            name  = "HELLO",
+            value = "WORLD!"
+          },
+          {
+            name = "SECRET",
+            value_source = {
+              secret_key_ref = {
+                secret = run.setup.secret_ids[0]
+              }
+            }
+          }
+        ]
+        volume_mounts = [
+          {
+            name       = "secret-volume",
+            mount_path = "/secrets"
+          }
+        ]
+      }
+    }
+    volumes = [
+      {
+        name = "secret-volume",
+        secret = {
+          secret = run.setup.secret_ids[1]
+          items = [
+            {
+              version = "1",
+              path    = "my-secret"
+            }
+          ]
+        }
+      }
+    ]
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].env[0].name == "HELLO"
+    error_message = "A standard environment variable with the name 'HELLO' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].env[0].value == "WORLD!"
+    error_message = "A standard environment variable with the value 'WORLD!' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].env[1].name == "SECRET"
+    error_message = "A secret-backed environment variable with the name 'SECRET' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].env[1].value_source[0].secret_key_ref[0].secret == run.setup.secret_ids[0]
+    error_message = "A secret-backed environment variable referencing the 'run.setup.secret_ids[0]' secret should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].name == "secret-volume"
+    error_message = "A volume mount with the name 'secret-volume' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].mount_path == "/secrets"
+    error_message = "A volume mount with the mount path '/secrets' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].volumes[0].name == "secret-volume"
+    error_message = "A volume with the name 'secret-volume' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].volumes[0].secret[0].secret == run.setup.secret_ids[1]
+    error_message = "A secret-backed volume referencing the 'run.setup.secret_ids[1]' secret should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].volumes[0].secret[0].items[0].version == "1"
+    error_message = "Secret version should be '1'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].volumes[0].secret[0].items[0].path == "my-secret"
+    error_message = "Secret path should be 'my-secret'."
+  }
+}
+
+run "test_service_with_multiple_containers" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp1 = {
+        name  = "webapp-1"
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+        ports = [
+          {
+            container_port = 8080
+          }
+        ]
+      }
+      webapp2 = {
+        name  = "webapp-2"
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+  }
+
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.template[0].containers) == 2
+    error_message = "The Cloud Run service should have two containers defined."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].name == "webapp-1"
+    error_message = "The first container definition should be configured with the name 'webapp-1'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].ports[0].container_port == 8080
+    error_message = "The first container definition should be configured with a single container_port of 8080."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[1].name == "webapp-2"
+    error_message = "The second container definition should be configured with the name 'webapp-2'."
+  }
+}
+
+run "test_traffic_distribution_across_multiple_revisions_initial_deploy" {
+  variables {
+    name     = run.setup.random_name
+    revision = "${run.setup.random_name}-v1-0-0"
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    traffic = [
+      {
+        type     = "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"
+        revision = "${run.setup.random_name}-v1-0-0"
+        percent  = 100
+      }
+    ]
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].revision == "${run.setup.random_name}-v1-0-0"
+    error_message = "The service revision name should be '${run.setup.random_name}-v1-0-0'."
+  }
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.traffic) == 1
+    error_message = "There should be a single traffic block configured."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[0].type == "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"
+    error_message = "There should be a single traffic block with the type 'TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[0].revision == "${run.setup.random_name}-v1-0-0"
+    error_message = "There should be a single traffic block with the revision '${run.setup.random_name}-v1-0-0'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[0].percent == 100
+    error_message = "There should be a single traffic block with percent set to 100."
+  }
+}
+
+run "test_traffic_distribution_across_multiple_revisions_split_50_50" {
+  variables {
+    name     = run.setup.random_name
+    revision = "${run.setup.random_name}-v1-1-0"
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    traffic = [
+      {
+        type     = "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"
+        revision = "${run.setup.random_name}-v1-0-0"
+        percent  = 50
+      },
+      {
+        type     = "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"
+        revision = "${run.setup.random_name}-v1-1-0"
+        percent  = 50
+      }
+    ]
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].revision == "${run.setup.random_name}-v1-1-0"
+    error_message = "The service revision name should be '${run.setup.random_name}-v1-1-0'."
+  }
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.traffic) == 2
+    error_message = "There should be two traffic blocks configured."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[0].type == "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"
+    error_message = "The first traffic block should be configured with the type 'TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[0].revision == "${run.setup.random_name}-v1-0-0"
+    error_message = "The first traffic block should be configured with the revision '${run.setup.random_name}-v1-0-0'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[0].percent == 50
+    error_message = "The first traffic block should be configured with percent set to 50."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[1].type == "TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION"
+    error_message = "The second traffic block should be configured with the type 'TRAFFIC_TARGET_ALLOCATION_TYPE_REVISION'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[1].revision == "${run.setup.random_name}-v1-1-0"
+    error_message = "The second traffic block should be configured with the revision '${run.setup.random_name}-v1-1-0'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.traffic[1].percent == 50
+    error_message = "The second traffic block should be configured with percent set to 50."
+  }
+}
diff --git a/tests/cloudsql.tftest.hcl b/tests/cloudsql.tftest.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..8cede82a3e327e86967393d990cdead49b33a7cd
--- /dev/null
+++ b/tests/cloudsql.tftest.hcl
@@ -0,0 +1,229 @@
+run "setup" {
+  variables {
+    create_test_sql_instances = true
+  }
+
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_cloudsql_mount_using_helper_variable" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    mount_cloudsql_instance = run.setup.instance_connection_names[0]
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].volumes[0].name == "cloudsql"
+    error_message = "A single volume should be configured named 'cloudsql'."
+  }
+  assert {
+    condition = [
+      for instance in google_cloud_run_v2_service.webapp.template[0].volumes[0].cloud_sql_instance[0].instances : instance
+    ][0] == run.setup.instance_connection_names[0]
+    error_message = "The cloudsql volume should be configured to use the run.setup.instance_connection_names[0] instance connection name."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].name == "cloudsql"
+    error_message = "A single volume mount should be configured with the name 'cloudsql'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].mount_path == "/cloudsql"
+    error_message = "A single volume mount should be configured with the mount path '/cloudsql'."
+  }
+}
+
+run "test_cloudsql_mount_to_single_container" {
+  variables {
+    name = run.setup.random_name
+    volumes = [
+      {
+        name = "cloudsql"
+        cloud_sql_instance = {
+          instances = [run.setup.instance_connection_names[0]]
+        }
+      }
+    ]
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+        volume_mounts = [
+          {
+            name       = "cloudsql"
+            mount_path = "/cloudsql"
+          }
+        ]
+      }
+    }
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].volumes[0].name == "cloudsql"
+    error_message = "A single volume should be configured named 'cloudsql'."
+  }
+  assert {
+    condition = (
+      [for instance in google_cloud_run_v2_service.webapp.template[0].volumes[0].cloud_sql_instance[0].instances : instance][0]
+      == run.setup.instance_connection_names[0]
+    )
+    error_message = "The cloudsql volume should be configured to use the run.setup.instance_connection_names[0] instance connection name."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].name == "cloudsql"
+    error_message = "A single volume mount should be configured with the name 'cloudsql'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].mount_path == "/cloudsql"
+    error_message = "A single volume mount should be configured with the mount path '/cloudsql'."
+  }
+}
+
+run "test_cloudsql_mount_multiple_cloudsql_instances" {
+  variables {
+    name = run.setup.random_name
+    volumes = [
+      {
+        name = "cloudsql"
+        cloud_sql_instance = {
+          instances = run.setup.instance_connection_names
+        }
+      }
+    ]
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+        volume_mounts = [
+          {
+            name       = "cloudsql"
+            mount_path = "/cloudsql"
+          }
+        ]
+      }
+    }
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].volumes[0].name == "cloudsql"
+    error_message = "A single volume should be configured named 'cloudsql'."
+  }
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.template[0].volumes[0].cloud_sql_instance[0].instances) == 2
+    error_message = "The cloudsql volume should be configured with exactly 2 instances."
+  }
+  assert {
+    condition = (
+      contains(
+        google_cloud_run_v2_service.webapp.template[0].volumes[0].cloud_sql_instance[0].instances,
+        run.setup.instance_connection_names[0]
+      )
+      && contains(
+        google_cloud_run_v2_service.webapp.template[0].volumes[0].cloud_sql_instance[0].instances,
+        run.setup.instance_connection_names[1]
+      )
+    )
+    error_message = "The cloudsql volume should be configured with the two instance connection names defined in var.volumes."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].name == "cloudsql"
+    error_message = "A single volume mount should be configured with the name 'cloudsql'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].containers[0].volume_mounts[0].mount_path == "/cloudsql"
+    error_message = "A single volume mount should be configured with the mount path '/cloudsql'."
+  }
+}
+
+run "test_cloudsql_mount_to_pre_deploy_job_using_helper_variable" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_pre_deploy_job = true
+    pre_deploy_job_container = {
+      image   = "golang:latest"
+      command = ["go"]
+      args    = ["version"]
+    }
+    pre_deploy_job_mount_cloudsql_instance = run.setup.instance_connection_names[0]
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].name == "cloudsql"
+    error_message = "A single volume should be configured named 'cloudsql'."
+  }
+  assert {
+    condition = (
+      [for instance in google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].cloud_sql_instance[0].instances : instance][0]
+      == run.setup.instance_connection_names[0]
+    )
+    error_message = "The cloudsql volume should be configured to use the run.setup.instance_connection_names[0] instance connection name."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].volume_mounts[0].name == "cloudsql"
+    error_message = "A single volume mount should be configured with the name 'cloudsql'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].volume_mounts[0].mount_path == "/cloudsql"
+    error_message = "A single volume mount should be configured with the mount path '/cloudsql'."
+  }
+}
+
+run "test_cloudsql_mount_to_pre_deploy_job" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_pre_deploy_job = true
+    pre_deploy_job_volumes = [
+      {
+        name = "cloudsql"
+        cloud_sql_instance = {
+          instances = [run.setup.instance_connection_names[0]]
+        }
+      }
+    ]
+    pre_deploy_job_container = {
+      image   = "golang:latest"
+      command = ["go"]
+      args    = ["version"]
+      volume_mounts = [
+        {
+          name       = "cloudsql"
+          mount_path = "/cloudsql"
+        }
+      ]
+    }
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].name == "cloudsql"
+    error_message = "A single volume should be configured named 'cloudsql'."
+  }
+  assert {
+    condition = (
+      [for instance in google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].cloud_sql_instance[0].instances : instance][0]
+      == run.setup.instance_connection_names[0]
+    )
+    error_message = "The cloudsql volume should be configured to use the run.setup.instance_connection_names[0] instance connection name."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].volume_mounts[0].name == "cloudsql"
+    error_message = "A single volume mount should be configured with the name 'cloudsql'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].volume_mounts[0].mount_path == "/cloudsql"
+    error_message = "A single volume mount should be configured with the mount path '/cloudsql'."
+  }
+}
diff --git a/tests/load_balancer.tftest.hcl b/tests/load_balancer.tftest.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..ed47d2f59ce53ea6c089b64f969ceb45ebd585af
--- /dev/null
+++ b/tests/load_balancer.tftest.hcl
@@ -0,0 +1,103 @@
+run "setup" {
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_service_with_default_variable_values_and_load_balancer_enabled" {
+  variables {
+    name                 = run.setup.random_name
+    enable_load_balancer = true
+    dns_names = {
+      webapp = "${run.setup.random_name}.test.example.gcp.uis.cam.ac.uk"
+    }
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.ingress == "INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER"
+    error_message = "Ingress should be 'INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.launch_stage == "GA"
+    error_message = "Launch stage should be 'GA'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.location == "europe-west2"
+    error_message = "Location should be 'europe-west2'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].execution_environment == "EXECUTION_ENVIRONMENT_GEN1"
+    error_message = "Execution environment should be 'EXECUTION_ENVIRONMENT_GEN1'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].timeout == "300s"
+    error_message = "Timeout should be '300s'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service_iam_member.webapp_all_users_invoker[0].role == "roles/run.invoker"
+    error_message = "google_cloud_run_v2_service_iam_member.webapp_all_users_invoker.role should be 'roles/run.invoker'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service_iam_member.webapp_all_users_invoker[0].member == "allUsers"
+    error_message = "google_cloud_run_v2_service_iam_member.webapp_all_users_invoker.member should be 'allUsers'."
+  }
+  assert {
+    condition     = google_service_account.webapp.name != null
+    error_message = "A dedicated service account should be created for the Cloud Run service."
+  }
+  assert {
+    condition     = google_compute_region_network_endpoint_group.webapp[0].network_endpoint_type == "SERVERLESS"
+    error_message = "Network endpoint group type should be 'SERVERLESS'."
+  }
+  assert {
+    condition     = google_compute_region_network_endpoint_group.webapp[0].cloud_run[0].service == google_cloud_run_v2_service.webapp.name
+    error_message = "Network endpoint group must contain the created Cloud Run service in its definition."
+  }
+  assert {
+    condition     = google_compute_ssl_policy.default[0].min_tls_version == "TLS_1_2"
+    error_message = "Minimum TLS version should be 'TLS_1_2'."
+  }
+  assert {
+    condition     = google_compute_ssl_policy.default[0].profile == "MODERN"
+    error_message = "SSL profile should be 'MODERN'."
+  }
+  assert {
+    condition     = module.webapp_http_load_balancer[0].backend_services["default"].protocol == "HTTP"
+    error_message = "The default backend service protocol should be 'HTTP'."
+  }
+  assert {
+    condition     = module.webapp_http_load_balancer[0].backend_services["default"].enable_cdn == false
+    error_message = "The default backend service should not enable CDN."
+  }
+  assert {
+    condition     = module.webapp_http_load_balancer[0].backend_services["default"].log_config[0].enable == true
+    error_message = "The default backend service log config should be enabled."
+  }
+  assert {
+    condition     = module.webapp_http_load_balancer[0].backend_services["default"].log_config[0].sample_rate == 1.0
+    error_message = "The default backend service log sample rate should be '1.0'."
+  }
+  assert {
+    condition     = length(module.webapp_http_load_balancer[0].backend_services["default"].backend) == 1
+    error_message = "The default backend service should be configured with a single backend block."
+  }
+  assert {
+    condition = contains([
+      for backend in module.webapp_http_load_balancer[0].backend_services["default"].backend : backend.group
+    ], google_compute_region_network_endpoint_group.webapp[0].self_link)
+    error_message = "The default backend service group should be configured to use the created network_endpoint_group."
+  }
+  assert {
+    condition     = module.webapp_http_load_balancer[0].http_proxy != null
+    error_message = "A http proxy resource should be created by the load balancer module."
+  }
+  assert {
+    condition     = module.webapp_http_load_balancer[0].https_proxy != null
+    error_message = "A https proxy resource should be created by the load balancer module."
+  }
+}
diff --git a/tests/monitoring.tftest.hcl b/tests/monitoring.tftest.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..73a3219fb5c30b539cf6479427de6a74eaca096e
--- /dev/null
+++ b/tests/monitoring.tftest.hcl
@@ -0,0 +1,315 @@
+run "setup" {
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_monitoring_with_alert_policies_created_in_default_project" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_monitoring = true
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.ingress == "INGRESS_TRAFFIC_ALL"
+    error_message = "Ingress should be 'INGRESS_TRAFFIC_ALL'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].ssl_cert_expiry_alert_policy.enabled == true
+    error_message = "SSL cert expiry alert policy should be enabled."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].ssl_cert_expiry_alert_policy.project == var.project
+    error_message = "SSL cert expiry alert policy should be created in the project specified in var.project."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].uptime_check_alert_policy.enabled == true
+    error_message = "Uptime alert policy should be enabled."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].uptime_check_alert_policy.project == var.project
+    error_message = "Uptime alert policy should be created in the project specified in var.project."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.period == "300s"
+    error_message = "Uptime check should be configured with period == '300s'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.timeout == "30s"
+    error_message = "Uptime check should be configured with timeout == '30s'."
+  }
+  assert {
+    condition     = length(module.uptime_monitoring["default"].https_uptime_check_config.http_check) == 1
+    error_message = "A single HTTP check should be configured."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].path == "/"
+    error_message = "The HTTP check path should be '/'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].port == 443
+    error_message = "The HTTP check port should be 443."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].request_method == "GET"
+    error_message = "The HTTP check request method should be 'GET'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].use_ssl == true
+    error_message = "The HTTP check should be configured to use SSL."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].validate_ssl == true
+    error_message = "The HTTP check should be configured to validate SSL."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.monitored_resource[0].type == "uptime_url"
+    )
+    error_message = "The uptime check should be configured to monitor a URL resource."
+  }
+}
+
+run "test_monitoring_with_alert_policies_created_in_scoping_project" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_monitoring          = true
+    monitoring_scoping_project = "infra-testing-meta-21f09a44"
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.ingress == "INGRESS_TRAFFIC_ALL"
+    error_message = "Ingress should be 'INGRESS_TRAFFIC_ALL'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].ssl_cert_expiry_alert_policy.enabled == true
+    error_message = "SSL cert expiry alert policy should be enabled."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].ssl_cert_expiry_alert_policy.project == var.monitoring_scoping_project
+    )
+    error_message = (
+      "SSL cert expiry alert policy should be created in the project specified in var.monitoring_scoping_project."
+    )
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].uptime_check_alert_policy.enabled == true
+    error_message = "Uptime alert policy should be enabled."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].uptime_check_alert_policy.project == var.monitoring_scoping_project
+    )
+    error_message = "Uptime alert policy should be created in the project specified in var.monitoring_scoping_project."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.period == "300s"
+    error_message = "Uptime check should be configured with period == '300s'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.timeout == "30s"
+    error_message = "Uptime check should be configured with timeout == '30s'."
+  }
+  assert {
+    condition     = length(module.uptime_monitoring["default"].https_uptime_check_config.http_check) == 1
+    error_message = "A single HTTP check should be configured."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].path == "/"
+    error_message = "The HTTP check path should be '/'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].port == 443
+    error_message = "The HTTP check port should be 443."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].request_method == "GET"
+    error_message = "The HTTP check request method should be 'GET'."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].use_ssl == true
+    error_message = "The HTTP check should be configured to use SSL."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].validate_ssl == true
+    error_message = "The HTTP check should be configured to validate SSL."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.monitored_resource[0].type == "uptime_url"
+    )
+    error_message = "The uptime check should be configured to monitor a URL resource."
+  }
+}
+
+run "test_monitoring_with_auth_proxy" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_monitoring                 = true
+    allow_unauthenticated_invocations = false
+  }
+
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.monitored_resource[0].labels.host
+      == module.uptime_monitoring["default"].auth_proxy_host
+    )
+    error_message = "HTTP check host should be configured to use the authentication proxy function host."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].path
+      == module.uptime_monitoring["default"].auth_proxy_path
+    )
+    error_message = "HTTP check path should be configured to use the authentication proxy function path."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].auth_proxy_function.ingress_settings == "ALLOW_ALL"
+    error_message = "The authentication proxy function should be configured to allow all ingress."
+  }
+}
+
+run "test_monitoring_with_auth_proxy_and_vpc_access_connector" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_monitoring                 = true
+    allow_unauthenticated_invocations = false
+    ingress                           = "INGRESS_TRAFFIC_INTERNAL_ONLY"
+  }
+
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.monitored_resource[0].labels.host
+      == module.uptime_monitoring["default"].auth_proxy_host
+    )
+    error_message = "HTTP check host should be configured to use the authentication proxy function host."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].path
+      == module.uptime_monitoring["default"].auth_proxy_path
+    )
+    error_message = "HTTP check path should be configured to use the authentication proxy function path."
+  }
+  assert {
+    condition = (
+      google_compute_subnetwork.vpc_connector[0].network
+      == "https://www.googleapis.com/compute/v1/projects/${var.project}/global/networks/default"
+    )
+    error_message = "A subnet for the VPC connector should be created in the 'default' network."
+  }
+  assert {
+    condition     = google_vpc_access_connector.main[0].project == var.project
+    error_message = "A VPC connector should be created in the specified project."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].auth_proxy_function.ingress_settings == "ALLOW_ALL"
+    error_message = "The authentication proxy function should be configured to allow all ingress."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].auth_proxy_function.vpc_connector
+      == "projects/${var.project}/locations/${var.region}/connectors/${var.name}-conn"
+    )
+    error_message = "The authentication proxy function should be configured to use the created VPC connector."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].auth_proxy_function.vpc_connector_egress_settings == "ALL_TRAFFIC"
+    )
+    error_message = (
+      "The authentication proxy function should be configured to route all egress traffic via the VPC connector."
+    )
+  }
+}
+
+run "setup_vpc_connector" {
+  variables {
+    create_vpc_connector = true
+  }
+
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_monitoring_with_auth_proxy_and_an_existing_vpc_access_connector" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_monitoring                 = true
+    allow_unauthenticated_invocations = false
+    ingress                           = "INGRESS_TRAFFIC_INTERNAL_ONLY"
+    vpc_access = {
+      connector = run.setup_vpc_connector.vpc_connector_id
+      egress    = "ALL_TRAFFIC"
+    }
+  }
+
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.monitored_resource[0].labels.host
+      == module.uptime_monitoring["default"].auth_proxy_host
+    )
+    error_message = "HTTP check host should be configured to use the authentication proxy function host."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].https_uptime_check_config.http_check[0].path
+      == module.uptime_monitoring["default"].auth_proxy_path
+    )
+    error_message = "HTTP check path should be configured to use the authentication proxy function path."
+  }
+  assert {
+    condition     = try(google_compute_subnetwork.vpc_connector[0], null) == null
+    error_message = "A subnet should not be created."
+  }
+  assert {
+    condition     = try(google_vpc_access_connector.main[0], null) == null
+    error_message = "A VPC connector should not be created."
+  }
+  assert {
+    condition     = module.uptime_monitoring["default"].auth_proxy_function.ingress_settings == "ALLOW_ALL"
+    error_message = "The authentication proxy function should be configured to allow all ingress."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].auth_proxy_function.vpc_connector
+      == "projects/${var.project}/locations/${var.region}/connectors/${var.name}-setup"
+    )
+    error_message = "The authentication proxy function should be configured to use the provided VPC connector."
+  }
+  assert {
+    condition = (
+      module.uptime_monitoring["default"].auth_proxy_function.vpc_connector_egress_settings == "ALL_TRAFFIC"
+    )
+    error_message = (
+      "The authentication proxy function should be configured to route all egress traffic via the VPC connector."
+    )
+  }
+}
diff --git a/tests/pre_deploy_job.tftest.hcl b/tests/pre_deploy_job.tftest.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..92bca7f3315dc036b347385cde0d36c053ec7d91
--- /dev/null
+++ b/tests/pre_deploy_job.tftest.hcl
@@ -0,0 +1,129 @@
+run "setup" {
+  variables {
+    create_test_secrets = true
+  }
+
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_pre_deploy_job" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_pre_deploy_job = true
+    pre_deploy_job_container = {
+      image   = "golang:latest"
+      command = ["go"]
+      args    = ["version"]
+    }
+  }
+}
+
+run "setup_secret_iam" {
+  variables {
+    create_test_secrets     = true
+    create_test_secrets_iam = true
+  }
+
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_pre_deploy_job_env_vars_and_secrets" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_pre_deploy_job = true
+    pre_deploy_job_container = {
+      image   = "golang:latest"
+      command = ["go"]
+      args    = ["version"]
+      env = [
+        {
+          name  = "HELLO",
+          value = "WORLD!"
+        },
+        {
+          name = "SECRET",
+          value_source = {
+            secret_key_ref = {
+              secret = run.setup.secret_ids[0]
+            }
+          }
+        }
+      ]
+      volume_mounts = [
+        {
+          name       = "secret-volume",
+          mount_path = "/secrets"
+        }
+      ]
+    }
+    pre_deploy_job_volumes = [
+      {
+        name = "secret-volume",
+        secret = {
+          secret = run.setup.secret_ids[1]
+          items = [
+            {
+              version = "1",
+              path    = "my-secret"
+            }
+          ]
+        }
+      }
+    ]
+  }
+
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].env[0].name == "HELLO"
+    error_message = "A standard environment variable with the name 'HELLO' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].env[0].value == "WORLD!"
+    error_message = "A standard environment variable with the value 'WORLD!' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].env[1].name == "SECRET"
+    error_message = "A secret-backed environment variable with the name 'SECRET' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].env[1].value_source[0].secret_key_ref[0].secret == run.setup.secret_ids[0]
+    error_message = "A secret-backed environment variable referencing the 'run.setup.secret_ids[0]' secret should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].volume_mounts[0].name == "secret-volume"
+    error_message = "A volume mount with the name 'secret-volume' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].containers[0].volume_mounts[0].mount_path == "/secrets"
+    error_message = "A volume mount with the mount path '/secrets' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].name == "secret-volume"
+    error_message = "A volume with the name 'secret-volume' should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].secret[0].secret == run.setup.secret_ids[1]
+    error_message = "A secret-backed volume referencing the 'run.setup.secret_ids[1]' secret should be created."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].secret[0].items[0].version == "1"
+    error_message = "Secret version should be '1'."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].volumes[0].secret[0].items[0].path == "my-secret"
+    error_message = "Secret path should be 'my-secret'."
+  }
+}
diff --git a/tests/setup/main.tf b/tests/setup/main.tf
new file mode 100644
index 0000000000000000000000000000000000000000..11a5bc8657568b1b91940bf006722fb8f6caecdc
--- /dev/null
+++ b/tests/setup/main.tf
@@ -0,0 +1,76 @@
+# These are test resources which are destroyed after each test run. Therefore, we are disabling kics-scan on the whole
+# file.
+# kics-scan ignore
+
+resource "random_id" "name" {
+  byte_length = 2
+  # "rapp" represents Cloud Run App and is required to ensure any resources created by this repo's tests are easily
+  # identifiable by the cleanup.sh script. We only have 4 characters to play with given some of Google's naming
+  # restrictions.
+  prefix = "test-rapp"
+}
+
+module "sql" {
+  count = var.create_test_sql_instances ? 2 : 0
+
+  source                      = "GoogleCloudPlatform/sql-db/google//modules/postgresql"
+  version                     = "~> 17.0"
+  database_version            = "POSTGRES_15"
+  name                        = "${random_id.name.hex}-${count.index}"
+  project_id                  = var.project
+  tier                        = "db-f1-micro"
+  availability_type           = "ZONAL"
+  region                      = var.region
+  zone                        = "${var.region}-a"
+  deletion_protection         = false
+  deletion_protection_enabled = false
+}
+
+resource "google_secret_manager_secret" "test" {
+  count = var.create_test_secrets ? 2 : 0
+
+  secret_id = "${random_id.name.hex}-secret-version-${count.index}"
+  project   = var.project
+
+  replication {
+    auto {}
+  }
+}
+
+resource "google_secret_manager_secret_version" "test" {
+  count = var.create_test_secrets ? 2 : 0
+
+  secret      = google_secret_manager_secret.test[count.index].id
+  secret_data = "secret-data"
+}
+
+resource "google_secret_manager_secret_iam_member" "test" {
+  count = var.create_test_secrets_iam ? 2 : 0
+
+  project   = var.project
+  secret_id = google_secret_manager_secret.test[count.index].id
+  role      = "roles/secretmanager.secretAccessor"
+  member    = "serviceAccount:${random_id.name.hex}-run@${var.project}.iam.gserviceaccount.com"
+}
+
+resource "google_compute_subnetwork" "test" {
+  count = var.create_vpc_connector ? 1 : 0
+
+  name          = "${random_id.name.hex}-setup"
+  project       = var.project
+  ip_cidr_range = var.test_ip_cidr_range
+  network       = "default"
+  region        = var.region
+}
+
+resource "google_vpc_access_connector" "test" {
+  count = var.create_vpc_connector ? 1 : 0
+
+  name    = "${random_id.name.hex}-setup"
+  project = var.project
+  region  = var.region
+
+  subnet {
+    name = google_compute_subnetwork.test[0].name
+  }
+}
diff --git a/tests/setup/outputs.tf b/tests/setup/outputs.tf
new file mode 100644
index 0000000000000000000000000000000000000000..de4e19dfffbcdcf971a6db66c80b6b4d84c09689
--- /dev/null
+++ b/tests/setup/outputs.tf
@@ -0,0 +1,24 @@
+output "random_name" {
+  description = "A random name to use for the test run."
+  value       = random_id.name.hex
+}
+
+output "instance_connection_names" {
+  description = "A list of created SQL instance connection names."
+  value       = [for k, v in module.sql : v.instance_connection_name]
+}
+
+output "secret_ids" {
+  description = "A list of created secret object IDs."
+  value       = [for k, v in google_secret_manager_secret.test : v.id]
+}
+
+output "subnetwork_id" {
+  description = "The ID of the subnetwork created to test VPC access for the Cloud Run service."
+  value       = var.create_vpc_connector ? google_compute_subnetwork.test[0].id : null
+}
+
+output "vpc_connector_id" {
+  description = "The ID of the VPC Access Connector object created to test VPC access for the Cloud Run service."
+  value       = var.create_vpc_connector ? google_vpc_access_connector.test[0].id : null
+}
diff --git a/tests/setup/variables.tf b/tests/setup/variables.tf
new file mode 100644
index 0000000000000000000000000000000000000000..1cc1db59d328d1d249d870ed0f5c4a3c515a8e0b
--- /dev/null
+++ b/tests/setup/variables.tf
@@ -0,0 +1,41 @@
+variable "project" {
+  description = "The ID of a project to deploy the testing resources to."
+  type        = string
+}
+
+variable "region" {
+  description = "The region/location to deploy test resources to."
+  type        = string
+}
+
+variable "create_test_sql_instances" {
+  description = "If true, two SQL instances will be deployed for testing."
+  type        = bool
+  default     = false
+}
+
+variable "create_test_secrets" {
+  description = "If true, two secret objects will be deployed for testing."
+  type        = bool
+  default     = false
+}
+
+variable "create_test_secrets_iam" {
+  description = <<EOI
+If true, IAM bindings will be created to allow the Cloud Run service to access the test secret objects.
+EOI
+  type        = bool
+  default     = false
+}
+
+variable "create_vpc_connector" {
+  description = "If true, create a VPC Access Connector and associated subnet for testing."
+  type        = bool
+  default     = false
+}
+
+variable "test_ip_cidr_range" {
+  description = "CIDR range for the subnet which is created for the VPC Access Connector."
+  type        = string
+  default     = "10.0.0.16/28"
+}
diff --git a/tests/tests.tfvars b/tests/tests.tfvars
new file mode 100644
index 0000000000000000000000000000000000000000..f23df58d2f01daf729ab9a773cfe4ecd26cb0080
--- /dev/null
+++ b/tests/tests.tfvars
@@ -0,0 +1,2 @@
+project = "infra-testing-int-e2395220"
+region  = "europe-west2"
diff --git a/tests/vpc_access.tftest.hcl b/tests/vpc_access.tftest.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..b3214b4e6d483cca1a2566ef8a29f3380dd4551a
--- /dev/null
+++ b/tests/vpc_access.tftest.hcl
@@ -0,0 +1,181 @@
+run "setup" {
+  variables {
+    create_vpc_connector = true
+  }
+
+  module {
+    source = "./tests/setup"
+  }
+}
+
+run "test_service_vpc_access" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    vpc_access = {
+      connector = run.setup.vpc_connector_id
+      egress    = "PRIVATE_RANGES_ONLY"
+    }
+  }
+
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.template[0].vpc_access) == 1
+    error_message = "The Cloud Run service should be configured with a single VPC Access Connector."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].vpc_access[0].connector == run.setup.vpc_connector_id
+    error_message = "The Cloud Run service should be configured to use the run.setup.vpc_connector_id VPC Access Connector."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].vpc_access[0].egress == "PRIVATE_RANGES_ONLY"
+    error_message = "The Cloud Run service should be configured to route private ranges only to the VPC Access Connector."
+  }
+}
+
+run "test_pre_deploy_job_vpc_access" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_pre_deploy_job = true
+    pre_deploy_job_container = {
+      image   = "golang:latest"
+      command = ["go"]
+      args    = ["version"]
+    }
+    pre_deploy_job_vpc_access = {
+      connector = run.setup.vpc_connector_id
+      egress    = "PRIVATE_RANGES_ONLY"
+    }
+  }
+
+  assert {
+    condition     = length(google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].vpc_access) == 1
+    error_message = "The Cloud Run pre-deploy job should be configured with a single VPC Access Connector."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].vpc_access[0].connector == run.setup.vpc_connector_id
+    error_message = "The Cloud Run pre-deploy job should be configured to use the run.setup.vpc_connector_id VPC Access Connector."
+  }
+  assert {
+    condition     = google_cloud_run_v2_job.pre_deploy[0].template[0].template[0].vpc_access[0].egress == "PRIVATE_RANGES_ONLY"
+    error_message = "The Cloud Run pre-deploy job should be configured to route private ranges only to the VPC Access Connector."
+  }
+}
+
+run "test_static_egress_ip_config_with_existing_access_controller" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    vpc_access = {
+      connector = run.setup.vpc_connector_id
+    }
+    enable_static_egress_ip        = true
+    static_egress_ip_subnetwork_id = run.setup.subnetwork_id
+  }
+
+  assert {
+    condition     = length(google_compute_router.static_ip) == 1
+    error_message = "A single compute router resource should be created."
+  }
+  assert {
+    condition     = length(google_compute_address.static_ip) == 1
+    error_message = "A single compute address resource should be created."
+  }
+  assert {
+    condition     = length(google_compute_router_nat.static_ip) == 1
+    error_message = "A single compute router NAT resource should be created."
+  }
+  assert {
+    condition     = google_compute_router_nat.static_ip[0].router == google_compute_router.static_ip[0].name
+    error_message = "The NAT resource should be configured to use the google_compute_router.static_ip[0] router resource."
+  }
+  assert {
+    condition     = length(google_compute_router_nat.static_ip[0].subnetwork) == 1
+    error_message = "The NAT resource should be configured with a single subnetwork."
+  }
+  assert {
+    condition     = [for subnet in google_compute_router_nat.static_ip[0].subnetwork : subnet.name][0] == run.setup.subnetwork_id
+    error_message = "The NAT resource should be configured to use the run.setup.subnetwork_id subnetwork resource."
+  }
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.template[0].vpc_access) == 1
+    error_message = "The Cloud Run service should be configured with a single VPC Access Connector."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].vpc_access[0].connector == run.setup.vpc_connector_id
+    error_message = "The Cloud Run service should be configured to use the run.setup.vpc_connector_id VPC Access Connector."
+  }
+}
+
+run "test_static_egress_ip_config_with_defaults" {
+  variables {
+    name = run.setup.random_name
+    containers = {
+      webapp = {
+        image = "us-docker.pkg.dev/cloudrun/container/hello"
+      }
+    }
+    enable_static_egress_ip = true
+  }
+
+  assert {
+    condition     = length(google_compute_subnetwork.vpc_connector) == 1
+    error_message = "A single subnetwork should be created for the VPC Access Connector to utilise."
+  }
+  assert {
+    condition     = length(google_vpc_access_connector.main) == 1
+    error_message = "A single VPC Access Connector resource should be created."
+  }
+  assert {
+    condition     = google_vpc_access_connector.main[0].subnet[0].name == google_compute_subnetwork.vpc_connector[0].name
+    error_message = "The VPC Access Connector should be configured to use the google_compute_subnetwork.vpc_connector[0] subnetwork."
+  }
+  assert {
+    condition     = length(google_compute_router.static_ip) == 1
+    error_message = "A single compute router resource should be created."
+  }
+  assert {
+    condition     = length(google_compute_address.static_ip) == 1
+    error_message = "A single compute address resource should be created."
+  }
+  assert {
+    condition     = length(google_compute_router_nat.static_ip) == 1
+    error_message = "A single compute router NAT resource should be created."
+  }
+  assert {
+    condition     = google_compute_router_nat.static_ip[0].router == google_compute_router.static_ip[0].name
+    error_message = "The NAT resource should be configured to use the google_compute_router.static_ip[0] router resource."
+  }
+  assert {
+    condition     = length(google_compute_router_nat.static_ip[0].subnetwork) == 1
+    error_message = "The NAT resource should be configured with a single subnetwork."
+  }
+  assert {
+    condition     = [for subnet in google_compute_router_nat.static_ip[0].subnetwork : subnet.name][0] == google_compute_subnetwork.vpc_connector[0].id
+    error_message = "The NAT resource should be configured to use the google_compute_subnetwork.vpc_connector[0] subnetwork resource."
+  }
+  assert {
+    condition     = length(google_cloud_run_v2_service.webapp.template[0].vpc_access) == 1
+    error_message = "The Cloud Run service should be configured with a single VPC Access Connector."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].vpc_access[0].connector == google_vpc_access_connector.main[0].id
+    error_message = "The Cloud Run service should be configured to use the google_vpc_access_connector.main[0].id resource."
+  }
+  assert {
+    condition     = google_cloud_run_v2_service.webapp.template[0].vpc_access[0].egress == "ALL_TRAFFIC"
+    error_message = "The Cloud Run service should be configured to route all egress traffic to the VPC Access Connector."
+  }
+}
diff --git a/variables.tf b/variables.tf
index b570035e502a62ff26e0101057cfa9c6b9dd535d..698d8b3e3acafc63b8364456fa07c5cf66aa556f 100644
--- a/variables.tf
+++ b/variables.tf
@@ -1,189 +1,355 @@
-# variables.tf defines inputs for the module
+# Cloud Run service variables
+variable "name" {
+  description = "Prefix used to form resource names."
+  type        = string
+  default     = "webapp"
+
+  validation {
+    condition     = length(var.name) <= 18
+    error_message = "var.name cannot be greater than 18 characters."
+  }
+}
+
+variable "region" {
+  description = "Location used to create Cloud Run service and other resources."
+  type        = string
+}
+
+variable "description" {
+  description = "A description for the Cloud Run service."
+  type        = string
+  default     = null
+}
 
 variable "project" {
   description = "Project containing the webapp."
+  type        = string
 }
 
-variable "name" {
-  description = "Prefix used for form resource names"
-  default     = "webapp"
+variable "service_labels" {
+  description = <<EOI
+A set of key/value label pairs to assign to the Cloud Run service.
+EOI
+  type        = map(string)
+  default     = {}
 }
 
-variable "sql_instance_project" {
-  description = "Project containing SQL instance. Defaults to var.project."
-  default     = ""
+variable "service_annotations" {
+  description = <<EOI
+Map containing additional annotations to be added to the Cloud Run service
+itself.
+EOI
+  type        = map(string)
+  default     = {}
 }
 
-variable "sql_instance_connection_name" {
-  description = "SQL instance connection name"
-  default     = ""
+variable "ingress" {
+  description = <<EOI
+The ingress setting for the Cloud Run service. Possible values are
+INGRESS_TRAFFIC_ALL, INGRESS_TRAFFIC_INTERNAL_ONLY, and
+INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER.
+
+If var.enable_load_balancer == true, the provided var.ingress will be ignored and
+the ingress will be set automatically to
+"INGRESS_TRAFFIC_INTERNAL_LOAD_BALANCER".
+EOI
+  type        = string
+  default     = "INGRESS_TRAFFIC_ALL"
 }
 
-variable "enable_pre_deploy_job" {
+variable "launch_stage" {
   description = <<EOI
-Configure a Cloud Run Job to be executed *before* the main Cloud Run service is deployed. This is useful for running
-database migrations among other things.
+The launch stage for the Cloud Run service. Possible values are UNIMPLEMENTED,
+PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, and DEPRECATED.
 EOI
-  type        = bool
-  default     = false
+  type        = string
+  default     = "GA"
 }
 
-variable "trigger_pre_deploy_job" {
+# Template block variables
+variable "revision" {
   description = <<EOI
-When true, the pre-deploy Cloud Run job is executed via a null_resource-triggered gcloud command whenever Terraform
-detects that var.image_name has changed.
+The unique name for the revision. If this field is omitted, it will be
+automatically generated based on the Service name.
 EOI
-  type        = bool
-  default     = true
+  type        = string
+  default     = null
 }
 
-variable "force_pre_deploy_job" {
+variable "template_labels" {
   description = <<EOI
-When true, and only when used in addition to var.trigger_pre_deploy_job, the pre-deploy Cloud Run job is executed at
-every terraform apply, regardless of the status of var.image_name. This is sometimes useful for development
-environments where the "latest" tag is deployed, as without this the pre-deploy command would never run. For staging
-and production environments this should never be required as the var.image_name should change with each
-release/deployment of an application.
+A set of key/value label pairs to assign to the Cloud Run service revision.
 EOI
-  type        = bool
-  default     = false
+  type        = map(string)
+  default     = {}
+}
+
+variable "template_annotations" {
+  description = <<EOI
+Map containing additional annotations to be added to the Cloud Run service
+template.
+EOI
+  type        = map(string)
+  default     = {}
 }
 
-variable "pre_deploy_job_image_name" {
+variable "timeout_seconds" {
   description = <<EOI
-Specify the URL of a container image to use for the pre-deploy Cloud Run job. By default the var.image_name URL is used
-(see locals.tf).
+The maximum duration, in seconds, the instance is allowed for responding to a
+request. Maximum is 900s.
 EOI
   type        = string
-  default     = null
+  default     = "300s"
 }
 
-variable "pre_deploy_job_command" {
-  description = "The command to run in the pre-deploy Cloud Run job."
-  type        = list(string)
-  default     = null
+variable "execution_environment" {
+  description = <<EOI
+The sandbox environment to host this revision. Possible values are
+EXECUTION_ENVIRONMENT_GEN1, and EXECUTION_ENVIRONMENT_GEN2.
+EOI
+  type        = string
+  default     = "EXECUTION_ENVIRONMENT_GEN1"
 }
 
-variable "pre_deploy_job_args" {
-  description = "Arguments supplied to the command in the pre-deploy Cloud Run job."
-  type        = list(string)
+variable "encryption_key" {
+  description = <<EOI
+The ID of a customer managed encryption key (CMEK) to use to encrypt this
+container image.
+EOI
+  type        = string
   default     = null
 }
 
-variable "pre_deploy_job_environment_variables" {
+variable "max_instance_request_concurrency" {
   description = <<EOI
-Environment variables which can be set on the pre-deploy Cloud Run job. Map from name to value.
-By default the var.environment_variables is used
-(see locals.tf).
+Sets the maximum number of requests that each serving instance can receive.
 EOI
-  type        = map(string)
+  type        = number
   default     = null
 }
 
-variable "grant_sql_client_role_to_webapp_sa" {
+variable "session_affinity" {
   description = <<EOI
-    When set to true the 'roles/cloudsql.client' role will be granted to the
-    webapp service account at the project level to allow it to connect to Cloud SQL.
-  EOI
+Enables session affinity. For more information, go to
+https://cloud.google.com/run/docs/configuring/session-affinity.
+EOI
   type        = bool
-  default     = false
+  default     = null
 }
 
-variable "cloud_run_region" {
-  description = "Override region used to create Cloud Resources"
-  default     = ""
+variable "scaling" {
+  description = <<EOI
+The minimum number of auto-scaled instances defaults to 0, thus, the container
+will stop if it doesn't receive requests for a period of time and the
+following request will make the container start from cold. This should be
+carefully considered for containers that take a significant amount of time
+starting from cold.
+
+For a container with N-workers, the maximum number of auto-scaled instances
+should be less than 1/N of the maximum connection count for the Cloud SQL
+instance.
+EOI
+  type = object({
+    min_instance_count = optional(number)
+    max_instance_count = optional(number)
+  })
+  default = null
 }
 
-variable "environment_variables" {
-  description = "Environment variables which should be set on the service. Map from name to value."
-  default     = {}
+variable "vpc_access" {
+  description = <<EOI
+Configure VPC access for the Cloud Run service. For more information on these
+options see
+https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/cloud_run_v2_service#nested_vpc_access
+EOI
+  type = object({
+    connector = optional(string)
+    egress    = optional(string)
+    network_interfaces = optional(object({
+      network    = optional(string)
+      subnetwork = optional(string)
+      tags       = optional(string)
+    }))
+  })
+  default = null
 }
 
-variable "min_scale" {
+variable "containers" {
   description = <<EOI
-Minimum number of auto-scaled instances. Defaults to 0, thus, the container will stop
-if it doesn't receive requests for a period of time and the following request will make
-the container start from cold.
-This should be carefully considered for containers that take a significant amount of
-time starting from cold.
+Configure one or more container instances for the service. See
+https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/cloud_run_v2_service#nested_containers
+for information on the available arguments.
 EOI
-  default     = 0
+  type = map(object({
+    name    = optional(string)
+    image   = string
+    command = optional(list(string))
+    args    = optional(list(string))
+    env = optional(list(object({
+      name  = string
+      value = optional(string)
+      value_source = optional(object({
+        secret_key_ref = optional(object({
+          secret  = string
+          version = optional(string, "latest")
+        }))
+      }))
+    })), [])
+    resources = optional(object({
+      limits            = optional(map(string))
+      cpu_idle          = optional(bool)
+      startup_cpu_boost = optional(bool)
+    }))
+    ports = optional(list(object({
+      name           = optional(string)
+      container_port = optional(number)
+    })), [])
+    volume_mounts = optional(list(object({
+      name       = string
+      mount_path = string
+    })), [])
+    working_dir = optional(string)
+    liveness_probe = optional(object({
+      initial_delay_seconds = optional(number)
+      timeout_seconds       = optional(number)
+      period_seconds        = optional(number)
+      failure_threshold     = optional(number)
+      http_get = optional(object({
+        path = optional(string)
+        port = optional(number)
+        http_headers = optional(list(object({
+          name  = string
+          value = optional(string)
+        })), [])
+      }))
+      grpc = optional(object({
+        port    = optional(number)
+        service = optional(string)
+      }))
+    }))
+    startup_probe = optional(object({
+      initial_delay_seconds = optional(number)
+      timeout_seconds       = optional(number)
+      period_seconds        = optional(number)
+      failure_threshold     = optional(number)
+      http_get = optional(object({
+        path = optional(string)
+        port = optional(number)
+        http_headers = optional(list(object({
+          name  = string
+          value = optional(string)
+        })), [])
+      }))
+      tcp_socket = optional(object({
+        port = number
+      }))
+      grpc = optional(object({
+        port    = optional(number)
+        service = optional(string)
+      }))
+    }))
+  }))
 }
 
-variable "max_scale" {
-  description = "Maximum number of auto-scaled instances. For a container with N-workers, maxScale should be less than 1/N of the maximum connection count for the Cloud SQL instance."
+variable "volumes" {
+  description = <<EOI
+Configure one or more volumes for the service. See
+https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/cloud_run_v2_service#nested_volumes
+for more information on these options.
+EOI
+  type = list(object({
+    name = string
+    secret = optional(object({
+      secret       = string
+      default_mode = optional(number)
+      items = optional(list(object({
+        path    = string
+        version = optional(string)
+        mode    = optional(number)
+      })), [])
+    }))
+    cloud_sql_instance = optional(object({
+      instances = optional(list(string))
+    }))
+  }))
+  default = []
 }
 
-variable "container_concurrency" {
-  description = "Maximum number of concurrent requests to an instance before it is auto-scaled. Defaults to 80 which is the maximum that Cloud Run allows"
-  default     = "80"
+variable "traffic" {
+  description = <<EOI
+Configure traffic allocation between one or more service revisions.
+EOI
+  type = list(object({
+    type     = optional(string)
+    revision = optional(string)
+    percent  = optional(number)
+    tag      = optional(string)
+  }))
+  default = []
 }
 
-variable "cpu_limit" {
-  description = "CPU limit for the deployed container. Defaults to 1 CPU, '1000m'."
-  default     = "1000m"
+variable "mount_cloudsql_instance" {
+  description = <<EOI
+Mount a single CloudSQL instance in each container for the service. This value
+should be the Cloud SQL instance connection name, for example
+"example-devel-e662dd2b:europe-west2:sql-6e1dd60b". This is a convenience
+variable to simplify mounting a single Cloud SQL instance. If you require more
+control you can define one or more Cloud SQL mounts directly using
+var.containers and var.volumes instead.
+EOI
+  type        = string
+  default     = null
 }
 
-variable "memory_limit" {
-  description = "Memory limit for the deployed container. Defaults to 512 MB, '512M'."
-  default     = "512M"
+variable "sql_instance_project" {
+  description = "Project containing SQL instance. Defaults to var.project."
+  type        = string
+  default     = null
 }
 
-variable "timeout_seconds" {
-  description = "The maximum duration, in seconds, the instance is allowed for responding to a request. Default to 300. Maximum is 900."
-  default     = 300
+variable "grant_sql_client_role_to_webapp_sa" {
+  description = <<EOI
+When set to true the roles/cloudsql.client role will be granted to the webapp
+service account at the project level to allow it to connect to Cloud SQL.
+EOI
+  type        = bool
+  default     = false
 }
 
 variable "allow_unauthenticated_invocations" {
   description = <<EOI
-If true, the webapp will allow unauthenticated invocations. If false, the webapp requires authentication
-as a Google user with the Cloud Run invoker permission on the deployment.
+If true, the webapp will allow unauthenticated invocations. If false, the
+webapp requires authentication as a Google user with the Cloud Run invoker
+permission on the deployment.
 EOI
   type        = bool
   default     = true
 }
 
-variable "ingress_style" {
-  type        = string
-  default     = "domain-mapping"
-  description = "Whether to configure a load balancer or create a domain mapping"
-  validation {
-    condition     = contains(["domain-mapping", "load-balancer"], var.ingress_style)
-    error_message = "Ingress style must be one of 'domain-mapping' or 'load-balancer'."
-  }
-}
-
-variable "dns_name" {
-  default     = ""
+# Load Balancer variables
+variable "enable_load_balancer" {
+  type        = bool
+  default     = false
   description = <<EOI
-    Deprecated: use the dns_names variable instead.
-
-    If non-empty, var.dns_names will be ignored.
-
-    If non-empty, a domain mapping will be created for the webapp from this host
-    to point to the webapp or a load balancer will be created for this host depending
-    on the value of the ingress_style variable.
-
-    The domain must first have been verified by Google and the account being used by
-    the google provider must have been added as an owner.
-
-    If and only if a domain mapping has been created, the
-    "domain_mapping_present" output will be true.
-
-    If a domain mapping or load balancer has been created, the "dns_resource_records"
-    output contains the appropriate DNS records.
-  EOI
+Whether to configure a load balancer or use the default run.app generated
+hostname.
+EOI
 }
 
 variable "dns_names" {
-  type        = list(any)
-  default     = []
+  type        = map(string)
+  default     = {}
   description = <<EOI
-    List of DNS names for web application. Note that no records are created,
-    the records to be created can be found in the dns_resource_records output.
+DNS names to configure for the web application. Note that DNS records are
+_NOT_ created, they are used in the load balancer module to ensure the SSL
+certificate is generated with the required SANs. The map's keys are arbitrary
+and are only required to avoid errors when the DNS name is a value which
+Terraform does not know until after the apply operation.
+
+For example:
 
-    Ignored if var.dns_name is non-empty.
-  EOI
+{ my_awesome_dns_name = "awesome.example.com" }
+EOI
 }
 
 variable "use_ssl_certificates" {
@@ -191,20 +357,20 @@ variable "use_ssl_certificates" {
   default = false
 
   description = <<EOI
-    Whether to use the custom TLS certs in var.ssl_certificates for the load balancer
-    or the Google-managed certs for the specified var.dns_names.
-  EOI
+Whether to use the custom TLS certs in var.ssl_certificates for the load
+balancer or the Google-managed certs for the specified var.dns_names.
+EOI
 }
 
 variable "ssl_certificates" {
-  type    = list(any)
-  default = []
-
+  type        = list(any)
+  default     = []
   description = <<EOI
-    A list of self-links to any custom TLS certificates to add to the load balancer.
-    Requires that var.ingress_style be "load-balancer". The self-link is available as
-    the "self_link" attribute of "google_compute_ssl_certificate" resources.
-  EOI
+A list of self-links to any custom TLS certificates to add to the load
+balancer. Requires that var.enable_load_balancer be "true". The self-link is
+available as the "self_link" attribute of "google_compute_ssl_certificate"
+resources.
+EOI
 }
 
 variable "ssl_policy" {
@@ -212,8 +378,9 @@ variable "ssl_policy" {
   default = null
 
   description = <<EOI
-By default, the google_compute_ssl_policy.default SSL policy is applied to the load balancer in load_balancer.tf.
-This sets the SSL profile to MODERN and restricts TLS to >= 1.2. If a different SSL policy is required, it should be
+By default, the google_compute_ssl_policy.default SSL policy is applied to the
+load balancer in load_balancer.tf. This sets the SSL profile to MODERN and
+restricts TLS to >= 1.2. If a different SSL policy is required, it should be
 created outside of this module and its ID passed through using this variable.
 EOI
 }
@@ -227,220 +394,338 @@ variable "enable_ipv6" {
 variable "create_ipv6_address" {
   type        = bool
   default     = false
-  description = "Allocate an IPv6 address to the load balancer if var.enable_ipv6 is true."
+  description = <<EOI
+Allocate an IPv6 address to the load balancer if var.enable_ipv6 is true.
+EOI
 }
 
-variable "service_account_id" {
-  default     = ""
+# Cloud Run pre-deploy job variables
+variable "enable_pre_deploy_job" {
   description = <<EOI
-A service account is always created for the web application. If non-empty this
-variable overrides the default service account id. The default id is formed
-from the "name" variable value with "-run" appended.
+Configure a Cloud Run Job to be executed *before* the main Cloud Run service
+is deployed. This is useful for running database migrations for example.
 EOI
+  type        = bool
+  default     = false
 }
 
-variable "service_account_display_name" {
-  default     = ""
+variable "pre_deploy_job_trigger" {
   description = <<EOI
-If non-empty, override the default display name of the webapp service account.
+When true, the pre-deploy Cloud Run job is executed via a
+null_resource-triggered gcloud command whenever Terraform detects that
+var.pre_deploy_job_container.image has changed.
 EOI
+  type        = bool
+  default     = true
 }
 
-variable "alerting_email_address" {
-  default     = ""
-  type        = string
-  description = <<EOT
-Email address for basic uptime alerts. If empty (the default) no alerting will be configured.
-Otherwise note that the project must be in a Stackdriver monitoring workspace and this must be
-configured manually (no terraform support).
-EOT
+variable "pre_deploy_job_force" {
+  description = <<EOI
+When true, and only when used in addition to var.pre_deploy_job_trigger, the
+pre-deploy Cloud Run job is executed at every terraform apply, regardless of
+the status of var.pre_deploy_job_container.image.
+EOI
+  type        = bool
+  default     = false
 }
 
-variable "alert_notification_channels" {
-  default     = []
-  type        = list(string)
-  description = <<EOT
-Optional. A list of notification channel IDs to send uptime alerts to. The format for the channel IDs should
-be "projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]".
-EOT
+variable "pre_deploy_job_labels" {
+  description = <<EOI
+Map of key/value pairs containing labels to assign to the pre-deploy Cloud Run
+job.
+EOI
+  type        = map(string)
+  default     = null
 }
 
-variable "alerting_uptime_timeout" {
-  default     = "30s"
-  type        = string
-  description = "timeout for http polling"
+variable "pre_deploy_job_annotations" {
+  description = <<EOI
+Map of key/value pairs containing annotations to assign to the pre-deploy Cloud
+Run job.
+EOI
+  type        = map(string)
+  default     = null
 }
 
-variable "alerting_uptime_period" {
-  type        = string
-  default     = "300s"
-  description = "Frequency of uptime checks"
+variable "pre_deploy_job_parallelism" {
+  description = <<EOI
+Specifies the maximum desired number of tasks the execution should run at
+given time.
+EOI
+  type        = number
+  default     = null
 }
 
-variable "alerting_success_threshold_percent" {
+variable "pre_deploy_job_task_count" {
+  description = "Specifies the desired number of tasks the execution should run."
   type        = number
-  default     = 75
-  description = <<EOT
-If the percentage of successful uptime checks within the given uptime period falls below
-this, an alert will be triggered. Set to 100 to trigger an alert if any uptime check fails,
-set to a lower number to tolerate failures without alerting.
-
-Experience has taught us that uptime checks can fail semi-regularly due to transient
-problems outside our control, therefore we allow some leeway before triggering an alert.
-EOT
+  default     = null
 }
 
-variable "alerting_enabled" {
-  type        = bool
-  default     = true
-  description = "Whether alerting policy is enabled"
+variable "pre_deploy_job_launch_stage" {
+  description = <<EOI
+The launch stage for the pre-deploy Cloud Run job. Possible values are UNIMPLEMENTED,
+PRELAUNCH, EARLY_ACCESS, ALPHA, BETA, GA, and DEPRECATED.
+EOI
+  type        = string
+  default     = "GA"
 }
 
-variable "monitoring_path" {
-  type        = string
-  default     = "/"
-  description = "path component of url to be monitored"
+variable "pre_deploy_job_container" {
+  description = <<EOI
+Configure the container instance for the pre-deploy job. See
+https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/cloud_run_v2_job#nested_containers
+for more information on these options.
+EOI
+  type = object({
+    name    = optional(string)
+    image   = optional(string)
+    command = optional(list(string))
+    args    = optional(list(string))
+    env = optional(list(object({
+      name  = string
+      value = optional(string)
+      value_source = optional(object({
+        secret_key_ref = optional(object({
+          secret  = string
+          version = optional(string, "latest")
+        }))
+      }))
+    })), [])
+    resources = optional(object({
+      limits = optional(map(string))
+    }))
+    ports = optional(list(object({
+      name           = optional(string)
+      container_port = optional(number)
+    })), [])
+    volume_mounts = optional(list(object({
+      name       = string
+      mount_path = string
+    })), [])
+    working_dir = optional(string)
+  })
+  default = null
 }
 
-variable "allowed_ingress" {
-  default     = "all"
-  description = <<EOL
-    Specify the allowed ingress to the service. Should be one of:
-    "all", "internal" or "internal-and-cloud-load-balancing".
+variable "pre_deploy_job_volumes" {
+  description = <<EOI
+Configure one or more volumes for the pre-deploy job. See
+https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/cloud_run_v2_job#nested_volumes
+for more information on these options.
+EOI
+  type = list(object({
+    name = string
+    secret = optional(object({
+      secret       = string
+      default_mode = optional(number)
+      items = optional(list(object({
+        path    = string
+        version = optional(string)
+        mode    = optional(number)
+      })), [])
+    }))
+    cloud_sql_instance = optional(object({
+      instances = optional(list(string))
+    }))
+  }))
+  default = []
+}
 
-    If var.ingress_style == "load-balancer", the provided var.allowed_ingress will be ignored
-    and the allowed ingress will be set automatically to "internal-and-cloud-load-balancing".
+variable "pre_deploy_job_timeout" {
+  description = "Configure a timeout, in seconds, for the pre-deploy job."
+  type        = string
+  default     = null
+}
 
-    Setting this to a value other than "all" implies that the service will be
-    moved to the "beta" launch stage. See
-    https://cloud.google.com/run/docs/troubleshooting#launch-stage-validation.
-  EOL
+variable "pre_deploy_job_execution_environment" {
+  description = <<EOI
+The execution environment to host this task. Possible values are
+EXECUTION_ENVIRONMENT_GEN1, and EXECUTION_ENVIRONMENT_GEN2
+EOI
+  type        = string
+  default     = "EXECUTION_ENVIRONMENT_GEN2"
 }
 
-variable "service_annotations" {
-  type        = map(string)
-  default     = {}
-  description = <<EOL
-    Map containing additional annotations to be added to the Cloud Run service
-    itself.
-  EOL
+variable "pre_deploy_job_encryption_key" {
+  description = <<EOI
+The ID of a customer managed encryption key (CMEK) to use to encrypt this
+container image.
+EOI
+  type        = string
+  default     = null
 }
 
-variable "template_annotations" {
-  type        = map(string)
-  default     = {}
-  description = <<EOL
-    Map containing additional annotations to be added to the Cloud Run service
-    template.
-  EOL
+variable "pre_deploy_job_vpc_access" {
+  description = <<EOI
+Configure VPC access for the pre-deploy job. See
+https://registry.terraform.io/providers/hashicorp/google/latest/docs/resources/cloud_run_v2_job#nested_vpc_access
+for more information on these options.
+EOI
+  type = object({
+    connector = optional(string)
+    egress    = optional(string)
+    network_interfaces = optional(object({
+      network    = optional(string)
+      subnetwork = optional(string)
+      tags       = optional(string)
+    }))
+  })
+  default = null
 }
 
-variable "enable_beta_launch_stage" {
-  default     = false
-  description = "Force use of the 'BETA' launch stage for the service."
+variable "pre_deploy_job_max_retries" {
+  description = "Configure the maximum number of retries for the pre-deploy job."
+  type        = number
+  default     = null
 }
 
-variable "disable_monitoring" {
-  default     = false
-  description = <<-EOL
-    Optional. If true, do not create uptime checks. This is useful if, for
-    example, the service is configured to require authenticated invocations.
+variable "pre_deploy_job_mount_cloudsql_instance" {
+  description = <<EOI
+Mount a CloudSQL instance in the pre-deploy job container. This is a
+convenience variable to simplify mounting a Cloud SQL instance. However, if
+you require more control over this you should define it directly in
+var.pre_deploy_job_container instead.
+EOI
+  type        = string
+  default     = null
+}
 
-    Note that this is different from not specifying an alerting email address.
-    If no alerting email address is specified the uptime checks are still
-    created, they just don't alert if they fail.
-EOL
+# Monitoring and alerting
+variable "monitoring_scoping_project" {
+  description = <<EOI
+The ID of a Cloud Monitoring scoping project to create monitoring resources
+in. If omitted, var.project will be used instead.
+EOI
+  type        = string
+  default     = null
 }
 
-variable "create_monitoring_dashboard" {
+variable "enable_alerting" {
   type        = bool
-  default     = false
-  description = <<-EOL
-    Optional. Determines whether to create the monitoring dashboard defined in ./dashboard.json
-    for the provisioned Cloud Run service. Defaults to false.
-EOL
+  default     = true
+  description = "Enable alerting policies."
 }
 
-variable "enable_static_egress_ip" {
-  default     = false
-  type        = bool
-  description = <<-EOL
-    Whether to assign a static ip for egress from this cloud run instance. If enabled the
-    service "vpcaccess.googleapis.com" must also be enabled on the project.
-EOL
+variable "alerting_notification_channels" {
+  default     = []
+  type        = list(string)
+  description = <<EOI
+A list of notification channel IDs to send uptime alerts to. The format for
+the channel IDs should be
+"projects/[PROJECT_ID]/notificationChannels/[CHANNEL_ID]".
+EOI
 }
 
-variable "static_egress_ip_cidr_range" {
-  default     = "10.124.0.0/28"
-  description = "The cidr range used to create a subnet that this cloud run will use if assigned a static ip"
+variable "alerting_uptime_timeout" {
+  default     = "30s"
+  type        = string
+  description = "Timeout for http polling."
 }
 
-variable "min_ports_per_vm" {
-  default     = 64
+variable "alerting_uptime_period" {
+  type        = string
+  default     = "300s"
+  description = "Frequency of uptime checks"
+}
+
+variable "alerting_success_threshold_percent" {
   type        = number
-  description = <<-EOL
-    When using Cloud NAT to provide an egress route, Cloud NAT's minimum ports per VM
-    can be configured to determine how many concurrent connections can be established
-    to the same destination IP address and port.
-EOL
+  default     = 75
+  description = <<EOI
+If the percentage of successful uptime checks within the given uptime period
+falls below this, an alert will be triggered. Set to 100 to trigger an alert
+if any uptime check fails, set to a lower number to tolerate failures without
+alerting.
+
+Experience has taught us that uptime checks can fail semi-regularly due to
+transient problems outside our control, therefore we allow some leeway before
+triggering an alert.
+EOI
 }
 
-variable "auth_proxy_egress_connector" {
-  type        = string
-  default     = ""
-  description = <<-EOL
-    When an auth proxy Function is created for uptime check of internal services, a VPC connector
-    should be provided to route the Function's egress traffic through it to reach the webapp
-    Cloud Run service.
+variable "enable_monitoring" {
+  default     = false
+  type        = bool
+  description = <<EOI
+Optional. If true, create uptime and SSL expiry checks.
 
-    If static IP is enabled, its configured VPC connector will be used instead.
-EOL
+Note that this is different from not specifying an alerting email address. If
+no alerting email address is specified the uptime checks are still created,
+they just don't alert if they fail.
+EOI
 }
 
-variable "secrets_volume" {
-  type = list(object({
-    name    = string # name of the file that is going to be mounted
-    path    = string # name of the directory of the volume mount. e.g. "/secrets". Must be unique for each secret.
-    id      = string # id of the GCP Cloud Secret Manager secret
-    version = string # version of the GCP Cloud Secret Manager secret, defaults to "latest" if empty
-  }))
+variable "monitoring_path" {
+  type        = string
+  default     = "/"
+  description = "Path component of url to be monitored."
+}
 
-  default     = []
-  description = <<-EOL
-    Optional. If containing a list of maps specifying a Secret Manager secret ID,
-    a name and a secret version, will create named files from the secrets.
-EOL
+# Service account variables
+variable "service_account_id" {
+  default     = ""
+  type        = string
+  description = <<EOI
+A service account is always created for the web application. If non-empty this
+variable overrides the default service account id. The default id is formed
+from the "name" variable value with "-run" appended.
+EOI
 }
 
-variable "secrets_envars" {
-  type = list(object({
-    name    = string # name of the environment variables
-    id      = string # id of the GCP Cloud Secret Manager secret
-    version = string # version of the GCP Cloud Secret Manager secret, defaults to "latest" if empty
-  }))
+variable "service_account_display_name" {
+  default     = ""
+  type        = string
+  description = <<EOI
+If non-empty, override the default display name of the webapp service account.
+EOI
+}
 
-  default     = []
-  description = <<-EOL
-    Optional. If containing a list of maps specifying a Secret Manager secret ID,
-    a name and a secret version, will create named environment variables from the
-    secrets.
-EOL
+# Static egress variables
+variable "enable_static_egress_ip" {
+  default     = false
+  type        = bool
+  description = <<EOI
+Whether to assign a static ip for egress from this cloud run instance. If
+enabled, the "vpcaccess.googleapis.com" API must also be enabled on the
+project.
+EOI
 }
 
-variable "image_name" {
+variable "static_egress_ip_cidr_range" {
+  default     = "10.124.0.0/28"
   type        = string
-  description = "The URL of the container image to be deployed."
-  validation {
-    condition     = length(var.image_name) > 0
-    error_message = "The image_name value must be a valid URL to a container image."
-  }
+  description = <<EOI
+The cidr range used to create a subnet that this cloud run will use if assigned
+a static IP.
+EOI
+}
+
+# tflint-ignore: terraform_unused_declarations
+variable "static_egress_ip_subnetwork_id" {
+  description = <<EOI
+When using an existing VPC Access Connector with the static egress IP
+configuration an existing subnetwork must be provided.
+EOI
+  type        = string
+  default     = null
+}
+
+variable "min_ports_per_vm" {
+  default     = 64
+  type        = number
+  description = <<EOI
+When using Cloud NAT to provide an egress route, Cloud NAT's minimum ports per
+VM can be configured to determine how many concurrent connections can be
+established to the same destination IP address and port.
+EOI
 }
 
 variable "vpc_access_connector_max_throughput" {
   type        = number
   default     = 300
-  description = <<-EOL
-    Optional. The maximum throughput of the connector in megabytes per second.
-    Defaults to 300.
-EOL
-}
\ No newline at end of file
+  description = <<EOI
+Optional. The maximum throughput of the connector in megabytes per second.
+Defaults to 300.
+EOI
+}
diff --git a/versions.tf b/versions.tf
deleted file mode 100644
index 6540a891bd2ad7f6ee891798aeaf5dc48ae405b3..0000000000000000000000000000000000000000
--- a/versions.tf
+++ /dev/null
@@ -1,19 +0,0 @@
-# versions.tf defines minimum provider versions for the module
-
-terraform {
-  required_providers {
-    google = {
-      source  = "hashicorp/google"
-      version = ">= 3.70, < 5.0"
-      configuration_aliases = [
-        google.stackdriver
-      ]
-    }
-    google-beta = {
-      source  = "hashicorp/google-beta"
-      version = ">= 3.70, < 5.0"
-    }
-  }
-
-  required_version = ">= 1.0, < 2.0"
-}
diff --git a/versions.tf.json b/versions.tf.json
new file mode 100644
index 0000000000000000000000000000000000000000..08319541c3441ca6e824cfe2b5054d9370f965dc
--- /dev/null
+++ b/versions.tf.json
@@ -0,0 +1,15 @@
+{
+  "terraform": {
+    "required_version": "~> 1.3",
+    "required_providers": {
+      "google": {
+        "source": "hashicorp/google",
+        "version": ">= 4.0"
+      },
+      "null": {
+        "source": "hashicorp/null",
+        "version": "~> 3.0"
+      }
+    }
+  }
+}