wrangler.toml config questions

Hi there!

I just wanted to make sure that I've set up my Workers correctly and that I understand how the wrangler.toml config actually works.

Here is my current config:
compatibility_date = "2023-10-30"
send_metrics = false
node_compat = true
main = "src/worker.ts"
account_id = "xxx-xxx"


# Staging
[env.staging]
name = "api-staging"
workers_dev = true
vars = { ENVIRONMENT = "staging" }
routes = [{ pattern = "staging.example.app", custom_domain = true }]
d1_databases = [
    { binding = "DB", database_name = "db-staging", database_id = "xxx-xxx", migrations_dir = "migrations", preview_database_id = "DB_STAGING" },
]


# Production
[env.production]
name = "api-production"
workers_dev = false
vars = { ENVIRONMENT = "production" }
routes = [{ pattern = "example.app", custom_domain = false }]
d1_databases = [
    { binding = "DB", database_name = "db-production", database_id = "xxx-xxx", migrations_dir = "migrations", preview_database_id = "DB_PRODUCTION"},
]

[[d1_databases]]
binding = "DB"
database_name = "api-staging"
database_id = "xxx_xxx" # same as the staging database_id
migrations_dir = "migrations"
preview_database_id = "DB_LOCAL"


1. No "name" at the top level


Since I didn't provide a name at the top level, I will always deploy my Workers with the -e flag - and the same goes for migrations.

This way I don't end up with a top-level Worker, but only get "api-staging" and "api-production".
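So in practice every deploy should always look something like this, if I'm reading the docs right (using the env names from the config above):

wrangler deploy -e staging
wrangler deploy -e production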


2. [[d1_databases]] - the last one in the config


This one is used by wrangler for local dev, if I'm not mistaken - but how exactly does that work?

Currently I just copied whatever I have in the config for the staging environment - but this doesn't actually "point" to that Worker/D1 instance, does it?

I could literally change everything in this last block and just adjust my scripts accordingly.

So, for example, if I change the database_name to "this_is_random", then my local migration script would look something like:

"migrate:local": "wrangler d1 migrations apply this_is_random --local"
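And the remote migrations would then still go through the envs with -e, so something like this (using the DB binding name from the config above - the script names are just what I'd pick myself):

"migrate:staging": "wrangler d1 migrations apply DB -e staging",
"migrate:production": "wrangler d1 migrations apply DB -e production"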