diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 78432bf7..83129057 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -8,7 +8,7 @@ env: DATABASE_URL: postgres://postgres:postgres@127.0.0.1:5432/river_dev?sslmode=disable # Test database. - TEST_DATABASE_URL: postgres://postgres:postgres@127.0.0.1:5432/river_test?sslmode=disable + TEST_DATABASE_URL: postgres://postgres:postgres@127.0.0.1:5432/river_test?pool_max_conns=15&sslmode=disable on: push: @@ -42,6 +42,13 @@ jobs: postgres: image: postgres:${{ matrix.postgres-version }} env: + # Left as a reminder that it might not be a bad idea to increase max + # connections and then increase the maximum allowed in the database + # pools for each package under test. This config is only supported on + # Postgres 16+ though, and changing Postgres configuration on any + # version before that is absurdly difficult through Docker, so it + # might be worth just waiting until versions before 16 have rolled off. + # POSTGRES_INITDB_ARGS: "-c max_connections=1500" POSTGRES_PASSWORD: postgres options: >- --health-cmd pg_isready @@ -62,42 +69,11 @@ jobs: - name: Display Go version run: go version - - name: Set up test DBs - run: go run ./internal/cmd/testdbman create - env: - PGHOST: 127.0.0.1 - PGPORT: 5432 - PGUSER: postgres - PGPASSWORD: postgres - PGSSLMODE: disable + - name: Set up database + run: psql -c "CREATE DATABASE river_test" $ADMIN_DATABASE_URL - name: Test - working-directory: . - run: go test -p 1 -race ./... -timeout 2m - - - name: Test cmd/river - working-directory: ./cmd/river - run: go test -race ./... -timeout 2m - - - name: Test riverdriver - working-directory: ./riverdriver - run: go test -race ./... -timeout 2m - - - name: Test riverdriver/riverdatabasesql - working-directory: ./riverdriver/riverdatabasesql - run: go test -race ./... -timeout 2m - - - name: Test riverdriver/riverpgxv5 - working-directory: ./riverdriver/riverpgxv5 - run: go test -race ./... -timeout 2m - - - name: Test rivershared - working-directory: ./rivershared - run: go test -race ./... -timeout 2m - - - name: Test rivertype - working-directory: ./rivertype - run: go test -race ./... -timeout 2m + run: make test/race cli: strategy: @@ -157,21 +133,16 @@ jobs: - run: river migrate-get --all --exclude-version 1 --up - - name: river migrate-up - run: river migrate-up --database-url $DATABASE_URL - shell: bash + - run: river migrate-up --database-url $DATABASE_URL + shell: bash # needed for Windows to interpret env var - - name: river migrate-list - run: river migrate-list --database-url $DATABASE_URL + - run: river migrate-list --database-url $DATABASE_URL shell: bash - - name: river validate - run: river validate --database-url $DATABASE_URL + - run: river validate --database-url $DATABASE_URL shell: bash - - name: river version - run: river version - shell: bash + - run: river version - name: river bench run: | @@ -196,8 +167,7 @@ jobs: fi shell: bash - - name: river migrate-down - run: river migrate-down --database-url $DATABASE_URL --max-steps 100 + - run: river migrate-down --database-url $DATABASE_URL --max-steps 100 shell: bash - name: river validate (expect failure) run: | if river validate --database-url $DATABASE_URL; then echo "expected non-zero exit code" && exit 1 fi shell: bash @@ -214,6 +184,32 @@ jobs: + # + # Run the whole migration loop again, this time with a custom schema.
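+ # The river commands below pass --schema $CUSTOM_SCHEMA so the CLI targets
+ # that schema explicitly instead of relying on search_path.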
+ # + + - run: echo "CUSTOM_SCHEMA=custom_schema" >> $GITHUB_ENV + shell: bash + + - run: psql -c "CREATE SCHEMA $CUSTOM_SCHEMA" $DATABASE_URL + shell: bash + + - run: river migrate-up --database-url $DATABASE_URL --schema $CUSTOM_SCHEMA + shell: bash + + - run: river validate --database-url $DATABASE_URL --schema $CUSTOM_SCHEMA + shell: bash + + - run: river migrate-down --database-url $DATABASE_URL --max-steps 100 --schema $CUSTOM_SCHEMA + shell: bash + + - name: river validate (expect failure) + run: | + if river validate --database-url $DATABASE_URL --schema $CUSTOM_SCHEMA; then + echo "expected non-zero exit code" && exit 1 + fi + shell: bash + golangci: name: lint runs-on: ubuntu-latest diff --git a/.golangci.yaml b/.golangci.yaml index a5773f09..7122745a 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -88,6 +88,7 @@ linters: - r - sb # common convention for string builder - t + - tb - tt # common convention for table tests - tx - w diff --git a/CHANGELOG.md b/CHANGELOG.md index 98cff95c..2478a398 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,10 +7,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +⚠️ Internal APIs used for communication between River and River Pro have changed. If using River Pro, make sure to update River and River Pro to latest at the same time to get compatible versions. + ### Added - Added `river/riverlog` containing middleware that injects a context logger into workers, collating log output and persisting it with job metadata. This is paired with a River UI enhancement that shows logs in the UI. [PR #844](https://github.com/riverqueue/river/pull/844). - Added `JobInsertMiddlewareFunc` and `WorkerMiddlewareFunc` to easily implement middleware with a function instead of a struct. [PR #844](https://github.com/riverqueue/river/pull/844). +- Added `Config.Schema`, which lets a non-default schema be injected explicitly into a River client and used for all database operations. This is particularly useful with proxies like PgBouncer that may not respect a schema configured in `search_path`. [PR #848](https://github.com/riverqueue/river/pull/848). ### Changed diff --git a/Makefile b/Makefile index 1dd7bac4..78b7ce60 100644 --- a/Makefile +++ b/Makefile @@ -53,14 +53,14 @@ $(foreach mod,$(submodules),$(eval $(call lint-target,$(mod)))) .PHONY: test test:: ## Run test suite for all submodules define test-target - test:: ; cd $1 && go test ./... -p 1 + test:: ; cd $1 && go test ./... -timeout 2m endef $(foreach mod,$(submodules),$(eval $(call test-target,$(mod)))) .PHONY: test/race test/race:: ## Run test suite for all submodules with race detector define test-race-target - test/race:: ; cd $1 && go test ./... -p 1 -race + test/race:: ; cd $1 && go test ./... -race -timeout 2m endef $(foreach mod,$(submodules),$(eval $(call test-race-target,$(mod)))) diff --git a/client.go b/client.go index 4c9ad792..0f485ae3 100644 --- a/client.go +++ b/client.go @@ -282,12 +282,12 @@ type Config struct { // Defaults to DefaultRetryPolicy. RetryPolicy ClientRetryPolicy - // schema is a non-standard schema where River tables are located. All table + // Schema is a non-standard schema where River tables are located. All table // references in database queries will use this value as a prefix. // // Defaults to empty, which causes the client to look for tables using the // setting of Postgres `search_path`.
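As a quick usage sketch of the new field (hypothetical connection string and schema name, not taken from this diff): the schema must already exist, contain River's migrations, and fit within the length limit enforced by Config.validate below.

    package main

    import (
    	"context"
    	"log"

    	"github.com/jackc/pgx/v5/pgxpool"
    	"github.com/riverqueue/river"
    	"github.com/riverqueue/river/riverdriver/riverpgxv5"
    )

    func main() {
    	ctx := context.Background()

    	dbPool, err := pgxpool.New(ctx, "postgres://localhost:5432/river_dev") // hypothetical URL
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer dbPool.Close()

    	// "my_schema" is a hypothetical schema that already contains River's
    	// tables (e.g. migrated with `river migrate-up --schema my_schema`).
    	// Every table reference in the client's queries gets prefixed with it,
    	// which helps when a proxy such as PgBouncer ignores `search_path`.
    	client, err := river.NewClient(riverpgxv5.New(dbPool), &river.Config{
    		Schema: "my_schema",
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	_ = client // insert-only client; add Queues and Workers to also work jobs
    }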
- schema string + Schema string // SkipUnknownJobCheck is a flag to control whether the client should skip // checking to see if a registered worker exists in the client's worker bundle @@ -383,7 +383,7 @@ func (c *Config) WithDefaults() *Config { ReindexerSchedule: c.ReindexerSchedule, RescueStuckJobsAfter: valutil.ValOrDefault(c.RescueStuckJobsAfter, rescueAfter), RetryPolicy: retryPolicy, - schema: c.schema, + Schema: c.Schema, SkipUnknownJobCheck: c.SkipUnknownJobCheck, Test: c.Test, TestOnly: c.TestOnly, @@ -431,6 +431,14 @@ func (c *Config) validate() error { return errors.New("RescueStuckJobsAfter cannot be less than JobTimeout") } + // Max Postgres notification topic length is 63 (identifiers are limited to + // 63 bytes) and we prefix the schema to the notification topic, so whatever + // schema the user specifies must fit within this limit. + maxSchemaLength := 63 - 1 - len(string(notifier.NotificationTopicLongest)) // -1 for the `.` separating schema and topic + if len(c.Schema) > maxSchemaLength { + return fmt.Errorf("Schema length must be less than or equal to %d characters", maxSchemaLength) //nolint:staticcheck + } + for queue, queueConfig := range c.Queues { if err := queueConfig.validate(queue); err != nil { return err @@ -672,6 +680,10 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client client.pilot.PilotInit(archetype) pluginPilot, _ := client.pilot.(pilotPlugin) + if withBaseService, ok := config.RetryPolicy.(baseservice.WithBaseService); ok { + baseservice.Init(archetype, withBaseService) + } + // There are a number of internal components that are only needed/desired if // we're actually going to be working jobs (as opposed to just enqueueing // them): @@ -680,7 +692,7 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client return nil, errMissingDatabasePoolWithQueues } - client.completer = jobcompleter.NewBatchCompleter(archetype, driver.GetExecutor(), client.pilot, nil) + client.completer = jobcompleter.NewBatchCompleter(archetype, config.Schema, driver.GetExecutor(), client.pilot, nil) client.subscriptionManager = newSubscriptionManager(archetype, nil) client.services = append(client.services, client.completer, client.subscriptionManager) @@ -689,7 +701,7 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client // uses listen/notify. Instead, each service polls for changes it's // interested in. e.g. Elector polls to see if leader has expired.
if !config.PollOnly { - client.notifier = notifier.New(archetype, driver.GetListener(config.schema)) + client.notifier = notifier.New(archetype, driver.GetListener(config.Schema)) client.services = append(client.services, client.notifier) } } else { @@ -698,6 +710,7 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client client.elector = leadership.NewElector(archetype, driver.GetExecutor(), client.notifier, &leadership.Config{ ClientID: config.ID, + Schema: config.Schema, }) client.services = append(client.services, client.elector) @@ -726,7 +739,7 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client CancelledJobRetentionPeriod: config.CancelledJobRetentionPeriod, CompletedJobRetentionPeriod: config.CompletedJobRetentionPeriod, DiscardedJobRetentionPeriod: config.DiscardedJobRetentionPeriod, - Schema: config.schema, + Schema: config.Schema, Timeout: config.JobCleanerTimeout, }, driver.GetExecutor()) maintenanceServices = append(maintenanceServices, jobCleaner) @@ -737,7 +750,7 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client jobRescuer := maintenance.NewRescuer(archetype, &maintenance.JobRescuerConfig{ ClientRetryPolicy: config.RetryPolicy, RescueAfter: config.RescueStuckJobsAfter, - Schema: config.schema, + Schema: config.Schema, WorkUnitFactoryFunc: func(kind string) workunit.WorkUnitFactory { if workerInfo, ok := config.Workers.workersMap[kind]; ok { return workerInfo.workUnitFactory @@ -753,7 +766,7 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client jobScheduler := maintenance.NewJobScheduler(archetype, &maintenance.JobSchedulerConfig{ Interval: config.schedulerInterval, NotifyInsert: client.maybeNotifyInsertForQueues, - Schema: config.schema, + Schema: config.Schema, }, driver.GetExecutor()) maintenanceServices = append(maintenanceServices, jobScheduler) client.testSignals.jobScheduler = &jobScheduler.TestSignals @@ -774,7 +787,7 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client { queueCleaner := maintenance.NewQueueCleaner(archetype, &maintenance.QueueCleanerConfig{ RetentionPeriod: maintenance.QueueRetentionPeriodDefault, - Schema: config.schema, + Schema: config.Schema, }, driver.GetExecutor()) maintenanceServices = append(maintenanceServices, queueCleaner) client.testSignals.queueCleaner = &queueCleaner.TestSignals @@ -788,7 +801,7 @@ func NewClient[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Client reindexer := maintenance.NewReindexer(archetype, &maintenance.ReindexerConfig{ ScheduleFunc: scheduleFunc, - Schema: config.schema, + Schema: config.Schema, }, driver.GetExecutor()) maintenanceServices = append(maintenanceServices, reindexer) client.testSignals.reindexer = &reindexer.TestSignals @@ -1264,7 +1277,7 @@ func (c *Client[TTx]) jobCancel(ctx context.Context, exec riverdriver.Executor, ID: jobID, CancelAttemptedAt: c.baseService.Time.NowUTC(), ControlTopic: string(notifier.NotificationTopicControl), - Schema: c.config.schema, + Schema: c.config.Schema, }) } @@ -1274,7 +1287,7 @@ func (c *Client[TTx]) jobCancel(ctx context.Context, exec riverdriver.Executor, func (c *Client[TTx]) JobDelete(ctx context.Context, id int64) (*rivertype.JobRow, error) { return c.driver.GetExecutor().JobDelete(ctx, &riverdriver.JobDeleteParams{ ID: id, - Schema: c.config.schema, + Schema: c.config.Schema, }) } @@ -1287,7 +1300,7 @@ func (c *Client[TTx]) JobDelete(ctx context.Context, id int64) (*rivertype.JobRo func (c 
*Client[TTx]) JobDeleteTx(ctx context.Context, tx TTx, id int64) (*rivertype.JobRow, error) { return c.driver.UnwrapExecutor(tx).JobDelete(ctx, &riverdriver.JobDeleteParams{ ID: id, - Schema: c.config.schema, + Schema: c.config.Schema, }) } @@ -1296,7 +1309,7 @@ func (c *Client[TTx]) JobDeleteTx(ctx context.Context, tx TTx, id int64) (*river func (c *Client[TTx]) JobGet(ctx context.Context, id int64) (*rivertype.JobRow, error) { return c.driver.GetExecutor().JobGetByID(ctx, &riverdriver.JobGetByIDParams{ ID: id, - Schema: c.config.schema, + Schema: c.config.Schema, }) } @@ -1306,7 +1319,7 @@ func (c *Client[TTx]) JobGet(ctx context.Context, id int64) (*rivertype.JobRow, func (c *Client[TTx]) JobGetTx(ctx context.Context, tx TTx, id int64) (*rivertype.JobRow, error) { return c.driver.UnwrapExecutor(tx).JobGetByID(ctx, &riverdriver.JobGetByIDParams{ ID: id, - Schema: c.config.schema, + Schema: c.config.Schema, }) } @@ -1321,7 +1334,7 @@ func (c *Client[TTx]) JobGetTx(ctx context.Context, tx TTx, id int64) (*rivertyp func (c *Client[TTx]) JobRetry(ctx context.Context, id int64) (*rivertype.JobRow, error) { return c.driver.GetExecutor().JobRetry(ctx, &riverdriver.JobRetryParams{ ID: id, - Schema: c.config.schema, + Schema: c.config.Schema, }) } @@ -1341,7 +1354,7 @@ func (c *Client[TTx]) JobRetry(ctx context.Context, id int64) (*rivertype.JobRow func (c *Client[TTx]) JobRetryTx(ctx context.Context, tx TTx, id int64) (*rivertype.JobRow, error) { return c.driver.UnwrapExecutor(tx).JobRetry(ctx, &riverdriver.JobRetryParams{ ID: id, - Schema: c.config.schema, + Schema: c.config.Schema, }) } @@ -1610,7 +1623,7 @@ func (c *Client[TTx]) insertMany(ctx context.Context, tx riverdriver.ExecutorTx, return c.insertManyShared(ctx, tx, insertParams, func(ctx context.Context, insertParams []*riverdriver.JobInsertFastParams) ([]*rivertype.JobInsertResult, error) { results, err := c.pilot.JobInsertMany(ctx, tx, &riverdriver.JobInsertFastManyParams{ Jobs: insertParams, - Schema: c.config.schema, + Schema: c.config.Schema, }) if err != nil { return nil, err @@ -1774,16 +1787,16 @@ func (c *Client[TTx]) InsertManyFastTx(ctx context.Context, tx TTx, params []Ins return c.insertManyFast(ctx, exec, params) } -func (c *Client[TTx]) insertManyFast(ctx context.Context, tx riverdriver.ExecutorTx, params []InsertManyParams) (int, error) { +func (c *Client[TTx]) insertManyFast(ctx context.Context, execTx riverdriver.ExecutorTx, params []InsertManyParams) (int, error) { insertParams, err := c.insertManyParams(params) if err != nil { return 0, err } - results, err := c.insertManyShared(ctx, tx, insertParams, func(ctx context.Context, insertParams []*riverdriver.JobInsertFastParams) ([]*rivertype.JobInsertResult, error) { - count, err := tx.JobInsertFastManyNoReturning(ctx, &riverdriver.JobInsertFastManyParams{ + results, err := c.insertManyShared(ctx, execTx, insertParams, func(ctx context.Context, insertParams []*riverdriver.JobInsertFastParams) ([]*rivertype.JobInsertResult, error) { + count, err := execTx.JobInsertFastManyNoReturning(ctx, &riverdriver.JobInsertFastManyParams{ Jobs: insertParams, - Schema: c.config.schema, + Schema: c.config.Schema, }) if err != nil { return nil, err @@ -1827,7 +1840,7 @@ func (c *Client[TTx]) maybeNotifyInsertForQueues(ctx context.Context, tx riverdr err := tx.NotifyMany(ctx, &riverdriver.NotifyManyParams{ Payload: payloads, - Schema: c.config.schema, + Schema: c.config.Schema, Topic: string(notifier.NotificationTopicInsert), }) if err != nil { @@ -1858,7 +1871,7 @@ func (c *Client[TTx]) 
notifyQueuePauseOrResume(ctx context.Context, tx riverdriv err = tx.NotifyMany(ctx, &riverdriver.NotifyManyParams{ Payload: []string{string(payload)}, - Schema: c.config.schema, + Schema: c.config.Schema, Topic: string(notifier.NotificationTopicControl), }) if err != nil { @@ -1905,7 +1918,7 @@ func (c *Client[TTx]) addProducer(queueName string, queueConfig QueueConfig) *pr QueueEventCallback: c.subscriptionManager.distributeQueueEvent, RetryPolicy: c.config.RetryPolicy, SchedulerInterval: c.config.schedulerInterval, - Schema: c.config.schema, + Schema: c.config.Schema, StaleProducerRetentionPeriod: 5 * time.Minute, Workers: c.config.Workers, }) @@ -1955,6 +1968,8 @@ func (c *Client[TTx]) JobList(ctx context.Context, params *JobListParams) (*JobL if params == nil { params = NewJobListParams() } + params.schema = c.config.Schema + dbParams, err := params.toDBParams() if err != nil { return nil, err @@ -1984,6 +1999,7 @@ func (c *Client[TTx]) JobListTx(ctx context.Context, tx TTx, params *JobListPara if params == nil { params = NewJobListParams() } + params.schema = c.config.Schema dbParams, err := params.toDBParams() if err != nil { @@ -2024,7 +2040,7 @@ func (c *Client[TTx]) Queues() *QueueBundle { return c.queues } func (c *Client[TTx]) QueueGet(ctx context.Context, name string) (*rivertype.Queue, error) { return c.driver.GetExecutor().QueueGet(ctx, &riverdriver.QueueGetParams{ Name: name, - Schema: c.config.schema, + Schema: c.config.Schema, }) } @@ -2036,7 +2052,7 @@ func (c *Client[TTx]) QueueGet(ctx context.Context, name string) (*rivertype.Que func (c *Client[TTx]) QueueGetTx(ctx context.Context, tx TTx, name string) (*rivertype.Queue, error) { return c.driver.UnwrapExecutor(tx).QueueGet(ctx, &riverdriver.QueueGetParams{ Name: name, - Schema: c.config.schema, + Schema: c.config.Schema, }) } @@ -2065,7 +2081,7 @@ func (c *Client[TTx]) QueueList(ctx context.Context, params *QueueListParams) (* queues, err := c.driver.GetExecutor().QueueList(ctx, &riverdriver.QueueListParams{ Limit: int(params.paginationCount), - Schema: c.config.schema, + Schema: c.config.Schema, }) if err != nil { return nil, err @@ -2092,7 +2108,7 @@ func (c *Client[TTx]) QueueListTx(ctx context.Context, tx TTx, params *QueueList queues, err := c.driver.UnwrapExecutor(tx).QueueList(ctx, &riverdriver.QueueListParams{ Limit: int(params.paginationCount), - Schema: c.config.schema, + Schema: c.config.Schema, }) if err != nil { return nil, err @@ -2121,7 +2137,7 @@ func (c *Client[TTx]) QueuePause(ctx context.Context, name string, opts *QueuePa if err := tx.QueuePause(ctx, &riverdriver.QueuePauseParams{ Name: name, - Schema: c.config.schema, + Schema: c.config.Schema, }); err != nil { return err } @@ -2149,7 +2165,7 @@ func (c *Client[TTx]) QueuePauseTx(ctx context.Context, tx TTx, name string, opt if err := executorTx.QueuePause(ctx, &riverdriver.QueuePauseParams{ Name: name, - Schema: c.config.schema, + Schema: c.config.Schema, }); err != nil { return err } @@ -2182,7 +2198,7 @@ func (c *Client[TTx]) QueueResume(ctx context.Context, name string, opts *QueueP if err := tx.QueueResume(ctx, &riverdriver.QueueResumeParams{ Name: name, - Schema: c.config.schema, + Schema: c.config.Schema, }); err != nil { return err } @@ -2211,7 +2227,7 @@ func (c *Client[TTx]) QueueResumeTx(ctx context.Context, tx TTx, name string, op if err := executorTx.QueueResume(ctx, &riverdriver.QueueResumeParams{ Name: name, - Schema: c.config.schema, + Schema: c.config.Schema, }); err != nil { return err } @@ -2266,6 +2282,7 @@ func (c *Client[TTx]) 
queueUpdate(ctx context.Context, executorTx riverdriver.Ex Metadata: params.Metadata, MetadataDoUpdate: updateMetadata, Name: name, + Schema: c.config.Schema, }) if err != nil { return nil, err @@ -2283,7 +2300,7 @@ func (c *Client[TTx]) queueUpdate(ctx context.Context, executorTx riverdriver.Ex if err := executorTx.NotifyMany(ctx, &riverdriver.NotifyManyParams{ Payload: []string{string(payload)}, - Schema: c.config.schema, + Schema: c.config.Schema, Topic: string(notifier.NotificationTopicControl), }); err != nil { return nil, err diff --git a/client_test.go b/client_test.go index 345c3a74..6cc6a838 100644 --- a/client_test.go +++ b/client_test.go @@ -32,6 +32,7 @@ import ( "github.com/riverqueue/river/internal/riverinternaltest" "github.com/riverqueue/river/internal/riverinternaltest/retrypolicytest" "github.com/riverqueue/river/internal/util/dbutil" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverdatabasesql" "github.com/riverqueue/river/riverdriver/riverpgxv5" @@ -157,9 +158,7 @@ func newTestClient(t *testing.T, dbPool *pgxpool.Pool, config *Config) *Client[p func startClient[TTx any](ctx context.Context, t *testing.T, client *Client[TTx]) { t.Helper() - if err := client.Start(ctx); err != nil { - require.NoError(t, err) - } + require.NoError(t, client.Start(ctx)) t.Cleanup(func() { ctx, cancel := context.WithTimeout(ctx, 5*time.Second) @@ -171,9 +170,18 @@ func startClient[TTx any](ctx context.Context, t *testing.T, client *Client[TTx] func runNewTestClient(ctx context.Context, t *testing.T, config *Config) *Client[pgx.Tx] { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) + config.Schema = schema + + // TODO(brandur): It'd be better if we could reuse the driver object here, + // but it'll take a lot of unwinding of all these test helpers. client := newTestClient(t, dbPool, config) startClient(ctx, t, client) + return client } @@ -200,18 +208,25 @@ func Test_Client(t *testing.T) { type testBundle struct { config *Config dbPool *pgxpool.Pool + schema string } // Alternate setup returning only client Config rather than a full Client. setupConfig := func(t *testing.T) (*Config, *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPoolClone(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema return config, &testBundle{ config: config, dbPool: dbPool, + schema: schema, } } @@ -219,6 +234,10 @@ func Test_Client(t *testing.T) { t.Helper() config, bundle := setupConfig(t) + + // TODO(brandur): It'd be better if we could reuse the driver object + // from setupConfig here, but it'll take a lot of unwinding of all + // these test helpers. return newTestClient(t, bundle.dbPool, config), bundle } @@ -463,11 +482,13 @@ func Test_Client(t *testing.T) { // _outside of_ a transaction. The exact same test logic applies to each case, // the only difference is a different cancelFunc provided by the specific // subtest. 
- cancelRunningJobTestHelper := func(t *testing.T, config *Config, cancelFunc func(ctx context.Context, dbPool *pgxpool.Pool, client *Client[pgx.Tx], jobID int64) (*rivertype.JobRow, error)) { //nolint:thelper - defaultConfig, bundle := setupConfig(t) - if config == nil { - config = defaultConfig + cancelRunningJobTestHelper := func(t *testing.T, configMutate func(config *Config), cancelFunc func(ctx context.Context, dbPool *pgxpool.Pool, client *Client[pgx.Tx], jobID int64) (*rivertype.JobRow, error)) { //nolint:thelper + config, bundle := setupConfig(t) + + if configMutate != nil { + configMutate(config) } + client := newTestClient(t, bundle.dbPool, config) jobStartedChan := make(chan int64) @@ -522,9 +543,11 @@ func Test_Client(t *testing.T) { t.Run("CancelRunningJobWithLongPollInterval", func(t *testing.T) { t.Parallel() - config := newTestConfig(t, nil) - config.FetchPollInterval = 60 * time.Second - cancelRunningJobTestHelper(t, config, func(ctx context.Context, dbPool *pgxpool.Pool, client *Client[pgx.Tx], jobID int64) (*rivertype.JobRow, error) { + configMutate := func(config *Config) { + config.FetchPollInterval = 60 * time.Second + } + + cancelRunningJobTestHelper(t, configMutate, func(ctx context.Context, dbPool *pgxpool.Pool, client *Client[pgx.Tx], jobID int64) (*rivertype.JobRow, error) { return client.JobCancel(ctx, jobID) }) }) @@ -585,7 +608,7 @@ func Test_Client(t *testing.T) { // Cancel an unknown job ID, within a transaction: err = dbutil.WithTx(ctx, client.driver.GetExecutor(), func(ctx context.Context, exec riverdriver.ExecutorTx) error { - jobAfter, err := exec.JobCancel(ctx, &riverdriver.JobCancelParams{ID: 0, Schema: client.config.schema}) + jobAfter, err := exec.JobCancel(ctx, &riverdriver.JobCancelParams{ID: 0, Schema: client.config.Schema}) require.ErrorIs(t, err, ErrNotFound) require.Nil(t, jobAfter) return nil @@ -606,6 +629,7 @@ func Test_Client(t *testing.T) { require.NoError(t, err) t.Cleanup(dbPool.Close) + bundle.config.Schema = "" client, err := NewClient(riverpgxv5.New(dbPool), bundle.config) require.NoError(t, err) @@ -1009,7 +1033,9 @@ func Test_Client(t *testing.T) { config, bundle := setupConfig(t) client := newTestClient(t, bundle.dbPool, config) - queue := testfactory.Queue(ctx, t, client.driver.GetExecutor(), nil) + queue := testfactory.Queue(ctx, t, client.driver.GetExecutor(), &testfactory.QueueOpts{ + Schema: config.Schema, + }) tx, err := bundle.dbPool.Begin(ctx) require.NoError(t, err) @@ -1036,7 +1062,7 @@ func Test_Client(t *testing.T) { t.Run("PausedBeforeStart", func(t *testing.T) { t.Parallel() - client, _ := setup(t) + client, bundle := setup(t) jobStartedChan := make(chan int64) @@ -1050,7 +1076,9 @@ func Test_Client(t *testing.T) { })) // Ensure queue record exists: - queue := testfactory.Queue(ctx, t, client.driver.GetExecutor(), nil) + queue := testfactory.Queue(ctx, t, client.driver.GetExecutor(), &testfactory.QueueOpts{ + Schema: bundle.schema, + }) // Pause only the default queue: require.NoError(t, client.QueuePause(ctx, queue.Name, nil)) @@ -1245,7 +1273,7 @@ func Test_Client_Stop(t *testing.T) { } } - t.Run("no jobs in progress", func(t *testing.T) { + t.Run("NoJobsInProgress", func(t *testing.T) { t.Parallel() client := runNewTestClient(ctx, t, newTestConfig(t, nil)) @@ -1256,7 +1284,7 @@ func Test_Client_Stop(t *testing.T) { require.NoError(t, client.Stop(ctx)) }) - t.Run("jobs in progress, completing promptly", func(t *testing.T) { + t.Run("JobsInProgressCompletingPromptly", func(t *testing.T) { t.Parallel() require := 
require.New(t) doneCh := make(chan struct{}) startedCh := make(chan int64) client := runNewTestClient(ctx, t, newTestConfig(t, makeAwaitCallback(startedCh, doneCh))) insertRes, err := client.Insert(ctx, callbackArgs{}, nil) require.NoError(err) @@ -1274,7 +1302,7 @@ func Test_Client_Stop(t *testing.T) { var startedJobID int64 select { case startedJobID = <-startedCh: - case <-time.After(500 * time.Millisecond): + case <-time.After(1 * time.Second): t.Fatal("timed out waiting for job to start") } require.Equal(insertRes.Job.ID, startedJobID) @@ -1289,7 +1317,7 @@ func Test_Client_Stop(t *testing.T) { require.NoError(client.Stop(ctx)) }) - t.Run("jobs in progress, failing to complete before stop context", func(t *testing.T) { + t.Run("JobsInProgressFailingToCompleteBeforeStopContext", func(t *testing.T) { t.Parallel() jobDoneChan := make(chan struct{}) @@ -1340,7 +1368,7 @@ func Test_Client_Stop(t *testing.T) { } }) - t.Run("with continual insertion, no jobs are left running", func(t *testing.T) { + t.Run("WithContinualInsertionNoJobsLeftRunning", func(t *testing.T) { t.Parallel() startedCh := make(chan int64) @@ -1390,18 +1418,32 @@ func Test_Client_Stop(t *testing.T) { }) } -func Test_Client_Stop_AfterContextCancelled(t *testing.T) { +func Test_Client_Stop_ContextImmediatelyCancelled(t *testing.T) { t.Parallel() - ctx, cancel := context.WithCancel(context.Background()) + ctx := context.Background() + + ctx, cancel := context.WithCancel(ctx) defer cancel() // doneCh will never close, job will exit due to context cancellation: doneCh := make(chan struct{}) startedCh := make(chan int64) - dbPool := riverinternaltest.TestDB(ctx, t) - client := newTestClient(t, dbPool, newTestConfig(t, makeAwaitCallback(startedCh, doneCh))) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, makeAwaitCallback(startedCh, doneCh)) + ) + config.Schema = schema + + client := newTestClient(t, dbPool, config) + + // Doesn't use startClient because it turns out that the test will fail on + // waiting for stop to timeout if a non-background context is used. I added + // the test below (Test_Client_Stop_AfterContextCancelled, currently + // skipped) to reproduce the problem, which we should fix. require.NoError(t, client.Start(ctx)) t.Cleanup(func() { require.NoError(t, client.Stop(context.Background())) }) @@ -1415,6 +1457,52 @@ require.ErrorIs(t, client.Stop(ctx), context.Canceled) } +// Added this failing test case as skipped while working on another project, +// after detecting a problem that the test case above was glossing over; it +// wasn't easy to fix, so decided to punt for now. +// +// An initial Stop is called on the client with a cancelled context, but then +// Stop is called again with a non-cancelled one through startClient. Because +// the client already semi-stopped (I think this is the reason anyway), it +// doesn't cancel jobs in progress, so doneCh not being closed ends up hanging +// the test until it's eventually killed by the context timeout in startClient. +// +// TODO: Remove the Skip below and fix the problem.
+func Test_Client_Stop_AfterContextCancelled(t *testing.T) { + t.Parallel() + + t.Skip("this test case added broken and should be fixed") + + ctx := context.Background() + + // doneCh will never close, job will exit due to context cancellation: + doneCh := make(chan struct{}) + startedCh := make(chan int64) + + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, makeAwaitCallback(startedCh, doneCh)) + ) + config.Schema = schema + + client := newTestClient(t, dbPool, config) + subscribeChan := subscribe(t, client) + + startClient(ctx, t, client) + + insertRes, err := client.Insert(ctx, callbackArgs{}, nil) + require.NoError(t, err) + event := riversharedtest.WaitOrTimeout(t, subscribeChan) + require.Equal(t, insertRes.Job.ID, event.Job.ID) + + ctx, cancel := context.WithCancel(ctx) + cancel() + + require.ErrorIs(t, client.Stop(ctx), context.Canceled) +} + func Test_Client_StopAndCancel(t *testing.T) { t.Parallel() @@ -1612,8 +1700,13 @@ func Test_Client_JobDelete(t *testing.T) { setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema client := newTestClient(t, dbPool, config) return client, &testBundle{dbPool: dbPool} @@ -1715,16 +1808,25 @@ func Test_Client_Insert(t *testing.T) { type testBundle struct { dbPool *pgxpool.Pool + schema string } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema client := newTestClient(t, dbPool, config) - return client, &testBundle{dbPool: dbPool} + return client, &testBundle{ + dbPool: dbPool, + schema: schema, + } } t.Run("Succeeds", func(t *testing.T) { @@ -1790,6 +1892,8 @@ func Test_Client_Insert(t *testing.T) { config := newTestConfig(t, nil) config.FetchCooldown = 5 * time.Second config.FetchPollInterval = 5 * time.Second + config.Schema = bundle.schema + client := newTestClient(t, bundle.dbPool, config) startClient(ctx, t, client) @@ -1893,14 +1997,20 @@ func Test_Client_InsertTx(t *testing.T) { ctx := context.Background() type testBundle struct { - tx pgx.Tx + schema string + tx pgx.Tx } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema client := newTestClient(t, dbPool, config) tx, err := dbPool.Begin(ctx) @@ -1908,7 +2018,8 @@ func Test_Client_InsertTx(t *testing.T) { t.Cleanup(func() { tx.Rollback(ctx) }) return client, &testBundle{ - tx: tx, + schema: schema, + tx: tx, } } @@ -1962,6 +2073,7 @@ func Test_Client_InsertTx(t *testing.T) { client, err := NewClient(riverpgxv5.New(nil), &Config{ Logger: riversharedtest.Logger(t), + Schema: bundle.schema, }) require.NoError(t, err) @@ -2030,16 +2142,26 @@ func Test_Client_InsertManyFast(t *testing.T) { type 
testBundle struct { dbPool *pgxpool.Pool + schema string } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema + client := newTestClient(t, dbPool, config) - return client, &testBundle{dbPool: dbPool} + return client, &testBundle{ + dbPool: dbPool, + schema: schema, + } } t.Run("SucceedsWithMultipleJobs", func(t *testing.T) { @@ -2056,7 +2178,7 @@ func Test_Client_InsertManyFast(t *testing.T) { jobs, err := client.driver.GetExecutor().JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(noOpArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) @@ -2080,6 +2202,7 @@ func Test_Client_InsertManyFast(t *testing.T) { config.FetchCooldown = 20 * time.Millisecond config.FetchPollInterval = 20 * time.Second // essentially disable polling config.Queues = map[string]QueueConfig{QueueDefault: {MaxWorkers: 2}, "another_queue": {MaxWorkers: 1}} + config.Schema = bundle.schema client := newTestClient(t, bundle.dbPool, config) @@ -2129,6 +2252,8 @@ func Test_Client_InsertManyFast(t *testing.T) { config := newTestConfig(t, nil) config.FetchCooldown = 5 * time.Second config.FetchPollInterval = 5 * time.Second + config.Schema = bundle.schema + client := newTestClient(t, bundle.dbPool, config) startClient(ctx, t, client) @@ -2162,7 +2287,7 @@ func Test_Client_InsertManyFast(t *testing.T) { jobs, err := client.driver.GetExecutor().JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(noOpArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) @@ -2273,14 +2398,21 @@ func Test_Client_InsertManyFastTx(t *testing.T) { ctx := context.Background() type testBundle struct { - tx pgx.Tx + schema string + tx pgx.Tx } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema + client := newTestClient(t, dbPool, config) tx, err := dbPool.Begin(ctx) @@ -2288,7 +2420,8 @@ func Test_Client_InsertManyFastTx(t *testing.T) { t.Cleanup(func() { tx.Rollback(ctx) }) return client, &testBundle{ - tx: tx, + schema: schema, + tx: tx, } } @@ -2306,7 +2439,7 @@ func Test_Client_InsertManyFastTx(t *testing.T) { jobs, err := client.driver.UnwrapExecutor(bundle.tx).JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(noOpArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) @@ -2317,7 +2450,7 @@ func Test_Client_InsertManyFastTx(t *testing.T) { // Ensure the jobs are visible outside the transaction: jobs, err = client.driver.GetExecutor().JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(noOpArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) require.Len(t, jobs, 2, "Expected to find exactly two jobs of kind: "+(noOpArgs{}).Kind()) @@ -2333,7 +2466,7 @@ func Test_Client_InsertManyFastTx(t *testing.T) { insertedJobs, err := 
client.driver.UnwrapExecutor(bundle.tx).JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(noOpArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) require.Len(t, insertedJobs, 1) @@ -2354,7 +2487,7 @@ func Test_Client_InsertManyFastTx(t *testing.T) { insertedJobs, err := client.driver.UnwrapExecutor(bundle.tx).JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(noOpArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) require.Len(t, insertedJobs, 1) @@ -2371,6 +2504,7 @@ func Test_Client_InsertManyFastTx(t *testing.T) { client, err := NewClient(riverpgxv5.New(nil), &Config{ Logger: riversharedtest.Logger(t), + Schema: bundle.schema, }) require.NoError(t, err) @@ -2455,16 +2589,25 @@ func Test_Client_InsertMany(t *testing.T) { type testBundle struct { dbPool *pgxpool.Pool + schema string } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema client := newTestClient(t, dbPool, config) - return client, &testBundle{dbPool: dbPool} + return client, &testBundle{ + dbPool: dbPool, + schema: schema, + } } t.Run("SucceedsWithMultipleJobs", func(t *testing.T) { @@ -2523,7 +2666,7 @@ func Test_Client_InsertMany(t *testing.T) { jobs, err := client.driver.GetExecutor().JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(noOpArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) require.Len(t, jobs, 2, "Expected to find exactly two jobs of kind: "+(noOpArgs{}).Kind()) @@ -2546,6 +2689,7 @@ func Test_Client_InsertMany(t *testing.T) { config.FetchCooldown = 20 * time.Millisecond config.FetchPollInterval = 20 * time.Second // essentially disable polling config.Queues = map[string]QueueConfig{QueueDefault: {MaxWorkers: 2}, "another_queue": {MaxWorkers: 1}} + config.Schema = bundle.schema client := newTestClient(t, bundle.dbPool, config) @@ -2595,6 +2739,8 @@ func Test_Client_InsertMany(t *testing.T) { config := newTestConfig(t, nil) config.FetchCooldown = 5 * time.Second config.FetchPollInterval = 5 * time.Second + config.Schema = bundle.schema + client := newTestClient(t, bundle.dbPool, config) startClient(ctx, t, client) @@ -2628,7 +2774,7 @@ func Test_Client_InsertMany(t *testing.T) { jobs, err := client.driver.GetExecutor().JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(noOpArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) require.Len(t, jobs, 1, "Expected to find exactly one job of kind: "+(noOpArgs{}).Kind()) @@ -2740,14 +2886,20 @@ func Test_Client_InsertManyTx(t *testing.T) { ctx := context.Background() type testBundle struct { - tx pgx.Tx + schema string + tx pgx.Tx } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema client := newTestClient(t, dbPool, config) tx, err := dbPool.Begin(ctx) @@ 
-2755,7 +2907,8 @@ func Test_Client_InsertManyTx(t *testing.T) { t.Cleanup(func() { tx.Rollback(ctx) }) return client, &testBundle{ - tx: tx, + schema: schema, + tx: tx, } } @@ -2815,7 +2968,7 @@ func Test_Client_InsertManyTx(t *testing.T) { jobs, err := client.driver.UnwrapExecutor(bundle.tx).JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(noOpArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) require.Len(t, jobs, 2, "Expected to find exactly two jobs of kind: "+(noOpArgs{}).Kind()) @@ -2835,7 +2988,7 @@ func Test_Client_InsertManyTx(t *testing.T) { insertedJobs, err := client.driver.UnwrapExecutor(bundle.tx).JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(noOpArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) require.Len(t, insertedJobs, 1) @@ -2867,6 +3020,7 @@ func Test_Client_InsertManyTx(t *testing.T) { client, err := NewClient(riverpgxv5.New(nil), &Config{ Logger: riversharedtest.Logger(t), + Schema: bundle.schema, }) require.NoError(t, err) @@ -2883,6 +3037,7 @@ func Test_Client_InsertManyTx(t *testing.T) { _, bundle := setup(t) config := newTestConfig(t, nil) config.Queues = nil + config.Schema = bundle.schema insertCalled := false var innerResults []*rivertype.JobInsertResult @@ -2926,6 +3081,7 @@ func Test_Client_InsertManyTx(t *testing.T) { config := newTestConfig(t, nil) config.Queues = nil + config.Schema = bundle.schema type MiddlewareWithBaseService struct { baseservice.BaseService @@ -3056,8 +3212,14 @@ func Test_Client_JobGet(t *testing.T) { setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema + client := newTestClient(t, dbPool, config) return client, &testBundle{} @@ -3096,18 +3258,26 @@ func Test_Client_JobList(t *testing.T) { ctx := context.Background() type testBundle struct { - exec riverdriver.Executor + exec riverdriver.Executor + schema string } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema + client := newTestClient(t, dbPool, config) return client, &testBundle{ - exec: client.driver.GetExecutor(), + exec: client.driver.GetExecutor(), + schema: schema, } } @@ -3116,9 +3286,9 @@ func Test_Client_JobList(t *testing.T) { client, bundle := setup(t) - job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("test_kind_1")}) - job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("test_kind_1")}) - job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("test_kind_2")}) + job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("test_kind_1"), Schema: bundle.schema}) + job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("test_kind_1"), Schema: bundle.schema}) + job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("test_kind_2"), Schema: 
bundle.schema}) listRes, err := client.JobList(ctx, NewJobListParams().Kinds("test_kind_1")) require.NoError(t, err) @@ -3135,9 +3305,9 @@ func Test_Client_JobList(t *testing.T) { client, bundle := setup(t) - job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Queue: ptrutil.Ptr("queue_1")}) - job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Queue: ptrutil.Ptr("queue_1")}) - job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Queue: ptrutil.Ptr("queue_2")}) + job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Queue: ptrutil.Ptr("queue_1"), Schema: bundle.schema}) + job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Queue: ptrutil.Ptr("queue_1"), Schema: bundle.schema}) + job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Queue: ptrutil.Ptr("queue_2"), Schema: bundle.schema}) listRes, err := client.JobList(ctx, NewJobListParams().Queues("queue_1")) require.NoError(t, err) @@ -3154,10 +3324,10 @@ func Test_Client_JobList(t *testing.T) { client, bundle := setup(t) - job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateAvailable)}) - job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateAvailable)}) - job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) - job4 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStatePending)}) + job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateAvailable), Schema: bundle.schema}) + job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateAvailable), Schema: bundle.schema}) + job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning), Schema: bundle.schema}) + job4 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStatePending), Schema: bundle.schema}) listRes, err := client.JobList(ctx, NewJobListParams().States(rivertype.JobStateAvailable)) require.NoError(t, err) @@ -3179,8 +3349,8 @@ func Test_Client_JobList(t *testing.T) { client, bundle := setup(t) - job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{}) - job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{}) + job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema}) + job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema}) listRes, err := client.JobList(ctx, NewJobListParams().OrderBy(JobListOrderByTime, SortOrderAsc)) require.NoError(t, err) @@ -3204,8 +3374,8 @@ func Test_Client_JobList(t *testing.T) { rivertype.JobStateScheduled, } for _, state := range states { - job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(state), ScheduledAt: &now}) - job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(state), ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) + job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(state), ScheduledAt: &now, Schema: bundle.schema}) + job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(state), ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second)), Schema: bundle.schema}) listRes, err := client.JobList(ctx, NewJobListParams().OrderBy(JobListOrderByTime, 
SortOrderAsc).States(state)) require.NoError(t, err) @@ -3230,8 +3400,8 @@ func Test_Client_JobList(t *testing.T) { rivertype.JobStateDiscarded, } for _, state := range states { - job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(state), FinalizedAt: ptrutil.Ptr(now.Add(-10 * time.Second))}) - job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(state), FinalizedAt: ptrutil.Ptr(now.Add(-15 * time.Second))}) + job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(state), FinalizedAt: ptrutil.Ptr(now.Add(-10 * time.Second))}) + job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(state), FinalizedAt: ptrutil.Ptr(now.Add(-15 * time.Second))}) listRes, err := client.JobList(ctx, NewJobListParams().OrderBy(JobListOrderByTime, SortOrderAsc).States(state)) require.NoError(t, err) @@ -3249,8 +3419,8 @@ func Test_Client_JobList(t *testing.T) { client, bundle := setup(t) now := time.Now().UTC() - job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning), AttemptedAt: &now}) - job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning), AttemptedAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) + job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning), AttemptedAt: &now}) + job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning), AttemptedAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) listRes, err := client.JobList(ctx, NewJobListParams().OrderBy(JobListOrderByTime, SortOrderAsc).States(rivertype.JobStateRunning)) require.NoError(t, err) @@ -3268,9 +3438,9 @@ func Test_Client_JobList(t *testing.T) { client, bundle := setup(t) now := time.Now().UTC() - job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateAvailable), ScheduledAt: &now}) - job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateAvailable), ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) - job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning), ScheduledAt: ptrutil.Ptr(now.Add(-2 * time.Second))}) + job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateAvailable), ScheduledAt: &now}) + job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateAvailable), ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) + job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning), ScheduledAt: ptrutil.Ptr(now.Add(-2 * time.Second))}) listRes, err := client.JobList(ctx, nil) require.NoError(t, err) @@ -3283,9 +3453,9 @@ func Test_Client_JobList(t *testing.T) { client, bundle := setup(t) - job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{}) - job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{}) - job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{}) + job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema}) + job2 := testfactory.Job(ctx, t, bundle.exec, 
&testfactory.JobOpts{Schema: bundle.schema}) + job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema}) listRes, err := client.JobList(ctx, NewJobListParams().After(JobListCursorFromJob(job1))) require.NoError(t, err) @@ -3313,9 +3483,9 @@ func Test_Client_JobList(t *testing.T) { client, bundle := setup(t) now := time.Now().UTC() - job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{ScheduledAt: &now}) - job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{ScheduledAt: ptrutil.Ptr(now.Add(1 * time.Second))}) - job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{ScheduledAt: ptrutil.Ptr(now.Add(2 * time.Second))}) + job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, ScheduledAt: &now}) + job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, ScheduledAt: ptrutil.Ptr(now.Add(1 * time.Second))}) + job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, ScheduledAt: ptrutil.Ptr(now.Add(2 * time.Second))}) listRes, err := client.JobList(ctx, NewJobListParams().OrderBy(JobListOrderByScheduledAt, SortOrderAsc).After(JobListCursorFromJob(job1))) require.NoError(t, err) @@ -3343,12 +3513,12 @@ func Test_Client_JobList(t *testing.T) { client, bundle := setup(t) now := time.Now().UTC() - job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateAvailable), ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) - job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateAvailable), ScheduledAt: &now}) - job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning), ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second)), AttemptedAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) - job4 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning), ScheduledAt: ptrutil.Ptr(now.Add(-6 * time.Second)), AttemptedAt: &now}) - job5 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateCompleted), ScheduledAt: ptrutil.Ptr(now.Add(-7 * time.Second)), FinalizedAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) - job6 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateCompleted), ScheduledAt: ptrutil.Ptr(now.Add(-7 * time.Second)), FinalizedAt: &now}) + job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateAvailable), ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) + job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateAvailable), ScheduledAt: &now}) + job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning), ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second)), AttemptedAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) + job4 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning), ScheduledAt: ptrutil.Ptr(now.Add(-6 * time.Second)), AttemptedAt: &now}) + job5 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateCompleted), ScheduledAt: ptrutil.Ptr(now.Add(-7 * time.Second)), FinalizedAt: ptrutil.Ptr(now.Add(-5 * 
time.Second))}) + job6 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateCompleted), ScheduledAt: ptrutil.Ptr(now.Add(-7 * time.Second)), FinalizedAt: &now}) listRes, err := client.JobList(ctx, NewJobListParams().OrderBy(JobListOrderByTime, SortOrderAsc).States(rivertype.JobStateAvailable).After(JobListCursorFromJob(job1))) require.NoError(t, err) @@ -3374,9 +3544,9 @@ func Test_Client_JobList(t *testing.T) { client, bundle := setup(t) - job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Metadata: []byte(`{"foo": "bar"}`)}) - job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Metadata: []byte(`{"baz": "value"}`)}) - job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Metadata: []byte(`{"baz": "value"}`)}) + job1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Metadata: []byte(`{"foo": "bar"}`), Schema: bundle.schema}) + job2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Metadata: []byte(`{"baz": "value"}`), Schema: bundle.schema}) + job3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Metadata: []byte(`{"baz": "value"}`), Schema: bundle.schema}) listRes, err := client.JobList(ctx, NewJobListParams().Metadata(`{"foo": "bar"}`)) require.NoError(t, err) @@ -3414,8 +3584,14 @@ func Test_Client_JobRetry(t *testing.T) { setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema + client := newTestClient(t, dbPool, config) return client, &testBundle{dbPool: dbPool} @@ -3479,7 +3655,8 @@ func Test_Client_ErrorHandler(t *testing.T) { ctx := context.Background() type testBundle struct { - SubscribeChan <-chan *Event + schema string + subscribeChan <-chan *Event } setup := func(t *testing.T, config *Config) (*Client[pgx.Tx], *testBundle) { @@ -3490,7 +3667,10 @@ func Test_Client_ErrorHandler(t *testing.T) { subscribeChan, cancel := client.Subscribe(EventKindJobCompleted, EventKindJobFailed) t.Cleanup(cancel) - return client, &testBundle{SubscribeChan: subscribeChan} + return client, &testBundle{ + schema: client.config.Schema, + subscribeChan: subscribeChan, + } } requireInsert := func(ctx context.Context, client *Client[pgx.Tx]) *rivertype.JobRow { @@ -3519,7 +3699,7 @@ func Test_Client_ErrorHandler(t *testing.T) { client, bundle := setup(t, config) requireInsert(ctx, client) - riversharedtest.WaitOrTimeout(t, bundle.SubscribeChan) + riversharedtest.WaitOrTimeout(t, bundle.subscribeChan) require.True(t, errorHandlerCalled) }) @@ -3548,11 +3728,11 @@ func Test_Client_ErrorHandler(t *testing.T) { require.NoError(t, err) _, err = client.driver.GetExecutor().JobInsertFastMany(ctx, &riverdriver.JobInsertFastManyParams{ Jobs: []*riverdriver.JobInsertFastParams{(*riverdriver.JobInsertFastParams)(insertParams)}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) - riversharedtest.WaitOrTimeout(t, bundle.SubscribeChan) + riversharedtest.WaitOrTimeout(t, bundle.subscribeChan) require.True(t, errorHandlerCalled) }) @@ -3576,7 +3756,7 @@ func Test_Client_ErrorHandler(t *testing.T) { client, bundle := setup(t, config) requireInsert(ctx, client) - riversharedtest.WaitOrTimeout(t, bundle.SubscribeChan) + 
riversharedtest.WaitOrTimeout(t, bundle.subscribeChan) require.True(t, panicHandlerCalled) }) @@ -3588,20 +3768,27 @@ func Test_Client_Maintenance(t *testing.T) { ctx := context.Background() type testBundle struct { - exec riverdriver.Executor + exec riverdriver.Executor + schema string } setup := func(t *testing.T, config *Config) (*Client[pgx.Tx], *testBundle) { t.Helper() var ( - dbPool = riverinternaltest.TestDB(ctx, t) - client = newTestClient(t, dbPool, config) + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) ) + config.Schema = schema + client := newTestClient(t, dbPool, config) client.testSignals.Init() - return client, &testBundle{exec: client.driver.GetExecutor()} + return client, &testBundle{ + exec: client.driver.GetExecutor(), + schema: schema, + } } // Starts the client, then waits for it to be elected leader and for the @@ -3629,18 +3816,18 @@ func Test_Client_Maintenance(t *testing.T) { // Take care to insert jobs before starting the client because otherwise // there's a race condition where the cleaner could run its initial // pass before our insertion is complete. - ineligibleJob1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateAvailable)}) - ineligibleJob2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) - ineligibleJob3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateScheduled)}) + ineligibleJob1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateAvailable)}) + ineligibleJob2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) + ineligibleJob3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateScheduled)}) - jobBeyondHorizon1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateCancelled), FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) - jobBeyondHorizon2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateCompleted), FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) - jobBeyondHorizon3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateDiscarded), FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) + jobBeyondHorizon1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateCancelled), FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) + jobBeyondHorizon2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateCompleted), FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) + jobBeyondHorizon3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateDiscarded), FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) // Will not be deleted. 
- jobWithinHorizon1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateCancelled), FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) - jobWithinHorizon2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateCompleted), FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) - jobWithinHorizon3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateDiscarded), FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) + jobWithinHorizon1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateCancelled), FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) + jobWithinHorizon2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateCompleted), FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) + jobWithinHorizon3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateDiscarded), FinalizedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) startAndWaitForQueueMaintainer(ctx, t, client) @@ -3683,24 +3870,25 @@ func Test_Client_Maintenance(t *testing.T) { // Take care to insert jobs before starting the client because otherwise // there's a race condition where the rescuer could run its initial // pass before our insertion is complete. - ineligibleJob1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), State: ptrutil.Ptr(rivertype.JobStateScheduled), ScheduledAt: ptrutil.Ptr(now.Add(time.Minute))}) - ineligibleJob2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), State: ptrutil.Ptr(rivertype.JobStateRetryable), ScheduledAt: ptrutil.Ptr(now.Add(time.Minute))}) - ineligibleJob3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), State: ptrutil.Ptr(rivertype.JobStateCompleted), FinalizedAt: ptrutil.Ptr(now.Add(-time.Minute))}) + ineligibleJob1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateScheduled), ScheduledAt: ptrutil.Ptr(now.Add(time.Minute))}) + ineligibleJob2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRetryable), ScheduledAt: ptrutil.Ptr(now.Add(time.Minute))}) + ineligibleJob3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateCompleted), FinalizedAt: ptrutil.Ptr(now.Add(-time.Minute))}) // large attempt number ensures these don't immediately start executing again: - jobStuckToRetry1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), State: ptrutil.Ptr(rivertype.JobStateRunning), Attempt: ptrutil.Ptr(20), AttemptedAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) - jobStuckToRetry2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), State: ptrutil.Ptr(rivertype.JobStateRunning), Attempt: ptrutil.Ptr(20), AttemptedAt: ptrutil.Ptr(now.Add(-30 * time.Minute))}) + jobStuckToRetry1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning), Attempt: ptrutil.Ptr(20), 
AttemptedAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) + jobStuckToRetry2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning), Attempt: ptrutil.Ptr(20), AttemptedAt: ptrutil.Ptr(now.Add(-30 * time.Minute))}) jobStuckToDiscard := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{ State: ptrutil.Ptr(rivertype.JobStateRunning), Attempt: ptrutil.Ptr(20), AttemptedAt: ptrutil.Ptr(now.Add(-5*time.Minute - time.Second)), MaxAttempts: ptrutil.Ptr(1), + Schema: bundle.schema, }) // Will not be rescued. - jobNotYetStuck1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), State: ptrutil.Ptr(rivertype.JobStateRunning), AttemptedAt: ptrutil.Ptr(now.Add(-4 * time.Minute))}) - jobNotYetStuck2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), State: ptrutil.Ptr(rivertype.JobStateRunning), AttemptedAt: ptrutil.Ptr(now.Add(-1 * time.Minute))}) - jobNotYetStuck3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), State: ptrutil.Ptr(rivertype.JobStateRunning), AttemptedAt: ptrutil.Ptr(now.Add(-10 * time.Second))}) + jobNotYetStuck1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning), AttemptedAt: ptrutil.Ptr(now.Add(-4 * time.Minute))}) + jobNotYetStuck2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning), AttemptedAt: ptrutil.Ptr(now.Add(-1 * time.Minute))}) + jobNotYetStuck3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("noOp"), Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning), AttemptedAt: ptrutil.Ptr(now.Add(-10 * time.Second))}) startAndWaitForQueueMaintainer(ctx, t, client) @@ -3710,7 +3898,7 @@ func Test_Client_Maintenance(t *testing.T) { requireJobHasState := func(jobID int64, state rivertype.JobState) { t.Helper() - job, err := bundle.exec.JobGetByID(ctx, &riverdriver.JobGetByIDParams{ID: jobID, Schema: ""}) + job, err := bundle.exec.JobGetByID(ctx, &riverdriver.JobGetByIDParams{ID: jobID, Schema: bundle.schema}) require.NoError(t, err) require.Equal(t, state, job.State) } @@ -3746,18 +3934,18 @@ func Test_Client_Maintenance(t *testing.T) { // Take care to insert jobs before starting the client because otherwise // there's a race condition where the scheduler could run its initial // pass before our insertion is complete. 
- ineligibleJob1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateAvailable)}) - ineligibleJob2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) - ineligibleJob3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateCompleted), FinalizedAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) + ineligibleJob1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateAvailable)}) + ineligibleJob2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) + ineligibleJob3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateCompleted), FinalizedAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) - jobInPast1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateScheduled), ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) - jobInPast2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateScheduled), ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Minute))}) - jobInPast3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateScheduled), ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) + jobInPast1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateScheduled), ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Hour))}) + jobInPast2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateScheduled), ScheduledAt: ptrutil.Ptr(now.Add(-1 * time.Minute))}) + jobInPast3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateScheduled), ScheduledAt: ptrutil.Ptr(now.Add(-5 * time.Second))}) // Will not be scheduled. 
- jobInFuture1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateCancelled), FinalizedAt: ptrutil.Ptr(now.Add(1 * time.Hour))}) - jobInFuture2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateCompleted), FinalizedAt: ptrutil.Ptr(now.Add(1 * time.Minute))}) - jobInFuture3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateDiscarded), FinalizedAt: ptrutil.Ptr(now.Add(10 * time.Second))}) + jobInFuture1 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateCancelled), FinalizedAt: ptrutil.Ptr(now.Add(1 * time.Hour))}) + jobInFuture2 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateCompleted), FinalizedAt: ptrutil.Ptr(now.Add(1 * time.Minute))}) + jobInFuture3 := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateDiscarded), FinalizedAt: ptrutil.Ptr(now.Add(10 * time.Second))}) startAndWaitForQueueMaintainer(ctx, t, client) @@ -3809,7 +3997,7 @@ func Test_Client_Maintenance(t *testing.T) { jobs, err := bundle.exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(periodicJobArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) require.Len(t, jobs, 1, "Expected to find exactly one job of kind: "+(periodicJobArgs{}).Kind()) @@ -3838,7 +4026,7 @@ func Test_Client_Maintenance(t *testing.T) { // No jobs yet because the RunOnStart option was not specified. jobs, err := bundle.exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(periodicJobArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) require.Empty(t, jobs) @@ -3868,7 +4056,7 @@ func Test_Client_Maintenance(t *testing.T) { jobs, err := bundle.exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(periodicJobArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) require.Empty(t, jobs, "Expected to find zero jobs of kind: "+(periodicJobArgs{}).Kind()) @@ -3877,13 +4065,18 @@ func Test_Client_Maintenance(t *testing.T) { t.Run("PeriodicJobEnqueuerAddDynamically", func(t *testing.T) { t.Parallel() + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) + config := newTestConfig(t, nil) + config.Schema = schema worker := &periodicJobWorker{} AddWorker(config.Workers, worker) - dbPool := riverinternaltest.TestDB(ctx, t) - client := newTestClient(t, dbPool, config) client.testSignals.Init() startClient(ctx, t, client) @@ -3905,7 +4098,7 @@ func Test_Client_Maintenance(t *testing.T) { // We get a queued job because RunOnStart was specified. 
jobs, err := exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(periodicJobArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) require.Len(t, jobs, 1) @@ -3914,12 +4107,19 @@ func Test_Client_Maintenance(t *testing.T) { t.Run("PeriodicJobEnqueuerRemoveDynamically", func(t *testing.T) { t.Parallel() + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) + config := newTestConfig(t, nil) + config.Schema = schema worker := &periodicJobWorker{} AddWorker(config.Workers, worker) - client := newTestClient(t, riverinternaltest.TestDB(ctx, t), config) + client := newTestClient(t, dbPool, config) client.testSignals.Init() exec := client.driver.GetExecutor() @@ -3958,7 +4158,7 @@ func Test_Client_Maintenance(t *testing.T) { { jobs, err := exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(periodicJobArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) require.Len(t, jobs, 1) @@ -3966,7 +4166,7 @@ func Test_Client_Maintenance(t *testing.T) { { jobs, err := exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(OtherPeriodicArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) require.Len(t, jobs, 1) @@ -4003,7 +4203,7 @@ func Test_Client_Maintenance(t *testing.T) { jobs, err := bundle.exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{(periodicJobArgs{}).Kind()}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(t, err) require.Len(t, jobs, 1, "Expected to find exactly one job of kind: "+(periodicJobArgs{}).Kind()) @@ -4012,9 +4212,15 @@ func Test_Client_Maintenance(t *testing.T) { t.Run("QueueCleaner", func(t *testing.T) { t.Parallel() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) config := newTestConfig(t, nil) + config.Schema = schema + client := newTestClient(t, dbPool, config) client.testSignals.Init() exec := client.driver.GetExecutor() @@ -4024,14 +4230,14 @@ func Test_Client_Maintenance(t *testing.T) { // Take care to insert queues before starting the client because otherwise // there's a race condition where the cleaner could run its initial // pass before our insertion is complete. - queueBeyondHorizon1 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) - queueBeyondHorizon2 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) - queueBeyondHorizon3 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) + queueBeyondHorizon1 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Schema: schema, UpdatedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) + queueBeyondHorizon2 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Schema: schema, UpdatedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) + queueBeyondHorizon3 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Schema: schema, UpdatedAt: ptrutil.Ptr(deleteHorizon.Add(-1 * time.Hour))}) // Will not be deleted. 
- queueWithinHorizon1 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) - queueWithinHorizon2 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) - queueWithinHorizon3 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) + queueWithinHorizon1 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Schema: schema, UpdatedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) + queueWithinHorizon2 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Schema: schema, UpdatedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) + queueWithinHorizon3 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Schema: schema, UpdatedAt: ptrutil.Ptr(deleteHorizon.Add(1 * time.Hour))}) startClient(ctx, t, client) @@ -4089,24 +4295,34 @@ func Test_Client_QueueGet(t *testing.T) { ctx := context.Background() - type testBundle struct{} + type testBundle struct { + schema string + } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema + client := newTestClient(t, dbPool, config) - return client, &testBundle{} + return client, &testBundle{ + schema: schema, + } } t.Run("FetchesAnExistingQueue", func(t *testing.T) { t.Parallel() - client, _ := setup(t) + client, bundle := setup(t) - queue := testfactory.Queue(ctx, t, client.driver.GetExecutor(), nil) + queue := testfactory.Queue(ctx, t, client.driver.GetExecutor(), &testfactory.QueueOpts{Schema: bundle.schema}) queueRes, err := client.QueueGet(ctx, queue.Name) require.NoError(t, err) @@ -4136,21 +4352,32 @@ func Test_Client_QueueGetTx(t *testing.T) { type testBundle struct { executorTx riverdriver.ExecutorTx + schema string tx pgx.Tx } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema + client := newTestClient(t, dbPool, config) tx, err := dbPool.Begin(ctx) require.NoError(t, err) t.Cleanup(func() { tx.Rollback(ctx) }) - return client, &testBundle{executorTx: client.driver.UnwrapExecutor(tx), tx: tx} + return client, &testBundle{ + executorTx: client.driver.UnwrapExecutor(tx), + schema: schema, + tx: tx, + } } t.Run("FetchesAnExistingQueue", func(t *testing.T) { @@ -4158,7 +4385,7 @@ func Test_Client_QueueGetTx(t *testing.T) { client, bundle := setup(t) - queue := testfactory.Queue(ctx, t, bundle.executorTx, nil) + queue := testfactory.Queue(ctx, t, bundle.executorTx, &testfactory.QueueOpts{Schema: bundle.schema}) queueRes, err := client.QueueGetTx(ctx, bundle.tx, queue.Name) require.NoError(t, err) @@ -4187,22 +4414,32 @@ func Test_Client_QueueList(t *testing.T) { ctx := context.Background() - type testBundle struct{} + type testBundle struct { + schema string + } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = 
riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema + client := newTestClient(t, dbPool, config) - return client, &testBundle{} + return client, &testBundle{ + schema: schema, + } } t.Run("ListsAndPaginatesQueues", func(t *testing.T) { t.Parallel() - client, _ := setup(t) + client, bundle := setup(t) requireQueuesEqual := func(t *testing.T, target, actual *rivertype.Queue) { t.Helper() @@ -4222,13 +4459,13 @@ func Test_Client_QueueList(t *testing.T) { require.Empty(t, listRes.Queues) // Make queue1, pause it, refetch: - queue1 := testfactory.Queue(ctx, t, client.driver.GetExecutor(), &testfactory.QueueOpts{Metadata: []byte(`{"foo": "bar"}`)}) + queue1 := testfactory.Queue(ctx, t, client.driver.GetExecutor(), &testfactory.QueueOpts{Metadata: []byte(`{"foo": "bar"}`), Schema: bundle.schema}) require.NoError(t, client.QueuePause(ctx, queue1.Name, nil)) queue1, err = client.QueueGet(ctx, queue1.Name) require.NoError(t, err) - queue2 := testfactory.Queue(ctx, t, client.driver.GetExecutor(), nil) - queue3 := testfactory.Queue(ctx, t, client.driver.GetExecutor(), nil) + queue2 := testfactory.Queue(ctx, t, client.driver.GetExecutor(), &testfactory.QueueOpts{Schema: bundle.schema}) + queue3 := testfactory.Queue(ctx, t, client.driver.GetExecutor(), &testfactory.QueueOpts{Schema: bundle.schema}) listRes, err = client.QueueList(ctx, NewQueueListParams().First(2)) require.NoError(t, err) @@ -4254,21 +4491,32 @@ func Test_Client_QueueListTx(t *testing.T) { type testBundle struct { executorTx riverdriver.ExecutorTx + schema string tx pgx.Tx } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema + client := newTestClient(t, dbPool, config) tx, err := dbPool.Begin(ctx) require.NoError(t, err) t.Cleanup(func() { tx.Rollback(ctx) }) - return client, &testBundle{executorTx: client.driver.UnwrapExecutor(tx), tx: tx} + return client, &testBundle{ + executorTx: client.driver.UnwrapExecutor(tx), + schema: schema, + tx: tx, + } } t.Run("ListsQueues", func(t *testing.T) { @@ -4280,7 +4528,7 @@ func Test_Client_QueueListTx(t *testing.T) { require.NoError(t, err) require.Empty(t, listRes.Queues) - queue := testfactory.Queue(ctx, t, bundle.executorTx, nil) + queue := testfactory.Queue(ctx, t, bundle.executorTx, &testfactory.QueueOpts{Schema: bundle.schema}) listRes, err = client.QueueListTx(ctx, bundle.tx, NewQueueListParams()) require.NoError(t, err) @@ -4299,22 +4547,32 @@ func Test_Client_QueueUpdate(t *testing.T) { ctx := context.Background() - type testBundle struct{} + type testBundle struct { + schema string + } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema + client := newTestClient(t, dbPool, config) - return client, &testBundle{} + return client, &testBundle{ + schema: schema, + } } t.Run("UpdatesQueueMetadata", func(t *testing.T) { t.Parallel() - client, _ := setup(t) + client, bundle := setup(t) startClient(ctx, t, client) type 
metadataUpdatePayload struct { @@ -4339,7 +4597,7 @@ func Test_Client_QueueUpdate(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { sub.Unlisten(ctx) }) - queue := testfactory.Queue(ctx, t, client.driver.GetExecutor(), nil) + queue := testfactory.Queue(ctx, t, client.driver.GetExecutor(), &testfactory.QueueOpts{Schema: bundle.schema}) require.Equal(t, []byte(`{}`), queue.Metadata) queue, err = client.QueueUpdate(ctx, queue.Name, &QueueUpdateParams{ @@ -4391,21 +4649,32 @@ func Test_Client_QueueUpdateTx(t *testing.T) { type testBundle struct { executorTx riverdriver.ExecutorTx + schema string tx pgx.Tx } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema + client := newTestClient(t, dbPool, config) tx, err := dbPool.Begin(ctx) require.NoError(t, err) t.Cleanup(func() { tx.Rollback(ctx) }) - return client, &testBundle{executorTx: client.driver.UnwrapExecutor(tx), tx: tx} + return client, &testBundle{ + executorTx: client.driver.UnwrapExecutor(tx), + schema: schema, + tx: tx, + } } t.Run("UpdatesQueueMetadata", func(t *testing.T) { @@ -4413,7 +4682,7 @@ func Test_Client_QueueUpdateTx(t *testing.T) { client, bundle := setup(t) - queue := testfactory.Queue(ctx, t, bundle.executorTx, nil) + queue := testfactory.Queue(ctx, t, bundle.executorTx, &testfactory.QueueOpts{Schema: bundle.schema}) require.Equal(t, []byte(`{}`), queue.Metadata) queue, err := client.QueueUpdateTx(ctx, bundle.tx, queue.Name, &QueueUpdateParams{ @@ -4450,11 +4719,16 @@ func Test_Client_RetryPolicy(t *testing.T) { t.Run("RetryUntilDiscarded", func(t *testing.T) { t.Parallel() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { return errors.New("job error") }) + config.Schema = schema // The default policy would work too, but this takes some variability // out of it to make comparisons easier. 
@@ -4462,11 +4736,14 @@ func Test_Client_RetryPolicy(t *testing.T) { client := newTestClient(t, dbPool, config) + now := client.baseService.Time.StubNowUTC(time.Now().UTC()) + t.Logf("Now: %s", now) + subscribeChan, cancel := client.Subscribe(EventKindJobCompleted, EventKindJobFailed) t.Cleanup(cancel) originalJobs := make([]*rivertype.JobRow, rivercommon.MaxAttemptsDefault) - for i := 0; i < len(originalJobs); i++ { + for i := range originalJobs { job := requireInsert(ctx, client) // regression protection to ensure we're testing the right number of jobs: require.Equal(t, rivercommon.MaxAttemptsDefault, job.MaxAttempts) @@ -4474,9 +4751,10 @@ func Test_Client_RetryPolicy(t *testing.T) { updatedJob, err := client.driver.GetExecutor().JobUpdate(ctx, &riverdriver.JobUpdateParams{ ID: job.ID, AttemptedAtDoUpdate: true, - AttemptedAt: ptrutil.Ptr(time.Now().UTC()), + AttemptedAt: &now, // we want a value here, but it'll be overwritten as jobs are locked by the producer AttemptDoUpdate: true, Attempt: i, // starts at i, but will be i + 1 by the time it's being worked + Schema: schema, // Need to find a cleaner way around this, but state is required // because sqlc can't encode an empty string to the @@ -4499,7 +4777,7 @@ func Test_Client_RetryPolicy(t *testing.T) { finishedJobs, err := client.driver.GetExecutor().JobGetByIDMany(ctx, &riverdriver.JobGetByIDManyParams{ ID: sliceutil.Map(originalJobs, func(m *rivertype.JobRow) int64 { return m.ID }), - Schema: "", + Schema: schema, }) require.NoError(t, err) @@ -4530,9 +4808,7 @@ func Test_Client_RetryPolicy(t *testing.T) { t.Logf(" New scheduled at: %v", finishedJob.ScheduledAt) t.Logf(" Expected scheduled at: %v", expectedNextScheduledAt) - // TODO(brandur): This tolerance could be reduced if we could inject - // time.Now into adapter which may happen with baseservice - require.WithinDuration(t, expectedNextScheduledAt, finishedJob.ScheduledAt, 2*time.Second) + require.WithinDuration(t, expectedNextScheduledAt, finishedJob.ScheduledAt, time.Microsecond) require.Equal(t, rivertype.JobStateRetryable, finishedJob.State) } @@ -4546,8 +4822,7 @@ func Test_Client_RetryPolicy(t *testing.T) { t.Logf("Attempt number %d discarded", originalJob.Attempt) - // TODO(brandur): See note on tolerance above. - require.WithinDuration(t, time.Now(), *finishedJob.FinalizedAt, 2*time.Second) + require.WithinDuration(t, now, *finishedJob.FinalizedAt, time.Microsecond) require.Equal(t, rivertype.JobStateDiscarded, finishedJob.State) } }) @@ -4575,7 +4850,11 @@ func Test_Client_Subscribe(t *testing.T) { t.Run("Success", func(t *testing.T) { t.Parallel() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) // Fail/succeed jobs based on their name so we can get a mix of both to // verify. 
@@ -4585,6 +4864,7 @@ func Test_Client_Subscribe(t *testing.T) { } return nil }) + config.Schema = schema client := newTestClient(t, dbPool, config) @@ -4645,7 +4925,11 @@ func Test_Client_Subscribe(t *testing.T) { t.Run("CompletedOnly", func(t *testing.T) { t.Parallel() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { if strings.HasPrefix(job.Args.Name, "failed") { @@ -4653,6 +4937,7 @@ func Test_Client_Subscribe(t *testing.T) { } return nil }) + config.Schema = schema client := newTestClient(t, dbPool, config) @@ -4688,7 +4973,11 @@ func Test_Client_Subscribe(t *testing.T) { t.Run("FailedOnly", func(t *testing.T) { t.Parallel() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { if strings.HasPrefix(job.Args.Name, "failed") { @@ -4696,6 +4985,7 @@ func Test_Client_Subscribe(t *testing.T) { } return nil }) + config.Schema = schema client := newTestClient(t, dbPool, config) @@ -4731,11 +5021,16 @@ func Test_Client_Subscribe(t *testing.T) { t.Run("PanicOnUnknownKind", func(t *testing.T) { t.Parallel() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { return nil }) + config.Schema = schema client := newTestClient(t, dbPool, config) @@ -4747,11 +5042,16 @@ func Test_Client_Subscribe(t *testing.T) { t.Run("SubscriptionCancellation", func(t *testing.T) { t.Parallel() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { return nil }) + config.Schema = schema client := newTestClient(t, dbPool, config) @@ -4768,9 +5068,16 @@ func Test_Client_Subscribe(t *testing.T) { t.Run("SubscribeOnClientWithoutWorkers", func(t *testing.T) { t.Parallel() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) - client := newTestClient(t, dbPool, &Config{}) + config := &Config{} + config.Schema = schema + + client := newTestClient(t, dbPool, config) require.PanicsWithValue(t, "created a subscription on a client that will never work jobs (Queues not configured)", func() { _, _ = client.Subscribe(EventKindJobCompleted) @@ -4803,7 +5110,11 @@ func Test_Client_SubscribeConfig(t *testing.T) { t.Run("Success", func(t *testing.T) { t.Parallel() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) // Fail/succeed jobs based on their name so we can get a mix of both to // verify. 
@@ -4813,6 +5124,7 @@ func Test_Client_SubscribeConfig(t *testing.T) { } return nil }) + config.Schema = schema client := newTestClient(t, dbPool, config) @@ -4875,11 +5187,16 @@ func Test_Client_SubscribeConfig(t *testing.T) { t.Run("EventsDropWithNoListeners", func(t *testing.T) { t.Parallel() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { return nil }) + config.Schema = schema client := newTestClient(t, dbPool, config) @@ -4929,7 +5246,7 @@ func Test_Client_SubscribeConfig(t *testing.T) { _, err := client.driver.GetExecutor().JobInsertFastMany(ctx, &riverdriver.JobInsertFastManyParams{ Jobs: insertParams, - Schema: "", + Schema: schema, }) require.NoError(t, err) @@ -4953,11 +5270,16 @@ func Test_Client_SubscribeConfig(t *testing.T) { t.Run("PanicOnChanSizeLessThanZero", func(t *testing.T) { t.Parallel() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { return nil }) + config.Schema = schema client := newTestClient(t, dbPool, config) @@ -4971,11 +5293,16 @@ func Test_Client_SubscribeConfig(t *testing.T) { t.Run("PanicOnUnknownKind", func(t *testing.T) { t.Parallel() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { return nil }) + config.Schema = schema client := newTestClient(t, dbPool, config) @@ -5000,12 +5327,17 @@ func Test_Client_InsertTriggersImmediateWork(t *testing.T) { close(doneCh) // don't need to block any jobs from completing startedCh := make(chan int64) - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) config := newTestConfig(t, makeAwaitCallback(startedCh, doneCh)) config.FetchCooldown = 20 * time.Millisecond config.FetchPollInterval = 20 * time.Second // essentially disable polling config.Queues = map[string]QueueConfig{QueueDefault: {MaxWorkers: 1}, "another_queue": {MaxWorkers: 1}} + config.Schema = schema client := newTestClient(t, dbPool, config) @@ -5048,7 +5380,13 @@ func Test_Client_InsertNotificationsAreDeduplicatedAndDebounced(t *testing.T) { t.Parallel() ctx := context.Background() - dbPool := riverinternaltest.TestDB(ctx, t) + + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) + config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { return nil }) @@ -5056,6 +5394,7 @@ func Test_Client_InsertNotificationsAreDeduplicatedAndDebounced(t *testing.T) { config.FetchCooldown = time.Second config.schedulerInterval = 20 * time.Second // quiet scheduler config.Queues = map[string]QueueConfig{"queue1": {MaxWorkers: 1}, "queue2": {MaxWorkers: 1}, "queue3": {MaxWorkers: 1}} + config.Schema = schema client := newTestClient(t, dbPool, config) startClient(ctx, t, client) @@ -5121,14 +5460,21 @@ func Test_Client_JobCompletion(t *testing.T) { ctx := 
context.Background() type testBundle struct { - DBPool *pgxpool.Pool - SubscribeChan <-chan *Event + dbPool *pgxpool.Pool + schema string + subscribeChan <-chan *Event } setup := func(t *testing.T, config *Config) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) + config.Schema = schema + client := newTestClient(t, dbPool, config) startClient(ctx, t, client) @@ -5136,8 +5482,9 @@ func Test_Client_JobCompletion(t *testing.T) { t.Cleanup(cancel) return client, &testBundle{ - DBPool: dbPool, - SubscribeChan: subscribeChan, + dbPool: dbPool, + schema: schema, + subscribeChan: subscribeChan, } } @@ -5154,7 +5501,7 @@ func Test_Client_JobCompletion(t *testing.T) { insertRes, err := client.Insert(ctx, callbackArgs{}, nil) require.NoError(err) - event := riversharedtest.WaitOrTimeout(t, bundle.SubscribeChan) + event := riversharedtest.WaitOrTimeout(t, bundle.subscribeChan) require.Equal(insertRes.Job.ID, event.Job.ID) require.Equal(rivertype.JobStateCompleted, event.Job.State) @@ -5168,36 +5515,39 @@ func Test_Client_JobCompletion(t *testing.T) { t.Run("JobThatIsAlreadyCompletedIsNotAlteredByCompleter", func(t *testing.T) { t.Parallel() - require := require.New(t) - var exec riverdriver.Executor now := time.Now().UTC() - config := newTestConfig(t, func(ctx context.Context, job *Job[callbackArgs]) error { - _, err := exec.JobUpdate(ctx, &riverdriver.JobUpdateParams{ + + client, bundle := setup(t, newTestConfig(t, nil)) + + type JobArgs struct { + JobArgsReflectKind[JobArgs] + } + + AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error { + _, err := client.driver.GetExecutor().JobUpdate(ctx, &riverdriver.JobUpdateParams{ ID: job.ID, FinalizedAtDoUpdate: true, FinalizedAt: &now, + Schema: bundle.schema, StateDoUpdate: true, State: rivertype.JobStateCompleted, }) - require.NoError(err) + require.NoError(t, err) return nil - }) - - client, bundle := setup(t, config) - exec = client.driver.GetExecutor() + })) - insertRes, err := client.Insert(ctx, callbackArgs{}, nil) - require.NoError(err) + insertRes, err := client.Insert(ctx, JobArgs{}, nil) + require.NoError(t, err) - event := riversharedtest.WaitOrTimeout(t, bundle.SubscribeChan) - require.Equal(insertRes.Job.ID, event.Job.ID) - require.Equal(rivertype.JobStateCompleted, event.Job.State) + event := riversharedtest.WaitOrTimeout(t, bundle.subscribeChan) + require.Equal(t, insertRes.Job.ID, event.Job.ID) + require.Equal(t, rivertype.JobStateCompleted, event.Job.State) reloadedJob, err := client.JobGet(ctx, insertRes.Job.ID) - require.NoError(err) + require.NoError(t, err) - require.Equal(rivertype.JobStateCompleted, reloadedJob.State) - require.WithinDuration(now, *reloadedJob.FinalizedAt, time.Microsecond) + require.Equal(t, rivertype.JobStateCompleted, reloadedJob.State) + require.WithinDuration(t, now, *reloadedJob.FinalizedAt, time.Microsecond) }) t.Run("JobThatReturnsErrIsRetryable", func(t *testing.T) { @@ -5213,7 +5563,7 @@ func Test_Client_JobCompletion(t *testing.T) { insertRes, err := client.Insert(ctx, callbackArgs{}, nil) require.NoError(err) - event := riversharedtest.WaitOrTimeout(t, bundle.SubscribeChan) + event := riversharedtest.WaitOrTimeout(t, bundle.subscribeChan) require.Equal(insertRes.Job.ID, event.Job.ID) require.Equal(rivertype.JobStateRetryable, event.Job.State) @@ -5238,7 +5588,7 @@ func 
Test_Client_JobCompletion(t *testing.T) { insertRes, err := client.Insert(ctx, callbackArgs{}, nil) require.NoError(err) - event := riversharedtest.WaitOrTimeout(t, bundle.SubscribeChan) + event := riversharedtest.WaitOrTimeout(t, bundle.subscribeChan) require.Equal(insertRes.Job.ID, event.Job.ID) require.Equal(rivertype.JobStateCancelled, event.Job.State) @@ -5253,7 +5603,6 @@ func Test_Client_JobCompletion(t *testing.T) { t.Run("JobThatIsAlreadyDiscardedIsNotAlteredByCompleter", func(t *testing.T) { t.Parallel() - require := require.New(t) now := time.Now().UTC() client, bundle := setup(t, newTestConfig(t, nil)) @@ -5269,73 +5618,73 @@ func Test_Client_JobCompletion(t *testing.T) { Errors: [][]byte{[]byte("{\"error\": \"oops\"}")}, FinalizedAtDoUpdate: true, FinalizedAt: &now, + Schema: bundle.schema, StateDoUpdate: true, State: rivertype.JobStateDiscarded, }) - require.NoError(err) + require.NoError(t, err) return errors.New("oops") })) insertRes, err := client.Insert(ctx, JobArgs{}, nil) - require.NoError(err) + require.NoError(t, err) - event := riversharedtest.WaitOrTimeout(t, bundle.SubscribeChan) - require.Equal(insertRes.Job.ID, event.Job.ID) - require.Equal(rivertype.JobStateDiscarded, event.Job.State) + event := riversharedtest.WaitOrTimeout(t, bundle.subscribeChan) + require.Equal(t, insertRes.Job.ID, event.Job.ID) + require.Equal(t, rivertype.JobStateDiscarded, event.Job.State) reloadedJob, err := client.JobGet(ctx, insertRes.Job.ID) - require.NoError(err) + require.NoError(t, err) - require.Equal(rivertype.JobStateDiscarded, reloadedJob.State) - require.NotNil(reloadedJob.FinalizedAt) + require.Equal(t, rivertype.JobStateDiscarded, reloadedJob.State) + require.NotNil(t, reloadedJob.FinalizedAt) }) t.Run("JobThatIsCompletedManuallyIsNotTouchedByCompleter", func(t *testing.T) { t.Parallel() - require := require.New(t) - now := time.Now().UTC() - client, bundle := setup(t, newTestConfig(t, nil)) + now := client.baseService.Time.StubNowUTC(time.Now().UTC()) + type JobArgs struct { JobArgsReflectKind[JobArgs] } var updatedJob *Job[JobArgs] AddWorker(client.config.Workers, WorkFunc(func(ctx context.Context, job *Job[JobArgs]) error { - tx, err := bundle.DBPool.Begin(ctx) - require.NoError(err) + tx, err := bundle.dbPool.Begin(ctx) + require.NoError(t, err) updatedJob, err = JobCompleteTx[*riverpgxv5.Driver](ctx, tx, job) - require.NoError(err) + require.NoError(t, err) return tx.Commit(ctx) })) insertRes, err := client.Insert(ctx, JobArgs{}, nil) - require.NoError(err) + require.NoError(t, err) - event := riversharedtest.WaitOrTimeout(t, bundle.SubscribeChan) - require.Equal(insertRes.Job.ID, event.Job.ID) - require.Equal(rivertype.JobStateCompleted, event.Job.State) - require.Equal(rivertype.JobStateCompleted, updatedJob.State) - require.NotNil(updatedJob) - require.NotNil(event.Job.FinalizedAt) - require.NotNil(updatedJob.FinalizedAt) + event := riversharedtest.WaitOrTimeout(t, bundle.subscribeChan) + require.Equal(t, insertRes.Job.ID, event.Job.ID) + require.Equal(t, rivertype.JobStateCompleted, event.Job.State) + require.Equal(t, rivertype.JobStateCompleted, updatedJob.State) + require.NotNil(t, updatedJob) + require.NotNil(t, event.Job.FinalizedAt) + require.NotNil(t, updatedJob.FinalizedAt) // Make sure the FinalizedAt is approximately ~now: - require.WithinDuration(now, *updatedJob.FinalizedAt, 2*time.Second) + require.WithinDuration(t, now, *updatedJob.FinalizedAt, time.Microsecond) // Make sure we're getting the same timestamp back from the event and the // updated job 
inside the txn: - require.WithinDuration(*updatedJob.FinalizedAt, *event.Job.FinalizedAt, time.Microsecond) + require.WithinDuration(t, *updatedJob.FinalizedAt, *event.Job.FinalizedAt, time.Microsecond) reloadedJob, err := client.JobGet(ctx, insertRes.Job.ID) - require.NoError(err) + require.NoError(t, err) - require.Equal(rivertype.JobStateCompleted, reloadedJob.State) - require.Equal(updatedJob.FinalizedAt, reloadedJob.FinalizedAt) + require.Equal(t, rivertype.JobStateCompleted, reloadedJob.State) + require.Equal(t, updatedJob.FinalizedAt, reloadedJob.FinalizedAt) }) } @@ -5365,7 +5714,7 @@ func Test_Client_UnknownJobKindErrorsTheJob(t *testing.T) { require.NoError(err) insertedResults, err := client.driver.GetExecutor().JobInsertFastMany(ctx, &riverdriver.JobInsertFastManyParams{ Jobs: []*riverdriver.JobInsertFastParams{(*riverdriver.JobInsertFastParams)(insertParams)}, - Schema: client.config.schema, + Schema: client.config.Schema, }) require.NoError(err) @@ -5394,10 +5743,15 @@ func Test_Client_Start_Error(t *testing.T) { t.Run("NoQueueConfiguration", func(t *testing.T) { t.Parallel() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) config := newTestConfig(t, nil) config.Queues = nil + config.Schema = schema config.Workers = nil client := newTestClient(t, dbPool, config) @@ -5408,9 +5762,14 @@ func Test_Client_Start_Error(t *testing.T) { t.Run("NoRegisteredWorkers", func(t *testing.T) { t.Parallel() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) config := newTestConfig(t, nil) + config.Schema = schema config.Workers = NewWorkers() // initialized, but empty client := newTestClient(t, dbPool, config) @@ -5421,7 +5780,8 @@ func Test_Client_Start_Error(t *testing.T) { t.Run("DatabaseError", func(t *testing.T) { t.Parallel() - dbConfig := riverinternaltest.DatabaseConfig("does-not-exist-and-dont-create-it") + dbConfig := riversharedtest.DBPool(ctx, t).Config().Copy() + dbConfig.ConnConfig.Database = "does-not-exist-and-dont-create-it" dbPool, err := pgxpool.NewWithConfig(ctx, dbConfig) require.NoError(t, err) @@ -5443,8 +5803,16 @@ func Test_NewClient_BaseServiceName(t *testing.T) { t.Parallel() ctx := context.Background() - dbPool := riverinternaltest.TestDB(ctx, t) - client := newTestClient(t, dbPool, newTestConfig(t, nil)) + + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema + + client := newTestClient(t, dbPool, config) // Ensure we get the clean name "Client" instead of the fully qualified name // with generic type param: require.Equal(t, "Client", client.baseService.Name) @@ -5489,13 +5857,19 @@ func Test_NewClient_Defaults(t *testing.T) { t.Parallel() ctx := context.Background() - dbPool := riverinternaltest.TestDB(ctx, t) + + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) workers := NewWorkers() AddWorker(workers, &noOpWorker{}) client, err := NewClient(riverpgxv5.New(dbPool), &Config{ Queues: map[string]QueueConfig{QueueDefault: {MaxWorkers: 1}}, + Schema: schema, Workers: workers, }) require.NoError(t, err) @@ -5537,7 +5911,12 @@ func Test_NewClient_Overrides(t 
*testing.T) { ctx := context.Background() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) + errorHandler := &testErrorHandler{} logger := slog.New(slog.NewTextHandler(os.Stderr, nil)) @@ -5574,6 +5953,7 @@ func Test_NewClient_Overrides(t *testing.T) { Queues: map[string]QueueConfig{QueueDefault: {MaxWorkers: 1}}, ReindexerSchedule: &periodicIntervalSchedule{interval: time.Hour}, RetryPolicy: retryPolicy, + Schema: schema, SkipUnknownJobCheck: true, TestOnly: true, // disables staggered start in maintenance services Workers: workers, @@ -5606,6 +5986,7 @@ func Test_NewClient_Overrides(t *testing.T) { require.Equal(t, logger, client.baseService.Logger) require.Equal(t, 5, client.config.MaxAttempts) require.Equal(t, retryPolicy, client.config.RetryPolicy) + require.Equal(t, schema, client.config.Schema) require.True(t, client.config.SkipUnknownJobCheck) require.Len(t, client.config.WorkerMiddleware, 1) } @@ -5825,6 +6206,13 @@ func Test_NewClient_Validations(t *testing.T) { require.Equal(t, 23*time.Hour+maintenance.JobRescuerRescueAfterDefault, client.config.RescueStuckJobsAfter) }, }, + { + name: "Schema length must be less than or equal to 46 characters", + configFunc: func(config *Config) { + config.Schema = strings.Repeat("a", 47) + }, + wantErr: errors.New("Schema length must be less than or equal to 46 characters"), + }, { name: "Queues can be nil when Workers is also nil", configFunc: func(config *Config) { @@ -5916,25 +6304,30 @@ func Test_NewClient_Validations(t *testing.T) { t.Parallel() ctx := context.Background() - require := require.New(t) - dbPool := riverinternaltest.TestDB(ctx, t) + + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) workers := NewWorkers() AddWorker(workers, &noOpWorker{}) config := &Config{ Queues: map[string]QueueConfig{QueueDefault: {MaxWorkers: 1}}, + Schema: schema, Workers: workers, } tt.configFunc(config) client, err := NewClient(riverpgxv5.New(dbPool), config) if tt.wantErr != nil { - require.Error(err) - require.ErrorContains(err, tt.wantErr.Error()) + require.Error(t, err) + require.ErrorContains(t, err, tt.wantErr.Error()) return } - require.NoError(err) + require.NoError(t, err) if tt.validateResult != nil { tt.validateResult(t, client) @@ -6013,7 +6406,6 @@ func TestClient_JobTimeout(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { t.Parallel() - require := require.New(t) ctx := context.Background() testWorker := &timeoutTestWorker{doneCh: make(chan testWorkerDeadline)} @@ -6028,15 +6420,15 @@ func TestClient_JobTimeout(t *testing.T) { client := runNewTestClient(ctx, t, config) _, err := client.Insert(ctx, timeoutTestArgs{TimeoutValue: tt.jobArgTimeout}, nil) - require.NoError(err) + require.NoError(t, err) result := riversharedtest.WaitOrTimeout(t, testWorker.doneCh) if tt.wantDuration == 0 { - require.False(result.ok, "expected no deadline") + require.False(t, result.ok, "expected no deadline") return } - require.True(result.ok, "expected a deadline, but none was set") - require.WithinDuration(time.Now().Add(tt.wantDuration), result.deadline, 2*time.Second) + require.True(t, result.ok, "expected a deadline, but none was set") + require.WithinDuration(t, time.Now().Add(tt.wantDuration), result.deadline, 2*time.Second) }) } } @@ -6296,16 +6688,33 @@ func TestID(t *testing.T) { 
t.Run("IsGeneratedWhenNotSpecifiedInConfig", func(t *testing.T) { t.Parallel() - dbPool := riverinternaltest.TestDB(ctx, t) - client := newTestClient(t, dbPool, newTestConfig(t, nil)) + + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) + + config := newTestConfig(t, nil) + config.Schema = schema + + client := newTestClient(t, dbPool, config) require.NotEmpty(t, client.ID()) }) t.Run("IsGeneratedWhenNotSpecifiedInConfig", func(t *testing.T) { t.Parallel() + + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) + config := newTestConfig(t, nil) config.ID = "my-client-id" - dbPool := riverinternaltest.TestDB(ctx, t) + config.Schema = schema + client := newTestClient(t, dbPool, config) require.Equal(t, "my-client-id", client.ID()) }) @@ -6333,12 +6742,19 @@ func TestInsert(t *testing.T) { t.Parallel() ctx := context.Background() - dbPool := riverinternaltest.TestDB(ctx, t) + + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) + workers := NewWorkers() AddWorker(workers, &noOpWorker{}) config := &Config{ Queues: map[string]QueueConfig{QueueDefault: {MaxWorkers: 1}}, + Schema: schema, Workers: workers, } @@ -6457,9 +6873,15 @@ func TestUniqueOpts(t *testing.T) { workers := NewWorkers() AddWorker(workers, &noOpWorker{}) - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema - client := newTestClient(t, dbPool, newTestConfig(t, nil)) + client := newTestClient(t, dbPool, config) // Tests that use ByPeriod below can be sensitive to intermittency if // the tests run at say 23:59:59.998, then it's possible to accidentally diff --git a/cmd/river/rivercli/river_cli.go b/cmd/river/rivercli/river_cli.go index c07d6b8b..2678c1d0 100644 --- a/cmd/river/rivercli/river_cli.go +++ b/cmd/river/rivercli/river_cli.go @@ -394,7 +394,7 @@ type migrateDown struct { } func (c *migrateDown) Run(ctx context.Context, opts *migrateOpts) (bool, error) { - migrator, err := c.GetMigrator(&rivermigrate.Config{Line: opts.Line, Logger: c.Logger}) + migrator, err := c.GetMigrator(&rivermigrate.Config{Line: opts.Line, Logger: c.Logger, Schema: c.Schema}) if err != nil { return false, err } @@ -482,7 +482,7 @@ func (c *migrateGet) Run(_ context.Context, opts *migrateGetOpts) (bool, error) // other databases is added in the future. Unlike other migrate commands, // this one doesn't take a `--database-url`, so we'd need a way of // detecting the database type. 
- migrator, err := rivermigrate.New(c.DriverProcurer.ProcurePgxV5(nil), &rivermigrate.Config{Line: opts.Line, Logger: c.Logger}) + migrator, err := rivermigrate.New(c.DriverProcurer.ProcurePgxV5(nil), &rivermigrate.Config{Line: opts.Line, Logger: c.Logger, Schema: ""}) if err != nil { return false, err } @@ -550,7 +550,7 @@ type migrateList struct { } func (c *migrateList) Run(ctx context.Context, opts *migrateListOpts) (bool, error) { - migrator, err := c.GetMigrator(&rivermigrate.Config{Line: opts.Line, Logger: c.Logger}) + migrator, err := c.GetMigrator(&rivermigrate.Config{Line: opts.Line, Logger: c.Logger, Schema: c.Schema}) if err != nil { return false, err } @@ -587,7 +587,7 @@ type migrateUp struct { } func (c *migrateUp) Run(ctx context.Context, opts *migrateOpts) (bool, error) { - migrator, err := c.GetMigrator(&rivermigrate.Config{Line: opts.Line, Logger: c.Logger}) + migrator, err := c.GetMigrator(&rivermigrate.Config{Line: opts.Line, Logger: c.Logger, Schema: c.Schema}) if err != nil { return false, err } @@ -625,7 +625,7 @@ type validate struct { } func (c *validate) Run(ctx context.Context, opts *validateOpts) (bool, error) { - migrator, err := c.GetMigrator(&rivermigrate.Config{Line: opts.Line, Logger: c.Logger}) + migrator, err := c.GetMigrator(&rivermigrate.Config{Line: opts.Line, Logger: c.Logger, Schema: c.Schema}) if err != nil { return false, err } diff --git a/cmd/river/rivercli/river_cli_test.go b/cmd/river/rivercli/river_cli_test.go index a717339e..87069431 100644 --- a/cmd/river/rivercli/river_cli_test.go +++ b/cmd/river/rivercli/river_cli_test.go @@ -6,7 +6,6 @@ import ( "context" "fmt" "net/url" - "os" "runtime/debug" "strings" "testing" @@ -17,6 +16,7 @@ import ( "github.com/spf13/cobra" "github.com/stretchr/testify/require" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivermigrate" @@ -174,6 +174,8 @@ Built with %s // out into its own test block so that we don't have to mark the entire block // above as non-parallel because a few tests can't be made parallel. func TestBaseCommandSetNonParallel(t *testing.T) { + ctx := context.Background() + type testBundle struct { out *bytes.Buffer } @@ -197,28 +199,33 @@ func TestBaseCommandSetNonParallel(t *testing.T) { t.Run("PGEnvWithoutDatabaseURL", func(t *testing.T) { cmd, _ := setup(t) - // Deconstruct a database URL into its PG* components. This path is the - // one that gets taken in CI, but could work locally as well. - if databaseURL := os.Getenv("TEST_DATABASE_URL"); databaseURL != "" { - parsedURL, err := url.Parse(databaseURL) - require.NoError(t, err) - - t.Setenv("PGDATABASE", parsedURL.Path[1:]) - t.Setenv("PGHOST", parsedURL.Hostname()) - pass, _ := parsedURL.User.Password() - t.Setenv("PGPASSWORD", pass) - t.Setenv("PGPORT", cmp.Or(parsedURL.Port(), "5432")) - t.Setenv("PGSSLMODE", parsedURL.Query().Get("sslmode")) - t.Setenv("PGUSER", parsedURL.User.Username()) - } else { - // With no `TEST_DATABASE_URL` available, try a simpler alternative - // configuration. Requires a database on localhost that doesn't - // require authentication, which should exist from testdbman. 
- t.Setenv("PGDATABASE", "river_test") - t.Setenv("PGHOST", "localhost") - } + testDatabaseURL := riversharedtest.TestDatabaseURL() + + config, err := pgxpool.ParseConfig(testDatabaseURL) + require.NoError(t, err) + + dbPool, err := pgxpool.NewWithConfig(ctx, config) + require.NoError(t, err) + + var ( + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) + + t.Setenv("TEST_DATABASE_URL", "") + + parsedURL, err := url.Parse(testDatabaseURL) + require.NoError(t, err) + + t.Setenv("PGDATABASE", parsedURL.Path[1:]) + t.Setenv("PGHOST", parsedURL.Hostname()) + pass, _ := parsedURL.User.Password() + t.Setenv("PGPASSWORD", pass) + t.Setenv("PGPORT", cmp.Or(parsedURL.Port(), "5432")) + t.Setenv("PGSSLMODE", parsedURL.Query().Get("sslmode")) + t.Setenv("PGUSER", parsedURL.User.Username()) - cmd.SetArgs([]string{"validate"}) + cmd.SetArgs([]string{"migrate-up", "--schema", schema}) require.NoError(t, cmd.Execute()) }) } diff --git a/docs/development.md b/docs/development.md index 04d1a1cc..fe1e5e1a 100644 --- a/docs/development.md +++ b/docs/development.md @@ -2,13 +2,13 @@ ## Run tests -Raise test databases: +Raise the test database: - go run ./internal/cmd/testdbman create + createdb river_test Run tests: - go test ./... -p 1 + go test ./... --timeout 2s ## Run lint @@ -23,6 +23,13 @@ queries. After changing an sqlc `.sql` file, generate Go with: make generate +## Raise and migrate development database + +To run programs locally outside of tests, create and raise a development database: + + createdb river_dev + go run ./cmd/river migrate-up --database-url postgres:///river_dev --line main + ## Releasing a new version 1. Fetch changes to the repo and any new tags. Export `VERSION` by incrementing the last tag. Execute `update-mod-version` to add it the project's `go.mod` files: diff --git a/driver_test.go b/driver_test.go index eae1810c..2017b6f6 100644 --- a/driver_test.go +++ b/driver_test.go @@ -13,28 +13,42 @@ import ( "github.com/stretchr/testify/require" "github.com/riverqueue/river/internal/rivercommon" - "github.com/riverqueue/river/internal/riverinternaltest" "github.com/riverqueue/river/internal/riverinternaltest/riverdrivertest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverdatabasesql" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivertype" ) func TestDriverDatabaseSQL(t *testing.T) { t.Parallel() - ctx := context.Background() - dbPool := riverinternaltest.TestDB(ctx, t) - - stdPool := stdlib.OpenDBFromPool(dbPool) + var ( + ctx = context.Background() + dbPool = riversharedtest.DBPool(ctx, t) // TODO + stdPool = stdlib.OpenDBFromPool(dbPool) + driver = riverdatabasesql.New(stdPool) + ) t.Cleanup(func() { require.NoError(t, stdPool.Close()) }) + // Make sure to use the same schema as Pgx test transactions use or else + // operations without a schema included will clash with each other due to + // different `river_state` types with the error "ERROR: cached plan must not + // change result type (SQLSTATE 0A000)". + // + // Alternatively, we could switch dbPool to use a DBPoolClone so it's got a + // different statement cache. 
+ var testTxSchema string + tx := riverdbtest.TestTxPgx(ctx, t) + require.NoError(t, tx.QueryRow(ctx, "SELECT current_schema()").Scan(&testTxSchema)) + riverdrivertest.Exercise(ctx, t, - func(ctx context.Context, t *testing.T) riverdriver.Driver[*sql.Tx] { + func(ctx context.Context, t *testing.T) (riverdriver.Driver[*sql.Tx], string) { t.Helper() - return riverdatabasesql.New(stdPool) + return driver, riverdbtest.TestSchema(ctx, t, driver, nil) }, func(ctx context.Context, t *testing.T) riverdriver.Executor { t.Helper() @@ -43,6 +57,10 @@ func TestDriverDatabaseSQL(t *testing.T) { require.NoError(t, err) t.Cleanup(func() { _ = tx.Rollback() }) + // The same thing as the built-in riversharedtest.TestTx does. + _, err = tx.ExecContext(ctx, "SET search_path TO '"+testTxSchema+"'") + require.NoError(t, err) + return riverdatabasesql.New(nil).UnwrapExecutor(tx) }) } @@ -53,16 +71,21 @@ func TestDriverRiverPgxV5(t *testing.T) { ctx := context.Background() riverdrivertest.Exercise(ctx, t, - func(ctx context.Context, t *testing.T) riverdriver.Driver[pgx.Tx] { + func(ctx context.Context, t *testing.T) (riverdriver.Driver[pgx.Tx], string) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - return riverpgxv5.New(dbPool) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) + + return driver, schema }, func(ctx context.Context, t *testing.T) riverdriver.Executor { t.Helper() - tx := riverinternaltest.TestTx(ctx, t) + tx := riverdbtest.TestTxPgx(ctx, t) return riverpgxv5.New(nil).UnwrapExecutor(tx) }) } @@ -75,30 +98,38 @@ func BenchmarkDriverRiverPgxV5_Executor(b *testing.B) { ctx := context.Background() - type testBundle struct{} + type testBundle struct { + schema string + } setupPool := func(b *testing.B) (riverdriver.Executor, *testBundle) { b.Helper() - driver := riverpgxv5.New(riverinternaltest.TestDB(ctx, b)) + var ( + dbPool = riversharedtest.DBPool(ctx, b) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, b, driver, nil) + ) b.ResetTimer() - return driver.GetExecutor(), &testBundle{} + return driver.GetExecutor(), &testBundle{ + schema: schema, + } } setupTx := func(b *testing.B) (riverdriver.Executor, *testBundle) { b.Helper() driver := riverpgxv5.New(nil) - tx := riverinternaltest.TestTx(ctx, b) + tx := riverdbtest.TestTxPgx(ctx, b) b.ResetTimer() return driver.UnwrapExecutor(tx), &testBundle{} } - makeInsertParams := func() *riverdriver.JobInsertFastManyParams { + makeInsertParams := func(bundle *testBundle) *riverdriver.JobInsertFastManyParams { return &riverdriver.JobInsertFastManyParams{ Jobs: []*riverdriver.JobInsertFastParams{{ EncodedArgs: []byte(`{}`), @@ -110,7 +141,7 @@ func BenchmarkDriverRiverPgxV5_Executor(b *testing.B) { ScheduledAt: nil, State: rivertype.JobStateAvailable, }}, - Schema: "", + Schema: bundle.schema, } } @@ -118,10 +149,10 @@ func BenchmarkDriverRiverPgxV5_Executor(b *testing.B) { ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - exec, _ := setupTx(b) + exec, bundle := setupTx(b) for range b.N { - if _, err := exec.JobInsertFastMany(ctx, makeInsertParams()); err != nil { + if _, err := exec.JobInsertFastMany(ctx, makeInsertParams(bundle)); err != nil { b.Fatal(err) } } @@ -131,12 +162,12 @@ func BenchmarkDriverRiverPgxV5_Executor(b *testing.B) { ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - exec, _ := setupPool(b) + exec, bundle := setupPool(b) b.RunParallel(func(pb *testing.PB) { i := 0 for pb.Next() { - 
if _, err := exec.JobInsertFastMany(ctx, makeInsertParams()); err != nil { + if _, err := exec.JobInsertFastMany(ctx, makeInsertParams(bundle)); err != nil { b.Fatal(err) } i++ @@ -148,10 +179,10 @@ func BenchmarkDriverRiverPgxV5_Executor(b *testing.B) { ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - exec, _ := setupTx(b) + exec, bundle := setupTx(b) for range b.N * 100 { - if _, err := exec.JobInsertFastMany(ctx, makeInsertParams()); err != nil { + if _, err := exec.JobInsertFastMany(ctx, makeInsertParams(bundle)); err != nil { b.Fatal(err) } } @@ -173,10 +204,10 @@ func BenchmarkDriverRiverPgxV5_Executor(b *testing.B) { ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - exec, _ := setupPool(b) + exec, bundle := setupPool(b) for range b.N * 100 * runtime.NumCPU() { - if _, err := exec.JobInsertFastMany(ctx, makeInsertParams()); err != nil { + if _, err := exec.JobInsertFastMany(ctx, makeInsertParams(bundle)); err != nil { b.Fatal(err) } } @@ -210,7 +241,7 @@ func BenchmarkDriverRiverPgxV5Insert(b *testing.B) { var ( driver = riverpgxv5.New(nil) - tx = riverinternaltest.TestTx(ctx, b) + tx = riverdbtest.TestTxPgx(ctx, b) ) bundle := &testBundle{ diff --git a/example_batch_insert_test.go b/example_batch_insert_test.go index 392d7786..0146240e 100644 --- a/example_batch_insert_test.go +++ b/example_batch_insert_test.go @@ -8,9 +8,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) type BatchInsertArgs struct{} @@ -33,17 +35,12 @@ func (w *BatchInsertWorker) Work(ctx context.Context, job *river.Job[BatchInsert func Example_batchInsert() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. 
- if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &BatchInsertWorker{}) @@ -52,7 +49,8 @@ func Example_batchInsert() { Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 100}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_client_from_context_dbsql_test.go b/example_client_from_context_database_sql_test.go similarity index 78% rename from example_client_from_context_dbsql_test.go rename to example_client_from_context_database_sql_test.go index cfeab56f..3cca0977 100644 --- a/example_client_from_context_dbsql_test.go +++ b/example_client_from_context_database_sql_test.go @@ -11,9 +11,12 @@ import ( _ "github.com/jackc/pgx/v5/stdlib" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverdatabasesql" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" + "github.com/riverqueue/river/rivershared/util/urlutil" ) type ContextClientSQLArgs struct{} @@ -41,8 +44,7 @@ func (w *ContextClientSQLWorker) Work(ctx context.Context, job *river.Job[Contex func ExampleClientFromContext_databaseSQL() { ctx := context.Background() - config := riverinternaltest.DatabaseConfig("river_test_example") - db, err := sql.Open("pgx", config.ConnString()) + db, err := sql.Open("pgx", urlutil.DatabaseSQLCompatibleURL(riversharedtest.TestDatabaseURL())) if err != nil { panic(err) } @@ -59,7 +61,8 @@ func ExampleClientFromContext_databaseSQL() { }, FetchCooldown: 10 * time.Millisecond, FetchPollInterval: 10 * time.Millisecond, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverdatabasesql.New(db), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_client_from_context_test.go b/example_client_from_context_test.go index b5979c15..e4de2045 100644 --- a/example_client_from_context_test.go +++ b/example_client_from_context_test.go @@ -10,9 +10,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) type ContextClientArgs struct{} @@ -40,17 +42,12 @@ func (w *ContextClientWorker) Work(ctx context.Context, job *river.Job[ContextCl func ExampleClientFromContext_pgx() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. 
- if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &ContextClientWorker{}) @@ -60,7 +57,8 @@ func ExampleClientFromContext_pgx() { Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 10}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_complete_job_within_tx_test.go b/example_complete_job_within_tx_test.go index d3ed1b60..093280de 100644 --- a/example_complete_job_within_tx_test.go +++ b/example_complete_job_within_tx_test.go @@ -8,9 +8,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) type TransactionalArgs struct{} @@ -58,27 +60,22 @@ func (w *TransactionalWorker) Work(ctx context.Context, job *river.Job[Transacti func Example_completeJobWithinTx() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. - if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &TransactionalWorker{dbPool: dbPool}) - river.AddWorker(workers, &SortWorker{}) riverClient, err := river.NewClient(riverpgxv5.New(dbPool), &river.Config{ Logger: slog.New(&slogutil.SlogMessageOnlyHandler{Level: slog.LevelWarn}), Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 100}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_cron_job_test.go b/example_cron_job_test.go index 79136558..fd62102a 100644 --- a/example_cron_job_test.go +++ b/example_cron_job_test.go @@ -9,9 +9,11 @@ import ( "github.com/robfig/cron/v3" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) type CronJobArgs struct{} @@ -35,17 +37,12 @@ func (w *CronJobWorker) Work(ctx context.Context, job *river.Job[CronJobArgs]) e func Example_cronJob() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the 
purpose of this test, but not necessary in real usage. - if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &CronJobWorker{}) @@ -68,7 +65,8 @@ func Example_cronJob() { Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 100}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_custom_insert_opts_test.go b/example_custom_insert_opts_test.go index 893a883e..20151a60 100644 --- a/example_custom_insert_opts_test.go +++ b/example_custom_insert_opts_test.go @@ -8,9 +8,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) type AlwaysHighPriorityArgs struct{} @@ -56,17 +58,12 @@ func (w *SometimesHighPriorityWorker) Work(ctx context.Context, job *river.Job[S func Example_customInsertOpts() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. 
- if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &AlwaysHighPriorityWorker{}) river.AddWorker(workers, &SometimesHighPriorityWorker{}) @@ -77,7 +74,8 @@ func Example_customInsertOpts() { river.QueueDefault: {MaxWorkers: 100}, "high_priority": {MaxWorkers: 100}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_error_handler_test.go b/example_error_handler_test.go index dc330abc..b9a27b34 100644 --- a/example_error_handler_test.go +++ b/example_error_handler_test.go @@ -9,9 +9,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" "github.com/riverqueue/river/rivertype" ) @@ -61,17 +63,12 @@ func (w *ErroringWorker) Work(ctx context.Context, job *river.Job[ErroringArgs]) func Example_errorHandler() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. - if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &ErroringWorker{}) @@ -81,7 +78,8 @@ func Example_errorHandler() { Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 10}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_global_hooks_test.go b/example_global_hooks_test.go index 980ba3f0..8470d995 100644 --- a/example_global_hooks_test.go +++ b/example_global_hooks_test.go @@ -8,9 +8,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" "github.com/riverqueue/river/rivertype" ) @@ -55,17 +57,12 @@ var ( func Example_globalHooks() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. 
- if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &NoOpWorker{}) @@ -80,7 +77,8 @@ func Example_globalHooks() { Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 100}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_global_middleware_test.go b/example_global_middleware_test.go index 7b3d14fb..3f8f543c 100644 --- a/example_global_middleware_test.go +++ b/example_global_middleware_test.go @@ -8,9 +8,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" "github.com/riverqueue/river/rivertype" ) @@ -55,17 +57,12 @@ var ( func Example_globalMiddleware() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. - if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &NoOpWorker{}) @@ -80,7 +77,8 @@ func Example_globalMiddleware() { Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 100}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_graceful_shutdown_test.go b/example_graceful_shutdown_test.go index d55c003e..f9e77983 100644 --- a/example_graceful_shutdown_test.go +++ b/example_graceful_shutdown_test.go @@ -13,9 +13,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) type WaitsForCancelOnlyArgs struct{} @@ -51,19 +53,14 @@ func (w *WaitsForCancelOnlyWorker) Work(ctx context.Context, job *river.Job[Wait func Example_gracefulShutdown() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + jobStarted := make(chan struct{}) + + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. 
- if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - - jobStarted := make(chan struct{}) - workers := river.NewWorkers() river.AddWorker(workers, &WaitsForCancelOnlyWorker{jobStarted: jobStarted}) @@ -72,7 +69,8 @@ func Example_gracefulShutdown() { Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 100}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_insert_and_work_test.go b/example_insert_and_work_test.go index 6c89e9b1..904142ef 100644 --- a/example_insert_and_work_test.go +++ b/example_insert_and_work_test.go @@ -9,9 +9,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) type SortArgs struct { @@ -36,17 +38,12 @@ func (w *SortWorker) Work(ctx context.Context, job *river.Job[SortArgs]) error { func Example_insertAndWork() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. - if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &SortWorker{}) @@ -55,7 +52,8 @@ func Example_insertAndWork() { Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 100}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_job_args_hooks_test.go b/example_job_args_hooks_test.go index 8ff0277a..00251ca2 100644 --- a/example_job_args_hooks_test.go +++ b/example_job_args_hooks_test.go @@ -8,9 +8,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" "github.com/riverqueue/river/rivertype" ) @@ -79,17 +81,12 @@ var ( func Example_jobArgsHooks() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. 
- if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &JobWithHooksWorker{}) @@ -98,7 +95,8 @@ func Example_jobArgsHooks() { Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 100}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_job_cancel_from_client_test.go b/example_job_cancel_from_client_test.go index 938eb24b..2a12c671 100644 --- a/example_job_cancel_from_client_test.go +++ b/example_job_cancel_from_client_test.go @@ -9,9 +9,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) type SleepingArgs struct{} @@ -38,17 +40,12 @@ func (w *SleepingWorker) Work(ctx context.Context, job *river.Job[CancellingArgs func Example_jobCancelFromClient() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. - if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - jobChan := make(chan int64) workers := river.NewWorkers() @@ -59,7 +56,8 @@ func Example_jobCancelFromClient() { Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 10}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_job_cancel_test.go b/example_job_cancel_test.go index 953472d5..05474d58 100644 --- a/example_job_cancel_test.go +++ b/example_job_cancel_test.go @@ -9,9 +9,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) type CancellingArgs struct { @@ -37,17 +39,12 @@ func (w *CancellingWorker) Work(ctx context.Context, job *river.Job[CancellingAr func Example_jobCancel() { //nolint:dupl ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. 
- if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &CancellingWorker{}) @@ -56,7 +53,8 @@ func Example_jobCancel() { //nolint:dupl Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 10}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_job_snooze_test.go b/example_job_snooze_test.go index cacba61d..f14537dc 100644 --- a/example_job_snooze_test.go +++ b/example_job_snooze_test.go @@ -9,9 +9,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) type SnoozingArgs struct { @@ -39,17 +41,12 @@ func (w *SnoozingWorker) Work(ctx context.Context, job *river.Job[SnoozingArgs]) func Example_jobSnooze() { //nolint:dupl ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. - if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &SnoozingWorker{}) @@ -58,7 +55,8 @@ func Example_jobSnooze() { //nolint:dupl Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 10}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_periodic_job_test.go b/example_periodic_job_test.go index f4e6578b..3876170e 100644 --- a/example_periodic_job_test.go +++ b/example_periodic_job_test.go @@ -9,9 +9,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) type PeriodicJobArgs struct{} @@ -33,17 +35,12 @@ func (w *PeriodicJobWorker) Work(ctx context.Context, job *river.Job[PeriodicJob func Example_periodicJob() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. 
- if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &PeriodicJobWorker{}) @@ -61,7 +58,8 @@ func Example_periodicJob() { Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 100}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_queue_pause_test.go b/example_queue_pause_test.go index fd5b9b5b..22bf216b 100644 --- a/example_queue_pause_test.go +++ b/example_queue_pause_test.go @@ -9,9 +9,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) type ReportingArgs struct{} @@ -37,17 +39,12 @@ func (w *ReportingWorker) Work(ctx context.Context, job *river.Job[ReportingArgs func Example_queuePause() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. - if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - const ( unreliableQueue = "unreliable_external_service" reliableQueue = "reliable_jobs" @@ -63,7 +60,8 @@ func Example_queuePause() { unreliableQueue: {MaxWorkers: 10}, reliableQueue: {MaxWorkers: 10}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_scheduled_job_test.go b/example_scheduled_job_test.go index bb1b6737..ec43ba0b 100644 --- a/example_scheduled_job_test.go +++ b/example_scheduled_job_test.go @@ -9,9 +9,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) type ScheduledArgs struct { @@ -34,17 +36,12 @@ func (w *ScheduledWorker) Work(ctx context.Context, job *river.Job[ScheduledArgs func Example_scheduledJob() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. 
- if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &ScheduledWorker{}) @@ -53,7 +50,8 @@ func Example_scheduledJob() { Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 100}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_subscription_test.go b/example_subscription_test.go index 28cd651d..8e94436d 100644 --- a/example_subscription_test.go +++ b/example_subscription_test.go @@ -10,10 +10,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) type SubscriptionArgs struct { @@ -42,17 +43,12 @@ func (w *SubscriptionWorker) Work(ctx context.Context, job *river.Job[Subscripti func Example_subscription() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. - if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &SubscriptionWorker{}) @@ -61,7 +57,8 @@ func Example_subscription() { Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 100}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_unique_job_test.go b/example_unique_job_test.go index e28b8027..b454b7dd 100644 --- a/example_unique_job_test.go +++ b/example_unique_job_test.go @@ -9,9 +9,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) // Account represents a minimal account including recent expenditures and a @@ -64,17 +66,12 @@ func (w *ReconcileAccountWorker) Work(ctx context.Context, job *river.Job[Reconc func Example_uniqueJob() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. 
- if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &ReconcileAccountWorker{}) @@ -83,7 +80,8 @@ func Example_uniqueJob() { Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 100}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/example_work_func_test.go b/example_work_func_test.go index 413055ad..5bd4b81c 100644 --- a/example_work_func_test.go +++ b/example_work_func_test.go @@ -8,9 +8,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" ) type WorkFuncArgs struct { @@ -25,17 +27,12 @@ func (WorkFuncArgs) Kind() string { return "work_func" } func Example_workFunc() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. - if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, river.WorkFunc(func(ctx context.Context, job *river.Job[WorkFuncArgs]) error { fmt.Printf("Message: %s", job.Args.Message) @@ -47,7 +44,8 @@ func Example_workFunc() { Queues: map[string]river.QueueConfig{ river.QueueDefault: {MaxWorkers: 100}, }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/internal/cmd/testdbman/main.go b/internal/cmd/testdbman/main.go deleted file mode 100644 index e459a1da..00000000 --- a/internal/cmd/testdbman/main.go +++ /dev/null @@ -1,402 +0,0 @@ -// testdbman is a command-line tool for managing the test databases used by -// parallel tests and the sample applications. -package main - -import ( - "context" - "errors" - "flag" - "fmt" - "io" - "os" - "runtime" - "slices" - "strings" - "time" - - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" - - "github.com/riverqueue/river/riverdriver/riverpgxv5" - "github.com/riverqueue/river/rivermigrate" - "github.com/riverqueue/river/rivershared/util/maputil" -) - -func main() { - commandBundle := NewCommandBundle( - "testdbman", - "testdbman manages test databases", - ` -A small program to create and manage test databases. River currently requires a -number of different of test databases loaded with its schema for it to be able -to run the full test suite in parallel. - -Run "testdbman create" to raise all required test databases and prepare for a -test run. 
- `, - ) - - // create - { - commandBundle.AddCommand( - "create", - "Create test databases", - ` -Creates the test databases used by parallel tests and the sample applications. -Each is migrated with River's current schema. - -The sample application DB is named river_test, while the DBs for parallel -tests are named river_test_0, river_test_1, etc. up to the larger of 4 or -runtime.NumCPU() (a choice that comes from pgx's default connection pool size). -`, - createTestDatabases, - ) - } - - // drop - { - commandBundle.AddCommand( - "drop", - "Drop test databases", - ` -Drops all test databases. Any test database matching the base name -(river_test) or the base name with an underscore followed by any other token -(river_test_example, river_test_0, river_test_1, etc.) will be dropped. -`, - dropTestDatabases, - ) - } - - // reset - { - commandBundle.AddCommand( - "reset", - "Drop and recreate test databases", - ` -Reset the test databases, dropping the existing database(s) if they exist, and -recreating them with the most up to date schema. Equivalent to running "drop" -followed by "create". -`, - resetTestDatabases, - ) - } - - ctx := context.Background() - - if err := commandBundle.Exec(ctx, os.Args); err != nil { - fmt.Fprintf(os.Stderr, "failed: %s\n", err) - os.Exit(1) - } -} - -// -// Commands -// - -const managementDatabaseURL = "postgres:///postgres" - -// -// Helpers -// - -func createTestDatabases(ctx context.Context, out io.Writer) error { - mgmtConn, err := pgx.Connect(ctx, managementDatabaseURL) - if err != nil { - return fmt.Errorf("error opening management connection: %w", err) - } - defer mgmtConn.Close(ctx) - - createDBAndMigrate := func(dbName string) error { - if _, err := mgmtConn.Exec(ctx, "CREATE DATABASE "+dbName); err != nil { - return fmt.Errorf("error crating database %q: %w", dbName, err) - } - fmt.Fprintf(out, "created: %-20s", dbName) - - // Defer printing a newline, which will be either added to the end of a - // successful invocation of this command (after the string "[and - // migrated]" has been printed to the current line), or printed before - // returning an error so that in either case output looks right. 
- defer fmt.Fprintf(out, "\n") - - dbURL := "postgres:///" + dbName - - dbPool, err := pgxpool.New(ctx, dbURL) - if err != nil { - return fmt.Errorf("error creating connection pool to %q: %w", dbURL, err) - } - defer dbPool.Close() - - migrator, err := rivermigrate.New(riverpgxv5.New(dbPool), nil) - if err != nil { - return err - } - - if _, err = migrator.Migrate(ctx, rivermigrate.DirectionUp, &rivermigrate.MigrateOpts{}); err != nil { - return err - } - fmt.Fprintf(out, " [and migrated]") - - return nil - } - - // Allow up to one database per concurrent test, plus two for overhead: - maxTestDBs := runtime.GOMAXPROCS(0) + 2 - dbNames := generateTestDBNames(maxTestDBs) - for _, dbName := range dbNames { - if err := createDBAndMigrate(dbName); err != nil { - return err - } - } - - return nil -} - -func generateTestDBNames(numDBs int) []string { - dbNames := []string{ - "river_test", - "river_test_example", - } - - for i := range numDBs { - dbNames = append(dbNames, fmt.Sprintf("river_test_%d", i)) - } - - return dbNames -} - -func dropTestDatabases(ctx context.Context, out io.Writer) error { - mgmtConn, err := pgx.Connect(ctx, managementDatabaseURL) - if err != nil { - return fmt.Errorf("error opening management connection: %w", err) - } - defer mgmtConn.Close(ctx) - - rows, err := mgmtConn.Query(ctx, "SELECT datname FROM pg_database") - if err != nil { - return fmt.Errorf("error listing databases: %w", err) - } - defer rows.Close() - - allDBNames := make([]string, 0) - for rows.Next() { - var dbName string - err := rows.Scan(&dbName) - if err != nil { - return fmt.Errorf("error scanning database name: %w", err) - } - allDBNames = append(allDBNames, dbName) - } - rows.Close() - - for _, dbName := range allDBNames { - if strings.HasPrefix(dbName, "river_test") { - if _, err := mgmtConn.Exec(ctx, "DROP DATABASE "+dbName); err != nil { - return fmt.Errorf("error dropping database %q: %w", dbName, err) - } - fmt.Fprintf(out, "dropped: %s\n", dbName) - } - } - - return nil -} - -func resetTestDatabases(ctx context.Context, out io.Writer) error { - if err := dropTestDatabases(ctx, out); err != nil { - return err - } - - if err := createTestDatabases(ctx, out); err != nil { - return err - } - - return nil -} - -// -// Command bundle framework -// - -// CommandBundle is a basic CLI command framework similar to Cobra, but with far -// reduced capabilities. I know it seems crazy to write one when Cobra is -// available, but the test manager's interface is quite simple, and not using -// Cobra lets us drop its dependency in the main River package. 
-type CommandBundle struct { - commands map[string]*commandBundleCommand - long string - out io.Writer - short string - use string -} - -func NewCommandBundle(use, short, long string) *CommandBundle { - if use == "" { - panic("use is required") - } - if short == "" { - panic("short is required") - } - if long == "" { - panic("long is required") - } - - return &CommandBundle{ - commands: make(map[string]*commandBundleCommand), - long: long, - out: os.Stdout, - short: short, - use: use, - } -} - -func (b *CommandBundle) AddCommand(use, short, long string, execFunc func(ctx context.Context, out io.Writer) error) { - if use == "" { - panic("use is required") - } - if short == "" { - panic("short is required") - } - if long == "" { - panic("long is required") - } - if execFunc == nil { - panic("execFunc is required") - } - - if _, ok := b.commands[use]; ok { - panic("command already registered: " + use) - } - - b.commands[use] = &commandBundleCommand{ - execFunc: execFunc, - long: long, - short: short, - use: use, - } -} - -const helpUse = "help" - -func (b *CommandBundle) Exec(ctx context.Context, args []string) error { - ctx, cancel := context.WithTimeout(ctx, 30*time.Second) - defer cancel() - - var ( - flagSet flag.FlagSet - help bool - ) - flagSet.BoolVar(&help, "help", false, "help for program or command") - - args = args[1:] // drop program name - - var commandUse string - if len(args) > 0 && args[0][0] != '-' { - commandUse = args[0] - args = args[1:] - } - - if err := flagSet.Parse(args); err != nil { - return err - } - - args = flagSet.Args() - - // Try extracting a command again after flags are parsed and we didn't get - // one on the first pass. - if commandUse == "" && len(args) > 0 { - commandUse = args[0] - args = args[1:] - } - - if commandUse != "" && commandUse != helpUse && len(args) > 0 || len(args) > 1 { - return errors.New("expected exactly one command") - } - - if commandUse == "" || commandUse == helpUse && len(args) < 1 { - fmt.Fprintf(b.out, "%s\n", b.usage(&flagSet)) - return nil - } - - if commandUse == "help" { - commandUse = args[0] - help = true - } - - command, ok := b.commands[commandUse] - if !ok { - return errors.New("unknown command: " + commandUse) - } - - if help { - fmt.Fprintf(b.out, "%s\n", command.printUsage(b.use, &flagSet)) - return nil - } - - return command.execFunc(ctx, b.out) -} - -func (b *CommandBundle) usage(flagSet *flag.FlagSet) string { - var sb strings.Builder - - sb.WriteString(fmt.Sprintf(` -%s - -Usage: - %s [command] [flags] - -Available Commands: -`, strings.TrimSpace(b.long), b.use)) - - var longestUse int - for use := range b.commands { - if len(use) > longestUse { - longestUse = len(use) - } - } - - // Go's maps are unordered of course. Alphabetize. - sortedCommandUses := maputil.Keys(b.commands) - slices.Sort(sortedCommandUses) - - for _, use := range sortedCommandUses { - command := b.commands[use] - sb.WriteString(fmt.Sprintf(" %-*s %s\n", longestUse, use, command.short)) - } - - sb.WriteString("\nFlags:\n") - flagSet.SetOutput(&sb) - flagSet.PrintDefaults() - - sb.WriteString(fmt.Sprintf(` -Use "%s [command] -help" for more information about a command. - `, b.use)) - - // Go's flag module loves tabs of course. Kill them in favor of spaces, - // which are easier to test against. 
- return strings.TrimSpace(strings.ReplaceAll(sb.String(), "\t", " ")) -} - -type commandBundleCommand struct { - execFunc func(ctx context.Context, out io.Writer) error - long string - short string - use string -} - -func (b *commandBundleCommand) printUsage(bundleUse string, flagSet *flag.FlagSet) string { - var sb strings.Builder - - sb.WriteString(fmt.Sprintf(` -%s - -Usage: - %s %s [flags] -`, strings.TrimSpace(b.long), bundleUse, b.use)) - - sb.WriteString("\nFlags:\n") - flagSet.SetOutput(&sb) - flagSet.PrintDefaults() - - // Go's flag module loves tabs of course. Kill them in favor of spaces, - // which are easier to test against. - return strings.TrimSpace(strings.ReplaceAll(sb.String(), "\t", " ")) -} diff --git a/internal/cmd/testdbman/main_test.go b/internal/cmd/testdbman/main_test.go deleted file mode 100644 index 1bdb1f57..00000000 --- a/internal/cmd/testdbman/main_test.go +++ /dev/null @@ -1,268 +0,0 @@ -package main - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "strings" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestGenerateTestDBNames(t *testing.T) { - t.Parallel() - - require.Equal(t, []string{ - "river_test", - "river_test_example", - "river_test_0", - "river_test_1", - "river_test_2", - "river_test_3", - }, generateTestDBNames(4)) -} - -// -// Command bundle framework -// - -func TestCommandBundle(t *testing.T) { - t.Parallel() - - var ( - baseArgs = []string{"fake-program-name"} // os args always include a program name - ctx = context.Background() - ) - - type testBundle struct { - buf *bytes.Buffer - command1Invoked *bool - command2Invoked *bool - } - - setup := func() (*CommandBundle, *testBundle) { - commandBundle := NewCommandBundle( - "testcom", - "testcom is a test bundle for use tests", - ` -A test only program for testing the command bundle framework, especially some -complexities around how it emits output. This is the long description and is -meant to be wrapped at 80 characters in your editor. - -It may be multiple paragraphs. This is a second paragraph with additional -information and context. - `, - ) - - var command1Invoked bool - { - commandBundle.AddCommand( - "command1", - "The program's first command", - ` -This is a long description for the program's first command. It acts somewhat -like a mock, setting a boolean to true that we can easily check in tests in case -the program makes the decision to invoke it. -`, - func(ctx context.Context, out io.Writer) error { - fmt.Fprintf(out, "command1 executed\n") - command1Invoked = true - return nil - }, - ) - } - - var command2Invoked bool - { - commandBundle.AddCommand( - "command2", - "The program's second command", - ` -This is a long description for the program's second command. It's the same as -the first command, and acts somewhat like a mock, setting a boolean to true that -we can easily check in tests in case the program makes the decision to invoke -it. -`, - func(ctx context.Context, out io.Writer) error { - fmt.Fprintf(out, "command2 executed\n") - command2Invoked = true - return nil - }, - ) - } - { - commandBundle.AddCommand( - "makeerror", - "A command that returns an error", - ` -The long description for a command that returns an error that we can check -against in tests to make sure that piece of the puzzle works as expected. 
-`, - func(ctx context.Context, out io.Writer) error { - fmt.Fprintf(out, "makeerror executed\n") - return errors.New("command error!") - }, - ) - } - - var buf bytes.Buffer - commandBundle.out = &buf - - return commandBundle, &testBundle{ - buf: &buf, - command1Invoked: &command1Invoked, - command2Invoked: &command2Invoked, - } - } - - expectedCommandBundleUsage := strings.TrimSpace(` -A test only program for testing the command bundle framework, especially some -complexities around how it emits output. This is the long description and is -meant to be wrapped at 80 characters in your editor. - -It may be multiple paragraphs. This is a second paragraph with additional -information and context. - -Usage: - testcom [command] [flags] - -Available Commands: - command1 The program's first command - command2 The program's second command - makeerror A command that returns an error - -Flags: - -help - help for program or command - -Use "testcom [command] -help" for more information about a command. - `) + "\n" - - t.Run("ShowsUsageWithHelpArgument", func(t *testing.T) { - t.Parallel() - - commandBundle, bundle := setup() - - require.NoError(t, commandBundle.Exec(ctx, append(baseArgs, "-help"))) - - require.Equal(t, expectedCommandBundleUsage, bundle.buf.String()) - }) - - t.Run("ShowsUsageWithHelpCommand", func(t *testing.T) { - t.Parallel() - - commandBundle, bundle := setup() - - require.NoError(t, commandBundle.Exec(ctx, append(baseArgs, "help"))) - - require.Equal(t, expectedCommandBundleUsage, bundle.buf.String()) - }) - - t.Run("ShowsUsageWithNoArguments", func(t *testing.T) { - t.Parallel() - - commandBundle, bundle := setup() - - require.NoError(t, commandBundle.Exec(ctx, baseArgs)) - - require.Equal(t, expectedCommandBundleUsage, bundle.buf.String()) - }) - - expectedCommand1Usage := strings.TrimSpace(` -This is a long description for the program's first command. It acts somewhat -like a mock, setting a boolean to true that we can easily check in tests in case -the program makes the decision to invoke it. 
- -Usage: - testcom command1 [flags] - -Flags: - -help - help for program or command - `) + "\n" - - t.Run("ShowsCommandUsageWithHelpArgument", func(t *testing.T) { - t.Parallel() - - commandBundle, bundle := setup() - - require.NoError(t, commandBundle.Exec(ctx, append(baseArgs, "command1", "-help"))) - - require.Equal(t, expectedCommand1Usage, bundle.buf.String()) - }) - - t.Run("ShowsCommandUsageWithHelpCommand", func(t *testing.T) { - t.Parallel() - - commandBundle, bundle := setup() - - require.NoError(t, commandBundle.Exec(ctx, append(baseArgs, "help", "command1"))) - - require.Equal(t, expectedCommand1Usage, bundle.buf.String()) - }) - - t.Run("ShowsCommandUsageWithMisorderedHelpArgument", func(t *testing.T) { - t.Parallel() - - commandBundle, bundle := setup() - - require.NoError(t, commandBundle.Exec(ctx, append(baseArgs, "-help", "command1"))) - - require.Equal(t, expectedCommand1Usage, bundle.buf.String()) - }) - - t.Run("ErrorsOnTooManyArguments", func(t *testing.T) { - t.Parallel() - - commandBundle, _ := setup() - - require.EqualError(t, commandBundle.Exec(ctx, append(baseArgs, "command1", "command2")), - "expected exactly one command") - }) - - t.Run("ErrorsOnUnknownCommand", func(t *testing.T) { - t.Parallel() - - commandBundle, _ := setup() - - require.EqualError(t, commandBundle.Exec(ctx, append(baseArgs, "command3")), - "unknown command: command3") - }) - - t.Run("RunsCommand", func(t *testing.T) { - t.Parallel() - - commandBundle, bundle := setup() - - require.NoError(t, commandBundle.Exec(ctx, append(baseArgs, "command1"))) - require.True(t, *bundle.command1Invoked) - require.False(t, *bundle.command2Invoked) - - require.Equal(t, "command1 executed\n", bundle.buf.String()) - }) - - t.Run("DisambiguatesCommands", func(t *testing.T) { - t.Parallel() - - commandBundle, bundle := setup() - - require.NoError(t, commandBundle.Exec(ctx, append(baseArgs, "command2"))) - require.False(t, *bundle.command1Invoked) - require.True(t, *bundle.command2Invoked) - - require.Equal(t, "command2 executed\n", bundle.buf.String()) - }) - - t.Run("ReturnsErrorFromCommand", func(t *testing.T) { - t.Parallel() - - commandBundle, bundle := setup() - - require.EqualError(t, commandBundle.Exec(ctx, append(baseArgs, "makeerror")), "command error!") - - require.Equal(t, "makeerror executed\n", bundle.buf.String()) - }) -} diff --git a/internal/dblist/db_list.go b/internal/dblist/db_list.go index 9498a86d..594eff4e 100644 --- a/internal/dblist/db_list.go +++ b/internal/dblist/db_list.go @@ -31,6 +31,7 @@ type JobListParams struct { OrderBy []JobListOrderBy Priorities []int16 Queues []string + Schema string States []rivertype.JobState } @@ -72,7 +73,11 @@ func JobList(ctx context.Context, exec riverdriver.Executor, params *JobListPara if len(params.States) > 0 { writeAndAfterFirst() - whereBuilder.WriteString("state = any(@states::river_job_state[])") + var maybeSchema string + if params.Schema != "" { + maybeSchema = params.Schema + "." 
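+			// e.g. with params.Schema set to "custom_schema", the cast below becomes
+			// `@states::custom_schema.river_job_state[]`; with an empty Schema it
+			// stays `@states::river_job_state[]` and resolves through `search_path`.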
+ } + whereBuilder.WriteString("state = any(@states::" + maybeSchema + "river_job_state[])") namedArgs["states"] = sliceutil.Map(params.States, func(s rivertype.JobState) string { return string(s) }) } @@ -116,6 +121,7 @@ func JobList(ctx context.Context, exec riverdriver.Executor, params *JobListPara Max: params.LimitCount, NamedArgs: namedArgs, OrderByClause: orderByBuilder.String(), + Schema: params.Schema, WhereClause: whereBuilder.String(), }) } diff --git a/internal/dblist/db_list_test.go b/internal/dblist/db_list_test.go index 53b0c780..1ce3a2b2 100644 --- a/internal/dblist/db_list_test.go +++ b/internal/dblist/db_list_test.go @@ -8,7 +8,7 @@ import ( "github.com/jackc/pgx/v5" "github.com/stretchr/testify/require" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivershared/testfactory" @@ -30,7 +30,7 @@ func TestJobListNoJobs(t *testing.T) { driver := riverpgxv5.New(nil) return &testBundle{ - exec: driver.UnwrapExecutor(riverinternaltest.TestTx(ctx, t)), + exec: driver.UnwrapExecutor(riverdbtest.TestTxPgx(ctx, t)), } } @@ -80,7 +80,7 @@ func TestJobListWithJobs(t *testing.T) { var ( driver = riverpgxv5.New(nil) - tx = riverinternaltest.TestTx(ctx, t) + tx = riverdbtest.TestTxPgx(ctx, t) exec = driver.UnwrapExecutor(tx) ) diff --git a/internal/jobcompleter/job_completer.go b/internal/jobcompleter/job_completer.go index 43dadace..b4f4d527 100644 --- a/internal/jobcompleter/job_completer.go +++ b/internal/jobcompleter/job_completer.go @@ -53,6 +53,7 @@ type InlineCompleter struct { disableSleep bool // disable sleep in testing exec riverdriver.Executor pilot riverpilot.Pilot + schema string subscribeCh SubscribeChan // A waitgroup is not actually needed for the inline completer because as @@ -63,10 +64,11 @@ type InlineCompleter struct { wg sync.WaitGroup } -func NewInlineCompleter(archetype *baseservice.Archetype, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh SubscribeChan) *InlineCompleter { +func NewInlineCompleter(archetype *baseservice.Archetype, schema string, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh SubscribeChan) *InlineCompleter { return baseservice.Init(archetype, &InlineCompleter{ exec: exec, pilot: pilot, + schema: schema, subscribeCh: subscribeCh, }) } @@ -84,7 +86,7 @@ func (c *InlineCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobst } defer execTx.Rollback(ctx) - jobs, err := c.pilot.JobSetStateIfRunningMany(ctx, execTx, setStateParamsToMany(params)) + jobs, err := c.pilot.JobSetStateIfRunningMany(ctx, execTx, setStateParamsToMany(c.schema, params)) if err != nil { return nil, err } @@ -131,7 +133,7 @@ func (c *InlineCompleter) Start(ctx context.Context) error { return nil } -func setStateParamsToMany(params *riverdriver.JobSetStateIfRunningParams) *riverdriver.JobSetStateIfRunningManyParams { +func setStateParamsToMany(schema string, params *riverdriver.JobSetStateIfRunningParams) *riverdriver.JobSetStateIfRunningManyParams { return &riverdriver.JobSetStateIfRunningManyParams{ Attempt: []*int{params.Attempt}, ErrData: [][]byte{params.ErrData}, @@ -140,6 +142,7 @@ func setStateParamsToMany(params *riverdriver.JobSetStateIfRunningParams) *river MetadataDoMerge: []bool{params.MetadataDoMerge}, MetadataUpdates: [][]byte{params.MetadataUpdates}, ScheduledAt: []*time.Time{params.ScheduledAt}, + Schema: schema, State: 
[]rivertype.JobState{params.State}, } } @@ -160,14 +163,15 @@ type AsyncCompleter struct { errGroup *errgroup.Group exec riverdriver.Executor pilot riverpilot.Pilot + schema string subscribeCh SubscribeChan } -func NewAsyncCompleter(archetype *baseservice.Archetype, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh SubscribeChan) *AsyncCompleter { - return newAsyncCompleterWithConcurrency(archetype, exec, pilot, asyncCompleterDefaultConcurrency, subscribeCh) +func NewAsyncCompleter(archetype *baseservice.Archetype, schema string, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh SubscribeChan) *AsyncCompleter { + return newAsyncCompleterWithConcurrency(archetype, schema, exec, pilot, asyncCompleterDefaultConcurrency, subscribeCh) } -func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, exec riverdriver.Executor, pilot riverpilot.Pilot, concurrency int, subscribeCh SubscribeChan) *AsyncCompleter { +func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, schema string, exec riverdriver.Executor, pilot riverpilot.Pilot, concurrency int, subscribeCh SubscribeChan) *AsyncCompleter { errGroup := &errgroup.Group{} errGroup.SetLimit(concurrency) @@ -176,6 +180,7 @@ func newAsyncCompleterWithConcurrency(archetype *baseservice.Archetype, exec riv errGroup: errGroup, exec: exec, pilot: pilot, + schema: schema, subscribeCh: subscribeCh, }) } @@ -193,7 +198,7 @@ func (c *AsyncCompleter) JobSetStateIfRunning(ctx context.Context, stats *jobsta } defer execTx.Rollback(ctx) - rows, err := c.pilot.JobSetStateIfRunningMany(ctx, execTx, setStateParamsToMany(params)) + rows, err := c.pilot.JobSetStateIfRunningMany(ctx, execTx, setStateParamsToMany(c.schema, params)) if err != nil { return nil, err } @@ -264,6 +269,7 @@ type BatchCompleter struct { maxBacklog int // configurable for testing purposes; max backlog allowed before no more completions accepted exec riverdriver.Executor pilot riverpilot.Pilot + schema string setStateParams map[int64]*batchCompleterSetState setStateParamsMu sync.RWMutex setStateStartTimes map[int64]time.Time @@ -272,7 +278,7 @@ type BatchCompleter struct { waitOnBacklogWaiting bool } -func NewBatchCompleter(archetype *baseservice.Archetype, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh SubscribeChan) *BatchCompleter { +func NewBatchCompleter(archetype *baseservice.Archetype, schema string, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh SubscribeChan) *BatchCompleter { const ( completionMaxSize = 5_000 maxBacklog = 20_000 @@ -283,6 +289,7 @@ func NewBatchCompleter(archetype *baseservice.Archetype, exec riverdriver.Execut exec: exec, maxBacklog: maxBacklog, pilot: pilot, + schema: schema, setStateParams: make(map[int64]*batchCompleterSetState), setStateStartTimes: make(map[int64]time.Time), subscribeCh: subscribeCh, @@ -440,6 +447,7 @@ func (c *BatchCompleter) handleBatch(ctx context.Context) error { params.MetadataDoMerge[i] = setState.Params.MetadataDoMerge params.MetadataUpdates[i] = setState.Params.MetadataUpdates params.ScheduledAt[i] = setState.Params.ScheduledAt + params.Schema = c.schema params.State[i] = setState.Params.State i++ } @@ -468,6 +476,7 @@ func (c *BatchCompleter) handleBatch(ctx context.Context) error { MetadataDoMerge: params.MetadataDoMerge[i:endIndex], MetadataUpdates: params.MetadataUpdates[i:endIndex], ScheduledAt: params.ScheduledAt[i:endIndex], + Schema: params.Schema, State: params.State[i:endIndex], } jobRowsSubBatch, err := completeSubBatch(subBatch) diff --git 
a/internal/jobcompleter/job_completer_test.go b/internal/jobcompleter/job_completer_test.go index b0426e6b..09451bcb 100644 --- a/internal/jobcompleter/job_completer_test.go +++ b/internal/jobcompleter/job_completer_test.go @@ -15,6 +15,7 @@ import ( "github.com/riverqueue/river/internal/jobstats" "github.com/riverqueue/river/internal/rivercommon" "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivershared/riverpilot" @@ -75,7 +76,7 @@ func TestInlineJobCompleter_Complete(t *testing.T) { ctx := context.Background() var ( - tx = riverinternaltest.TestTx(ctx, t) + tx = riverdbtest.TestTxPgx(ctx, t) driver = riverpgxv5.New(nil) exec = driver.UnwrapExecutor(tx) execMock = NewPartialExecutorMock(exec) @@ -94,7 +95,7 @@ func TestInlineJobCompleter_Complete(t *testing.T) { subscribeCh := make(chan []CompleterJobUpdated, 10) t.Cleanup(riverinternaltest.DiscardContinuously(subscribeCh)) - completer := NewInlineCompleter(riversharedtest.BaseServiceArchetype(t), execMock, &riverpilot.StandardPilot{}, subscribeCh) + completer := NewInlineCompleter(riversharedtest.BaseServiceArchetype(t), "", execMock, &riverpilot.StandardPilot{}, subscribeCh) t.Cleanup(completer.Stop) completer.disableSleep = true @@ -110,19 +111,22 @@ func TestInlineJobCompleter_Complete(t *testing.T) { func TestInlineJobCompleter_Subscribe(t *testing.T) { t.Parallel() - testCompleterSubscribe(t, func(exec riverdriver.Executor, subscribeCh SubscribeChan) JobCompleter { - return NewInlineCompleter(riversharedtest.BaseServiceArchetype(t), exec, &riverpilot.StandardPilot{}, subscribeCh) + testCompleterSubscribe(t, func(schema string, exec riverdriver.Executor, subscribeChan SubscribeChan) JobCompleter { + return NewInlineCompleter(riversharedtest.BaseServiceArchetype(t), "", exec, &riverpilot.StandardPilot{}, subscribeChan) }) } func TestInlineJobCompleter_Wait(t *testing.T) { t.Parallel() - testCompleterWait(t, func(exec riverdriver.Executor, subscribeChan SubscribeChan) JobCompleter { - return NewInlineCompleter(riversharedtest.BaseServiceArchetype(t), exec, &riverpilot.StandardPilot{}, subscribeChan) + testCompleterWait(t, func(schema string, exec riverdriver.Executor, subscribeChan SubscribeChan) JobCompleter { + return NewInlineCompleter(riversharedtest.BaseServiceArchetype(t), "", exec, &riverpilot.StandardPilot{}, subscribeChan) }) } +// TODO: Can we get rid of this test? It's pretty slow and it's not clear that +// it's testing anything particularly useful compared to the more thorough +// completer tests below. 
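+
+// Illustrative aside (a rough usage sketch rather than code from these
+// tests): every completer constructor now takes the schema up front, e.g.
+//
+//	completer := NewBatchCompleter(archetype, schema, exec, pilot, subscribeCh)
+//
+// where an empty schema falls back to resolving tables via `search_path`.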
func TestAsyncJobCompleter_Complete(t *testing.T) { t.Parallel() @@ -145,8 +149,9 @@ func TestAsyncJobCompleter_Complete(t *testing.T) { }() var ( - db = riverinternaltest.TestDB(ctx, t) - driver = riverpgxv5.New(db) + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) execMock = NewPartialExecutorMock(driver.GetExecutor()) ) @@ -160,7 +165,7 @@ func TestAsyncJobCompleter_Complete(t *testing.T) { return []*rivertype.JobRow{{ID: params.ID[0], State: params.State[0]}}, nil } subscribeChan := make(chan []CompleterJobUpdated, 10) - completer := newAsyncCompleterWithConcurrency(riversharedtest.BaseServiceArchetype(t), execMock, &riverpilot.StandardPilot{}, 2, subscribeChan) + completer := newAsyncCompleterWithConcurrency(riversharedtest.BaseServiceArchetype(t), schema, execMock, &riverpilot.StandardPilot{}, 2, subscribeChan) completer.disableSleep = true require.NoError(t, completer.Start(ctx)) t.Cleanup(completer.Stop) @@ -224,27 +229,28 @@ func TestAsyncJobCompleter_Complete(t *testing.T) { func TestAsyncJobCompleter_Subscribe(t *testing.T) { t.Parallel() - testCompleterSubscribe(t, func(exec riverdriver.Executor, subscribeCh SubscribeChan) JobCompleter { - return newAsyncCompleterWithConcurrency(riversharedtest.BaseServiceArchetype(t), exec, &riverpilot.StandardPilot{}, 4, subscribeCh) + testCompleterSubscribe(t, func(schema string, exec riverdriver.Executor, subscribeChan SubscribeChan) JobCompleter { + return newAsyncCompleterWithConcurrency(riversharedtest.BaseServiceArchetype(t), schema, exec, &riverpilot.StandardPilot{}, 4, subscribeChan) }) } func TestAsyncJobCompleter_Wait(t *testing.T) { t.Parallel() - testCompleterWait(t, func(exec riverdriver.Executor, subscribeCh SubscribeChan) JobCompleter { - return newAsyncCompleterWithConcurrency(riversharedtest.BaseServiceArchetype(t), exec, &riverpilot.StandardPilot{}, 4, subscribeCh) + testCompleterWait(t, func(schema string, exec riverdriver.Executor, subscribeChan SubscribeChan) JobCompleter { + return newAsyncCompleterWithConcurrency(riversharedtest.BaseServiceArchetype(t), schema, exec, &riverpilot.StandardPilot{}, 4, subscribeChan) }) } -func testCompleterSubscribe(t *testing.T, constructor func(riverdriver.Executor, SubscribeChan) JobCompleter) { +func testCompleterSubscribe(t *testing.T, constructor func(schema string, exec riverdriver.Executor, subscribeCh SubscribeChan) JobCompleter) { t.Helper() ctx := context.Background() var ( - db = riverinternaltest.TestDB(ctx, t) - driver = riverpgxv5.New(db) + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) execMock = NewPartialExecutorMock(driver.GetExecutor()) ) execMock.JobSetStateIfRunningManyFunc = func(ctx context.Context, params *riverdriver.JobSetStateIfRunningManyParams) ([]*rivertype.JobRow, error) { @@ -252,7 +258,7 @@ func testCompleterSubscribe(t *testing.T, constructor func(riverdriver.Executor, } subscribeChan := make(chan []CompleterJobUpdated, 10) - completer := constructor(execMock, subscribeChan) + completer := constructor(schema, execMock, subscribeChan) require.NoError(t, completer.Start(ctx)) // Flatten the slice results from subscribeChan into jobUpdateChan: @@ -282,14 +288,15 @@ func testCompleterSubscribe(t *testing.T, constructor func(riverdriver.Executor, } } -func testCompleterWait(t *testing.T, constructor func(riverdriver.Executor, SubscribeChan) JobCompleter) { +func testCompleterWait(t *testing.T, 
constructor func(schema string, exec riverdriver.Executor, subscribeChan SubscribeChan) JobCompleter) { t.Helper() ctx := context.Background() var ( - db = riverinternaltest.TestDB(ctx, t) - driver = riverpgxv5.New(db) + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) execMock = NewPartialExecutorMock(driver.GetExecutor()) ) @@ -306,7 +313,7 @@ func testCompleterWait(t *testing.T, constructor func(riverdriver.Executor, Subs } subscribeCh := make(chan []CompleterJobUpdated, 100) - completer := constructor(execMock, subscribeCh) + completer := constructor(schema, execMock, subscribeCh) require.NoError(t, completer.Start(ctx)) // launch 4 completions: @@ -355,11 +362,12 @@ func testCompleterWait(t *testing.T, constructor func(riverdriver.Executor, Subs func TestAsyncCompleter(t *testing.T) { t.Parallel() - testCompleter(t, func(t *testing.T, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh chan<- []CompleterJobUpdated) *AsyncCompleter { + testCompleter(t, func(t *testing.T, schema string, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeChan chan<- []CompleterJobUpdated) *AsyncCompleter { t.Helper() - return NewAsyncCompleter(riversharedtest.BaseServiceArchetype(t), exec, pilot, subscribeCh) + return NewAsyncCompleter(riversharedtest.BaseServiceArchetype(t), schema, exec, pilot, subscribeChan) }, func(completer *AsyncCompleter) { completer.disableSleep = true }, + 100, func(completer *AsyncCompleter, exec riverdriver.Executor) { completer.exec = exec }, ) } @@ -367,11 +375,12 @@ func TestAsyncCompleter(t *testing.T) { func TestBatchCompleter(t *testing.T) { t.Parallel() - testCompleter(t, func(t *testing.T, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh chan<- []CompleterJobUpdated) *BatchCompleter { + testCompleter(t, func(t *testing.T, schema string, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeChan chan<- []CompleterJobUpdated) *BatchCompleter { t.Helper() - return NewBatchCompleter(riversharedtest.BaseServiceArchetype(t), exec, pilot, subscribeCh) + return NewBatchCompleter(riversharedtest.BaseServiceArchetype(t), schema, exec, pilot, subscribeChan) }, func(completer *BatchCompleter) { completer.disableSleep = true }, + 4_400, func(completer *BatchCompleter, exec riverdriver.Executor) { completer.exec = exec }, ) @@ -379,6 +388,7 @@ func TestBatchCompleter(t *testing.T) { type testBundle struct { exec riverdriver.Executor + schema string subscribeCh <-chan []CompleterJobUpdated } @@ -386,29 +396,37 @@ func TestBatchCompleter(t *testing.T) { t.Helper() var ( - driver = riverpgxv5.New(riverinternaltest.TestDB(ctx, t)) + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) exec = driver.GetExecutor() pilot = &riverpilot.StandardPilot{} subscribeCh = make(chan []CompleterJobUpdated, 10) - completer = NewBatchCompleter(riversharedtest.BaseServiceArchetype(t), exec, pilot, subscribeCh) + completer = NewBatchCompleter(riversharedtest.BaseServiceArchetype(t), schema, exec, pilot, subscribeCh) ) - require.NoError(t, completer.Start(ctx)) - t.Cleanup(completer.Stop) - - riversharedtest.WaitOrTimeout(t, completer.Started()) - return completer, &testBundle{ exec: exec, + schema: schema, subscribeCh: subscribeCh, } } + startCompleter := func(ctx context.Context, t *testing.T, completer *BatchCompleter) { + t.Helper() + + require.NoError(t, completer.Start(ctx)) + t.Cleanup(completer.Stop) + + 
riversharedtest.WaitOrTimeout(t, completer.Started()) + } + t.Run("CompletionsCompletedInSubBatches", func(t *testing.T) { t.Parallel() completer, bundle := setup(t) completer.completionMaxSize = 10 // set to something artificially low + startCompleter(ctx, t, completer) jobUpdateChan := make(chan CompleterJobUpdated, 100) go func() { @@ -420,7 +438,7 @@ func TestBatchCompleter(t *testing.T) { } }() - stopInsertion := doContinuousInsertion(ctx, t, completer, bundle.exec) + stopInsertion := doContinuousInsertion(ctx, t, completer, bundle.exec, bundle.schema) // Wait for some jobs to come through, giving lots of opportunity for // the completer to have pooled some completions and being forced to @@ -439,6 +457,7 @@ func TestBatchCompleter(t *testing.T) { completer, bundle := setup(t) completer.maxBacklog = 10 // set to something artificially low + startCompleter(ctx, t, completer) jobUpdateChan := make(chan CompleterJobUpdated, 100) go func() { @@ -450,7 +469,7 @@ func TestBatchCompleter(t *testing.T) { } }() - stopInsertion := doContinuousInsertion(ctx, t, completer, bundle.exec) + stopInsertion := doContinuousInsertion(ctx, t, completer, bundle.exec, bundle.schema) // Wait for some jobs to come through. Waiting for these jobs to come // through will provide plenty of opportunity for the completer to back @@ -468,23 +487,32 @@ func TestBatchCompleter(t *testing.T) { func TestInlineCompleter(t *testing.T) { t.Parallel() - testCompleter(t, func(t *testing.T, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh chan<- []CompleterJobUpdated) *InlineCompleter { + testCompleter(t, func(t *testing.T, schema string, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeChan chan<- []CompleterJobUpdated) *InlineCompleter { t.Helper() - return NewInlineCompleter(riversharedtest.BaseServiceArchetype(t), exec, pilot, subscribeCh) + return NewInlineCompleter(riversharedtest.BaseServiceArchetype(t), schema, exec, pilot, subscribeChan) }, func(completer *InlineCompleter) { completer.disableSleep = true }, + 100, func(completer *InlineCompleter, exec riverdriver.Executor) { completer.exec = exec }) } func testCompleter[TCompleter JobCompleter]( t *testing.T, - newCompleter func(t *testing.T, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh chan<- []CompleterJobUpdated) TCompleter, + newCompleter func(t *testing.T, schema string, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh chan<- []CompleterJobUpdated) TCompleter, // These functions are here to help us inject test behavior that's not part // of the JobCompleter interface. We could alternatively define a second // interface like jobCompleterWithTestFacilities to expose the additional // functionality, although that's not particularly beautiful either. disableSleep func(completer TCompleter), + + // Number of jobs to insert for the CompleteManyJobs subtest. The + // BatchCompleter should be tested against a very large number, but this + // really just isn't necessary for simpler completers that might also take a + // long time to complete a huge batch. At 4,400 jobs, we were seeing the + // InlineCompleter take 10s in CI to run this one test. 
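+	// (For reference: the call sites in this file pass 4_400 for the
+	// BatchCompleter and 100 for the async and inline completers.)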
+ numManyJobs int, + setExec func(completer TCompleter, exec riverdriver.Executor), ) { t.Helper() @@ -493,6 +521,7 @@ func testCompleter[TCompleter JobCompleter]( type testBundle struct { exec riverdriver.Executor + schema string subscribeCh <-chan []CompleterJobUpdated } @@ -500,11 +529,13 @@ func testCompleter[TCompleter JobCompleter]( t.Helper() var ( - driver = riverpgxv5.New(riverinternaltest.TestDB(ctx, t)) + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) exec = driver.GetExecutor() pilot = &riverpilot.StandardPilot{} subscribeCh = make(chan []CompleterJobUpdated, 10) - completer = newCompleter(t, exec, pilot, subscribeCh) + completer = newCompleter(t, schema, exec, pilot, subscribeCh) ) require.NoError(t, completer.Start(ctx)) @@ -512,22 +543,23 @@ func testCompleter[TCompleter JobCompleter]( return completer, &testBundle{ exec: exec, + schema: schema, subscribeCh: subscribeCh, } } - requireJob := func(t *testing.T, exec riverdriver.Executor, jobID int64) *rivertype.JobRow { + requireJob := func(t *testing.T, bundle *testBundle, jobID int64) *rivertype.JobRow { t.Helper() - job, err := exec.JobGetByID(ctx, &riverdriver.JobGetByIDParams{ID: jobID, Schema: ""}) + job, err := bundle.exec.JobGetByID(ctx, &riverdriver.JobGetByIDParams{ID: jobID, Schema: bundle.schema}) require.NoError(t, err) return job } - requireState := func(t *testing.T, exec riverdriver.Executor, jobID int64, state rivertype.JobState) *rivertype.JobRow { + requireState := func(t *testing.T, bundle *testBundle, jobID int64, state rivertype.JobState) *rivertype.JobRow { t.Helper() - job := requireJob(t, exec, jobID) + job := requireJob(t, bundle, jobID) require.Equal(t, state, job.State) return job } @@ -542,9 +574,9 @@ func testCompleter[TCompleter JobCompleter]( finalizedAt2 = time.Now().UTC().Add(-2 * time.Minute) finalizedAt3 = time.Now().UTC().Add(-3 * time.Minute) - job1 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) - job2 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) - job3 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job1 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job2 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job3 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) ) require.NoError(t, completer.JobSetStateIfRunning(ctx, &jobstats.JobStatistics{}, riverdriver.JobSetStateCompleted(job1.ID, finalizedAt1, nil))) @@ -553,9 +585,9 @@ func testCompleter[TCompleter JobCompleter]( completer.Stop() - job1Updated := requireState(t, bundle.exec, job1.ID, rivertype.JobStateCompleted) - job2Updated := requireState(t, bundle.exec, job2.ID, rivertype.JobStateCompleted) - job3Updated := requireState(t, bundle.exec, job3.ID, rivertype.JobStateCompleted) + job1Updated := requireState(t, bundle, job1.ID, rivertype.JobStateCompleted) + job2Updated := requireState(t, bundle, job2.ID, rivertype.JobStateCompleted) + job3Updated := requireState(t, bundle, job3.ID, rivertype.JobStateCompleted) require.WithinDuration(t, finalizedAt1, *job1Updated.FinalizedAt, time.Microsecond) require.WithinDuration(t, 
finalizedAt2, *job2Updated.FinalizedAt, time.Microsecond) @@ -570,16 +602,13 @@ func testCompleter[TCompleter JobCompleter]( completer, bundle := setup(t) - const ( - kind = "many_jobs_kind" - numJobs = 4_400 - ) + const kind = "many_jobs_kind" var ( - insertParams = make([]*riverdriver.JobInsertFastParams, numJobs) - stats = make([]jobstats.JobStatistics, numJobs) + insertParams = make([]*riverdriver.JobInsertFastParams, numManyJobs) + stats = make([]jobstats.JobStatistics, numManyJobs) ) - for i := range numJobs { + for i := range numManyJobs { insertParams[i] = &riverdriver.JobInsertFastParams{ EncodedArgs: []byte(`{}`), Kind: kind, @@ -592,13 +621,13 @@ func testCompleter[TCompleter JobCompleter]( _, err := bundle.exec.JobInsertFastMany(ctx, &riverdriver.JobInsertFastManyParams{ Jobs: insertParams, - Schema: "", + Schema: bundle.schema, }) require.NoError(t, err) jobs, err := bundle.exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{kind}, - Schema: "", + Schema: bundle.schema, }) require.NoError(t, err) @@ -612,7 +641,7 @@ func testCompleter[TCompleter JobCompleter]( updatedJobs, err := bundle.exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{kind}, - Schema: "", + Schema: bundle.schema, }) require.NoError(t, err) for i := range updatedJobs { @@ -620,22 +649,15 @@ func testCompleter[TCompleter JobCompleter]( } }) - // The minimum time to wait go guarantee a batch of completions from the - // batch completer. Unless jobs are above a threshold it'll wait a number of - // ticks before starting completions. 5 ticks @ 50 milliseconds. - const minBatchCompleterPassDuration = 5 * 50 * time.Millisecond - t.Run("FastContinuousCompletion", func(t *testing.T) { t.Parallel() completer, bundle := setup(t) t.Cleanup(riverinternaltest.DiscardContinuously(bundle.subscribeCh)) - stopInsertion := doContinuousInsertion(ctx, t, completer, bundle.exec) + stopInsertion := doContinuousInsertion(ctx, t, completer, bundle.exec, bundle.schema) - // Give some time for some jobs to be inserted, and a guaranteed pass by - // the batch completer. - time.Sleep(minBatchCompleterPassDuration) + riversharedtest.WaitOrTimeout(t, bundle.subscribeCh) // Signal to stop insertion and wait for the goroutine to return. numInserted := stopInsertion() @@ -643,7 +665,8 @@ func testCompleter[TCompleter JobCompleter]( require.Positive(t, numInserted) numCompleted, err := bundle.exec.JobCountByState(ctx, &riverdriver.JobCountByStateParams{ - State: rivertype.JobStateCompleted, + Schema: bundle.schema, + State: rivertype.JobStateCompleted, }) require.NoError(t, err) t.Logf("Counted %d jobs as completed", numCompleted) @@ -658,11 +681,9 @@ func testCompleter[TCompleter JobCompleter]( // Number here is chosen to be a little higher than the batch // completer's tick interval so we can make sure that the right thing // happens even on an empty tick. - stopInsertion := doContinuousInsertionInterval(ctx, t, completer, bundle.exec, 30*time.Millisecond) + stopInsertion := doContinuousInsertionInterval(ctx, t, completer, bundle.exec, bundle.schema, 30*time.Millisecond) - // Give some time for some jobs to be inserted, and a guaranteed pass by - // the batch completer. - time.Sleep(minBatchCompleterPassDuration) + riversharedtest.WaitOrTimeout(t, bundle.subscribeCh) // Signal to stop insertion and wait for the goroutine to return. 
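		// (Blocking on a real completion batch via subscribeCh above is
		// deterministic, unlike the fixed sleep it replaces.)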
numInserted := stopInsertion() @@ -670,7 +691,8 @@ func testCompleter[TCompleter JobCompleter]( require.Positive(t, numInserted) numCompleted, err := bundle.exec.JobCountByState(ctx, &riverdriver.JobCountByStateParams{ - State: rivertype.JobStateCompleted, + Schema: bundle.schema, + State: rivertype.JobStateCompleted, }) require.NoError(t, err) t.Logf("Counted %d jobs as completed", numCompleted) @@ -683,13 +705,13 @@ func testCompleter[TCompleter JobCompleter]( completer, bundle := setup(t) var ( - job1 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) - job2 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) - job3 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) - job4 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) - job5 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) - job6 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) - job7 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job1 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job2 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job3 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job4 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job5 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job6 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job7 = testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) ) require.NoError(t, completer.JobSetStateIfRunning(ctx, &jobstats.JobStatistics{}, riverdriver.JobSetStateCancelled(job1.ID, time.Now(), []byte("{}"), nil))) @@ -702,13 +724,13 @@ func testCompleter[TCompleter JobCompleter]( completer.Stop() - requireState(t, bundle.exec, job1.ID, rivertype.JobStateCancelled) - requireState(t, bundle.exec, job2.ID, rivertype.JobStateCompleted) - requireState(t, bundle.exec, job3.ID, rivertype.JobStateDiscarded) - requireState(t, bundle.exec, job4.ID, rivertype.JobStateAvailable) - requireState(t, bundle.exec, job5.ID, rivertype.JobStateRetryable) - requireState(t, bundle.exec, job6.ID, rivertype.JobStateScheduled) - requireState(t, bundle.exec, job7.ID, rivertype.JobStateAvailable) + requireState(t, bundle, job1.ID, rivertype.JobStateCancelled) + requireState(t, bundle, job2.ID, rivertype.JobStateCompleted) + requireState(t, bundle, job3.ID, rivertype.JobStateDiscarded) + requireState(t, bundle, job4.ID, rivertype.JobStateAvailable) + requireState(t, bundle, job5.ID, rivertype.JobStateRetryable) + requireState(t, bundle, job6.ID, rivertype.JobStateScheduled) + requireState(t, bundle, job7.ID, rivertype.JobStateAvailable) }) t.Run("Subscription", func(t *testing.T) { @@ -716,7 +738,7 @@ func testCompleter[TCompleter 
JobCompleter]( completer, bundle := setup(t) - job := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) require.NoError(t, completer.JobSetStateIfRunning(ctx, &jobstats.JobStatistics{}, riverdriver.JobSetStateCompleted(job.ID, time.Now(), nil))) @@ -733,13 +755,13 @@ func testCompleter[TCompleter JobCompleter]( completer, bundle := setup(t) { - job := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) require.NoError(t, completer.JobSetStateIfRunning(ctx, &jobstats.JobStatistics{}, riverdriver.JobSetStateCompleted(job.ID, time.Now(), nil))) completer.Stop() - requireState(t, bundle.exec, job.ID, rivertype.JobStateCompleted) + requireState(t, bundle, job.ID, rivertype.JobStateCompleted) } // Completer closes the subscribe channel on stop, so we need to reset it between runs. @@ -748,13 +770,13 @@ func testCompleter[TCompleter JobCompleter]( { require.NoError(t, completer.Start(ctx)) - job := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) require.NoError(t, completer.JobSetStateIfRunning(ctx, &jobstats.JobStatistics{}, riverdriver.JobSetStateCompleted(job.ID, time.Now(), nil))) completer.Stop() - requireState(t, bundle.exec, job.ID, rivertype.JobStateCompleted) + requireState(t, bundle, job.ID, rivertype.JobStateCompleted) } }) @@ -788,7 +810,7 @@ func testCompleter[TCompleter JobCompleter]( } setExec(completer, execMock) - job := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) require.NoError(t, completer.JobSetStateIfRunning(ctx, &jobstats.JobStatistics{}, riverdriver.JobSetStateCompleted(job.ID, time.Now(), nil))) @@ -798,7 +820,7 @@ func testCompleter[TCompleter JobCompleter]( require.True(t, execMock.JobSetStateIfRunningManyCalled) // Job still managed to complete despite the errors. - requireState(t, bundle.exec, job.ID, rivertype.JobStateCompleted) + requireState(t, bundle, job.ID, rivertype.JobStateCompleted) }) t.Run("CompletionImmediateFailureOnContextCanceled", func(t *testing.T) { //nolint:dupl @@ -816,7 +838,7 @@ func testCompleter[TCompleter JobCompleter]( } setExec(completer, execMock) - job := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) err := completer.JobSetStateIfRunning(ctx, &jobstats.JobStatistics{}, riverdriver.JobSetStateCompleted(job.ID, time.Now(), nil)) @@ -831,7 +853,7 @@ func testCompleter[TCompleter JobCompleter]( // Job is still running because the completer is forced to give up // immediately on certain types of errors like where a pool is closed. 
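	// (The two immediate-failure classes exercised by these subtests are a
	// canceled context and a closed connection pool, per the subtest names.)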
- requireState(t, bundle.exec, job.ID, rivertype.JobStateRunning) + requireState(t, bundle, job.ID, rivertype.JobStateRunning) }) t.Run("CompletionImmediateFailureOnErrClosedPool", func(t *testing.T) { //nolint:dupl @@ -849,7 +871,7 @@ func testCompleter[TCompleter JobCompleter]( } setExec(completer, execMock) - job := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job := testfactory.Job(ctx, t, bundle.exec, &testfactory.JobOpts{Schema: bundle.schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) err := completer.JobSetStateIfRunning(ctx, &jobstats.JobStatistics{}, riverdriver.JobSetStateCompleted(job.ID, time.Now(), nil)) @@ -864,7 +886,7 @@ func testCompleter[TCompleter JobCompleter]( // Job is still running because the completer is forced to give up // immediately on certain types of errors like where a pool is closed. - requireState(t, bundle.exec, job.ID, rivertype.JobStateRunning) + requireState(t, bundle, job.ID, rivertype.JobStateRunning) }) // The batch completer supports an interface that lets caller wait for it to @@ -882,57 +904,60 @@ func testCompleter[TCompleter JobCompleter]( } func BenchmarkAsyncCompleter_Concurrency10(b *testing.B) { - benchmarkCompleter(b, func(b *testing.B, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh chan<- []CompleterJobUpdated) JobCompleter { + benchmarkCompleter(b, func(b *testing.B, schema string, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeChan chan<- []CompleterJobUpdated) JobCompleter { b.Helper() - return newAsyncCompleterWithConcurrency(riversharedtest.BaseServiceArchetype(b), exec, pilot, 10, subscribeCh) + return newAsyncCompleterWithConcurrency(riversharedtest.BaseServiceArchetype(b), schema, exec, pilot, 10, subscribeChan) }) } func BenchmarkAsyncCompleter_Concurrency100(b *testing.B) { - benchmarkCompleter(b, func(b *testing.B, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh chan<- []CompleterJobUpdated) JobCompleter { + benchmarkCompleter(b, func(b *testing.B, schema string, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeChan chan<- []CompleterJobUpdated) JobCompleter { b.Helper() - return newAsyncCompleterWithConcurrency(riversharedtest.BaseServiceArchetype(b), exec, pilot, 100, subscribeCh) + return newAsyncCompleterWithConcurrency(riversharedtest.BaseServiceArchetype(b), schema, exec, pilot, 100, subscribeChan) }) } func BenchmarkBatchCompleter(b *testing.B) { - benchmarkCompleter(b, func(b *testing.B, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh chan<- []CompleterJobUpdated) JobCompleter { + benchmarkCompleter(b, func(b *testing.B, schema string, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeChan chan<- []CompleterJobUpdated) JobCompleter { b.Helper() - return NewBatchCompleter(riversharedtest.BaseServiceArchetype(b), exec, pilot, subscribeCh) + return NewBatchCompleter(riversharedtest.BaseServiceArchetype(b), schema, exec, pilot, subscribeChan) }) } func BenchmarkInlineCompleter(b *testing.B) { - benchmarkCompleter(b, func(b *testing.B, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh chan<- []CompleterJobUpdated) JobCompleter { + benchmarkCompleter(b, func(b *testing.B, schema string, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeChan chan<- []CompleterJobUpdated) JobCompleter { b.Helper() - return NewInlineCompleter(riversharedtest.BaseServiceArchetype(b), exec, pilot, subscribeCh) + return NewInlineCompleter(riversharedtest.BaseServiceArchetype(b), 
schema, exec, pilot, subscribeChan) }) } func benchmarkCompleter( b *testing.B, - newCompleter func(b *testing.B, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeCh chan<- []CompleterJobUpdated) JobCompleter, + newCompleter func(b *testing.B, schema string, exec riverdriver.Executor, pilot riverpilot.Pilot, subscribeChan chan<- []CompleterJobUpdated) JobCompleter, ) { b.Helper() ctx := context.Background() type testBundle struct { - exec riverdriver.Executor - jobs []*rivertype.JobRow - pilot riverpilot.Pilot - stats []jobstats.JobStatistics + exec riverdriver.Executor + jobs []*rivertype.JobRow + pilot riverpilot.Pilot + schema string + stats []jobstats.JobStatistics } setup := func(b *testing.B) (JobCompleter, *testBundle) { b.Helper() var ( - driver = riverpgxv5.New(riverinternaltest.TestDB(ctx, b)) + dbPool = riversharedtest.DBPool(ctx, b) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, b, driver, nil) exec = driver.GetExecutor() pilot = &riverpilot.StandardPilot{} subscribeCh = make(chan []CompleterJobUpdated, 100) - completer = newCompleter(b, exec, pilot, subscribeCh) + completer = newCompleter(b, schema, exec, pilot, subscribeCh) ) b.Cleanup(riverinternaltest.DiscardContinuously(subscribeCh)) @@ -958,21 +983,22 @@ func benchmarkCompleter( _, err := exec.JobInsertFastMany(ctx, &riverdriver.JobInsertFastManyParams{ Jobs: insertParams, - Schema: "", + Schema: schema, }) require.NoError(b, err) jobs, err := exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{"benchmark_kind"}, - Schema: "", + Schema: schema, }) require.NoError(b, err) return completer, &testBundle{ - exec: exec, - jobs: jobs, - pilot: pilot, - stats: make([]jobstats.JobStatistics, b.N), + exec: exec, + jobs: jobs, + pilot: pilot, + schema: schema, + stats: make([]jobstats.JobStatistics, b.N), } } @@ -1036,13 +1062,13 @@ func benchmarkCompleter( // Performs continuous job insertion from a background goroutine. Returns a // function that should be invoked to stop insertion, which will block until // insertion stops, then return the total number of jobs that were inserted. 
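// A typical call from the tests above, with the schema now threaded through
// (using the bundle fields those tests set up):
//
//	stopInsertion := doContinuousInsertion(ctx, t, completer, bundle.exec, bundle.schema)
//	// ... let the completer churn ...
//	numInserted := stopInsertion()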
-func doContinuousInsertion(ctx context.Context, t *testing.T, completer JobCompleter, exec riverdriver.Executor) func() int { +func doContinuousInsertion(ctx context.Context, t *testing.T, completer JobCompleter, exec riverdriver.Executor, schema string) func() int { t.Helper() - return doContinuousInsertionInterval(ctx, t, completer, exec, 1*time.Millisecond) + return doContinuousInsertionInterval(ctx, t, completer, exec, schema, 1*time.Millisecond) } -func doContinuousInsertionInterval(ctx context.Context, t *testing.T, completer JobCompleter, exec riverdriver.Executor, insertInterval time.Duration) func() int { +func doContinuousInsertionInterval(ctx context.Context, t *testing.T, completer JobCompleter, exec riverdriver.Executor, schema string, insertInterval time.Duration) func() int { t.Helper() var ( @@ -1061,7 +1087,7 @@ func doContinuousInsertionInterval(ctx context.Context, t *testing.T, completer }() for { - job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{State: ptrutil.Ptr(rivertype.JobStateRunning)}) + job := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{Schema: schema, State: ptrutil.Ptr(rivertype.JobStateRunning)}) require.NoError(t, completer.JobSetStateIfRunning(ctx, &jobstats.JobStatistics{}, riverdriver.JobSetStateCompleted(job.ID, time.Now(), nil))) numInserted.Add(1) diff --git a/internal/jobexecutor/job_executor.go b/internal/jobexecutor/job_executor.go index 529cd18c..84e8ca50 100644 --- a/internal/jobexecutor/job_executor.go +++ b/internal/jobexecutor/job_executor.go @@ -116,7 +116,6 @@ type JobExecutor struct { JobRow *rivertype.JobRow MiddlewareLookupGlobal middlewarelookup.MiddlewareLookupInterface SchedulerInterval time.Duration - Schema string WorkerMiddleware []rivertype.WorkerMiddleware WorkUnit workunit.WorkUnit @@ -359,7 +358,7 @@ func (e *JobExecutor) reportError(ctx context.Context, res *jobExecutorResult, m return } - now := time.Now() + now := e.Time.NowUTC() if cancelJob { if err := e.Completer.JobSetStateIfRunning(ctx, e.stats, riverdriver.JobSetStateCancelled(e.JobRow.ID, now, errData, metadataUpdates)); err != nil { diff --git a/internal/jobexecutor/job_executor_test.go b/internal/jobexecutor/job_executor_test.go index 483d5f84..7049e3b8 100644 --- a/internal/jobexecutor/job_executor_test.go +++ b/internal/jobexecutor/job_executor_test.go @@ -16,6 +16,7 @@ import ( "github.com/riverqueue/river/internal/riverinternaltest" "github.com/riverqueue/river/internal/riverinternaltest/retrypolicytest" "github.com/riverqueue/river/internal/workunit" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivershared/baseservice" @@ -126,11 +127,11 @@ func TestJobExecutor_Execute(t *testing.T) { t.Helper() var ( - tx = riverinternaltest.TestTx(ctx, t) + tx = riverdbtest.TestTxPgx(ctx, t) archetype = riversharedtest.BaseServiceArchetype(t) exec = riverpgxv5.New(nil).UnwrapExecutor(tx) updateCh = make(chan []jobcompleter.CompleterJobUpdated, 10) - completer = jobcompleter.NewInlineCompleter(archetype, exec, &riverpilot.StandardPilot{}, updateCh) + completer = jobcompleter.NewInlineCompleter(archetype, "", exec, &riverpilot.StandardPilot{}, updateCh) ) t.Cleanup(completer.Stop) diff --git a/internal/leadership/elector.go b/internal/leadership/elector.go index 4b307386..8be55946 100644 --- a/internal/leadership/elector.go +++ b/internal/leadership/elector.go @@ -113,6 +113,7 @@ func NewElector(archetype *baseservice.Archetype, exec 
riverdriver.Executor, not ClientID: config.ClientID, ElectInterval: valutil.ValOrDefault(config.ElectInterval, electIntervalDefault), ElectIntervalJitter: valutil.ValOrDefault(config.ElectIntervalJitter, electIntervalJitterDefault), + Schema: config.Schema, }).mustValidate(), exec: exec, notifier: notifier, @@ -196,6 +197,7 @@ func (e *Elector) attemptGainLeadershipLoop(ctx context.Context) error { elected, err := attemptElectOrReelect(ctx, e.exec, false, &riverdriver.LeaderElectParams{ LeaderID: e.config.ClientID, + Schema: e.config.Schema, TTL: e.leaderTTL(), }) if err != nil { @@ -405,6 +407,7 @@ func (e *Elector) attemptResign(ctx context.Context, attempt int) error { resigned, err := e.exec.LeaderResign(ctx, &riverdriver.LeaderResignParams{ LeaderID: e.config.ClientID, LeadershipTopic: string(notifier.NotificationTopicLeadership), + Schema: e.config.Schema, }) if err != nil { return err diff --git a/internal/leadership/elector_test.go b/internal/leadership/elector_test.go index 85f6d922..d61ba7b2 100644 --- a/internal/leadership/elector_test.go +++ b/internal/leadership/elector_test.go @@ -8,11 +8,12 @@ import ( "time" "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" "github.com/riverqueue/river/internal/notifier" - "github.com/riverqueue/river/internal/riverinternaltest" "github.com/riverqueue/river/internal/riverinternaltest/sharedtx" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivershared/baseservice" @@ -36,10 +37,10 @@ func TestElector_PollOnly(t *testing.T) { } testElector(ctx, t, - func(t *testing.T) *electorBundle { + func(ctx context.Context, t *testing.T, stress bool) *electorBundle { t.Helper() - tx := riverinternaltest.TestTx(ctx, t) + tx := riverdbtest.TestTxPgx(ctx, t) // We'll put multiple electors on one transaction. Make sure they can // live with each other in relative harmony. 
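	// Sketch of the schema-aware construction this file converges on
	// (mirroring the notifier-based variant below):
	//
	//	elector := NewElector(archetype, exec, notifier, &Config{
	//		ClientID: "test_client_id",
	//		Schema:   schema, // e.g. from riverdbtest.TestSchema
	//	})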
@@ -56,7 +57,9 @@ func TestElector_PollOnly(t *testing.T) { riversharedtest.BaseServiceArchetype(t), driver.UnwrapExecutor(electorBundle.tx), nil, - &Config{ClientID: "test_client_id"}, + &Config{ + ClientID: "test_client_id", + }, ) }) } @@ -70,17 +73,24 @@ func TestElector_WithNotifier(t *testing.T) { archetype *baseservice.Archetype exec riverdriver.Executor notifier *notifier.Notifier + schema string } testElector(ctx, t, - func(t *testing.T) *electorBundle { + func(ctx context.Context, t *testing.T, stress bool) *electorBundle { t.Helper() + var dbPool *pgxpool.Pool + if stress { + dbPool = riversharedtest.DBPoolClone(ctx, t) + } else { + dbPool = riversharedtest.DBPool(ctx, t) + } + var ( - archetype = riversharedtest.BaseServiceArchetype(t) - dbPool = riverinternaltest.TestDB(ctx, t) driver = riverpgxv5.New(dbPool) - schema = "" // try to make tests schema-based rather than database-based in the future + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + archetype = riversharedtest.BaseServiceArchetype(t) ) notifier := notifier.New(archetype, driver.GetListener(schema)) @@ -93,6 +103,7 @@ func TestElector_WithNotifier(t *testing.T) { archetype: archetype, exec: driver.GetExecutor(), notifier: notifier, + schema: schema, } }, func(t *testing.T, electorBundle *electorBundle) *Elector { @@ -102,7 +113,10 @@ func TestElector_WithNotifier(t *testing.T) { electorBundle.archetype, electorBundle.exec, electorBundle.notifier, - &Config{ClientID: "test_client_id"}, + &Config{ + ClientID: "test_client_id", + Schema: electorBundle.schema, + }, ) }) } @@ -113,7 +127,7 @@ func TestElector_WithNotifier(t *testing.T) { func testElector[TElectorBundle any]( ctx context.Context, t *testing.T, - makeElectorBundle func(t *testing.T) TElectorBundle, + makeElectorBundle func(ctx context.Context, t *testing.T, stress bool) TElectorBundle, makeElector func(t *testing.T, bundle TElectorBundle) *Elector, ) { t.Helper() @@ -123,10 +137,18 @@ func testElector[TElectorBundle any]( exec riverdriver.Executor } - setup := func(t *testing.T) (*Elector, *testBundle) { + type testOpts struct { + stress bool + } + + setup := func(t *testing.T, opts *testOpts) (*Elector, *testBundle) { t.Helper() - electorBundle := makeElectorBundle(t) + if opts == nil { + opts = &testOpts{} + } + + electorBundle := makeElectorBundle(ctx, t, opts.stress) elector := makeElector(t, electorBundle) elector.testSignals.Init() @@ -147,7 +169,7 @@ func testElector[TElectorBundle any]( t.Run("StartsGainsLeadershipAndStops", func(t *testing.T) { t.Parallel() - elector, bundle := setup(t) + elector, bundle := setup(t, nil) startElector(ctx, t, elector) @@ -172,7 +194,7 @@ func testElector[TElectorBundle any]( t.Run("NotifiesSubscribers", func(t *testing.T) { t.Parallel() - elector, _ := setup(t) + elector, _ := setup(t, nil) sub := elector.Listen() t.Cleanup(func() { elector.unlisten(sub) }) @@ -199,7 +221,7 @@ func testElector[TElectorBundle any]( t.Run("SustainsLeadership", func(t *testing.T) { t.Parallel() - elector, _ := setup(t) + elector, _ := setup(t, nil) startElector(ctx, t, elector) @@ -225,7 +247,7 @@ func testElector[TElectorBundle any]( t.Run("LosesLeadership", func(t *testing.T) { t.Parallel() - elector, bundle := setup(t) + elector, bundle := setup(t, nil) startElector(ctx, t, elector) @@ -238,11 +260,13 @@ func testElector[TElectorBundle any]( _, err := bundle.exec.LeaderResign(ctx, &riverdriver.LeaderResignParams{ LeaderID: elector.config.ClientID, LeadershipTopic: string(notifier.NotificationTopicLeadership), + Schema: 
elector.config.Schema, }) require.NoError(t, err) _ = testfactory.Leader(ctx, t, bundle.exec, &testfactory.LeaderOpts{ LeaderID: ptrutil.Ptr("other-client-id"), + Schema: elector.config.Schema, }) elector.leadershipNotificationChan <- struct{}{} @@ -258,7 +282,7 @@ func testElector[TElectorBundle any]( t.Run("CompetingElectors", func(t *testing.T) { t.Parallel() - elector1, bundle := setup(t) + elector1, bundle := setup(t, nil) elector1.config.ClientID = "elector1" { @@ -313,7 +337,7 @@ func testElector[TElectorBundle any]( t.Run("StartStopStress", func(t *testing.T) { t.Parallel() - elector, _ := setup(t) + elector, _ := setup(t, &testOpts{stress: true}) elector.Logger = riversharedtest.LoggerWarn(t) // loop started/stop log is very noisy; suppress elector.testSignals = electorTestSignals{} // deinit so channels don't fill @@ -343,7 +367,7 @@ func TestAttemptElectOrReelect(t *testing.T) { driver := riverpgxv5.New(nil) return &testBundle{ - exec: driver.UnwrapExecutor(riverinternaltest.TestTx(ctx, t)), + exec: driver.UnwrapExecutor(riverdbtest.TestTxPgx(ctx, t)), logger: riversharedtest.Logger(t), } } @@ -365,8 +389,8 @@ func TestAttemptElectOrReelect(t *testing.T) { Schema: "", }) require.NoError(t, err) - require.WithinDuration(t, time.Now(), leader.ElectedAt, 100*time.Millisecond) - require.WithinDuration(t, time.Now().Add(leaderTTL), leader.ExpiresAt, 100*time.Millisecond) + require.WithinDuration(t, time.Now(), leader.ElectedAt, 1*time.Second) + require.WithinDuration(t, time.Now().Add(leaderTTL), leader.ExpiresAt, 1*time.Second) }) t.Run("ReelectsSameLeader", func(t *testing.T) { @@ -376,6 +400,7 @@ func TestAttemptElectOrReelect(t *testing.T) { leader := testfactory.Leader(ctx, t, bundle.exec, &testfactory.LeaderOpts{ LeaderID: ptrutil.Ptr(clientID), + Schema: "", }) // Re-elect the same leader. 
Use a larger TTL to see if time is updated, @@ -405,6 +430,7 @@ func TestAttemptElectOrReelect(t *testing.T) { leader := testfactory.Leader(ctx, t, bundle.exec, &testfactory.LeaderOpts{ LeaderID: ptrutil.Ptr(clientID), + Schema: "", }) elected, err := attemptElectOrReelect(ctx, bundle.exec, true, &riverdriver.LeaderElectParams{ @@ -439,7 +465,7 @@ func TestElectorHandleLeadershipNotification(t *testing.T) { setup := func(t *testing.T) (*Elector, *testBundle) { t.Helper() - tx := riverinternaltest.TestTx(ctx, t) + tx := riverdbtest.TestTxPgx(ctx, t) elector := NewElector( riversharedtest.BaseServiceArchetype(t), diff --git a/internal/maintenance/job_cleaner.go b/internal/maintenance/job_cleaner.go index eb83f59c..d7417f03 100644 --- a/internal/maintenance/job_cleaner.go +++ b/internal/maintenance/job_cleaner.go @@ -99,6 +99,7 @@ func NewJobCleaner(archetype *baseservice.Archetype, config *JobCleanerConfig, e CompletedJobRetentionPeriod: valutil.ValOrDefault(config.CompletedJobRetentionPeriod, CompletedJobRetentionPeriodDefault), DiscardedJobRetentionPeriod: valutil.ValOrDefault(config.DiscardedJobRetentionPeriod, DiscardedJobRetentionPeriodDefault), Interval: valutil.ValOrDefault(config.Interval, JobCleanerIntervalDefault), + Schema: config.Schema, Timeout: valutil.ValOrDefault(config.Timeout, JobCleanerTimeoutDefault), }).mustValidate(), diff --git a/internal/maintenance/job_cleaner_test.go b/internal/maintenance/job_cleaner_test.go index 57849bc6..77d41e30 100644 --- a/internal/maintenance/job_cleaner_test.go +++ b/internal/maintenance/job_cleaner_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/require" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivershared/riversharedtest" @@ -32,7 +32,7 @@ func TestJobCleaner(t *testing.T) { setup := func(t *testing.T) (*JobCleaner, *testBundle) { t.Helper() - tx := riverinternaltest.TestTx(ctx, t) + tx := riverdbtest.TestTxPgx(ctx, t) bundle := &testBundle{ cancelledDeleteHorizon: time.Now().Add(-CancelledJobRetentionPeriodDefault), completedDeleteHorizon: time.Now().Add(-CompletedJobRetentionPeriodDefault), diff --git a/internal/maintenance/job_rescuer.go b/internal/maintenance/job_rescuer.go index b5844431..e00c755d 100644 --- a/internal/maintenance/job_rescuer.go +++ b/internal/maintenance/job_rescuer.go @@ -94,6 +94,7 @@ func NewRescuer(archetype *baseservice.Archetype, config *JobRescuerConfig, exec ClientRetryPolicy: config.ClientRetryPolicy, Interval: valutil.ValOrDefault(config.Interval, JobRescuerIntervalDefault), RescueAfter: valutil.ValOrDefault(config.RescueAfter, JobRescuerRescueAfterDefault), + Schema: config.Schema, WorkUnitFactoryFunc: config.WorkUnitFactoryFunc, }).mustValidate(), diff --git a/internal/maintenance/job_rescuer_test.go b/internal/maintenance/job_rescuer_test.go index cb7f66d3..dce3a734 100644 --- a/internal/maintenance/job_rescuer_test.go +++ b/internal/maintenance/job_rescuer_test.go @@ -10,8 +10,8 @@ import ( "github.com/stretchr/testify/require" "github.com/riverqueue/river/internal/hooklookup" - "github.com/riverqueue/river/internal/riverinternaltest" "github.com/riverqueue/river/internal/workunit" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivershared/riversharedtest" @@ -74,7 +74,7 @@ func 
diff --git a/internal/maintenance/job_rescuer_test.go b/internal/maintenance/job_rescuer_test.go
index cb7f66d3..dce3a734 100644
--- a/internal/maintenance/job_rescuer_test.go
+++ b/internal/maintenance/job_rescuer_test.go
@@ -10,8 +10,8 @@ import (
    "github.com/stretchr/testify/require"

    "github.com/riverqueue/river/internal/hooklookup"
-   "github.com/riverqueue/river/internal/riverinternaltest"
    "github.com/riverqueue/river/internal/workunit"
+   "github.com/riverqueue/river/riverdbtest"
    "github.com/riverqueue/river/riverdriver"
    "github.com/riverqueue/river/riverdriver/riverpgxv5"
    "github.com/riverqueue/river/rivershared/riversharedtest"
@@ -74,7 +74,7 @@ func TestJobRescuer(t *testing.T) {
    setup := func(t *testing.T) (*JobRescuer, *testBundle) {
        t.Helper()

-       tx := riverinternaltest.TestTx(ctx, t)
+       tx := riverdbtest.TestTxPgx(ctx, t)
        bundle := &testBundle{
            exec:          riverpgxv5.New(nil).UnwrapExecutor(tx),
            rescueHorizon: time.Now().Add(-JobRescuerRescueAfterDefault),
diff --git a/internal/maintenance/job_scheduler.go b/internal/maintenance/job_scheduler.go
index be643b87..0e176833 100644
--- a/internal/maintenance/job_scheduler.go
+++ b/internal/maintenance/job_scheduler.go
@@ -89,6 +89,7 @@ func NewJobScheduler(archetype *baseservice.Archetype, config *JobSchedulerConfi
            Interval:     valutil.ValOrDefault(config.Interval, JobSchedulerIntervalDefault),
            Limit:        valutil.ValOrDefault(config.Limit, JobSchedulerLimitDefault),
            NotifyInsert: config.NotifyInsert,
+           Schema:       config.Schema,
        }).mustValidate(),
        exec: exec,
    })
diff --git a/internal/maintenance/job_scheduler_test.go b/internal/maintenance/job_scheduler_test.go
index f04eec8f..e94797fa 100644
--- a/internal/maintenance/job_scheduler_test.go
+++ b/internal/maintenance/job_scheduler_test.go
@@ -10,7 +10,7 @@ import (
    "github.com/tidwall/gjson"

    "github.com/riverqueue/river/internal/dbunique"
-   "github.com/riverqueue/river/internal/riverinternaltest"
+   "github.com/riverqueue/river/riverdbtest"
    "github.com/riverqueue/river/riverdriver"
    "github.com/riverqueue/river/riverdriver/riverpgxv5"
    "github.com/riverqueue/river/rivershared/riversharedtest"
@@ -30,13 +30,18 @@ func TestJobScheduler(t *testing.T) {
        notificationsByQueue map[string]int
    }

-   setup := func(t *testing.T, exec riverdriver.Executor) (*JobScheduler, *testBundle) {
+   type testOpts struct {
+       exec   riverdriver.Executor
+       schema string // for use when using a non-TestTx
+   }
+
+   setup := func(t *testing.T, opts *testOpts) (*JobScheduler, *testBundle) {
        t.Helper()

        archetype := riversharedtest.BaseServiceArchetype(t)

        bundle := &testBundle{
-           exec:                 exec,
+           exec:                 opts.exec,
            notificationsByQueue: make(map[string]int),
        }

@@ -51,6 +56,7 @@ func TestJobScheduler(t *testing.T) {
                }
                return nil
            },
+           Schema: opts.schema,
        }, bundle.exec)
        scheduler.TestSignals.Init()

@@ -61,8 +67,8 @@ func TestJobScheduler(t *testing.T) {
    setupTx := func(t *testing.T) (*JobScheduler, *testBundle) {
        t.Helper()

-       tx := riverinternaltest.TestTx(ctx, t)
-       return setup(t, riverpgxv5.New(nil).UnwrapExecutor(tx))
+       tx := riverdbtest.TestTxPgx(ctx, t)
+       return setup(t, &testOpts{exec: riverpgxv5.New(nil).UnwrapExecutor(tx)})
    }

    requireJobStateUnchanged := func(t *testing.T, scheduler *JobScheduler, exec riverdriver.Executor, job *rivertype.JobRow) *rivertype.JobRow {
@@ -304,12 +310,15 @@ func TestJobScheduler(t *testing.T) {
    t.Run("TriggersNotificationsOnEachQueueWithNewlyAvailableJobs", func(t *testing.T) {
        t.Parallel()

-       dbPool := riverinternaltest.TestDB(ctx, t)
-       driver := riverpgxv5.New(dbPool)
-       exec := driver.GetExecutor()
+       var (
+           dbPool = riversharedtest.DBPool(ctx, t)
+           driver = riverpgxv5.New(dbPool)
+           schema = riverdbtest.TestSchema(ctx, t, driver, nil)
+           exec   = driver.GetExecutor()
+       )

        notifyCh := make(chan []string, 10)
-       scheduler, _ := setup(t, exec)
+       scheduler, _ := setup(t, &testOpts{exec: exec, schema: schema})
        scheduler.config.Interval = time.Minute // should only trigger once for the initial run
        scheduler.config.NotifyInsert = func(ctx context.Context, tx riverdriver.ExecutorTx, queues []string) error {
            notifyCh <- queues
@@ -327,6 +336,7 @@ func TestJobScheduler(t *testing.T) {
            testfactory.Job(ctx, t, exec, &testfactory.JobOpts{
                FinalizedAt: finalizedAt,
                Queue:       &queue,
+               Schema:      schema,
                State:       &state,
                ScheduledAt: ptrutil.Ptr(now.Add(fromNow)),
            })
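
The hunk above shows the setup shape this diff migrates tests toward: a shared pool from riversharedtest.DBPool plus a per-test schema from riverdbtest.TestSchema, with the schema then threaded into every query and service config. Condensed into a standalone sketch (signatures as used in the hunk; the body is a placeholder, not a real test from the suite):

func TestWithIsolatedSchema(t *testing.T) {
	t.Parallel()

	ctx := context.Background()

	var (
		dbPool = riversharedtest.DBPool(ctx, t)              // shared pool instead of a per-test database
		driver = riverpgxv5.New(dbPool)
		schema = riverdbtest.TestSchema(ctx, t, driver, nil) // isolated schema managed by the helper
		exec   = driver.GetExecutor()
	)

	// Every subsequent query or service config passes the schema explicitly,
	// e.g. riverdriver.JobGetByKindManyParams{Kind: ..., Schema: schema}.
	_ = exec
	_ = schema
}
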
diff --git a/internal/maintenance/periodic_job_enqueuer_test.go b/internal/maintenance/periodic_job_enqueuer_test.go
index 5a76a802..04813ade 100644
--- a/internal/maintenance/periodic_job_enqueuer_test.go
+++ b/internal/maintenance/periodic_job_enqueuer_test.go
@@ -11,7 +11,7 @@ import (

    "github.com/riverqueue/river/internal/dbunique"
    "github.com/riverqueue/river/internal/rivercommon"
-   "github.com/riverqueue/river/internal/riverinternaltest"
+   "github.com/riverqueue/river/riverdbtest"
    "github.com/riverqueue/river/riverdriver"
    "github.com/riverqueue/river/riverdriver/riverpgxv5"
    "github.com/riverqueue/river/rivershared/riversharedtest"
@@ -34,6 +34,7 @@ func TestPeriodicJobEnqueuer(t *testing.T) {
    type testBundle struct {
        exec                 riverdriver.Executor
        notificationsByQueue map[string]int
+       schema               string
        waitChan             chan (struct{})
    }

@@ -78,46 +79,54 @@ func TestPeriodicJobEnqueuer(t *testing.T) {

    // A simplified version of `Client.insertMany` that only inserts jobs directly
    // via the driver instead of using the pilot.
-   insertFunc := func(ctx context.Context, tx riverdriver.ExecutorTx, insertParams []*rivertype.JobInsertParams) ([]*rivertype.JobInsertResult, error) {
-       finalInsertParams := sliceutil.Map(insertParams, func(params *rivertype.JobInsertParams) *riverdriver.JobInsertFastParams {
-           return (*riverdriver.JobInsertFastParams)(params)
-       })
-       results, err := tx.JobInsertFastMany(ctx, &riverdriver.JobInsertFastManyParams{
-           Jobs:   finalInsertParams,
-           Schema: "",
-       })
-       if err != nil {
-           return nil, err
+   makeInsertFunc := func(schema string) func(ctx context.Context, tx riverdriver.ExecutorTx, insertParams []*rivertype.JobInsertParams) ([]*rivertype.JobInsertResult, error) {
+       return func(ctx context.Context, tx riverdriver.ExecutorTx, insertParams []*rivertype.JobInsertParams) ([]*rivertype.JobInsertResult, error) {
+           results, err := tx.JobInsertFastMany(ctx, &riverdriver.JobInsertFastManyParams{
+               Jobs: sliceutil.Map(insertParams, func(params *rivertype.JobInsertParams) *riverdriver.JobInsertFastParams {
+                   return (*riverdriver.JobInsertFastParams)(params)
+               }),
+               Schema: schema,
+           })
+           if err != nil {
+               return nil, err
+           }
+           return sliceutil.Map(results,
+               func(result *riverdriver.JobInsertFastResult) *rivertype.JobInsertResult {
+                   return (*rivertype.JobInsertResult)(result)
+               },
+           ), nil
        }
-       return sliceutil.Map(results,
-           func(result *riverdriver.JobInsertFastResult) *rivertype.JobInsertResult {
-               return (*rivertype.JobInsertResult)(result)
-           },
-       ), nil
    }

    setup := func(t *testing.T) (*PeriodicJobEnqueuer, *testBundle) {
        t.Helper()

+       var (
+           dbPool = riversharedtest.DBPool(ctx, t)
+           driver = riverpgxv5.New(dbPool)
+           schema = riverdbtest.TestSchema(ctx, t, driver, nil)
+       )
+
        bundle := &testBundle{
-           exec:                 riverpgxv5.New(riverinternaltest.TestDB(ctx, t)).GetExecutor(),
+           exec:                 riverpgxv5.New(dbPool).GetExecutor(),
            notificationsByQueue: make(map[string]int),
+           schema:               schema,
            waitChan:             make(chan struct{}),
        }

-       svc := NewPeriodicJobEnqueuer(riversharedtest.BaseServiceArchetype(t), &PeriodicJobEnqueuerConfig{Insert: insertFunc}, bundle.exec)
+       svc := NewPeriodicJobEnqueuer(riversharedtest.BaseServiceArchetype(t), &PeriodicJobEnqueuerConfig{Insert: makeInsertFunc(schema)}, bundle.exec)
        svc.StaggerStartupDisable(true)
        svc.TestSignals.Init()

        return svc, bundle
    }

-   requireNJobs := func(t *testing.T, exec riverdriver.Executor, kind string, expectedNumJobs int) []*rivertype.JobRow {
+   requireNJobs := func(t *testing.T, bundle *testBundle, kind string, expectedNumJobs int) []*rivertype.JobRow {
        t.Helper()

-       jobs, err := exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{
+       jobs, err := bundle.exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{
            Kind:   []string{kind},
-           Schema: "",
+           Schema: bundle.schema,
        })
        require.NoError(t, err)
        require.Len(t, jobs, expectedNumJobs, "Expected to find exactly %d job(s) of kind: %s, but found %d", expectedNumJobs, kind, len(jobs))
@@ -166,17 +175,17 @@ func TestPeriodicJobEnqueuer(t *testing.T) {
        startService(t, svc)

        // Should be no jobs to start.
-       requireNJobs(t, bundle.exec, "periodic_job_500ms", 0)
+       requireNJobs(t, bundle, "periodic_job_500ms", 0)

        svc.TestSignals.InsertedJobs.WaitOrTimeout()
-       requireNJobs(t, bundle.exec, "periodic_job_500ms", 1)
+       requireNJobs(t, bundle, "periodic_job_500ms", 1)

        svc.TestSignals.InsertedJobs.WaitOrTimeout()
-       requireNJobs(t, bundle.exec, "periodic_job_500ms", 2)
+       requireNJobs(t, bundle, "periodic_job_500ms", 2)

        svc.TestSignals.InsertedJobs.WaitOrTimeout()
-       requireNJobs(t, bundle.exec, "periodic_job_500ms", 3)
-       requireNJobs(t, bundle.exec, "periodic_job_1500ms", 1)
+       requireNJobs(t, bundle, "periodic_job_500ms", 3)
+       requireNJobs(t, bundle, "periodic_job_1500ms", 1)
    })

    t.Run("SetsPeriodicMetadataAttribute", func(t *testing.T) {
@@ -207,7 +216,7 @@ func TestPeriodicJobEnqueuer(t *testing.T) {
        svc.TestSignals.InsertedJobs.WaitOrTimeout()

        assertMetadata := func(name string, expected string) {
-           job := requireNJobs(t, bundle.exec, name, 1)[0]
+           job := requireNJobs(t, bundle, name, 1)[0]
            require.JSONEq(t, expected, string(job.Metadata))
        }

@@ -229,12 +238,12 @@ func TestPeriodicJobEnqueuer(t *testing.T) {
        startService(t, svc)

        svc.TestSignals.InsertedJobs.WaitOrTimeout()
-       job1 := requireNJobs(t, bundle.exec, "periodic_job_500ms", 1)[0]
+       job1 := requireNJobs(t, bundle, "periodic_job_500ms", 1)[0]
        require.Equal(t, rivertype.JobStateAvailable, job1.State)
        require.WithinDuration(t, time.Now(), job1.ScheduledAt, 1*time.Second)

        svc.TestSignals.InsertedJobs.WaitOrTimeout()
-       job2 := requireNJobs(t, bundle.exec, "periodic_job_500ms", 2)[1] // ordered by ID
+       job2 := requireNJobs(t, bundle, "periodic_job_500ms", 2)[1] // ordered by ID

        // The new `scheduled_at` is *exactly* the original `scheduled_at` plus
        // 500 milliseconds because the enqueuer used the target next run time
@@ -256,18 +265,18 @@ func TestPeriodicJobEnqueuer(t *testing.T) {
        startService(t, svc)

        // Should be no jobs to start.
-       requireNJobs(t, bundle.exec, "unique_periodic_job_500ms", 0)
+       requireNJobs(t, bundle, "unique_periodic_job_500ms", 0)

        svc.TestSignals.InsertedJobs.WaitOrTimeout()
-       requireNJobs(t, bundle.exec, "unique_periodic_job_500ms", 1)
+       requireNJobs(t, bundle, "unique_periodic_job_500ms", 1)

        // Another insert was attempted, but there's still only one job due to
        // uniqueness conditions.
        svc.TestSignals.InsertedJobs.WaitOrTimeout()
-       requireNJobs(t, bundle.exec, "unique_periodic_job_500ms", 1)
+       requireNJobs(t, bundle, "unique_periodic_job_500ms", 1)

        svc.TestSignals.InsertedJobs.WaitOrTimeout()
-       requireNJobs(t, bundle.exec, "unique_periodic_job_500ms", 1)
+       requireNJobs(t, bundle, "unique_periodic_job_500ms", 1)
    })

    t.Run("RunOnStart", func(t *testing.T) {
@@ -284,8 +293,8 @@ func TestPeriodicJobEnqueuer(t *testing.T) {
        startService(t, svc)

        svc.TestSignals.InsertedJobs.WaitOrTimeout()
-       requireNJobs(t, bundle.exec, "periodic_job_5s", 1)
-       requireNJobs(t, bundle.exec, "unique_periodic_job_5s", 1)
+       requireNJobs(t, bundle, "periodic_job_5s", 1)
+       requireNJobs(t, bundle, "unique_periodic_job_5s", 1)

        // Should've happened quite quickly.
        require.WithinDuration(t, time.Now(), start, 1*time.Second)
@@ -403,7 +412,7 @@ func TestPeriodicJobEnqueuer(t *testing.T) {
        svc := NewPeriodicJobEnqueuer(
            riversharedtest.BaseServiceArchetype(t),
            &PeriodicJobEnqueuerConfig{
-               Insert: insertFunc,
+               Insert: makeInsertFunc(bundle.schema),
                PeriodicJobs: []*PeriodicJob{
                    {ScheduleFunc: periodicIntervalSchedule(500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_500ms", false), RunOnStart: true},
                    {ScheduleFunc: periodicIntervalSchedule(1500 * time.Millisecond), ConstructorFunc: jobConstructorFunc("periodic_job_1500ms", false), RunOnStart: true},
@@ -415,8 +424,8 @@ func TestPeriodicJobEnqueuer(t *testing.T) {
        startService(t, svc)

        svc.TestSignals.InsertedJobs.WaitOrTimeout()
-       requireNJobs(t, bundle.exec, "periodic_job_500ms", 1)
-       requireNJobs(t, bundle.exec, "periodic_job_1500ms", 1)
+       requireNJobs(t, bundle, "periodic_job_500ms", 1)
+       requireNJobs(t, bundle, "periodic_job_1500ms", 1)
    })

    t.Run("AddAfterStart", func(t *testing.T) {
@@ -434,8 +443,8 @@ func TestPeriodicJobEnqueuer(t *testing.T) {
        )

        svc.TestSignals.InsertedJobs.WaitOrTimeout()
-       requireNJobs(t, bundle.exec, "periodic_job_500ms", 0)
-       requireNJobs(t, bundle.exec, "periodic_job_500ms_start", 1)
+       requireNJobs(t, bundle, "periodic_job_500ms", 0)
+       requireNJobs(t, bundle, "periodic_job_500ms_start", 1)
    })

    t.Run("AddManyAfterStart", func(t *testing.T) {
@@ -451,8 +460,8 @@ func TestPeriodicJobEnqueuer(t *testing.T) {
        })

        svc.TestSignals.InsertedJobs.WaitOrTimeout()
-       requireNJobs(t, bundle.exec, "periodic_job_500ms", 0)
-       requireNJobs(t, bundle.exec, "periodic_job_500ms_start", 1)
+       requireNJobs(t, bundle, "periodic_job_500ms", 0)
+       requireNJobs(t, bundle, "periodic_job_500ms_start", 1)
    })

    t.Run("ClearAfterStart", func(t *testing.T) {
@@ -468,8 +477,8 @@ func TestPeriodicJobEnqueuer(t *testing.T) {
        })

        svc.TestSignals.InsertedJobs.WaitOrTimeout()
-       requireNJobs(t, bundle.exec, "periodic_job_500ms", 0)
-       requireNJobs(t, bundle.exec, "periodic_job_500ms_start", 1)
+       requireNJobs(t, bundle, "periodic_job_500ms", 0)
+       requireNJobs(t, bundle, "periodic_job_500ms_start", 1)

        svc.Clear()

@@ -497,8 +506,8 @@ func TestPeriodicJobEnqueuer(t *testing.T) {
        })

        svc.TestSignals.InsertedJobs.WaitOrTimeout()
-       requireNJobs(t, bundle.exec, "periodic_job_500ms", 0)
-       requireNJobs(t, bundle.exec, "periodic_job_500ms_start", 1)
+       requireNJobs(t, bundle, "periodic_job_500ms", 0)
+       requireNJobs(t, bundle, "periodic_job_500ms_start", 1)

        svc.Remove(handles[1])

@@ -519,9 +528,9 @@ func TestPeriodicJobEnqueuer(t *testing.T) {
        })

        svc.TestSignals.InsertedJobs.WaitOrTimeout()
-       requireNJobs(t, bundle.exec, "periodic_job_500ms", 0)
-       requireNJobs(t, bundle.exec, "periodic_job_500ms_other", 0)
-       requireNJobs(t, bundle.exec, "periodic_job_500ms_start", 1)
+       requireNJobs(t, bundle, "periodic_job_500ms", 0)
+       requireNJobs(t, bundle, "periodic_job_500ms_other", 0)
+       requireNJobs(t, bundle, "periodic_job_500ms_start", 1)

        svc.RemoveMany([]rivertype.PeriodicJobHandle{handles[1], handles[2]})

diff --git a/internal/maintenance/queue_cleaner.go b/internal/maintenance/queue_cleaner.go
index 90b6e235..af97e042 100644
--- a/internal/maintenance/queue_cleaner.go
+++ b/internal/maintenance/queue_cleaner.go
@@ -75,6 +75,7 @@ func NewQueueCleaner(archetype *baseservice.Archetype, config *QueueCleanerConfi
        Config: (&QueueCleanerConfig{
            Interval:        valutil.ValOrDefault(config.Interval, queueCleanerIntervalDefault),
            RetentionPeriod: valutil.ValOrDefault(config.RetentionPeriod, QueueRetentionPeriodDefault),
+           Schema:          config.Schema,
        }).mustValidate(),

        batchSize: BatchSizeDefault,
diff --git a/internal/maintenance/queue_cleaner_test.go b/internal/maintenance/queue_cleaner_test.go
index fff1bf93..ed3483e4 100644
--- a/internal/maintenance/queue_cleaner_test.go
+++ b/internal/maintenance/queue_cleaner_test.go
@@ -8,7 +8,7 @@ import (

    "github.com/stretchr/testify/require"

-   "github.com/riverqueue/river/internal/riverinternaltest"
+   "github.com/riverqueue/river/riverdbtest"
    "github.com/riverqueue/river/riverdriver"
    "github.com/riverqueue/river/riverdriver/riverpgxv5"
    "github.com/riverqueue/river/rivershared/riversharedtest"
@@ -31,7 +31,7 @@ func TestQueueCleaner(t *testing.T) {
    setup := func(t *testing.T) (*QueueCleaner, *testBundle) {
        t.Helper()

-       tx := riverinternaltest.TestTx(ctx, t)
+       tx := riverdbtest.TestTxPgx(ctx, t)
        bundle := &testBundle{
            deleteHorizon: time.Now().Add(-QueueRetentionPeriodDefault),
            exec:          riverpgxv5.New(nil).UnwrapExecutor(tx),
diff --git a/internal/maintenance/queue_maintainer_test.go b/internal/maintenance/queue_maintainer_test.go
index 28181aad..ac486334 100644
--- a/internal/maintenance/queue_maintainer_test.go
+++ b/internal/maintenance/queue_maintainer_test.go
@@ -8,8 +8,8 @@ import (
    "github.com/robfig/cron/v3"
    "github.com/stretchr/testify/require"

-   "github.com/riverqueue/river/internal/riverinternaltest"
    "github.com/riverqueue/river/internal/riverinternaltest/sharedtx"
+   "github.com/riverqueue/river/riverdbtest"
    "github.com/riverqueue/river/riverdriver/riverpgxv5"
    "github.com/riverqueue/river/rivershared/baseservice"
    "github.com/riverqueue/river/rivershared/riversharedtest"
@@ -92,7 +92,7 @@ func TestQueueMaintainer(t *testing.T) {
    t.Run("StartStopStress", func(t *testing.T) {
        t.Parallel()

-       tx := riverinternaltest.TestTx(ctx, t)
+       tx := riverdbtest.TestTxPgx(ctx, t)
        sharedTx := sharedtx.NewSharedTx(tx)

        archetype := riversharedtest.BaseServiceArchetype(t)
diff --git a/internal/maintenance/reindexer.go b/internal/maintenance/reindexer.go
index 5d37f430..d736e956 100644
--- a/internal/maintenance/reindexer.go
+++ b/internal/maintenance/reindexer.go
@@ -85,6 +85,7 @@ func NewReindexer(archetype *baseservice.Archetype, config *ReindexerConfig, exe
        Config: (&ReindexerConfig{
            IndexNames:   indexNames,
            ScheduleFunc: scheduleFunc,
+           Schema:       config.Schema,
            Timeout:      valutil.ValOrDefault(config.Timeout, ReindexerTimeoutDefault),
        }).mustValidate(),
"github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivershared/riversharedtest" @@ -27,7 +27,12 @@ func TestReindexer(t *testing.T) { setup := func(t *testing.T) (*Reindexer, *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) + bundle := &testBundle{ exec: riverpgxv5.New(dbPool).GetExecutor(), } @@ -43,6 +48,7 @@ func TestReindexer(t *testing.T) { svc := NewReindexer(archetype, &ReindexerConfig{ ScheduleFunc: fromNow(500 * time.Millisecond), + Schema: schema, }, bundle.exec) svc.StaggerStartupDisable(true) svc.TestSignals.Init() diff --git a/internal/notifier/notifier.go b/internal/notifier/notifier.go index 97d1aa6e..4842b10e 100644 --- a/internal/notifier/notifier.go +++ b/internal/notifier/notifier.go @@ -26,6 +26,22 @@ const ( NotificationTopicLeadership NotificationTopic = "river_leadership" ) +var notificationTopicAll = []NotificationTopic{ //nolint:gochecknoglobals + NotificationTopicControl, + NotificationTopicInsert, + NotificationTopicLeadership, +} + +// NotificationTopicLongest is just the longest notification topic. This is used +// to determine the maximum length of allowed custom schema names because +// schemas are prefixed to notification topic names and Postgres enforces a +// maximum topic length of 63 characters. +var NotificationTopicLongest = func() NotificationTopic { //nolint:gochecknoglobals + return slices.MaxFunc(notificationTopicAll, func(t1, t2 NotificationTopic) int { + return len(string(t1)) - len(string(t2)) + }) +}() + type NotifyFunc func(topic NotificationTopic, payload string) type Subscription struct { diff --git a/internal/notifier/notifier_test.go b/internal/notifier/notifier_test.go index 55b86d24..15d7550c 100644 --- a/internal/notifier/notifier_test.go +++ b/internal/notifier/notifier_test.go @@ -1,6 +1,7 @@ package notifier import ( + "cmp" "context" "errors" "fmt" @@ -12,7 +13,7 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivershared/riversharedtest" @@ -22,6 +23,12 @@ import ( "github.com/riverqueue/river/rivershared/util/serviceutil" ) +func TestNotificationTopicLongest(t *testing.T) { + t.Parallel() + + require.Equal(t, NotificationTopicLeadership, NotificationTopicLongest) +} + func TestNotifier(t *testing.T) { t.Parallel() @@ -35,15 +42,24 @@ func TestNotifier(t *testing.T) { type testBundle struct { dbPool *pgxpool.Pool exec riverdriver.Executor + schema string } - setup := func(t *testing.T) (*Notifier, *testBundle) { + type testOpts struct { + dbPool *pgxpool.Pool // may be left nil for riversharedtest.DBPool + } + + setup := func(t *testing.T, opts *testOpts) (*Notifier, *testBundle) { t.Helper() + if opts == nil { + opts = &testOpts{} + } + var ( - dbPool = riverinternaltest.TestDB(ctx, t) + dbPool = cmp.Or(opts.dbPool, riversharedtest.DBPool(ctx, t)) driver = riverpgxv5.New(dbPool) - schema = "" // try to make tests schema-based rather than database-based in the future + schema = riverdbtest.TestSchema(ctx, t, driver, nil) listener = driver.GetListener(schema) ) @@ -53,6 +69,7 @@ func TestNotifier(t *testing.T) { return notifier, &testBundle{ 
diff --git a/internal/notifier/notifier_test.go b/internal/notifier/notifier_test.go
index 55b86d24..15d7550c 100644
--- a/internal/notifier/notifier_test.go
+++ b/internal/notifier/notifier_test.go
@@ -1,6 +1,7 @@
 package notifier

 import (
+   "cmp"
    "context"
    "errors"
    "fmt"
@@ -12,7 +13,7 @@ import (
    "github.com/jackc/pgx/v5/pgxpool"
    "github.com/stretchr/testify/require"

-   "github.com/riverqueue/river/internal/riverinternaltest"
+   "github.com/riverqueue/river/riverdbtest"
    "github.com/riverqueue/river/riverdriver"
    "github.com/riverqueue/river/riverdriver/riverpgxv5"
    "github.com/riverqueue/river/rivershared/riversharedtest"
@@ -22,6 +23,12 @@ import (
    "github.com/riverqueue/river/rivershared/util/serviceutil"
 )

+func TestNotificationTopicLongest(t *testing.T) {
+   t.Parallel()
+
+   require.Equal(t, NotificationTopicLeadership, NotificationTopicLongest)
+}
+
 func TestNotifier(t *testing.T) {
    t.Parallel()

@@ -35,15 +42,24 @@ func TestNotifier(t *testing.T) {
    type testBundle struct {
        dbPool *pgxpool.Pool
        exec   riverdriver.Executor
+       schema string
    }

-   setup := func(t *testing.T) (*Notifier, *testBundle) {
+   type testOpts struct {
+       dbPool *pgxpool.Pool // may be left nil for riversharedtest.DBPool
+   }
+
+   setup := func(t *testing.T, opts *testOpts) (*Notifier, *testBundle) {
        t.Helper()

+       if opts == nil {
+           opts = &testOpts{}
+       }
+
        var (
-           dbPool   = riverinternaltest.TestDB(ctx, t)
+           dbPool   = cmp.Or(opts.dbPool, riversharedtest.DBPool(ctx, t))
            driver   = riverpgxv5.New(dbPool)
-           schema   = "" // try to make tests schema-based rather than database-based in the future
+           schema   = riverdbtest.TestSchema(ctx, t, driver, nil)
            listener = driver.GetListener(schema)
        )

@@ -53,6 +69,7 @@ func TestNotifier(t *testing.T) {
        return notifier, &testBundle{
            dbPool: dbPool,
            exec:   driver.GetExecutor(),
+           schema: schema,
        }
    }

@@ -66,7 +83,7 @@ func TestNotifier(t *testing.T) {
    t.Run("StartsAndStops", func(t *testing.T) {
        t.Parallel()

-       notifier, _ := setup(t)
+       notifier, _ := setup(t, nil)
        start(t, notifier)

        notifier.testSignals.ListeningBegin.WaitOrTimeout()
@@ -79,7 +96,7 @@ func TestNotifier(t *testing.T) {
    t.Run("StartStopStress", func(t *testing.T) {
        t.Parallel()

-       notifier, _ := setup(t)
+       notifier, _ := setup(t, &testOpts{dbPool: riversharedtest.DBPoolClone(ctx, t)})
        notifier.Logger = riversharedtest.LoggerWarn(t) // loop started/stop log is very noisy; suppress
        notifier.testSignals = notifierTestSignals{}    // deinit so channels don't fill

@@ -89,7 +106,7 @@ func TestNotifier(t *testing.T) {
    t.Run("StartErrorsOnImmediateProblem", func(t *testing.T) {
        t.Parallel()

-       notifier, bundle := setup(t)
+       notifier, bundle := setup(t, &testOpts{dbPool: riversharedtest.DBPoolClone(ctx, t)})

        t.Log("Closing database pool")
        bundle.dbPool.Close()
@@ -100,7 +117,7 @@ func TestNotifier(t *testing.T) {
    t.Run("ListenErrorsOnImmediateProblem", func(t *testing.T) {
        t.Parallel()

-       notifier, _ := setup(t)
+       notifier, _ := setup(t, nil)

        // Use a mock to simulate an error for this one because it's really hard
        // to get the timing right otherwise, and hard to avoid races.
@@ -136,7 +153,7 @@ func TestNotifier(t *testing.T) {
    t.Run("ListensAndUnlistens", func(t *testing.T) {
        t.Parallel()

-       notifier, bundle := setup(t)
+       notifier, bundle := setup(t, nil)
        start(t, notifier)

        notifyChan := make(chan TopicAndPayload, 10)
@@ -144,7 +161,7 @@ func TestNotifier(t *testing.T) {
        sub, err := notifier.Listen(ctx, testTopic1, topicAndPayloadNotifyFunc(notifyChan))
        require.NoError(t, err)

-       sendNotification(ctx, t, bundle.exec, testTopic1, "msg1")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic1, "msg1")

        require.Equal(t, TopicAndPayload{testTopic1, "msg1"}, riversharedtest.WaitOrTimeout(t, notifyChan))

@@ -152,7 +169,7 @@ func TestNotifier(t *testing.T) {

        require.Empty(t, notifier.subscriptions)

-       sendNotification(ctx, t, bundle.exec, testTopic1, "msg2")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic1, "msg2")

        time.Sleep(notificationWaitLeeway)

@@ -162,7 +179,7 @@ func TestNotifier(t *testing.T) {
    t.Run("ListenWithoutStart", func(t *testing.T) {
        t.Parallel()

-       notifier, bundle := setup(t)
+       notifier, bundle := setup(t, nil)

        notifyChan := make(chan TopicAndPayload, 10)

@@ -170,7 +187,7 @@ func TestNotifier(t *testing.T) {
        require.NoError(t, err)
        t.Cleanup(func() { sub.Unlisten(ctx) })

-       sendNotification(ctx, t, bundle.exec, testTopic1, "msg1")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic1, "msg1")

        time.Sleep(notificationWaitLeeway)

@@ -186,7 +203,7 @@ func TestNotifier(t *testing.T) {
    t.Run("ListenWithoutStartConnectError", func(t *testing.T) {
        t.Parallel()

-       notifier, _ := setup(t)
+       notifier, _ := setup(t, nil)

        listenerMock := NewListenerMock(notifier.listener)
        listenerMock.connectFunc = func(ctx context.Context) error {
@@ -203,7 +220,7 @@ func TestNotifier(t *testing.T) {
    t.Run("ListenWithoutStartListenError", func(t *testing.T) {
        t.Parallel()

-       notifier, _ := setup(t)
+       notifier, _ := setup(t, nil)

        listenerMock := NewListenerMock(notifier.listener)
        listenerMock.listenFunc = func(ctx context.Context, topic string) error {
@@ -220,7 +237,7 @@ func TestNotifier(t *testing.T) {
    t.Run("ListenWithoutStartMultipleSubscriptionsError", func(t *testing.T) {
        t.Parallel()

-       notifier, _ := setup(t)
+       notifier, _ := setup(t, nil)

        listenerMock := NewListenerMock(notifier.listener)
        listenerMock.listenFunc = func(ctx context.Context, topic string) error {
@@ -248,7 +265,7 @@ func TestNotifier(t *testing.T) {
    t.Run("ListenBeforeStart", func(t *testing.T) {
        t.Parallel()

-       notifier, bundle := setup(t)
+       notifier, bundle := setup(t, nil)

        notifyChan := make(chan TopicAndPayload, 10)

@@ -258,7 +275,7 @@ func TestNotifier(t *testing.T) {

        start(t, notifier)

-       sendNotification(ctx, t, bundle.exec, testTopic1, "msg1")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic1, "msg1")

        require.Equal(t, TopicAndPayload{testTopic1, "msg1"}, riversharedtest.WaitOrTimeout(t, notifyChan))
    })

@@ -266,7 +283,7 @@ func TestNotifier(t *testing.T) {
    t.Run("SingleTopicMultipleSubscribers", func(t *testing.T) {
        t.Parallel()

-       notifier, bundle := setup(t)
+       notifier, bundle := setup(t, nil)
        start(t, notifier)

        notifyChan1 := make(chan TopicAndPayload, 10)
@@ -277,7 +294,7 @@ func TestNotifier(t *testing.T) {
        sub2, err := notifier.Listen(ctx, testTopic1, topicAndPayloadNotifyFunc(notifyChan2))
        require.NoError(t, err)

-       sendNotification(ctx, t, bundle.exec, testTopic1, "msg1")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic1, "msg1")

        require.Equal(t, TopicAndPayload{testTopic1, "msg1"}, riversharedtest.WaitOrTimeout(t, notifyChan1))
        require.Equal(t, TopicAndPayload{testTopic1, "msg1"}, riversharedtest.WaitOrTimeout(t, notifyChan2))
@@ -287,7 +304,7 @@ func TestNotifier(t *testing.T) {

        require.Empty(t, notifier.subscriptions)

-       sendNotification(ctx, t, bundle.exec, testTopic1, "msg2")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic1, "msg2")

        time.Sleep(notificationWaitLeeway)

@@ -298,7 +315,7 @@ func TestNotifier(t *testing.T) {
    t.Run("MultipleTopicsLockStep", func(t *testing.T) {
        t.Parallel()

-       notifier, bundle := setup(t)
+       notifier, bundle := setup(t, nil)
        start(t, notifier)

        notifyChan1 := make(chan TopicAndPayload, 10)
@@ -309,8 +326,8 @@ func TestNotifier(t *testing.T) {
        sub2, err := notifier.Listen(ctx, testTopic2, topicAndPayloadNotifyFunc(notifyChan2))
        require.NoError(t, err)

-       sendNotification(ctx, t, bundle.exec, testTopic1, "msg1_1")
-       sendNotification(ctx, t, bundle.exec, testTopic2, "msg1_2")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic1, "msg1_1")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic2, "msg1_2")

        require.Equal(t, TopicAndPayload{testTopic1, "msg1_1"}, riversharedtest.WaitOrTimeout(t, notifyChan1))
        require.Equal(t, TopicAndPayload{testTopic2, "msg1_2"}, riversharedtest.WaitOrTimeout(t, notifyChan2))
@@ -320,8 +337,8 @@ func TestNotifier(t *testing.T) {

        require.Empty(t, notifier.subscriptions)

-       sendNotification(ctx, t, bundle.exec, testTopic1, "msg2_1")
-       sendNotification(ctx, t, bundle.exec, testTopic2, "msg2_2")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic1, "msg2_1")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic2, "msg2_2")

        time.Sleep(notificationWaitLeeway)

@@ -332,7 +349,7 @@ func TestNotifier(t *testing.T) {
    t.Run("MultipleTopicsStaggered", func(t *testing.T) {
        t.Parallel()

-       notifier, bundle := setup(t)
+       notifier, bundle := setup(t, nil)
        start(t, notifier)

        notifyChan1 := make(chan TopicAndPayload, 10)
@@ -341,8 +358,8 @@ func TestNotifier(t *testing.T) {
        sub1, err := notifier.Listen(ctx, testTopic1, topicAndPayloadNotifyFunc(notifyChan1))
        require.NoError(t, err)

-       sendNotification(ctx, t, bundle.exec, testTopic1, "msg1_1")
-       sendNotification(ctx, t, bundle.exec, testTopic2, "msg1_2")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic1, "msg1_1")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic2, "msg1_2")

        time.Sleep(notificationWaitLeeway)

@@ -353,8 +370,8 @@ func TestNotifier(t *testing.T) {
        sub2, err := notifier.Listen(ctx, testTopic2, topicAndPayloadNotifyFunc(notifyChan2))
        require.NoError(t, err)

-       sendNotification(ctx, t, bundle.exec, testTopic1, "msg2_1")
-       sendNotification(ctx, t, bundle.exec, testTopic2, "msg2_2")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic1, "msg2_1")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic2, "msg2_2")

        // Now both subscriptions are active.
        require.Equal(t, TopicAndPayload{testTopic1, "msg2_1"}, riversharedtest.WaitOrTimeout(t, notifyChan1))
@@ -362,8 +379,8 @@ func TestNotifier(t *testing.T) {

        sub1.Unlisten(ctx)

-       sendNotification(ctx, t, bundle.exec, testTopic1, "msg3_1")
-       sendNotification(ctx, t, bundle.exec, testTopic2, "msg3_2")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic1, "msg3_1")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic2, "msg3_2")

        time.Sleep(notificationWaitLeeway)

@@ -375,8 +392,8 @@ func TestNotifier(t *testing.T) {

        require.Empty(t, notifier.subscriptions)

-       sendNotification(ctx, t, bundle.exec, testTopic1, "msg4_1")
-       sendNotification(ctx, t, bundle.exec, testTopic2, "msg4_2")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic1, "msg4_1")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic2, "msg4_2")

        time.Sleep(notificationWaitLeeway)

@@ -389,7 +406,7 @@ func TestNotifier(t *testing.T) {
    t.Run("MultipleSubscribersStress", func(t *testing.T) {
        t.Parallel()

-       notifier, bundle := setup(t)
+       notifier, bundle := setup(t, &testOpts{dbPool: riversharedtest.DBPoolClone(ctx, t)})
        start(t, notifier)

        const (
@@ -414,7 +431,7 @@ func TestNotifier(t *testing.T) {
            defer ticker.Stop()

            for messageNum := 0; ; messageNum++ {
-               sendNotification(ctx, t, bundle.exec, testTopic1, "msg"+strconv.Itoa(messageNum))
+               sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic1, "msg"+strconv.Itoa(messageNum))

                select {
                case <-ctx.Done():
@@ -464,7 +481,7 @@ func TestNotifier(t *testing.T) {
    t.Run("WaitErrorAndBackoff", func(t *testing.T) {
        t.Parallel()

-       notifier, _ := setup(t)
+       notifier, _ := setup(t, nil)

        notifier.disableSleep = true

@@ -492,7 +509,7 @@ func TestNotifier(t *testing.T) {
    t.Run("BackoffSleepCancelledOnStop", func(t *testing.T) {
        t.Parallel()

-       notifier, _ := setup(t)
+       notifier, _ := setup(t, nil)

        listenerMock := NewListenerMock(notifier.listener)
        listenerMock.waitForNotificationFunc = func(ctx context.Context) (*riverdriver.Notification, error) {
@@ -510,7 +527,7 @@ func TestNotifier(t *testing.T) {
    t.Run("StillFunctionalAfterMainLoopFailure", func(t *testing.T) {
        t.Parallel()

-       notifier, bundle := setup(t)
+       notifier, bundle := setup(t, nil)

        // Disable the backoff sleep that would occur after the first retry.
        notifier.disableSleep = true
@@ -548,7 +565,7 @@ func TestNotifier(t *testing.T) {
        // sending the notification below.
        notifier.testSignals.ListeningBegin.WaitOrTimeout()

-       sendNotification(ctx, t, bundle.exec, testTopic1, "msg1")
+       sendNotification(ctx, t, bundle.exec, bundle.schema, testTopic1, "msg1")

        // Subscription should still work.
        require.Equal(t, TopicAndPayload{testTopic1, "msg1"}, riversharedtest.WaitOrTimeout(t, notifyChan))
@@ -596,9 +613,13 @@ func topicAndPayloadNotifyFunc(notifyChan chan TopicAndPayload) NotifyFunc {
    }
 }

-func sendNotification(ctx context.Context, t *testing.T, exec riverdriver.Executor, topic string, payload string) {
+func sendNotification(ctx context.Context, t *testing.T, exec riverdriver.Executor, schema, topic string, payload string) {
    t.Helper()

    t.Logf("Sending notification on %q: %s", topic, payload)

-   require.NoError(t, exec.NotifyMany(ctx, &riverdriver.NotifyManyParams{Payload: []string{payload}, Schema: "", Topic: topic}))
+   require.NoError(t, exec.NotifyMany(ctx, &riverdriver.NotifyManyParams{
+       Payload: []string{payload},
+       Schema:  schema,
+       Topic:   topic,
+   }))
 }
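
sendNotification now routes the test schema into the driver's NotifyMany, which is what lets tests sharing one database listen on the same logical topic without hearing each other. A rough pgx-level equivalent of the call, where the schema-prefixed channel name is an assumption based on the NotificationTopicLongest comment earlier (the real prefixing lives inside the driver):

func notifyRaw(ctx context.Context, conn *pgx.Conn, schema, topic, payload string) error {
	channel := topic
	if schema != "" {
		channel = schema + "." + topic
	}
	// pg_notify handles quoting of the channel name, unlike a literal NOTIFY
	// statement, so the composed name can be passed as a parameter.
	_, err := conn.Exec(ctx, "SELECT pg_notify($1, $2)", channel, payload)
	return err
}
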
diff --git a/internal/riverinternaltest/retrypolicytest/retrypolicytest.go b/internal/riverinternaltest/retrypolicytest/retrypolicytest.go
index 77caf144..bb150234 100644
--- a/internal/riverinternaltest/retrypolicytest/retrypolicytest.go
+++ b/internal/riverinternaltest/retrypolicytest/retrypolicytest.go
@@ -5,6 +5,7 @@ import (
    "math"
    "time"

+   "github.com/riverqueue/river/rivershared/baseservice"
    "github.com/riverqueue/river/rivershared/util/timeutil"
    "github.com/riverqueue/river/rivertype"
 )
@@ -41,7 +42,9 @@ var maxDurationSeconds = maxDuration.Seconds() //nolint:gochecknoglobals

 // RetryPolicyNoJitter is identical to default retry policy except that it
 // leaves off the jitter to make checking against it more convenient.
-type RetryPolicyNoJitter struct{}
+type RetryPolicyNoJitter struct {
+   baseservice.BaseService
+}

 func (p *RetryPolicyNoJitter) NextRetry(job *rivertype.JobRow) time.Time {
    return job.AttemptedAt.Add(timeutil.SecondsAsDuration(p.retrySecondsWithoutJitter(job.Attempt)))
diff --git a/internal/riverinternaltest/riverdrivertest/riverdrivertest.go b/internal/riverinternaltest/riverdrivertest/riverdrivertest.go
index 749467ff..057b6f2f 100644
--- a/internal/riverinternaltest/riverdrivertest/riverdrivertest.go
+++ b/internal/riverinternaltest/riverdrivertest/riverdrivertest.go
@@ -30,19 +30,24 @@ import (
 // Exercise fully exercises a driver. The driver's listener is exercised if
 // supported.
 func Exercise[TTx any](ctx context.Context, t *testing.T,
-   driverWithPool func(ctx context.Context, t *testing.T) riverdriver.Driver[TTx],
+   driverWithSchema func(ctx context.Context, t *testing.T) (riverdriver.Driver[TTx], string),
    executorWithTx func(ctx context.Context, t *testing.T) riverdriver.Executor,
 ) {
    t.Helper()

-   if driverWithPool(ctx, t).SupportsListener() {
-       exerciseListener(ctx, t, driverWithPool)
-   } else {
-       t.Logf("Driver does not support listener; skipping listener tests")
+   {
+       driver, _ := driverWithSchema(ctx, t)
+       if driver.SupportsListener() {
+           exerciseListener(ctx, t, driverWithSchema)
+       } else {
+           t.Logf("Driver does not support listener; skipping listener tests")
+       }
    }

    t.Run("GetMigrationFS", func(t *testing.T) {
-       driver := driverWithPool(ctx, t)
+       t.Parallel()
+
+       driver, _ := driverWithSchema(ctx, t)

        for _, line := range driver.GetMigrationLines() {
            migrationFS := driver.GetMigrationFS(line)
@@ -53,8 +58,25 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,
        }
    })

+   t.Run("GetMigrationTruncateTables", func(t *testing.T) {
+       t.Parallel()
+
+       driver, _ := driverWithSchema(ctx, t)
+
+       for _, line := range driver.GetMigrationLines() {
+           truncateTables := driver.GetMigrationTruncateTables(line)
+
+           // Technically a migration line's truncate tables might be empty,
+           // but this never happens in any of our migration lines, so check
+           // non-empty until it becomes an actual problem.
+           require.NotEmpty(t, truncateTables)
+       }
+   })
+
    t.Run("GetMigrationLines", func(t *testing.T) {
-       driver := driverWithPool(ctx, t)
+       t.Parallel()
+
+       driver, _ := driverWithSchema(ctx, t)

        // Should contain at minimum a main migration line.
        require.Contains(t, driver.GetMigrationLines(), riverdriver.MigrationLineMain)
@@ -84,6 +106,7 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,
        // Job visible in subtransaction, but not parent.
        {
            job := testfactory.Job(ctx, t, tx, &testfactory.JobOpts{})
+           _ = testfactory.Job(ctx, t, tx, &testfactory.JobOpts{})

            _, err := tx.JobGetByID(ctx, &riverdriver.JobGetByIDParams{ID: job.ID, Schema: ""})
            require.NoError(t, err)
@@ -995,6 +1018,8 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,
    })

    t.Run("MissingCreatedAtDefaultsToNow", func(t *testing.T) {
+       t.Parallel()
+
        exec, _ := setup(ctx, t)

        insertParams := make([]*riverdriver.JobInsertFastParams, 10)
@@ -1032,6 +1057,8 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,
    })

    t.Run("MissingScheduledAtDefaultsToNow", func(t *testing.T) {
+       t.Parallel()
+
        exec, _ := setup(ctx, t)

        insertParams := make([]*riverdriver.JobInsertFastParams, 10)
@@ -1066,6 +1093,53 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,
            require.WithinDuration(t, time.Now().UTC(), job.ScheduledAt, 2*time.Second)
        }
    })
+
+   t.Run("AlternateSchema", func(t *testing.T) {
+       t.Parallel()
+
+       var (
+           driver, schema = driverWithSchema(ctx, t)
+           exec           = driver.GetExecutor()
+       )
+
+       // This test needs to use a time from before the transaction begins, otherwise
+       // the newly-scheduled jobs won't yet show as available because their
+       // scheduled_at (which gets a default value from time.Now() in code) will be
+       // after the start of the transaction.
+       now := time.Now().UTC().Add(-1 * time.Minute)
+
+       insertParams := make([]*riverdriver.JobInsertFastParams, 10)
+       for i := 0; i < len(insertParams); i++ {
+           insertParams[i] = &riverdriver.JobInsertFastParams{
+               CreatedAt:    ptrutil.Ptr(now.Add(time.Duration(i) * 5 * time.Second)),
+               EncodedArgs:  []byte(`{"encoded": "args"}`),
+               Kind:         "test_kind",
+               MaxAttempts:  rivercommon.MaxAttemptsDefault,
+               Metadata:     []byte(`{"meta": "data"}`),
+               Priority:     rivercommon.PriorityDefault,
+               Queue:        rivercommon.QueueDefault,
+               ScheduledAt:  &now,
+               State:        rivertype.JobStateAvailable,
+               Tags:         []string{"tag"},
+               UniqueKey:    []byte("unique-key-no-returning-" + strconv.Itoa(i)),
+               UniqueStates: 0xff,
+           }
+       }
+
+       count, err := exec.JobInsertFastManyNoReturning(ctx, &riverdriver.JobInsertFastManyParams{
+           Jobs:   insertParams,
+           Schema: schema,
+       })
+       require.NoError(t, err)
+       require.Len(t, insertParams, count)
+
+       jobsAfter, err := exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{
+           Kind:   []string{"test_kind"},
+           Schema: schema,
+       })
+       require.NoError(t, err)
+       require.Len(t, jobsAfter, len(insertParams))
+   })
 })

 t.Run("JobInsertFull", func(t *testing.T) {
@@ -1416,7 +1490,7 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,
        })
        require.NoError(t, err)
        require.Equal(t, rivertype.JobStateAvailable, jobAfter.State)
-       require.WithinDuration(t, time.Now().UTC(), jobAfter.ScheduledAt, 100*time.Millisecond)
+       require.WithinDuration(t, time.Now().UTC(), jobAfter.ScheduledAt, 250*time.Millisecond) // TODO: Bad clock-based test

        jobUpdated, err := exec.JobGetByID(ctx, &riverdriver.JobGetByIDParams{ID: job.ID, Schema: ""})
        require.NoError(t, err)
@@ -2149,6 +2223,7 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,
            ElectedAt: ptrutil.Ptr(now.Add(-2 * time.Hour)),
            ExpiresAt: ptrutil.Ptr(now.Add(-1 * time.Hour)),
            LeaderID:  ptrutil.Ptr(clientID),
+           Schema:    "",
        })

        {
@@ -2170,6 +2245,7 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,

        elected, err := exec.LeaderAttemptElect(ctx, &riverdriver.LeaderElectParams{
            LeaderID: clientID,
+           Schema:   "",
            TTL:      leaderTTL,
        })
        require.NoError(t, err)
@@ -2190,10 +2266,12 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,

        leader := testfactory.Leader(ctx, t, exec, &testfactory.LeaderOpts{
            LeaderID: ptrutil.Ptr(clientID),
+           Schema:   "",
        })

        elected, err := exec.LeaderAttemptElect(ctx, &riverdriver.LeaderElectParams{
            LeaderID: "different-client-id",
+           Schema:   "",
            TTL:      leaderTTL,
        })
        require.NoError(t, err)
@@ -2220,6 +2298,7 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,

        elected, err := exec.LeaderAttemptReelect(ctx, &riverdriver.LeaderElectParams{
            LeaderID: clientID,
+           Schema:   "",
            TTL:      leaderTTL,
        })
        require.NoError(t, err)
@@ -2240,6 +2319,7 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,

        leader := testfactory.Leader(ctx, t, exec, &testfactory.LeaderOpts{
            LeaderID: ptrutil.Ptr(clientID),
+           Schema:   "",
        })

        // Re-elect the same leader. Use a larger TTL to see if time is updated,
@@ -2247,6 +2327,7 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,
        // the transaction.
        elected, err := exec.LeaderAttemptReelect(ctx, &riverdriver.LeaderElectParams{
            LeaderID: clientID,
+           Schema:   "",
            TTL:      30 * time.Second,
        })
        require.NoError(t, err)
@@ -2269,6 +2350,7 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,

        leader, err := exec.LeaderInsert(ctx, &riverdriver.LeaderInsertParams{
            LeaderID: clientID,
+           Schema:   "",
            TTL:      leaderTTL,
        })
        require.NoError(t, err)
@@ -2284,6 +2366,7 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,

        _ = testfactory.Leader(ctx, t, exec, &testfactory.LeaderOpts{
            LeaderID: ptrutil.Ptr(clientID),
+           Schema:   "",
        })

        leader, err := exec.LeaderGetElectedLeader(ctx, &riverdriver.LeaderGetElectedLeaderParams{
@@ -2307,6 +2390,7 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,
        resigned, err := exec.LeaderResign(ctx, &riverdriver.LeaderResignParams{
            LeaderID:        clientID,
            LeadershipTopic: string(notifier.NotificationTopicLeadership),
+           Schema:          "",
        })
        require.NoError(t, err)
        require.False(t, resigned)
@@ -2314,12 +2398,14 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,

        _ = testfactory.Leader(ctx, t, exec, &testfactory.LeaderOpts{
            LeaderID: ptrutil.Ptr(clientID),
+           Schema:   "",
        })

        {
            resigned, err := exec.LeaderResign(ctx, &riverdriver.LeaderResignParams{
                LeaderID:        clientID,
                LeadershipTopic: string(notifier.NotificationTopicLeadership),
+               Schema:          "",
            })
            require.NoError(t, err)
            require.True(t, resigned)
@@ -2333,11 +2419,13 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,

        _ = testfactory.Leader(ctx, t, exec, &testfactory.LeaderOpts{
            LeaderID: ptrutil.Ptr("other-client-id"),
+           Schema:   "",
        })

        resigned, err := exec.LeaderResign(ctx, &riverdriver.LeaderResignParams{
            LeaderID:        clientID,
            LeadershipTopic: string(notifier.NotificationTopicLeadership),
+           Schema:          "",
        })
        require.NoError(t, err)
        require.False(t, resigned)
@@ -2610,6 +2698,7 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,
        queue, err := exec.QueueCreateOrSetUpdatedAt(ctx, &riverdriver.QueueCreateOrSetUpdatedAtParams{
            Metadata: metadata,
            Name:     "new-queue",
+           Schema:   "",
        })
        require.NoError(t, err)
        require.WithinDuration(t, time.Now(), queue.CreatedAt, 500*time.Millisecond)
@@ -2628,6 +2717,7 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,
        queue, err := exec.QueueCreateOrSetUpdatedAt(ctx, &riverdriver.QueueCreateOrSetUpdatedAtParams{
            Name:     "new-queue",
            PausedAt: ptrutil.Ptr(now),
+           Schema:   "",
        })
        require.NoError(t, err)
        require.Equal(t, "new-queue", queue.Name)
@@ -2644,6 +2734,7 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,
        queueBefore, err := exec.QueueCreateOrSetUpdatedAt(ctx, &riverdriver.QueueCreateOrSetUpdatedAtParams{
            Metadata:  metadata,
            Name:      "updatable-queue",
+           Schema:    "",
            UpdatedAt: &tBefore,
        })
        require.NoError(t, err)
@@ -2653,6 +2744,7 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,
        queueAfter, err := exec.QueueCreateOrSetUpdatedAt(ctx, &riverdriver.QueueCreateOrSetUpdatedAtParams{
            Metadata:  []byte(`{"other": "metadata"}`),
            Name:      "updatable-queue",
+           Schema:    "",
            UpdatedAt: &tAfter,
        })
        require.NoError(t, err)
@@ -2666,356 +2758,407 @@ func Exercise[TTx any](ctx context.Context, t *testing.T,
        // Timestamp is bumped:
        require.WithinDuration(t, tAfter, queueAfter.UpdatedAt, time.Millisecond)
    })
+   })

-   t.Run("QueueDeleteExpired", func(t *testing.T) {
-       t.Parallel()
+   t.Run("QueueDeleteExpired", func(t *testing.T) {
+       t.Parallel()

-       exec, _ := setup(ctx, t)
+       exec, _ := setup(ctx, t)

-       now := time.Now()
-       _ = testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(now)})
-       queue2 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(now.Add(-25 * time.Hour))})
-       queue3 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(now.Add(-26 * time.Hour))})
-       queue4 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(now.Add(-48 * time.Hour))})
-       _ = testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(now.Add(-23 * time.Hour))})
+       now := time.Now()
+       _ = testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(now)})
+       queue2 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(now.Add(-25 * time.Hour))})
+       queue3 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(now.Add(-26 * time.Hour))})
+       queue4 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(now.Add(-48 * time.Hour))})
+       _ = testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{UpdatedAt: ptrutil.Ptr(now.Add(-23 * time.Hour))})

-       horizon := now.Add(-24 * time.Hour)
-       deletedQueueNames, err := exec.QueueDeleteExpired(ctx, &riverdriver.QueueDeleteExpiredParams{Max: 2, UpdatedAtHorizon: horizon})
-       require.NoError(t, err)
+       horizon := now.Add(-24 * time.Hour)
+       deletedQueueNames, err := exec.QueueDeleteExpired(ctx, &riverdriver.QueueDeleteExpiredParams{Max: 2, UpdatedAtHorizon: horizon})
+       require.NoError(t, err)

-       // queue2 and queue3 should be deleted, with queue4 being skipped due to max of 2:
-       require.Equal(t, []string{queue2.Name, queue3.Name}, deletedQueueNames)
+       // queue2 and queue3 should be deleted, with queue4 being skipped due to max of 2:
+       require.Equal(t, []string{queue2.Name, queue3.Name}, deletedQueueNames)

-       // Try again, make sure queue4 gets deleted this time:
-       deletedQueueNames, err = exec.QueueDeleteExpired(ctx, &riverdriver.QueueDeleteExpiredParams{Max: 2, UpdatedAtHorizon: horizon})
-       require.NoError(t, err)
+       // Try again, make sure queue4 gets deleted this time:
+       deletedQueueNames, err = exec.QueueDeleteExpired(ctx, &riverdriver.QueueDeleteExpiredParams{Max: 2, UpdatedAtHorizon: horizon})
+       require.NoError(t, err)
+
+       require.Equal(t, []string{queue4.Name}, deletedQueueNames)
+   })
+
+   t.Run("QueueGet", func(t *testing.T) {
+       t.Parallel()
+
+       exec, _ := setup(ctx, t)
+
+       queue := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Metadata: []byte(`{"foo": "bar"}`)})

-       require.Equal(t, []string{queue4.Name}, deletedQueueNames)
+       queueFetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
+           Name:   queue.Name,
+           Schema: "",
+       })
+       require.NoError(t, err)
+
+       require.WithinDuration(t, queue.CreatedAt, queueFetched.CreatedAt, time.Millisecond)
+       require.Equal(t, queue.Metadata, queueFetched.Metadata)
+       require.Equal(t, queue.Name, queueFetched.Name)
+       require.Nil(t, queueFetched.PausedAt)
+       require.WithinDuration(t, queue.UpdatedAt, queueFetched.UpdatedAt, time.Millisecond)
+
+       queueFetched, err = exec.QueueGet(ctx, &riverdriver.QueueGetParams{
+           Name:   "nonexistent-queue",
+           Schema: "",
+       })
+       require.ErrorIs(t, err, rivertype.ErrNotFound)
+       require.Nil(t, queueFetched)
+   })
+
+   t.Run("QueueList", func(t *testing.T) {
+       t.Parallel()
+
+       exec, _ := setup(ctx, t)
+
+       requireQueuesEqual := func(t *testing.T, target, actual *rivertype.Queue) {
+           t.Helper()
+           require.WithinDuration(t, target.CreatedAt, actual.CreatedAt, time.Millisecond)
+           require.Equal(t, target.Metadata, actual.Metadata)
+           require.Equal(t, target.Name, actual.Name)
+           if target.PausedAt == nil {
+               require.Nil(t, actual.PausedAt)
+           } else {
+               require.NotNil(t, actual.PausedAt)
+               require.WithinDuration(t, *target.PausedAt, *actual.PausedAt, time.Millisecond)
+           }
+       }
+
+       queues, err := exec.QueueList(ctx, &riverdriver.QueueListParams{
+           Limit:  10,
+           Schema: "",
+       })
+       require.NoError(t, err)
+       require.Empty(t, queues)
+
+       // Make queue1, already paused:
+       queue1 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Metadata: []byte(`{"foo": "bar"}`), PausedAt: ptrutil.Ptr(time.Now())})
+       require.NoError(t, err)
+
+       queue2 := testfactory.Queue(ctx, t, exec, nil)
+       queue3 := testfactory.Queue(ctx, t, exec, nil)
+
+       queues, err = exec.QueueList(ctx, &riverdriver.QueueListParams{
+           Limit:  2,
+           Schema: "",
+       })
+       require.NoError(t, err)
+
+       require.Len(t, queues, 2)
+       requireQueuesEqual(t, queue1, queues[0])
+       requireQueuesEqual(t, queue2, queues[1])
+
+       queues, err = exec.QueueList(ctx, &riverdriver.QueueListParams{
+           Limit:  3,
+           Schema: "",
        })
+       require.NoError(t, err)
+
+       require.Len(t, queues, 3)
+       requireQueuesEqual(t, queue3, queues[2])
+   })
+
+   t.Run("QueuePause", func(t *testing.T) {
+       t.Parallel()

-       t.Run("QueueGet", func(t *testing.T) {
+       t.Run("ExistingPausedQueue", func(t *testing.T) {
            t.Parallel()

            exec, _ := setup(ctx, t)

-           queue := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Metadata: []byte(`{"foo": "bar"}`)})
+           queue := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{
+               PausedAt: ptrutil.Ptr(time.Now()),
+           })

+           require.NoError(t, exec.QueuePause(ctx, &riverdriver.QueuePauseParams{
+               Name:   queue.Name,
+               Schema: "",
+           }))
            queueFetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
                Name:   queue.Name,
                Schema: "",
            })
            require.NoError(t, err)
+           require.NotNil(t, queueFetched.PausedAt)
+           requireEqualTime(t, *queue.PausedAt, *queueFetched.PausedAt) // paused_at stays unchanged
+           requireEqualTime(t, queue.UpdatedAt, queueFetched.UpdatedAt) // updated_at stays unchanged
+       })

-           require.WithinDuration(t, queue.CreatedAt, queueFetched.CreatedAt, time.Millisecond)
-           require.Equal(t, queue.Metadata, queueFetched.Metadata)
-           require.Equal(t, queue.Name, queueFetched.Name)
-           require.Nil(t, queueFetched.PausedAt)
-           require.WithinDuration(t, queue.UpdatedAt, queueFetched.UpdatedAt, time.Millisecond)
+       t.Run("ExistingUnpausedQueue", func(t *testing.T) {
+           t.Parallel()
+
+           exec, _ := setup(ctx, t)
+
+           queue := testfactory.Queue(ctx, t, exec, nil)
+           require.Nil(t, queue.PausedAt)

-           queueFetched, err = exec.QueueGet(ctx, &riverdriver.QueueGetParams{
-               Name: "nonexistent-queue",
+           require.NoError(t, exec.QueuePause(ctx, &riverdriver.QueuePauseParams{
+               Name:   queue.Name,
+               Schema: "",
+           }))
+
+           queueFetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
+               Name:   queue.Name,
                Schema: "",
            })
-           require.ErrorIs(t, err, rivertype.ErrNotFound)
-           require.Nil(t, queueFetched)
+           require.NoError(t, err)
+           require.NotNil(t, queueFetched.PausedAt)
+           require.WithinDuration(t, time.Now(), *(queueFetched.PausedAt), 500*time.Millisecond)
        })

-       t.Run("QueueList", func(t *testing.T) {
+       t.Run("NonExistentQueue", func(t *testing.T) {
            t.Parallel()

            exec, _ := setup(ctx, t)

-           requireQueuesEqual := func(t *testing.T, target, actual *rivertype.Queue) {
-               t.Helper()
-               require.WithinDuration(t, target.CreatedAt, actual.CreatedAt, time.Millisecond)
-               require.Equal(t, target.Metadata, actual.Metadata)
-               require.Equal(t, target.Name, actual.Name)
-               if target.PausedAt == nil {
-                   require.Nil(t, actual.PausedAt)
-               } else {
-                   require.NotNil(t, actual.PausedAt)
-                   require.WithinDuration(t, *target.PausedAt, *actual.PausedAt, time.Millisecond)
-               }
-           }
-
-           queues, err := exec.QueueList(ctx, &riverdriver.QueueListParams{
-               Limit: 10,
+           err := exec.QueuePause(ctx, &riverdriver.QueuePauseParams{
+               Name:   "queue1",
                Schema: "",
            })
-           require.NoError(t, err)
-           require.Empty(t, queues)
+           require.ErrorIs(t, err, rivertype.ErrNotFound)
+       })

-           // Make queue1, already paused:
-           queue1 := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Metadata: []byte(`{"foo": "bar"}`), PausedAt: ptrutil.Ptr(time.Now())})
-           require.NoError(t, err)
+       t.Run("AllQueuesExistingQueues", func(t *testing.T) {
+           t.Parallel()
+
+           exec, _ := setup(ctx, t)

+           queue1 := testfactory.Queue(ctx, t, exec, nil)
+           require.Nil(t, queue1.PausedAt)
            queue2 := testfactory.Queue(ctx, t, exec, nil)
-           queue3 := testfactory.Queue(ctx, t, exec, nil)
+           require.Nil(t, queue2.PausedAt)

-           queues, err = exec.QueueList(ctx, &riverdriver.QueueListParams{
-               Limit: 2,
+           require.NoError(t, exec.QueuePause(ctx, &riverdriver.QueuePauseParams{
+               Name:   rivercommon.AllQueuesString,
                Schema: "",
-           })
-           require.NoError(t, err)
+           }))

-           require.Len(t, queues, 2)
-           requireQueuesEqual(t, queue1, queues[0])
-           requireQueuesEqual(t, queue2, queues[1])
+           now := time.Now()

-           queues, err = exec.QueueList(ctx, &riverdriver.QueueListParams{
-               Limit: 3,
+           queue1Fetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
+               Name:   queue1.Name,
                Schema: "",
            })
            require.NoError(t, err)
+           require.NotNil(t, queue1Fetched.PausedAt)
+           require.WithinDuration(t, now, *(queue1Fetched.PausedAt), 500*time.Millisecond)

-           require.Len(t, queues, 3)
-           requireQueuesEqual(t, queue3, queues[2])
+           queue2Fetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
+               Name:   queue2.Name,
+               Schema: "",
+           })
+           require.NoError(t, err)
+           require.NotNil(t, queue2Fetched.PausedAt)
+           require.WithinDuration(t, now, *(queue2Fetched.PausedAt), 500*time.Millisecond)
        })

-       t.Run("QueuePause", func(t *testing.T) {
+       t.Run("AllQueuesNoQueues", func(t *testing.T) {
            t.Parallel()

-           t.Run("ExistingPausedQueue", func(t *testing.T) {
-               t.Parallel()
-
-               exec, _ := setup(ctx, t)
-
-               queue := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{
-                   PausedAt: ptrutil.Ptr(time.Now()),
-               })
-
-               require.NoError(t, exec.QueuePause(ctx, &riverdriver.QueuePauseParams{
-                   Name:   queue.Name,
-                   Schema: "",
-               }))
-               queueFetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
-                   Name:   queue.Name,
-                   Schema: "",
-               })
-               require.NoError(t, err)
-               require.NotNil(t, queueFetched.PausedAt)
-               requireEqualTime(t, *queue.PausedAt, *queueFetched.PausedAt) // paused_at stays unchanged
-               requireEqualTime(t, queue.UpdatedAt, queueFetched.UpdatedAt) // updated_at stays unchanged
-           })
+           exec, _ := setup(ctx, t)

-           t.Run("ExistingUnpausedQueue", func(t *testing.T) {
-               t.Parallel()
+           require.NoError(t, exec.QueuePause(ctx, &riverdriver.QueuePauseParams{
+               Name:   rivercommon.AllQueuesString,
+               Schema: "",
+           }))
+       })
+   })

-               exec, _ := setup(ctx, t)
+   t.Run("QueueResume", func(t *testing.T) {
+       t.Parallel()

-               queue := testfactory.Queue(ctx, t, exec, nil)
-               require.Nil(t, queue.PausedAt)
+       t.Run("ExistingPausedQueue", func(t *testing.T) {
+           t.Parallel()

-               require.NoError(t, exec.QueuePause(ctx, &riverdriver.QueuePauseParams{
-                   Name:   queue.Name,
-                   Schema: "",
-               }))
+           exec, _ := setup(ctx, t)

-               queueFetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
-                   Name:   queue.Name,
-                   Schema: "",
-               })
-               require.NoError(t, err)
-               require.NotNil(t, queueFetched.PausedAt)
-               require.WithinDuration(t, time.Now(), *(queueFetched.PausedAt), 500*time.Millisecond)
+           queue := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{
+               PausedAt: ptrutil.Ptr(time.Now()),
            })

-           t.Run("NonExistentQueue", func(t *testing.T) {
-               t.Parallel()
-
-               exec, _ := setup(ctx, t)
+           require.NoError(t, exec.QueueResume(ctx, &riverdriver.QueueResumeParams{
+               Name:   queue.Name,
+               Schema: "",
+           }))

-               err := exec.QueuePause(ctx, &riverdriver.QueuePauseParams{
-                   Name:   "queue1",
-                   Schema: "",
-               })
-               require.ErrorIs(t, err, rivertype.ErrNotFound)
+           queueFetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
+               Name:   queue.Name,
+               Schema: "",
            })
+           require.NoError(t, err)
+           require.Nil(t, queueFetched.PausedAt)
+       })

-           t.Run("AllQueuesExistingQueues", func(t *testing.T) {
-               t.Parallel()
-
-               exec, _ := setup(ctx, t)
-
-               queue1 := testfactory.Queue(ctx, t, exec, nil)
-               require.Nil(t, queue1.PausedAt)
-               queue2 := testfactory.Queue(ctx, t, exec, nil)
-               require.Nil(t, queue2.PausedAt)
+       t.Run("ExistingUnpausedQueue", func(t *testing.T) {
+           t.Parallel()

-               require.NoError(t, exec.QueuePause(ctx, &riverdriver.QueuePauseParams{
-                   Name:   rivercommon.AllQueuesString,
-                   Schema: "",
-               }))
+           exec, _ := setup(ctx, t)

-               now := time.Now()
+           queue := testfactory.Queue(ctx, t, exec, nil)

-               queue1Fetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
-                   Name:   queue1.Name,
-                   Schema: "",
-               })
-               require.NoError(t, err)
-               require.NotNil(t, queue1Fetched.PausedAt)
-               require.WithinDuration(t, now, *(queue1Fetched.PausedAt), 500*time.Millisecond)
+           require.NoError(t, exec.QueueResume(ctx, &riverdriver.QueueResumeParams{
+               Name:   queue.Name,
+               Schema: "",
+           }))

-               queue2Fetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
-                   Name:   queue2.Name,
-                   Schema: "",
-               })
-               require.NoError(t, err)
-               require.NotNil(t, queue2Fetched.PausedAt)
-               require.WithinDuration(t, now, *(queue2Fetched.PausedAt), 500*time.Millisecond)
+           queueFetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
+               Name:   queue.Name,
+               Schema: "",
            })
+           require.NoError(t, err)
+           require.Nil(t, queueFetched.PausedAt)
+           requireEqualTime(t, queue.UpdatedAt, queueFetched.UpdatedAt) // updated_at stays unchanged
+       })

-           t.Run("AllQueuesNoQueues", func(t *testing.T) {
-               t.Parallel()
+       t.Run("NonExistentQueue", func(t *testing.T) {
+           t.Parallel()

-               exec, _ := setup(ctx, t)
+           exec, _ := setup(ctx, t)

-               require.NoError(t, exec.QueuePause(ctx, &riverdriver.QueuePauseParams{
-                   Name:   rivercommon.AllQueuesString,
-                   Schema: "",
-               }))
+           err := exec.QueueResume(ctx, &riverdriver.QueueResumeParams{
+               Name:   "queue1",
+               Schema: "",
            })
+           require.ErrorIs(t, err, rivertype.ErrNotFound)
        })

-       t.Run("QueueResume", func(t *testing.T) {
+       t.Run("AllQueuesExistingQueues", func(t *testing.T) {
            t.Parallel()

-           t.Run("ExistingPausedQueue", func(t *testing.T) {
-               t.Parallel()
+           exec, _ := setup(ctx, t)

-               exec, _ := setup(ctx, t)
+           queue1 := testfactory.Queue(ctx, t, exec, nil)
+           require.Nil(t, queue1.PausedAt)
+           queue2 := testfactory.Queue(ctx, t, exec, nil)
+           require.Nil(t, queue2.PausedAt)

-               queue := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{
-                   PausedAt: ptrutil.Ptr(time.Now()),
-               })
+           require.NoError(t, exec.QueuePause(ctx, &riverdriver.QueuePauseParams{
+               Name:   rivercommon.AllQueuesString,
+               Schema: "",
+           }))
+           require.NoError(t, exec.QueueResume(ctx, &riverdriver.QueueResumeParams{
+               Name:   rivercommon.AllQueuesString,
+               Schema: "",
+           }))

-               require.NoError(t, exec.QueueResume(ctx, &riverdriver.QueueResumeParams{
-                   Name:   queue.Name,
-                   Schema: "",
-               }))
+           queue1Fetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
+               Name:   queue1.Name,
+               Schema: "",
+           })
+           require.NoError(t, err)
+           require.Nil(t, queue1Fetched.PausedAt)

-               queueFetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
-                   Name:   queue.Name,
-                   Schema: "",
-               })
-               require.NoError(t, err)
-               require.Nil(t, queueFetched.PausedAt)
+           queue2Fetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
+               Name:   queue2.Name,
+               Schema: "",
            })
+           require.NoError(t, err)
+           require.Nil(t, queue2Fetched.PausedAt)
+       })

-           t.Run("ExistingUnpausedQueue", func(t *testing.T) {
-               t.Parallel()
+       t.Run("AllQueuesNoQueues", func(t *testing.T) {
+           t.Parallel()

-               exec, _ := setup(ctx, t)
+           exec, _ := setup(ctx, t)

-               queue := testfactory.Queue(ctx, t, exec, nil)
+           require.NoError(t, exec.QueueResume(ctx, &riverdriver.QueueResumeParams{
+               Name:   rivercommon.AllQueuesString,
+               Schema: "",
+           }))
+       })
+   })

-               require.NoError(t, exec.QueueResume(ctx, &riverdriver.QueueResumeParams{
-                   Name:   queue.Name,
-                   Schema: "",
-               }))
+   t.Run("QueueUpdate", func(t *testing.T) {
+       t.Parallel()

-               queueFetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
-                   Name:   queue.Name,
-                   Schema: "",
-               })
-               require.NoError(t, err)
-               require.Nil(t, queueFetched.PausedAt)
-               requireEqualTime(t, queue.UpdatedAt, queueFetched.UpdatedAt) // updated_at stays unchanged
-           })
+       t.Run("UpdatesFieldsIfDoUpdateIsTrue", func(t *testing.T) {
+           t.Parallel()

-           t.Run("NonExistentQueue", func(t *testing.T) {
-               t.Parallel()
+           exec, _ := setup(ctx, t)

-               exec, _ := setup(ctx, t)
+           queue := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Metadata: []byte(`{"foo": "bar"}`)})

-               err := exec.QueueResume(ctx, &riverdriver.QueueResumeParams{
-                   Name:   "queue1",
-                   Schema: "",
-               })
-               require.ErrorIs(t, err, rivertype.ErrNotFound)
+           updatedQueue, err := exec.QueueUpdate(ctx, &riverdriver.QueueUpdateParams{
+               Metadata:         []byte(`{"baz": "qux"}`),
+               MetadataDoUpdate: true,
+               Name:             queue.Name,
            })
+           require.NoError(t, err)
+           require.JSONEq(t, `{"baz": "qux"}`, string(updatedQueue.Metadata))
+       })

-           t.Run("AllQueuesExistingQueues", func(t *testing.T) {
-               t.Parallel()
-
-               exec, _ := setup(ctx, t)
-
-               queue1 := testfactory.Queue(ctx, t, exec, nil)
-               require.Nil(t, queue1.PausedAt)
-               queue2 := testfactory.Queue(ctx, t, exec, nil)
-               require.Nil(t, queue2.PausedAt)
+       t.Run("DoesNotUpdateFieldsIfDoUpdateIsFalse", func(t *testing.T) {
+           t.Parallel()

-               require.NoError(t, exec.QueuePause(ctx, &riverdriver.QueuePauseParams{
-                   Name:   rivercommon.AllQueuesString,
-                   Schema: "",
-               }))
-               require.NoError(t, exec.QueueResume(ctx, &riverdriver.QueueResumeParams{
-                   Name:   rivercommon.AllQueuesString,
-                   Schema: "",
-               }))
+           exec, _ := setup(ctx, t)

-               queue1Fetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
-                   Name:   queue1.Name,
-                   Schema: "",
-               })
-               require.NoError(t, err)
-               require.Nil(t, queue1Fetched.PausedAt)
+           queue := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Metadata: []byte(`{"foo": "bar"}`)})

-               queue2Fetched, err := exec.QueueGet(ctx, &riverdriver.QueueGetParams{
-                   Name:   queue2.Name,
-                   Schema: "",
-               })
-               require.NoError(t, err)
-               require.Nil(t, queue2Fetched.PausedAt)
+           updatedQueue, err := exec.QueueUpdate(ctx, &riverdriver.QueueUpdateParams{
+               Metadata:         []byte(`{"baz": "qux"}`),
+               MetadataDoUpdate: false,
+               Name:             queue.Name,
            })
+           require.NoError(t, err)
+           require.JSONEq(t, `{"foo": "bar"}`, string(updatedQueue.Metadata))
+       })
+   })

-           t.Run("AllQueuesNoQueues", func(t *testing.T) {
-               t.Parallel()
+   t.Run("QueryRow", func(t *testing.T) {
+       t.Parallel()

-               exec, _ := setup(ctx, t)
+       exec, _ := setup(ctx, t)

-               require.NoError(t, exec.QueueResume(ctx, &riverdriver.QueueResumeParams{
-                   Name:   rivercommon.AllQueuesString,
-                   Schema: "",
-               }))
-           })
-       })
+       var (
+           field1   int
+           field2   int
+           field3   int
+           fieldFoo string
+       )

-       t.Run("QueueUpdate", func(t *testing.T) {
-           t.Parallel()
+       err := exec.QueryRow(ctx, "SELECT 1, 2, 3, 'foo'").Scan(&field1, &field2, &field3, &fieldFoo)
+       require.NoError(t, err)

-           t.Run("UpdatesFieldsIfDoUpdateIsTrue", func(t *testing.T) {
-               t.Parallel()
+       require.Equal(t, 1, field1)
+       require.Equal(t, 2, field2)
+       require.Equal(t, 3, field3)
+       require.Equal(t, "foo", fieldFoo)
+   })

-               exec, _ := setup(ctx, t)
+   t.Run("SchemaGetExpired", func(t *testing.T) {
+       t.Parallel()

-               queue := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Metadata: []byte(`{"foo": "bar"}`)})
+       t.Run("FiltersSchemasNotMatchingPrefix", func(t *testing.T) {
+           t.Parallel()

-               updatedQueue, err := exec.QueueUpdate(ctx, &riverdriver.QueueUpdateParams{
-                   Metadata:         []byte(`{"baz": "qux"}`),
-                   MetadataDoUpdate: true,
-                   Name:             queue.Name,
-               })
-               require.NoError(t, err)
-               require.JSONEq(t, `{"baz": "qux"}`, string(updatedQueue.Metadata))
-           })
+           exec, _ := setup(ctx, t)

-           t.Run("DoesNotUpdateFieldsIfDoUpdateIsFalse", func(t *testing.T) {
-               t.Parallel()
+           schemas, err := exec.SchemaGetExpired(ctx, &riverdriver.SchemaGetExpiredParams{
+               BeforeName: "zzzzzzzzzzzzzzzzzz",
+               Prefix:     "this_prefix_will_not_exist_",
+           })
+           require.NoError(t, err)
+           require.Empty(t, schemas)
+       })

-               exec, _ := setup(ctx, t)
+       t.Run("ListsSchemasBelowMarker", func(t *testing.T) {
+           t.Parallel()

-               queue := testfactory.Queue(ctx, t, exec, &testfactory.QueueOpts{Metadata: []byte(`{"foo": "bar"}`)})
+           exec, _ := setup(ctx, t)

-               updatedQueue, err := exec.QueueUpdate(ctx, &riverdriver.QueueUpdateParams{
-                   Metadata:         []byte(`{"baz": "qux"}`),
-                   MetadataDoUpdate: false,
-                   Name:             queue.Name,
-               })
-               require.NoError(t, err)
-               require.JSONEq(t, `{"foo": "bar"}`, string(updatedQueue.Metadata))
+           schemas, err := exec.SchemaGetExpired(ctx, &riverdriver.SchemaGetExpiredParams{
+               BeforeName: "pg_toast",
+               Prefix:     "pg_%",
            })
+           require.NoError(t, err)
+           require.Equal(t, []string{"pg_catalog"}, schemas)
        })
    })
 }
@@ -3025,12 +3168,12 @@ type testListenerBundle[TTx any] struct {
    exec riverdriver.Executor
 }

-func setupListener[TTx any](ctx context.Context, t *testing.T, getDriverWithPool func(ctx context.Context, t *testing.T) riverdriver.Driver[TTx]) (riverdriver.Listener, *testListenerBundle[TTx]) {
+func setupListener[TTx any](ctx context.Context, t *testing.T, driverWithPool func(ctx context.Context, t *testing.T) (riverdriver.Driver[TTx], string)) (riverdriver.Listener, *testListenerBundle[TTx]) {
    t.Helper()

    var (
-       driver   = getDriverWithPool(ctx, t)
-       listener = driver.GetListener("")
+       driver, schema = driverWithPool(ctx, t)
+       listener       = driver.GetListener(schema)
    )

    return listener, &testListenerBundle[TTx]{
@@ -3039,7 +3182,7 @@ func setupListener[TTx any](
    }
 }

-func exerciseListener[TTx any](ctx context.Context, t *testing.T, driverWithPool func(ctx context.Context, t *testing.T) riverdriver.Driver[TTx]) {
+func exerciseListener[TTx any](ctx context.Context, t *testing.T, driverWithPool func(ctx context.Context, t *testing.T) (riverdriver.Driver[TTx], string)) {
    t.Helper()

    connectListener := func(ctx context.Context, t *testing.T, listener riverdriver.Listener) {
@@ -3092,8 +3235,8 @@ func exerciseListener[TTx any](ctx context.Context, t *testing.T, driverWithPool
require.NoError(t, listener.Ping(ctx)) // still alive { - require.NoError(t, bundle.exec.NotifyMany(ctx, &riverdriver.NotifyManyParams{Topic: "topic1", Payload: []string{"payload1_1"}, Schema: ""})) - require.NoError(t, bundle.exec.NotifyMany(ctx, &riverdriver.NotifyManyParams{Topic: "topic2", Payload: []string{"payload2_1"}, Schema: ""})) + require.NoError(t, bundle.exec.NotifyMany(ctx, &riverdriver.NotifyManyParams{Topic: "topic1", Payload: []string{"payload1_1"}, Schema: listener.Schema()})) + require.NoError(t, bundle.exec.NotifyMany(ctx, &riverdriver.NotifyManyParams{Topic: "topic2", Payload: []string{"payload2_1"}, Schema: listener.Schema()})) notification := waitForNotification(ctx, t, listener) require.Equal(t, &riverdriver.Notification{Topic: "topic1", Payload: "payload1_1"}, notification) @@ -3104,8 +3247,8 @@ func exerciseListener[TTx any](ctx context.Context, t *testing.T, driverWithPool require.NoError(t, listener.Unlisten(ctx, "topic2")) { - require.NoError(t, bundle.exec.NotifyMany(ctx, &riverdriver.NotifyManyParams{Topic: "topic1", Payload: []string{"payload1_2"}, Schema: ""})) - require.NoError(t, bundle.exec.NotifyMany(ctx, &riverdriver.NotifyManyParams{Topic: "topic2", Payload: []string{"payload2_2"}, Schema: ""})) + require.NoError(t, bundle.exec.NotifyMany(ctx, &riverdriver.NotifyManyParams{Topic: "topic1", Payload: []string{"payload1_2"}, Schema: listener.Schema()})) + require.NoError(t, bundle.exec.NotifyMany(ctx, &riverdriver.NotifyManyParams{Topic: "topic2", Payload: []string{"payload2_2"}, Schema: listener.Schema()})) notification := waitForNotification(ctx, t, listener) require.Equal(t, &riverdriver.Notification{Topic: "topic1", Payload: "payload1_2"}, notification) @@ -3122,8 +3265,8 @@ func exerciseListener[TTx any](ctx context.Context, t *testing.T, driverWithPool t.Parallel() var ( - driver = driverWithPool(ctx, t) - listener = driver.GetListener("my_custom_schema") + driver, _ = driverWithPool(ctx, t) + listener = driver.GetListener("my_custom_schema") ) require.Equal(t, "my_custom_schema", listener.Schema()) @@ -3132,11 +3275,36 @@ func exerciseListener[TTx any](ctx context.Context, t *testing.T, driverWithPool t.Run("SchemaFromSearchPath", func(t *testing.T) { t.Parallel() - listener, _ := setupListener(ctx, t, driverWithPool) + // TODO(brandur): Need to find a way to make this test work. We need to + // inject a `search_path`, but the connection is acquired below inside + // `listener.Connect`, which means we can't do so here without finding a + // way to do some kind of test injection. 
+ t.Skip("needs a way to be test injectable") + + // somehow do: + // SET search_path TO 'public' + + var ( + driver, _ = driverWithPool(ctx, t) + listener = driver.GetListener("") + ) + connectListener(ctx, t, listener) require.Equal(t, "public", listener.Schema()) }) + t.Run("EmptySchemaFromSearchPath", func(t *testing.T) { + t.Parallel() + + var ( + driver, _ = driverWithPool(ctx, t) + listener = driver.GetListener("") + ) + + connectListener(ctx, t, listener) + require.Empty(t, listener.Schema()) + }) + t.Run("TransactionGated", func(t *testing.T) { t.Parallel() @@ -3149,7 +3317,7 @@ func exerciseListener[TTx any](ctx context.Context, t *testing.T, driverWithPool tx, err := bundle.exec.Begin(ctx) require.NoError(t, err) - require.NoError(t, tx.NotifyMany(ctx, &riverdriver.NotifyManyParams{Topic: "topic1", Payload: []string{"payload1"}, Schema: ""})) + require.NoError(t, tx.NotifyMany(ctx, &riverdriver.NotifyManyParams{Topic: "topic1", Payload: []string{"payload1"}, Schema: listener.Schema()})) // No notification because the transaction hasn't committed yet. requireNoNotification(ctx, t, listener) diff --git a/internal/riverinternaltest/riverinternaltest.go b/internal/riverinternaltest/riverinternaltest.go index 818a8c40..121ac7e2 100644 --- a/internal/riverinternaltest/riverinternaltest.go +++ b/internal/riverinternaltest/riverinternaltest.go @@ -3,24 +3,15 @@ package riverinternaltest import ( - "context" "fmt" - "log" - "net/url" "os" - "runtime" "sync" "testing" "time" - "github.com/jackc/pgx/v5" - "github.com/jackc/pgx/v5/pgxpool" - "github.com/stretchr/testify/require" "go.uber.org/goleak" - "github.com/riverqueue/river/internal/testdb" "github.com/riverqueue/river/rivershared/riversharedtest" - "github.com/riverqueue/river/rivershared/util/valutil" ) // SchedulerShortInterval is an artificially short interval for the scheduler @@ -33,54 +24,6 @@ import ( // in one place. const SchedulerShortInterval = 500 * time.Millisecond -var ( - dbManager *testdb.Manager //nolint:gochecknoglobals - - // Maximum number of connections for the connection pool. This is the same - // default that pgxpool uses (the larger of 4 or number of CPUs), but made a - // variable here so that we can reference it from the test suite and not - // rely on implicit knowledge of pgxpool implementation details that could - // change in the future. If changing this value, also change the number of - // databases to create in `testdbman`. - dbPoolMaxConns = int32(max(4, runtime.NumCPU())) //nolint:gochecknoglobals -) - -func DatabaseConfig(databaseName string) *pgxpool.Config { - config, err := pgxpool.ParseConfig(DatabaseURL(databaseName)) - if err != nil { - panic(fmt.Sprintf("error parsing database URL: %v", err)) - } - config.MaxConns = dbPoolMaxConns - // Use a short conn timeout here to attempt to quickly cancel attempts that - // are unlikely to succeed even with more time: - config.ConnConfig.ConnectTimeout = 2 * time.Second - config.ConnConfig.RuntimeParams["timezone"] = "UTC" - return config -} - -// DatabaseURL gets a test database URL from TEST_DATABASE_URL or falls back on -// a default pointing to `river_test`. If databaseName is set, it replaces the -// database in the URL, although the host and other parameters are preserved. -// -// Most of the time DatabaseConfig should be used instead of this function, but -// it may be useful in non-pgx situations like for examples showing the use of -// `database/sql`. 
-func DatabaseURL(databaseName string) string { - parsedURL, err := url.Parse(valutil.ValOrDefault( - os.Getenv("TEST_DATABASE_URL"), - "postgres://localhost/river_test?sslmode=disable"), - ) - if err != nil { - panic(err) - } - - if databaseName != "" { - parsedURL.Path = databaseName - } - - return parsedURL.String() -} - // DiscardContinuously drains continuously out of the given channel and discards // anything that comes out of it. Returns a stop function that should be invoked // to stop draining. Stop must be invoked before tests finish to stop an @@ -157,111 +100,11 @@ func DrainContinuously[T any](drainChan <-chan T) func() []T { } } -// TestDB acquires a dedicated test database for the duration of the test. If an -// error occurs, the test fails. The test database will be automatically -// returned to the pool at the end of the test. If the pool was closed, it will -// be recreated. -func TestDB(ctx context.Context, tb testing.TB) *pgxpool.Pool { - tb.Helper() - - ctx, cancel := context.WithTimeout(ctx, riversharedtest.WaitTimeout()) - defer cancel() - - testPool, err := dbManager.Acquire(ctx) - if err != nil { - tb.Fatalf("Failed to acquire pool for test DB: %v", err) - } - tb.Cleanup(testPool.Release) - - return testPool.Pool() -} - -// A pool and mutex to protect it, lazily initialized by TestTx. Once open, this -// pool is never explicitly closed, instead closing implicitly as the package -// tests finish. -var ( - dbPool *pgxpool.Pool //nolint:gochecknoglobals - dbPoolMu sync.RWMutex //nolint:gochecknoglobals -) - -// TestTx starts a test transaction that's rolled back automatically as the test -// case is cleaning itself up. This can be used as a lighter weight alternative -// to `testdb.Manager` in components where it's not necessary to have many -// connections open simultaneously. -func TestTx(ctx context.Context, tb testing.TB) pgx.Tx { - tb.Helper() - - tryPool := func() *pgxpool.Pool { - dbPoolMu.RLock() - defer dbPoolMu.RUnlock() - return dbPool - } - - getPool := func() *pgxpool.Pool { - if dbPool := tryPool(); dbPool != nil { - return dbPool - } - - dbPoolMu.Lock() - defer dbPoolMu.Unlock() - - // Multiple goroutines may have passed the initial `nil` check on start - // up, so check once more to make sure pool hasn't been set yet. - if dbPool != nil { - return dbPool - } - - var err error - dbPool, err = pgxpool.NewWithConfig(ctx, DatabaseConfig("river_test")) - require.NoError(tb, err) - - return dbPool - } - - return riversharedtest.TestTxPool(ctx, tb, getPool()) -} - -// TruncateRiverTables truncates River tables in the target database. This is -// for test cleanup and should obviously only be used in tests. -func TruncateRiverTables(ctx context.Context, pool *pgxpool.Pool) error { - ctx, cancel := context.WithTimeout(ctx, 10*time.Second) - defer cancel() - - tables := []string{"river_job", "river_leader", "river_queue"} - - for _, table := range tables { - if _, err := pool.Exec(ctx, fmt.Sprintf("TRUNCATE TABLE %s;", table)); err != nil { - return fmt.Errorf("error truncating %q: %w", table, err) - } - } - - return nil -} - // WrapTestMain performs some common setup and teardown that should be shared -// amongst all packages. e.g. Configures a manager for test databases on setup, -// and checks for no goroutine leaks on teardown. +// amongst all packages. e.g. Checks for no goroutine leaks on teardown. 
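+//
+// As a sketch of the conventional call pattern (assumed, not shown in this
+// diff), a package's TestMain delegates to it entirely:
+//
+//	func TestMain(m *testing.M) {
+//		riverinternaltest.WrapTestMain(m)
+//	}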
func WrapTestMain(m *testing.M) { - poolConfig := DatabaseConfig("river_test") - // Use a smaller number of conns per pool, because otherwise we could have - // NUM_CPU pools, each with NUM_CPU connections, and that's a lot of - // connections if there are many CPUs. - poolConfig.MaxConns = 4 - // Pre-initialize 1 connection per pool. - poolConfig.MinConns = 1 - - var err error - // Allow up to one database per concurrent test, plus two for overhead: - maxTestDBs := int32(runtime.GOMAXPROCS(0)) + 2 //nolint:gosec - dbManager, err = testdb.NewManager(poolConfig, maxTestDBs, nil, TruncateRiverTables) - if err != nil { - log.Fatal(err) - } - status := m.Run() - dbManager.Close() - if status == 0 { if err := goleak.Find(riversharedtest.IgnoredKnownGoroutineLeaks...); err != nil { fmt.Fprintf(os.Stderr, "goleak: Errors on successful test run: %v\n", err) diff --git a/internal/riverinternaltest/riverinternaltest_test.go b/internal/riverinternaltest/riverinternaltest_test.go deleted file mode 100644 index db5f215d..00000000 --- a/internal/riverinternaltest/riverinternaltest_test.go +++ /dev/null @@ -1,85 +0,0 @@ -package riverinternaltest - -import ( - "context" - "sync" - "testing" - - "github.com/jackc/pgerrcode" - "github.com/jackc/pgx/v5/pgconn" - "github.com/stretchr/testify/require" -) - -// Implemented by `pgx.Tx` or `pgxpool.Pool`. Normally we'd use a similar type -// from `dbsqlc` or `dbutil`, but riverinternaltest is extremely low level and -// that would introduce a cyclic dependency. We could package as -// `riverinternaltest_test`, except that the test below uses internal variables -// like `dbPool`. -type Executor interface { - Exec(ctx context.Context, query string, args ...interface{}) (pgconn.CommandTag, error) -} - -func TestTestTx(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - checkTestTable := func(executor Executor) error { - _, err := executor.Exec(ctx, "SELECT * FROM test_tx_table") - return err - } - - // Test cleanups are invoked in the order of last added, first called. When - // TestTx is called below it adds a cleanup, so we want to make sure that - // this cleanup, which checks that the database remains pristine, is invoked - // after the TestTx cleanup, so we add it first. - t.Cleanup(func() { - err := checkTestTable(dbPool) - require.Error(t, err) - - var pgErr *pgconn.PgError - require.ErrorAs(t, err, &pgErr) - require.Equal(t, pgerrcode.UndefinedTable, pgErr.Code) - }) - - tx := TestTx(ctx, t) - - _, err := tx.Exec(ctx, "CREATE TABLE test_tx_table (id bigint)") - require.NoError(t, err) - - err = checkTestTable(tx) - require.NoError(t, err) -} - -// Simulates a bunch of parallel processes starting a `TestTx` simultaneously. -// With the help of `go test -race`, should identify mutex/locking/parallel -// access problems if there are any. -// -// This test does NOT run in parallel on purpose because we want to be able to -// check access and set up on the `dbPool` global package variable which may be -// tainted if another test calls `TestTx` at the same time. -func TestTestTx_ConcurrentAccess(t *testing.T) { //nolint:paralleltest - var ( - ctx = context.Background() - wg sync.WaitGroup - ) - - wg.Add(int(dbPoolMaxConns)) - - // Before doing anything, zero out the pool because another test may have - // initialized it already. - dbPool = nil - - // Don't open more than maximum pool size transactions at once because that - // would deadlock. 
- for i := range dbPoolMaxConns { - workerNum := i - go func() { - _ = TestTx(ctx, t) - t.Logf("Opened transaction: %d", workerNum) - wg.Done() - }() - } - - wg.Wait() -} diff --git a/internal/riverinternaltest/sharedtx/shared_tx_test.go b/internal/riverinternaltest/sharedtx/shared_tx_test.go index 0ddd4795..595720ca 100644 --- a/internal/riverinternaltest/sharedtx/shared_tx_test.go +++ b/internal/riverinternaltest/sharedtx/shared_tx_test.go @@ -8,7 +8,7 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" ) func TestSharedTx(t *testing.T) { @@ -19,7 +19,7 @@ func TestSharedTx(t *testing.T) { setup := func(t *testing.T) *SharedTx { t.Helper() - return NewSharedTx(riverinternaltest.TestTx(ctx, t)) + return NewSharedTx(riverdbtest.TestTxPgx(ctx, t)) } t.Run("SharedTxFunctions", func(t *testing.T) { diff --git a/internal/testdb/db_with_pool.go b/internal/testdb/db_with_pool.go deleted file mode 100644 index 98081214..00000000 --- a/internal/testdb/db_with_pool.go +++ /dev/null @@ -1,88 +0,0 @@ -package testdb - -import ( - "context" - "errors" - "log/slog" - "sync" - "time" - - "github.com/jackc/pgx/v5/pgxpool" - "github.com/jackc/puddle/v2" -) - -// DBWithPool is a wrapper for a puddle resource for a test database. The -// database is made available via a preconfigured pgxpool.Pool. -type DBWithPool struct { - res *puddle.Resource[*poolWithDBName] - manager *Manager - dbName string - logger *slog.Logger - - closeOnce sync.Once -} - -// Release releases the DBWithPool back to the Manager. This should be called -// when the test is finished with the database. -func (db *DBWithPool) Release() { - db.closeOnce.Do(db.release) -} - -func (db *DBWithPool) release() { - db.logger.Debug("DBWithPool: release called", "dbName", db.dbName) - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - - if err := db.res.Value().pool.Ping(ctx); err != nil { - // If the pgx pool is already closed, Ping returns puddle.ErrClosedPool. - // When this happens, we need to re-create the pool. - if errors.Is(err, puddle.ErrClosedPool) { - db.logger.Debug("DBWithPool: pool is closed, re-creating", "dbName", db.dbName) - - if err := db.recreatePool(ctx); err != nil { - db.res.Destroy() - return - } - } else { - // Log any other ping error but proceed with cleanup. - db.logger.Debug("DBWithPool: pool ping returned error", "dbName", db.dbName, "err", err) - } - } - - if db.manager.cleanup != nil { - db.logger.Debug("DBWithPool: release calling cleanup", "dbName", db.dbName) - if err := db.manager.cleanup(ctx, db.res.Value().pool); err != nil { - db.logger.Error("testdb.DBWithPool: Error during release cleanup", "err", err) - - if err := db.recreatePool(ctx); err != nil { - db.res.Destroy() - return - } - } - db.logger.Debug("DBWithPool: release done with cleanup", "dbName", db.dbName) - } - - // Finally this resource is ready to be reused: - db.res.Release() -} - -// Pool returns the underlying pgxpool.Pool for the test database. 
-func (db *DBWithPool) Pool() *pgxpool.Pool { - return db.res.Value().pool -} - -func (db *DBWithPool) recreatePool(ctx context.Context) error { - db.logger.Debug("DBWithPool: recreatePool called", "dbName", db.dbName) - db.Pool().Close() - - newPgxPool, err := pgxpool.NewWithConfig(ctx, db.res.Value().config) - if err != nil { - db.res.Destroy() - db.logger.Error("DBWithPool: recreatePool failed", "dbName", db.dbName, "err", err) - return err - } - db.logger.Debug("DBWithPool: recreatePool succeeded", "dbName", db.dbName) - db.res.Value().pool = newPgxPool - return nil -} diff --git a/internal/testdb/manager.go b/internal/testdb/manager.go deleted file mode 100644 index 1033fd1d..00000000 --- a/internal/testdb/manager.go +++ /dev/null @@ -1,144 +0,0 @@ -package testdb - -import ( - "context" - "fmt" - "log/slog" - "os" - "sync" - - "github.com/jackc/pgx/v5/pgxpool" - "github.com/jackc/puddle/v2" -) - -type PrepareFunc func(ctx context.Context, pool *pgxpool.Pool) error - -type CleanupFunc func(ctx context.Context, pool *pgxpool.Pool) error - -type poolWithDBName struct { - pool *pgxpool.Pool - - // We will need to recreate the actual pool each time this DB is reused, so we - // need the config for creating it: - config *pgxpool.Config - - // dbName is needed to be able to drop the database in the destructor. - dbName string -} - -// Manager manages a pool of test databases up to a max size. Each DB keeps a -// pgxpool.Pool which is available when one is acquired from the Manager. -// Databases can optionally be prepared with a PrepareFunc before being added -// into the pool, and cleaned up with a CleanupFunc before being returned to the -// pool for reuse. -// -// This setup makes it trivial to run fully isolated tests in parallel. -type Manager struct { - pud *puddle.Pool[*poolWithDBName] - baseConfig *pgxpool.Config - cleanup CleanupFunc - logger *slog.Logger - prepare PrepareFunc - - mu sync.Mutex // protects nextDbNum - nextDBNum int -} - -// NewManager creates a new Manager with the given databaseURL, maxPoolSize, and -// optional prepare/cleanup funcs. -func NewManager(config *pgxpool.Config, maxPoolSize int32, prepare PrepareFunc, cleanup CleanupFunc) (*Manager, error) { - manager := &Manager{ - baseConfig: config, - cleanup: cleanup, - logger: slog.New(slog.NewTextHandler(os.Stdout, nil)), - prepare: prepare, - } - - pool, err := puddle.NewPool(&puddle.Config[*poolWithDBName]{ - Constructor: manager.allocatePool, - Destructor: manager.closePool, - MaxSize: maxPoolSize, - }) - if err != nil { - return nil, err - } - manager.pud = pool - return manager, nil -} - -// Acquire returns a DBWithPool which contains a pgxpool.Pool. The DBWithPool -// must be released after use. -func (m *Manager) Acquire(ctx context.Context) (*DBWithPool, error) { - m.logger.Debug("DBManager: Acquire called") - res, err := m.pud.Acquire(ctx) - if err != nil { - return nil, err - } - m.logger.Debug("DBManager: Acquire returned pool", "pool", res.Value().pool, "error", err, "dbName", res.Value().dbName) - - return &DBWithPool{res: res, logger: m.logger, manager: m, dbName: res.Value().dbName}, nil -} - -// Close closes the Manager and all of its databases + pools. It blocks until -// all those underlying resources are unused and closed. 
-func (m *Manager) Close() { - m.logger.Debug("DBManager: Close called") - m.pud.Close() - m.logger.Debug("DBManager: Close returned") -} - -func (m *Manager) allocatePool(ctx context.Context) (*poolWithDBName, error) { - nextDBNum := m.getNextDBNum() - dbName := fmt.Sprintf("%s_%d", m.baseConfig.ConnConfig.Database, nextDBNum) - - m.logger.Debug("Using test database", "name", dbName) - - newPoolConfig := m.baseConfig.Copy() - newPoolConfig.ConnConfig.Database = dbName - - pgxp, err := pgxpool.NewWithConfig(ctx, newPoolConfig) - if err != nil { - return nil, err - } - - if m.cleanup != nil { - m.logger.Debug("DBManager: allocatePool calling cleanup", "dbName", dbName) - if err := m.cleanup(ctx, pgxp); err != nil { - m.logger.Error("DBManager: error during allocatePool cleanup", "error", err) - pgxp.Close() - return nil, fmt.Errorf("error during cleanup: %w", err) - } - m.logger.Debug("DBManager: allocatePool cleanup returned", "dbName", dbName) - } - - if m.prepare != nil { - m.logger.Debug("DBManager: allocatePool calling prepare", "dbName", dbName) - if err = m.prepare(ctx, pgxp); err != nil { - pgxp.Close() - return nil, fmt.Errorf("error during prepare: %w", err) - } - m.logger.Debug("DBManager: allocatePool prepare returned", "dbName", dbName) - } - - return &poolWithDBName{ - config: newPoolConfig, - dbName: dbName, - pool: pgxp, - }, nil -} - -func (m *Manager) closePool(pwn *poolWithDBName) { - // Close the pool so that there are no active connections on the database: - m.logger.Debug("DBManager: closePool called", "pool", pwn.pool, "dbName", pwn.dbName) - pwn.pool.Close() - m.logger.Debug("DBManager: closePool returned") -} - -func (m *Manager) getNextDBNum() int { - m.mu.Lock() - defer m.mu.Unlock() - - nextNum := m.nextDBNum - m.nextDBNum++ - return nextNum -} diff --git a/internal/testdb/manager_test.go b/internal/testdb/manager_test.go deleted file mode 100644 index a553ac2e..00000000 --- a/internal/testdb/manager_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package testdb_test - -import ( - "context" - "os" - "strings" - "testing" - "time" - - "github.com/jackc/pgx/v5/pgxpool" - - "github.com/riverqueue/river/internal/testdb" -) - -func getTestDatabaseURL() string { - if envURL := os.Getenv("TEST_DATABASE_URL"); envURL != "" { - return envURL - } - return "postgres:///river_test?sslmode=disable" -} - -func testConfig(t *testing.T) *pgxpool.Config { - t.Helper() - - config, err := pgxpool.ParseConfig(getTestDatabaseURL()) - if err != nil { - t.Fatal(err) - } - return config -} - -func TestManager_AcquireMultiple(t *testing.T) { - t.Parallel() - - manager, err := testdb.NewManager(testConfig(t), 10, nil, nil) - if err != nil { - t.Fatal(err) - } - defer manager.Close() - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - pool0, err := manager.Acquire(ctx) - if err != nil { - t.Fatal(err) - } - defer pool0.Release() - - checkDBNameForPool(ctx, t, pool0, "river_test_") - - pool1, err := manager.Acquire(ctx) - if err != nil { - t.Fatal(err) - } - defer pool1.Release() - - checkDBNameForPool(ctx, t, pool1, "river_test_") - pool0.Release() - - // ensure we get db 0 back on subsequent acquire since it was released to the pool: - pool0Again, err := manager.Acquire(ctx) - if err != nil { - t.Fatal(err) - } - defer pool0Again.Release() - - checkDBNameForPool(ctx, t, pool0Again, "river_test_") - pool0Again.Release() - pool1.Release() - - manager.Close() -} - -func TestManager_ReleaseTwice(t *testing.T) { - t.Parallel() - - manager, err := 
testdb.NewManager(testConfig(t), 10, nil, nil) - if err != nil { - t.Fatal(err) - } - defer manager.Close() - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - tdb0, err := manager.Acquire(ctx) - if err != nil { - t.Fatal(err) - } - defer tdb0.Release() - - selectOne(ctx, t, tdb0) - - // explicitly close p0's pgxpool.Pool before Release to ensure it can be fully - // reused after release: - tdb0.Pool().Close() - t.Log("RELEASING P0") - // tdb0.Release() - // Calling release twice should be a no-op: - t.Log("RELEASING P0 AGAIN") - tdb0.Release() - - // ensure we get db 0 back on subsequent acquire since it was released to the pool: - t.Log("REACQUIRING P0") - tdb1, err := manager.Acquire(ctx) - if err != nil { - t.Fatal(err) - } - defer tdb1.Release() - - selectOne(ctx, t, tdb1) - tdb1.Release() - - t.Log("CALLING MANAGER CLOSE MANUALLY") - manager.Close() -} - -func checkDBNameForPool(ctx context.Context, t *testing.T, p *testdb.DBWithPool, expectedPrefix string) { - t.Helper() - - var currentDBName string - if err := p.Pool().QueryRow(ctx, "SELECT current_database()").Scan(¤tDBName); err != nil { - t.Fatal(err) - } - - if !strings.HasPrefix(currentDBName, expectedPrefix) { - t.Errorf("expected database name to begin with %q, got %q", expectedPrefix, currentDBName) - } -} - -func selectOne(ctx context.Context, t *testing.T, p *testdb.DBWithPool) { - t.Helper() - - var one int - if err := p.Pool().QueryRow(ctx, "SELECT 1").Scan(&one); err != nil { - t.Fatal(err) - } - - if one != 1 { - t.Errorf("expected 1, got %d", one) - } -} diff --git a/internal/util/chanutil/debounced_chan_test.go b/internal/util/chanutil/debounced_chan_test.go index b7e107dc..f8af7207 100644 --- a/internal/util/chanutil/debounced_chan_test.go +++ b/internal/util/chanutil/debounced_chan_test.go @@ -152,10 +152,10 @@ func TestDebouncedChan_ContinuousOperation(t *testing.T) { // into our total test time, and +1 for an initial fire. // // This almost always lands right on the expected number, but allow a delta - // of +/-2 to allow the channel to be off by two cycles in either direction. - // By running at `-count 1000` I can usually reproduce an off-by-one-or-two - // cycle. + // of +/-3 to allow the channel to be off by 3 cycles in either direction + // (mainly for CI). By running at `-count 1000` I can usually reproduce an + // off-by-one-or-two cycle. 
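+ //
+ // As a worked example with illustrative numbers (not necessarily this
+ // test's actual constants): testTime = 1s and cooldown = 100ms gives
+ // round(1000/100) + 1 = 11 expected signals, and the delta of 3 accepts
+ // anything from 8 to 14.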
expectedNumSignal := int(math.Round(float64(testTime)/float64(cooldown))) + 1 t.Logf("Expected: %d, actual: %d", expectedNumSignal, numSignals) - require.InDelta(t, expectedNumSignal, numSignals, 2) + require.InDelta(t, expectedNumSignal, numSignals, 3) } diff --git a/internal/util/dbutil/db_util_test.go b/internal/util/dbutil/db_util_test.go index 50f51033..a514dd3c 100644 --- a/internal/util/dbutil/db_util_test.go +++ b/internal/util/dbutil/db_util_test.go @@ -1,4 +1,4 @@ -package dbutil +package dbutil_test import ( "context" @@ -6,7 +6,8 @@ import ( "github.com/stretchr/testify/require" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/internal/util/dbutil" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverpgxv5" ) @@ -15,10 +16,10 @@ func TestWithTx(t *testing.T) { t.Parallel() ctx := context.Background() - dbPool := riverinternaltest.TestDB(ctx, t) - driver := riverpgxv5.New(dbPool) + tx := riverdbtest.TestTxPgx(ctx, t) + driver := riverpgxv5.New(nil) - err := WithTx(ctx, driver.GetExecutor(), func(ctx context.Context, exec riverdriver.ExecutorTx) error { + err := dbutil.WithTx(ctx, driver.UnwrapExecutor(tx), func(ctx context.Context, exec riverdriver.ExecutorTx) error { _, err := exec.Exec(ctx, "SELECT 1") require.NoError(t, err) @@ -31,10 +32,10 @@ func TestWithTxV(t *testing.T) { t.Parallel() ctx := context.Background() - dbPool := riverinternaltest.TestDB(ctx, t) - driver := riverpgxv5.New(dbPool) + tx := riverdbtest.TestTxPgx(ctx, t) + driver := riverpgxv5.New(nil) - ret, err := WithTxV(ctx, driver.GetExecutor(), func(ctx context.Context, exec riverdriver.ExecutorTx) (int, error) { + ret, err := dbutil.WithTxV(ctx, driver.UnwrapExecutor(tx), func(ctx context.Context, exec riverdriver.ExecutorTx) (int, error) { _, err := exec.Exec(ctx, "SELECT 1") require.NoError(t, err) diff --git a/job_complete_tx.go b/job_complete_tx.go index a50d70d3..bd408e8b 100644 --- a/job_complete_tx.go +++ b/job_complete_tx.go @@ -53,7 +53,7 @@ func JobCompleteTx[TDriver riverdriver.Driver[TTx], TTx any, TArgs JobArgs](ctx } execTx := driver.UnwrapExecutor(tx) - params := riverdriver.JobSetStateCompleted(job.ID, time.Now(), nil) + params := riverdriver.JobSetStateCompleted(job.ID, client.baseService.Time.NowUTC(), nil) rows, err := pilot.JobSetStateIfRunningMany(ctx, execTx, &riverdriver.JobSetStateIfRunningManyParams{ ID: []int64{params.ID}, Attempt: []*int{params.Attempt}, @@ -62,6 +62,7 @@ func JobCompleteTx[TDriver riverdriver.Driver[TTx], TTx any, TArgs JobArgs](ctx MetadataDoMerge: []bool{hasMetadataUpdates}, MetadataUpdates: [][]byte{metadataUpdatesBytes}, ScheduledAt: []*time.Time{params.ScheduledAt}, + Schema: client.config.Schema, State: []rivertype.JobState{params.State}, }) if err != nil { diff --git a/job_complete_tx_test.go b/job_complete_tx_test.go index 692f1045..3ef999bb 100644 --- a/job_complete_tx_test.go +++ b/job_complete_tx_test.go @@ -11,7 +11,7 @@ import ( "github.com/riverqueue/river/internal/execution" "github.com/riverqueue/river/internal/jobexecutor" "github.com/riverqueue/river/internal/rivercommon" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivershared/riversharedtest" @@ -38,7 +38,7 @@ func TestJobCompleteTx(t *testing.T) { setup := func(ctx context.Context, t *testing.T) 
(context.Context, *testBundle) { t.Helper() - tx := riverinternaltest.TestTx(ctx, t) + tx := riverdbtest.TestTxPgx(ctx, t) client, err := NewClient(riverpgxv5.New(nil), &Config{ Logger: riversharedtest.Logger(t), }) diff --git a/job_list_params.go b/job_list_params.go index 356cadda..035b6121 100644 --- a/job_list_params.go +++ b/job_list_params.go @@ -171,6 +171,7 @@ type JobListParams struct { overrodeState bool paginationCount int32 queues []string + schema string sortField JobListOrderByField sortOrder SortOrder states []rivertype.JobState @@ -206,6 +207,7 @@ func (p *JobListParams) copy() *JobListParams { queues: append([]string(nil), p.queues...), sortField: p.sortField, sortOrder: p.sortOrder, + schema: p.schema, states: append([]rivertype.JobState(nil), p.states...), } } @@ -289,7 +291,7 @@ func (p *JobListParams) toDBParams() (*dblist.JobListParams, error) { conditionsBuilder.WriteString(condition) } - dbParams := &dblist.JobListParams{ + return &dblist.JobListParams{ Conditions: conditionsBuilder.String(), Kinds: p.kinds, LimitCount: p.paginationCount, @@ -297,10 +299,9 @@ func (p *JobListParams) toDBParams() (*dblist.JobListParams, error) { OrderBy: orderBy, Priorities: nil, Queues: p.queues, + Schema: p.schema, States: p.states, - } - - return dbParams, nil + }, nil } // After returns an updated filter set that will only return jobs diff --git a/plugin_test.go b/plugin_test.go index 955ff118..4cbf1b96 100644 --- a/plugin_test.go +++ b/plugin_test.go @@ -8,7 +8,7 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivershared/baseservice" "github.com/riverqueue/river/rivershared/riverpilot" @@ -28,9 +28,16 @@ func TestClientDriverPlugin(t *testing.T) { setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - pluginDriver := newDriverWithPlugin(t, riverinternaltest.TestDB(ctx, t)) + var ( + config = newTestConfig(t, nil) + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + pluginDriver = newDriverWithPlugin(t, dbPool) + ) + config.Schema = schema - client, err := NewClient(pluginDriver, newTestConfig(t, nil)) + client, err := NewClient(pluginDriver, config) require.NoError(t, err) return client, &testBundle{ @@ -90,11 +97,18 @@ func TestClientPilotPlugin(t *testing.T) { setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - pluginDriver := newDriverWithPlugin(t, riverinternaltest.TestDB(ctx, t)) - pluginPilot := newPilotWithPlugin(t) + var ( + config = newTestConfig(t, nil) + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + pluginDriver = newDriverWithPlugin(t, dbPool) + pluginPilot = newPilotWithPlugin(t) + ) + config.Schema = schema pluginDriver.pilot = pluginPilot - client, err := NewClient(pluginDriver, newTestConfig(t, nil)) + client, err := NewClient(pluginDriver, config) require.NoError(t, err) return client, &testBundle{ diff --git a/producer.go b/producer.go index 26e2ce9e..2ead83d7 100644 --- a/producer.go +++ b/producer.go @@ -283,6 +283,7 @@ func (p *producer) StartWorkContext(fetchCtx, workCtx context.Context) error { return p.exec.QueueCreateOrSetUpdatedAt(ctx, &riverdriver.QueueCreateOrSetUpdatedAtParams{ Metadata: []byte("{}"), Name: p.config.Queue, + 
Schema: p.config.Schema, }) }() if err != nil { @@ -326,11 +327,7 @@ func (p *producer) StartWorkContext(fetchCtx, workCtx context.Context) error { controlSub *notifier.Subscription insertSub *notifier.Subscription ) - if p.config.Notifier == nil { - p.Logger.DebugContext(fetchCtx, p.Name+": No notifier configured; starting in poll mode", "client_id", p.config.ClientID) - - go p.pollForSettingChanges(fetchCtx, initiallyPaused, initialMetadata) - } else { + if p.config.Notifier != nil { var err error handleInsertNotification := func(topic notifier.NotificationTopic, payload string) { @@ -373,6 +370,18 @@ func (p *producer) StartWorkContext(fetchCtx, workCtx context.Context) error { p.Logger.DebugContext(fetchCtx, p.Name+": Run loop stopped", slog.String("queue", p.config.Queue), slog.Uint64("num_completed_jobs", p.numJobsRan.Load())) }() + var wg sync.WaitGroup + defer wg.Wait() + if p.config.Notifier == nil { + p.Logger.DebugContext(fetchCtx, p.Name+": No notifier configured; starting in poll mode", "client_id", p.config.ClientID) + + wg.Add(1) + go func() { + defer wg.Done() + p.pollForSettingChanges(fetchCtx, initiallyPaused, initialMetadata) + }() + } + if insertSub != nil { defer insertSub.Unlisten(fetchCtx) } @@ -503,7 +512,11 @@ func (p *producer) fetchAndRunLoop(fetchCtx, workCtx context.Context, fetchLimit case controlActionMetadataChanged: p.Logger.DebugContext(workCtx, p.Name+": Queue metadata changed", slog.String("queue", p.config.Queue), slog.String("queue_in_message", msg.Queue)) p.testSignals.MetadataChanged.Signal(struct{}{}) - if err := p.pilot.QueueMetadataChanged(workCtx, p.exec, p.state, msg.Metadata); err != nil { + if err := p.pilot.QueueMetadataChanged(workCtx, p.exec, &riverpilot.QueueMetadataChangedParams{ + Metadata: msg.Metadata, + Schema: p.config.Schema, + State: p.state, + }); err != nil { p.Logger.ErrorContext(workCtx, p.Name+": Error updating queue metadata with pilot", slog.String("queue", p.config.Queue), slog.String("err", err.Error())) } case controlActionPause: @@ -616,7 +629,11 @@ func (p *producer) finalizeShutdown(ctx context.Context) { ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - if err := p.pilot.ProducerShutdown(ctx, p.exec, p.id.Load(), p.state); err != nil { + if err := p.pilot.ProducerShutdown(ctx, p.exec, &riverpilot.ProducerShutdownParams{ + ProducerID: p.id.Load(), + Schema: p.config.Schema, + State: p.state, + }); err != nil { // Don't retry on these errors: // - context.Canceled: parent context is canceled, so retrying with a new timeout won't help // - ErrClosedPool: the database connection pool is closed, so retrying won't succeed @@ -684,6 +701,7 @@ func (p *producer) dispatchWork(workCtx context.Context, count int, fetchResultC jobs, err := p.pilot.JobGetAvailable(ctx, p.exec, p.state, &riverdriver.JobGetAvailableParams{ ClientID: p.config.ClientID, Max: count, + Now: p.Time.NowUTCOrNil(), Queue: p.config.Queue, ProducerID: p.id.Load(), Schema: p.config.Schema, @@ -752,7 +770,6 @@ func (p *producer) startNewExecutors(workCtx context.Context, jobs []*rivertype. 
InformProducerDoneFunc: p.handleWorkerDone, JobRow: job, SchedulerInterval: p.config.SchedulerInterval, - Schema: p.config.Schema, WorkUnit: workUnit, }) p.addActiveJob(job.ID, executor) @@ -914,6 +931,7 @@ func (p *producer) reportQueueStatusOnce(ctx context.Context) { _, err := p.exec.QueueCreateOrSetUpdatedAt(ctx, &riverdriver.QueueCreateOrSetUpdatedAtParams{ Metadata: []byte("{}"), Name: p.config.Queue, + Schema: p.config.Schema, }) if err != nil && errors.Is(context.Cause(ctx), startstop.ErrStop) { return diff --git a/producer_test.go b/producer_test.go index af981c48..0c55a61e 100644 --- a/producer_test.go +++ b/producer_test.go @@ -18,6 +18,7 @@ import ( "github.com/riverqueue/river/internal/rivercommon" "github.com/riverqueue/river/internal/riverinternaltest" "github.com/riverqueue/river/internal/riverinternaltest/sharedtx" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivershared/baseservice" @@ -44,7 +45,7 @@ func Test_Producer_CanSafelyCompleteJobsWhileFetchingNewOnes(t *testing.T) { ctx := context.Background() require := require.New(t) - dbPool := riverinternaltest.TestDB(ctx, t) + dbPool := riversharedtest.DBPool(ctx, t) const maxJobCount = 10000 // This doesn't strictly mean that there are no more jobs left to process, @@ -55,16 +56,16 @@ func Test_Producer_CanSafelyCompleteJobsWhileFetchingNewOnes(t *testing.T) { archetype := riversharedtest.BaseServiceArchetype(t) config := newTestConfig(t, nil) - dbDriver := riverpgxv5.New(dbPool) - exec := dbDriver.GetExecutor() - schema := "" // try to make tests schema-based rather than database-based in the future - listener := dbDriver.GetListener(schema) + driver := riverpgxv5.New(dbPool) + exec := driver.GetExecutor() + schema := riverdbtest.TestSchema(ctx, t, driver, nil) + listener := driver.GetListener(schema) pilot := &riverpilot.StandardPilot{} - subscribeCh := make(chan []jobcompleter.CompleterJobUpdated, 100) - t.Cleanup(riverinternaltest.DiscardContinuously(subscribeCh)) + subscribeChan := make(chan []jobcompleter.CompleterJobUpdated, 100) + t.Cleanup(riverinternaltest.DiscardContinuously(subscribeChan)) - completer := jobcompleter.NewInlineCompleter(archetype, exec, &riverpilot.StandardPilot{}, subscribeCh) + completer := jobcompleter.NewInlineCompleter(archetype, schema, exec, &riverpilot.StandardPilot{}, subscribeChan) t.Cleanup(completer.Stop) type WithJobNumArgs struct { @@ -167,8 +168,7 @@ func TestProducer_PollOnly(t *testing.T) { driver = riverpgxv5.New(nil) pilot = &riverpilot.StandardPilot{} queueName = fmt.Sprintf("test-producer-poll-only-%05d", randutil.IntBetween(1, 100_000)) - schema = "" // try to make tests schema-based rather than database-based in the future - tx = riverinternaltest.TestTx(ctx, t) + tx = riverdbtest.TestTxPgx(ctx, t) ) // Wrap with a shared transaction because the producer fetching jobs may @@ -180,7 +180,7 @@ func TestProducer_PollOnly(t *testing.T) { jobUpdates = make(chan []jobcompleter.CompleterJobUpdated, 10) ) - completer := jobcompleter.NewInlineCompleter(archetype, exec, &riverpilot.StandardPilot{}, jobUpdates) + completer := jobcompleter.NewInlineCompleter(archetype, "", exec, &riverpilot.StandardPilot{}, jobUpdates) { require.NoError(t, completer.Start(ctx)) t.Cleanup(completer.Stop) @@ -203,7 +203,7 @@ func TestProducer_PollOnly(t *testing.T) { QueueReportInterval: queueReportIntervalDefault, RetryPolicy: &DefaultClientRetryPolicy{}, SchedulerInterval: 
riverinternaltest.SchedulerShortInterval, - Schema: schema, + Schema: "", StaleProducerRetentionPeriod: time.Minute, Workers: NewWorkers(), }), jobUpdates @@ -218,17 +218,17 @@ func TestProducer_WithNotifier(t *testing.T) { var ( archetype = riversharedtest.BaseServiceArchetype(t) - dbPool = riverinternaltest.TestDB(ctx, t) + dbPool = riversharedtest.DBPool(ctx, t) driver = riverpgxv5.New(dbPool) exec = driver.GetExecutor() jobUpdates = make(chan []jobcompleter.CompleterJobUpdated, 10) - schema = "" // try to make tests schema-based rather than database-based in the future + schema = riverdbtest.TestSchema(ctx, t, driver, nil) listener = driver.GetListener(schema) pilot = &riverpilot.StandardPilot{} queueName = fmt.Sprintf("test-producer-with-notifier-%05d", randutil.IntBetween(1, 100_000)) ) - completer := jobcompleter.NewInlineCompleter(archetype, exec, &riverpilot.StandardPilot{}, jobUpdates) + completer := jobcompleter.NewInlineCompleter(archetype, schema, exec, &riverpilot.StandardPilot{}, jobUpdates) { require.NoError(t, completer.Start(ctx)) t.Cleanup(completer.Stop) @@ -527,6 +527,7 @@ func testProducer(t *testing.T, makeProducer func(ctx context.Context, t *testin testfactory.Queue(ctx, t, bundle.exec, &testfactory.QueueOpts{ Name: ptrutil.Ptr(producer.config.Queue), PausedAt: ptrutil.Ptr(time.Now()), + Schema: producer.config.Schema, }) mustInsert(ctx, t, producer, bundle, &noOpArgs{}) @@ -567,7 +568,7 @@ func testProducer(t *testing.T, makeProducer func(ctx context.Context, t *testin })) if producer.config.Notifier != nil { // also emit notification: - emitQueueNotification(t, ctx, bundle.exec, queueNameToPause, "pause", nil) + emitQueueNotification(t, ctx, bundle.exec, producer.config.Schema, queueNameToPause, "pause", nil) } producer.testSignals.Paused.WaitOrTimeout() @@ -587,7 +588,7 @@ func testProducer(t *testing.T, makeProducer func(ctx context.Context, t *testin })) if producer.config.Notifier != nil { // also emit notification: - emitQueueNotification(t, ctx, bundle.exec, queueNameToPause, "resume", nil) + emitQueueNotification(t, ctx, bundle.exec, producer.config.Schema, queueNameToPause, "resume", nil) } producer.testSignals.Resumed.WaitOrTimeout() @@ -618,6 +619,7 @@ func testProducer(t *testing.T, makeProducer func(ctx context.Context, t *testin // Delete the queue by using a future-dated horizon: _, err := bundle.exec.QueueDeleteExpired(ctx, &riverdriver.QueueDeleteExpiredParams{ Max: 100, + Schema: producer.config.Schema, UpdatedAtHorizon: time.Now().Add(time.Minute), }) require.NoError(t, err) @@ -643,6 +645,7 @@ func testProducer(t *testing.T, makeProducer func(ctx context.Context, t *testin Metadata: newMetadata, MetadataDoUpdate: true, Name: producer.config.Queue, + Schema: producer.config.Schema, }) require.NoError(t, err) } @@ -652,7 +655,7 @@ func testProducer(t *testing.T, makeProducer func(ctx context.Context, t *testin if producer.config.Notifier != nil { // also emit notification: - emitQueueNotification(t, ctx, bundle.exec, producer.config.Queue, "metadata_changed", []byte(`{"foo":"bar","baz":123}`)) + emitQueueNotification(t, ctx, bundle.exec, producer.config.Schema, producer.config.Queue, "metadata_changed", []byte(`{"foo":"bar","baz":123}`)) } producer.testSignals.MetadataChanged.WaitOrTimeout() @@ -676,7 +679,7 @@ func testProducer(t *testing.T, makeProducer func(ctx context.Context, t *testin updateMetadata(differentMetadata) if producer.config.Notifier != nil { // also emit notification: - emitQueueNotification(t, ctx, bundle.exec, 
producer.config.Queue, "metadata_changed", differentMetadata) + emitQueueNotification(t, ctx, bundle.exec, producer.config.Schema, producer.config.Queue, "metadata_changed", differentMetadata) } // Should receive a metadata changed signal since the JSON is different: @@ -684,7 +687,7 @@ func testProducer(t *testing.T, makeProducer func(ctx context.Context, t *testin }) } -func emitQueueNotification(t *testing.T, ctx context.Context, exec riverdriver.Executor, queue, action string, metadata []byte) { +func emitQueueNotification(t *testing.T, ctx context.Context, exec riverdriver.Executor, schema, queue, action string, metadata []byte) { t.Helper() payload := map[string]any{ @@ -701,7 +704,7 @@ func emitQueueNotification(t *testing.T, ctx context.Context, exec riverdriver.E err = exec.NotifyMany(ctx, &riverdriver.NotifyManyParams{ Topic: string(notifier.NotificationTopicControl), Payload: []string{string(payloadBytes)}, - Schema: "", + Schema: schema, }) require.NoError(t, err) } diff --git a/recorded_output_test.go b/recorded_output_test.go index 2a70af82..21d039f6 100644 --- a/recorded_output_test.go +++ b/recorded_output_test.go @@ -10,7 +10,8 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/stretchr/testify/require" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" + "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivershared/riversharedtest" ) @@ -28,16 +29,27 @@ func Test_RecordedOutput(t *testing.T) { type testBundle struct { dbPool *pgxpool.Pool + schema string } setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) - config := newTestConfig(t, nil) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + config = newTestConfig(t, nil) + ) + config.Schema = schema + client := newTestClient(t, dbPool, config) t.Cleanup(func() { require.NoError(t, client.Stop(ctx)) }) - return client, &testBundle{dbPool: dbPool} + + return client, &testBundle{ + dbPool: dbPool, + schema: schema, + } } t.Run("ValidOutput", func(t *testing.T) { diff --git a/riverdbtest/riverdbtest.go b/riverdbtest/riverdbtest.go new file mode 100644 index 00000000..efd6fdb9 --- /dev/null +++ b/riverdbtest/riverdbtest.go @@ -0,0 +1,473 @@ +// Package riverdbtest contains testing infrastructure for the River project +// itself that creates isolated schemas suitable for use within a single case. +// +// This package is for internal use and should not be considered stable. Changes +// to functions and types in this package WILL NOT be considered breaking +// changes for purposes of River's semantic versioning. 
+package riverdbtest
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "runtime"
+ "slices"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/jackc/pgx/v5"
+ "github.com/stretchr/testify/require"
+
+ "github.com/riverqueue/river/riverdriver"
+ "github.com/riverqueue/river/riverdriver/riverpgxv5"
+ "github.com/riverqueue/river/rivermigrate"
+ "github.com/riverqueue/river/rivershared/riversharedtest"
+ "github.com/riverqueue/river/rivershared/util/sliceutil"
+ "github.com/riverqueue/river/rivershared/util/testutil"
+)
+
+const schemaDateFormat = "2006_01_02t15_04_05" // everything here needs to be lowercase because Postgres forces schema names to lowercase
+
+var (
+ genSchemaBase sync.Once //nolint:gochecknoglobals
+ idleSchemas = make(map[string][]string) //nolint:gochecknoglobals
+ idleSchemasMu sync.Mutex //nolint:gochecknoglobals
+ initialCleanup sync.Once //nolint:gochecknoglobals
+ nextSchemaNum atomic.Int32 //nolint:gochecknoglobals
+ packageName string //nolint:gochecknoglobals
+ schemaBaseName string //nolint:gochecknoglobals
+ stats struct { //nolint:gochecknoglobals
+ numGenerated atomic.Int32
+ numReused atomic.Int32
+ }
+)
+
+// TestSchemaOpts are options for TestSchema. Most of the time these can be left
+// as nil.
+type TestSchemaOpts struct {
+ // Lines are migration lines to run. By default, the migration lines
+ // specified by the driver's GetMigrationDefaultLines function are run.
+ //
+ // Set to an empty non-nil slice like `[]string{}` to run no migrations.
+ Lines []string
+
+ // noReuse prevents the schema from being checked back in for reuse at the
+ // end of tests.
+ noReuse bool
+
+ // skipPackageNameCheck skips the check that package name doesn't resolve to
+ // `riverdbtest`. Normally we want this to make sure that we're skipping
+ // the right number of frames back to the caller package, but it needs to be
+ // skipped for tests _in_ `riverdbtest`. That's also why it's not
+ // exported.
+ skipPackageNameCheck bool
+
+ skipExtraFrames int
+}
+
+// TestSchema generates an isolated schema for use during a single test run.
+// Migrations are run in the schema (this adds ~50 ms of overhead) to prepare it
+// for River testing. After a test run, the schema in use is checked back into a
+// pool for potential reuse. When a schema is reused, tables in TruncateTables
+// are truncated to leave a clean state for the next test.
+//
+// Use of a schema doesn't happen in River clients automatically. TestSchema
+// returns the name of a schema to use. This should be set in River clients,
+// used as a parameter for testfactory functions, and anywhere else where
+// database operations are issued.
+//
+// Despite reasonably fast migrations and efficient reuse, test schemas still
+// have more overhead than a test transaction, so prefer the use of
+// riversharedtest.TestTx where a full database isn't needed (TestSchema is
+// commonly needed where tests rely on database notifications).
+//
+// tb is an interface that tolerates not having a cleanup hook so constructs
+// like testutil.PanicTB can be used. If Cleanup isn't available, schemas aren't
+// checked back in for reuse.
+//
+// Where Cleanup is available, the function also performs a post-flight check
+// that makes sure all tables in TruncateTables are empty. This helps detect
+// problems where test cases accidentally inject data into the default schema
+// rather than the one returned by this function.
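+//
+// As a usage sketch drawn from tests updated in this diff (t and ctx are the
+// usual test values; config is a River client Config):
+//
+//	dbPool := riversharedtest.DBPool(ctx, t)
+//	driver := riverpgxv5.New(dbPool)
+//	schema := riverdbtest.TestSchema(ctx, t, driver, nil)
+//	config.Schema = schema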
+func TestSchema[TTx any](ctx context.Context, tb testutil.TestingTB, driver riverdriver.Driver[TTx], opts *TestSchemaOpts) string { + tb.Helper() + + require.NotNil(tb, driver, "driver should not be nil") + + if opts == nil { + opts = &TestSchemaOpts{} + } + + // An initial pass to calculate a friendly package name that'll be used to + // prefix this package's schemas so that it won't clash with packages + // running their own tests in parallel. Generated name is like `river` or + // `jobcompleter` or `riverpro`. + genSchemaBase.Do(func() { + var ( + programCounterAddr, _, _, _ = runtime.Caller(4 + opts.skipExtraFrames) // skip `TestSchema.func1` (closure) + `sync.(*Once).doSlow` + `sync.(*Once).Do` + `TestSchema` and end up at `TestSchema`'s caller + funcName = runtime.FuncForPC(programCounterAddr).Name() // like: github.com/riverqueue/river.Test_Client.func1 + ) + + packageName = packageFromFunc(funcName) // like: `river` (or `jobcompleter`, or `riverpro`) + + // Check to make sure we're skipping the right number of frames above. + // If the location of `runtime.Caller` is changed at all (a single new + // function is added to the stack), the reported package will be + // completely wrong, so we try to take precautions about it. + if packageName == "riverdbtest" && !opts.skipPackageNameCheck { + panic("package name should not resolve to riverdbtest") + } + + // Notification topics are prefixed with schemas. The max Postgres + // length of topics is 63, and "river_leadership" is the longest topic + // name. If the package suffix is longer than the max that could fit + // into 63 when combined with a schema name and leadership, trim it down + // a bit. The only package where this is needed as I write this is + // `riverencrypt_test`. If this happens in too many places we may want + // to trim "_schema_" to an abbreviation or shorten "river_leadership". + const maxLength = 63 - len("_2025_04_20t16_00_20_schema_01.river_leadership") - 1 + if len(packageName) > maxLength { + packageName = packageName[0:maxLength] + } + + schemaBaseName = packageName + "_" + time.Now().Format(schemaDateFormat) + "_schema_" + }) + + exec := driver.GetExecutor() + + // Schemas aren't dropped after a package test run. Instead, we drop them + // before starting a test run. This happens in a `sync.Once` to minimize the + // amount of work that needs to be done (it has to run once, but all other + // TestSchema invocations skip it). + initialCleanup.Do(func() { + expiredSchemas := func() []string { + // We only expire schemas in our package prefix (e.g. `river_*`) so + // that in case other package tests are running in parallel we don't + // end up contending with them as they also try to clean their old + // schemas. + expiredSchemas, err := driver.GetExecutor().SchemaGetExpired(ctx, &riverdriver.SchemaGetExpiredParams{ + BeforeName: schemaBaseName, + Prefix: packageName + "_%", + }) + require.NoError(tb, err) + + return expiredSchemas + }() + + start := time.Now() + + for _, schema := range expiredSchemas { + _, err := exec.Exec(ctx, fmt.Sprintf("DROP SCHEMA %s CASCADE", schema)) + require.NoError(tb, err) + } + + tb.Logf("Dropped %d expired schema(s) in %s", len(expiredSchemas), time.Since(start)) + }) + + lines := driver.GetMigrationDefaultLines() + if opts.Lines != nil { + lines = opts.Lines + } + + // Idle schemas must be managed by which migration lines were run within + // them. i.e. A schema with no migrations obviously cannot be reused for a + // test expecting the `main` migration line. 
+	//
+	// linesKey acts as a key specific to this migration set for idleSchemas.
+	slices.Sort(lines)
+	linesKey := strings.Join(lines, ",")
+
+	// All tables to truncate when reusing a schema for this set of lines. Also
+	// used to perform the post-flight cleanup check to make sure tests didn't
+	// leave any detritus in the default schema.
+	var truncateTables []string
+	for _, line := range lines {
+		truncateTables = append(truncateTables, driver.GetMigrationTruncateTables(line)...)
+	}
+
+	// See if there are any idle schemas that were previously generated during
+	// this run and have since been checked back into the pool. If so, pop one
+	// off and run cleanup on it. If not, continue on to generating a new schema
+	// below. This function never blocks, so we'll prefer generating extra
+	// schemas rather than optimizing amongst a minimal set that's already there.
+	if schema := func() string {
+		idleSchemasMu.Lock()
+		defer idleSchemasMu.Unlock()
+
+		linesIdleSchemas := idleSchemas[linesKey]
+
+		if len(linesIdleSchemas) < 1 {
+			return ""
+		}
+
+		schema := linesIdleSchemas[0]
+		idleSchemas[linesKey] = linesIdleSchemas[1:]
+		return schema
+	}(); schema != "" {
+		start := time.Now()
+
+		if len(truncateTables) > 0 {
+			_, err := exec.Exec(ctx, "TRUNCATE TABLE "+
+				strings.Join(
+					sliceutil.Map(
+						truncateTables,
+						func(table string) string { return schema + "." + table },
+					),
+					", ",
+				),
+			)
+			require.NoError(tb, err)
+		}
+
+		tb.Logf("Reusing idle schema %q after cleaning in %s [%d generated] [%d reused]",
+			schema, time.Since(start), stats.numGenerated.Load(), stats.numReused.Add(1))
+
+		return schema
+	}
+
+	// e.g. river_2025_04_14t22_13_58_schema_10
+	schema := schemaBaseName + fmt.Sprintf("%02d", nextSchemaNum.Add(1))
+
+	_, err := exec.Exec(ctx, "CREATE SCHEMA "+schema)
+	require.NoError(tb, err)
+
+	for _, line := range lines {
+		// Migrate the new schema. This takes somewhere in the neighborhood of
+		// 10 to 50ms on my machine, which is already pretty fast, but we still
+		// prefer to use an already created schema if available.
+		migrator, err := rivermigrate.New(driver, &rivermigrate.Config{
+			Line:   line,
+			Logger: riversharedtest.LoggerWarn(tb), // set to warn level to make migrate logs a little quieter since we'll be migrating a lot
+			Schema: schema,
+		})
+		require.NoError(tb, err)
+
+		start := time.Now()
+
+		migrateRes, err := migrator.Migrate(ctx, rivermigrate.DirectionUp, &rivermigrate.MigrateOpts{})
+		require.NoError(tb, err)
+
+		tb.Logf("Generated schema %q with migrations %+v on line %q in %s [%d generated] [%d reused]",
+			schema,
+			sliceutil.Map(migrateRes.Versions, func(v rivermigrate.MigrateVersion) int { return v.Version }),
+			line,
+			time.Since(start),
+			stats.numGenerated.Add(1),
+			stats.numReused.Load(),
+		)
+	}
+
+	// Use an interface here so that callers can pass in `testutil.PanicTB`,
+	// which doesn't have a Cleanup implementation, but also won't care about
+	// having to check schemas back in (it's used in example tests).
+	type testingTBWithCleanup interface {
+		Cleanup(cleanupFunc func())
+	}
+
+	if withCleanup, ok := tb.(testingTBWithCleanup); ok {
+		if !opts.noReuse {
+			withCleanup.Cleanup(func() {
+				idleSchemasMu.Lock()
+				defer idleSchemasMu.Unlock()
+
+				idleSchemas[linesKey] = append(idleSchemas[linesKey], schema)
+
+				tb.Logf("Checked in schema %q; %d idle schema(s) [%d generated] [%d reused]",
+					schema, len(idleSchemas[linesKey]), stats.numGenerated.Load(), stats.numReused.Load())
+			})
+		}
+	} else {
+		tb.Logf("tb does not implement Cleanup; schema not checked in for reuse")
+	}
+
+	return schema
+}
+
+// Gets a "friendly package name" from a fully qualified function name.
+//
+// Most effectively demonstrated by example:
+//
+//   - `github.com/riverqueue/river.Test_Client.func1` -> `river`
+//   - `github.com/riverqueue/river/internal/jobcompleter.testCompleterWait` -> `jobcompleter`
+//
+// This is then used as a root for constructing schema names. It's convenient
+// because it's not too long (schemas have a max length of 63 characters), human
+// friendly, and won't have any special characters.
+func packageFromFunc(funcName string) string {
+	var (
+		packagePathLastSlashIndex = strings.LastIndex(funcName, "/")        // index of last slash in path, so pointing at `/river.Test_Client.func1`
+		funcNameFromLastSlash     = funcName[packagePathLastSlashIndex+1:]  // like: `river.Test_Client.func1` (slash excluded)
+		packageName, _, _         = strings.Cut(funcNameFromLastSlash, ".") // cut around first dot to extract `river`
+	)
+
+	return packageName
+}
+
+// TestTxPgx starts a test transaction that's rolled back automatically as the
+// test case is cleaning itself up.
+//
+// This variant starts a transaction for the standard pgx/v5 driver most
+// commonly used throughout River.
+func TestTxPgx(ctx context.Context, tb testing.TB) pgx.Tx {
+	tb.Helper()
+
+	return TestTx(ctx, tb, riverpgxv5.New(riversharedtest.DBPool(ctx, tb)), &TestTxOpts{
+		IsTestTxHelper: true,
+	})
+}
+
+// TestTxOpts are options for TestTx. Most of the time these can be left as nil.
+type TestTxOpts struct {
+	// IsTestTxHelper should be set to true if TestTx is being called from
+	// within a secondary helper that's in a common testing package. This causes
+	// an extra stack frame to be skipped when determining the name of the test
+	// schema being used for test transactions. So instead of `riverdbtest` or
+	// `riverprodbtest` we get the real name of the package being tested (e.g.
+	// `river` or `riverpro`).
+	IsTestTxHelper bool
+
+	// Lines are migration lines to run. By default, the migration lines
+	// specified by the driver's GetMigrationDefaultLines function are run.
+	//
+	// Set to an empty non-nil slice like `[]string{}` to run no migrations.
+	//
+	// This is currently not exported because it hasn't been needed anywhere yet
+	// for test transactions.
+	lines []string
+
+	// skipPackageNameCheck skips the check that package name doesn't resolve to
+	// `riverdbtest`. Normally we want this to make sure that we're skipping
+	// the right number of frames back to the caller package, but it needs to be
+	// skipped for tests _in_ `riverdbtest`. That's also why it's not exported.
+	skipPackageNameCheck bool
+}
+
+// TestTx starts a test transaction that's rolled back automatically as the test
+// case is cleaning itself up.
+//
+// The function invokes TestSchema to create a single schema where this test
+// transaction and all future test transactions for this package test run will
+// run.
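+//
+// A minimal usage sketch (assuming a pgx driver; the transaction is rolled
+// back automatically in test cleanup):
+//
+//	tx := riverdbtest.TestTxPgx(ctx, t)
+//	_, err := tx.Exec(ctx, "SELECT 1")
+//	require.NoError(t, err)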
+//
+// `search_path` is set to the name of the transaction schema so that it's not
+// necessary to specify an explicit schema for database operations. (This is
+// somewhat of a legacy decision.)
+//
+// The included driver determines what migrations are run to prepare the test
+// transaction schema.
+func TestTx[TTx any](ctx context.Context, tb testing.TB, driver riverdriver.Driver[TTx], opts *TestTxOpts) TTx {
+	tb.Helper()
+
+	schema := testTxSchemaForMigrationLines(ctx, tb, driver, opts)
+	tb.Logf("TestTx using schema: %s", schema)
+
+	tx, err := driver.GetExecutor().Begin(ctx)
+	require.NoError(tb, err)
+
+	_, err = tx.Exec(ctx, "SET search_path TO '"+schema+"'")
+	require.NoError(tb, err)
+
+	tb.Cleanup(func() {
+		// Tests may inherit context from `t.Context()` which is cancelled
+		// after tests run and before cleanup is called. We need a
+		// non-cancelled context to issue rollback here, so use a bit of a
+		// bludgeon to do so with `context.WithoutCancel()`.
+		ctx := context.WithoutCancel(ctx)
+
+		err := tx.Rollback(ctx)
+
+		if err == nil {
+			return
+		}
+
+		// Check for an error on rollback because it does occasionally reveal a
+		// real problem in the way a test is written. However, allow tests to
+		// roll back their transaction early if they like, so ignore
+		// `ErrTxClosed`.
+		if errors.Is(err, pgx.ErrTxClosed) {
+			return
+		}
+
+		// In case of a cancelled context during a database operation, which
+		// happens in many tests, pgx seems to not only roll back the
+		// transaction, but also close the connection, and return this error on
+		// rollback. Allow this error since it's hard to prevent it in our
+		// flows that use contexts heavily.
+		if err.Error() == "conn closed" {
+			return
+		}
+
+		// Similar to the above, but a newly appeared error that wraps the
+		// above. As far as I can tell, no error variables are available to use
+		// with `errors.Is`.
+		if err.Error() == "failed to deallocate cached statement(s): conn closed" {
+			return
+		}
+
+		require.NoError(tb, err)
+	})
+
+	return driver.UnwrapTx(tx)
+}
+
+var (
+	testTxSchemas   = make(map[string]string) //nolint:gochecknoglobals
+	testTxSchemasMu sync.RWMutex              //nolint:gochecknoglobals
+)
+
+func testTxSchemaForMigrationLines[TTx any](ctx context.Context, tb testing.TB, driver riverdriver.Driver[TTx], opts *TestTxOpts) string {
+	tb.Helper()
+
+	if opts == nil {
+		opts = &TestTxOpts{}
+	}
+
+	lines := driver.GetMigrationDefaultLines()
+	if opts.lines != nil {
+		lines = opts.lines
+	}
+
+	// Transaction schemas must be tracked according to which migration lines
+	// were run within them, which is determined by the included driver, i.e. a
+	// schema with no migrations obviously cannot be reused for a test expecting
+	// the `main` migration line.
+	//
+	// linesKey acts as a key specific to this migration set for testTxSchemas.
+	slices.Sort(lines)
+	linesKey := strings.Join(lines, ",")
+
+	testTxSchemasMu.RLock()
+	schema := testTxSchemas[linesKey]
+	testTxSchemasMu.RUnlock()
+
+	if schema != "" {
+		return schema
+	}
+
+	testTxSchemasMu.Lock()
+	defer testTxSchemasMu.Unlock()
+
+	// Check for a schema once more in case there was a race to acquire the
+	// mutex lock and another TestTx invocation did it first.
+	if schema = testTxSchemas[linesKey]; schema != "" {
+		return schema
+	}
+
+	// If called from a transaction helper like `TestTxPgx`, skip one more frame
+	// for purposes of schema naming.
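+	// (The direct call stack is caller -> TestTx -> testTxSchemaForMigrationLines,
+	// hence the base of 2; a helper like `TestTxPgx` adds one more frame on top.)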
+	skipExtraFrames := 2
+	if opts.IsTestTxHelper {
+		skipExtraFrames++
+	}
+
+	schema = TestSchema(ctx, tb, driver, &TestSchemaOpts{
+		Lines:                lines,
+		noReuse:              true,
+		skipExtraFrames:      skipExtraFrames,
+		skipPackageNameCheck: opts.skipPackageNameCheck,
+	})
+	testTxSchemas[linesKey] = schema
+	return schema
+}
diff --git a/riverdbtest/riverdbtest_test.go b/riverdbtest/riverdbtest_test.go
new file mode 100644
index 00000000..c9d4c4d5
--- /dev/null
+++ b/riverdbtest/riverdbtest_test.go
@@ -0,0 +1,249 @@
+package riverdbtest
+
+import (
+	"context"
+	"sync"
+	"testing"
+
+	"github.com/jackc/pgerrcode"
+	"github.com/jackc/pgx/v5/pgconn"
+	"github.com/stretchr/testify/require"
+
+	"github.com/riverqueue/river/riverdriver"
+	"github.com/riverqueue/river/riverdriver/riverpgxv5"
+	"github.com/riverqueue/river/rivershared/riversharedtest"
+	"github.com/riverqueue/river/rivershared/testfactory"
+	"github.com/riverqueue/river/rivershared/util/ptrutil"
+	"github.com/riverqueue/river/rivertype"
+)
+
+func TestTestSchema(t *testing.T) {
+	t.Parallel()
+
+	var (
+		ctx    = context.Background()
+		dbPool = riversharedtest.DBPool(ctx, t)
+		driver = riverpgxv5.New(dbPool)
+		exec   = driver.GetExecutor()
+	)
+
+	// Always use this set of options on the first invocation of TestSchema in
+	// each test. Makes sure that the initial check that package name isn't
+	// `riverdbtest` is skipped, but it's only needed once because the
+	// check's done in a `sync.Once`. Must be used in every test case because
+	// we're using `t.Parallel()` and any test could win the first run race.
+	firstInvocationOpts := &TestSchemaOpts{skipPackageNameCheck: true}
+
+	t.Run("BasicExerciseAndVisibility", func(t *testing.T) {
+		t.Parallel()
+
+		schema1 := TestSchema(ctx, t, driver, firstInvocationOpts)
+		require.Regexp(t, `\Ariverdbtest_`, schema1)
+
+		schema2 := TestSchema(ctx, t, driver, nil)
+		require.Regexp(t, `\Ariverdbtest_`, schema2)
+
+		require.NotEqual(t, schema1, schema2)
+
+		job1 := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("schema1_job"), Schema: schema1})
+		job2 := testfactory.Job(ctx, t, exec, &testfactory.JobOpts{Kind: ptrutil.Ptr("schema2_job"), Schema: schema2})
+
+		// Notably, the jobs will actually share an ID because the schemas are
+		// brand new and the ID sequence will start from 1 in each one. They'll
+		// start at 1 on the first test run, but will increase if `-count` is
+		// issued because schemas will start being reused.
+		//
+		// Knowing about this shared ID is important because it implies we
+		// cannot compare jobs just by ID below. We have to check that another
+		// property like their kind also matches.
+		require.Equal(t, job1.ID, job2.ID)
+
+		// Each job is found in its appropriate schema. Make sure to check kind
+		// because as above, IDs will be identical.
+		{
+			fetchedJob1, err := exec.JobGetByID(ctx, &riverdriver.JobGetByIDParams{ID: job1.ID, Schema: schema1})
+			require.NoError(t, err)
+			require.Equal(t, "schema1_job", fetchedJob1.Kind)
+
+			fetchedJob2, err := exec.JobGetByID(ctx, &riverdriver.JobGetByIDParams{ID: job2.ID, Schema: schema2})
+			require.NoError(t, err)
+			require.Equal(t, "schema2_job", fetchedJob2.Kind)
+		}
+
+		// Essentially the same check as above, but just checking that jobs are
+		// found in each schema by their appropriate kind.
+		{
+			fetchedJobs1, err := exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{Kind: []string{"schema1_job"}, Schema: schema1})
+			require.NoError(t, err)
+			require.Len(t, fetchedJobs1, 1)
+
+			fetchedJobs2, err := exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{Kind: []string{"schema2_job"}, Schema: schema2})
+			require.NoError(t, err)
+			require.Len(t, fetchedJobs2, 1)
+		}
+
+		// Invert the schemas on each check to show that no jobs intended for
+		// the other schema are found in each other's schema.
+		{
+			fetchedJobs1, err := exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{Kind: []string{"schema1_job"}, Schema: schema2})
+			require.NoError(t, err)
+			require.Empty(t, fetchedJobs1)
+
+			fetchedJobs2, err := exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{Kind: []string{"schema2_job"}, Schema: schema1})
+			require.NoError(t, err)
+			require.Empty(t, fetchedJobs2)
+		}
+	})
+
+	t.Run("EmptyLines", func(t *testing.T) {
+		t.Parallel()
+
+		var schema string
+
+		t.Run("FirstCheckout", func(t *testing.T) {
+			schema = TestSchema(ctx, t, driver, &TestSchemaOpts{
+				Lines:                []string{}, // non-nil empty indicates no migrations should be run
+				skipPackageNameCheck: true,
+			})
+
+			_, err := exec.JobGetByID(ctx, &riverdriver.JobGetByIDParams{ID: 1, Schema: schema})
+			var pgErr *pgconn.PgError
+			require.ErrorAs(t, err, &pgErr)
+			require.Equal(t, pgerrcode.UndefinedTable, pgErr.Code)
+		})
+
+		// Get another empty schema to make sure that truncating tables with an
+		// empty migration line works. This schema is reused because the subtest
+		// above will have checked its schema back in when its cleanup hook
+		// runs.
+		nextSchema := TestSchema(ctx, t, driver, &TestSchemaOpts{
+			Lines: []string{},
+		})
+		require.Equal(t, schema, nextSchema)
+	})
+}
+
+func TestPackageFromFunc(t *testing.T) {
+	t.Parallel()
+
+	require.Equal(t, "river", packageFromFunc("github.com/riverqueue/river.Test_Client.func1"))
+	require.Equal(t, "jobcompleter", packageFromFunc("github.com/riverqueue/river/internal/jobcompleter.testCompleterWait"))
+}
+
+func TestTestTx(t *testing.T) {
+	t.Parallel()
+
+	var (
+		ctx    = context.Background()
+		dbPool = riversharedtest.DBPool(ctx, t)
+		driver = riverpgxv5.New(dbPool)
+	)
+
+	t.Run("TransactionVisibility", func(t *testing.T) {
+		t.Parallel()
+
+		type PoolOrTx interface {
+			Exec(ctx context.Context, sql string, arguments ...any) (commandTag pgconn.CommandTag, err error)
+		}
+
+		checkTestTable := func(ctx context.Context, poolOrTx PoolOrTx) error {
+			_, err := poolOrTx.Exec(ctx, "SELECT * FROM river_shared_test_tx_table")
+			return err
+		}
+
+		// Test cleanups are invoked in the order of last added, first called.
+		// When TestTx is called below it adds a cleanup, so we want to make
+		// sure that this cleanup, which checks that the database remains
+		// pristine, is invoked after the TestTx cleanup, so we add it first.
+		t.Cleanup(func() {
+			// Tests may inherit context from `t.Context()` which is cancelled
+			// after tests run and before cleanup is called. We need a
+			// non-cancelled context to issue queries here, so use a bit of a
+			// bludgeon to do so with `context.WithoutCancel()`.
+			ctx := context.WithoutCancel(ctx)
+
+			err := checkTestTable(ctx, dbPool)
+			require.Error(t, err)
+
+			var pgErr *pgconn.PgError
+			require.ErrorAs(t, err, &pgErr)
+			require.Equal(t, pgerrcode.UndefinedTable, pgErr.Code)
+		})
+
+		tx := TestTx(ctx, t, driver, &TestTxOpts{skipPackageNameCheck: true})
+
+		_, err := tx.Exec(ctx, "CREATE TABLE river_shared_test_tx_table (id bigint)")
+		require.NoError(t, err)
+
+		err = checkTestTable(ctx, tx)
+		require.NoError(t, err)
+	})
+
+	t.Run("EmptyLines", func(t *testing.T) {
+		t.Parallel()
+
+		{
+			tx := TestTx(ctx, t, driver, &TestTxOpts{
+				lines:                []string{}, // non-nil empty indicates no migrations should be run
+				skipPackageNameCheck: true,
+			})
+
+			_, err := driver.UnwrapExecutor(tx).JobGetByID(ctx, &riverdriver.JobGetByIDParams{ID: 1, Schema: ""})
+			var pgErr *pgconn.PgError
+			require.ErrorAs(t, err, &pgErr)
+			require.Equal(t, pgerrcode.UndefinedTable, pgErr.Code)
+		}
+
+		// Get another test transaction with an empty schema to make sure that
+		// rollback with an empty migration line works. This schema is reused
+		// because the block above will have added the test transaction schema
+		// to testTxSchemas.
+		{
+			tx := TestTx(ctx, t, driver, &TestTxOpts{
+				lines: []string{},
+			})
+			_, err := driver.UnwrapExecutor(tx).JobGetByID(ctx, &riverdriver.JobGetByIDParams{ID: 1, Schema: ""})
+			var pgErr *pgconn.PgError
+			require.ErrorAs(t, err, &pgErr)
+			require.Equal(t, pgerrcode.UndefinedTable, pgErr.Code)
+		}
+
+		// A test transaction with default options uses the main schema and has
+		// a jobs table.
+		{
+			tx := TestTx(ctx, t, driver, nil)
+			_, err := driver.UnwrapExecutor(tx).JobGetByID(ctx, &riverdriver.JobGetByIDParams{ID: 1, Schema: ""})
+			require.ErrorIs(t, err, rivertype.ErrNotFound)
+		}
+	})
+
+	// Simulates a bunch of parallel processes using `TestTx` simultaneously.
+	// With the help of `go test -race`, should identify mutex/locking/parallel
+	// access problems if there are any.
+	t.Run("ConcurrentAccess", func(t *testing.T) {
+		t.Parallel()
+
+		// Don't open more transactions at once than the maximum pool size
+		// because that would deadlock.
+		const numGoroutines = 4
+
+		var (
+			ctx = context.Background()
+			wg  sync.WaitGroup
+		)
+
+		dbPool := riversharedtest.DBPoolClone(ctx, t)
+
+		wg.Add(numGoroutines)
+		for i := range numGoroutines {
+			workerNum := i
+			go func() {
+				_ = TestTx(ctx, t, riverpgxv5.New(dbPool), &TestTxOpts{skipPackageNameCheck: true})
+				t.Logf("Opened transaction: %d", workerNum)
+				wg.Done()
+			}()
+		}
+
+		wg.Wait()
+	})
+}
diff --git a/riverdriver/river_driver_interface.go b/riverdriver/river_driver_interface.go
index ba5b03bc..5315b911 100644
--- a/riverdriver/river_driver_interface.go
+++ b/riverdriver/river_driver_interface.go
@@ -55,6 +55,14 @@ type Driver[TTx any] interface {
 	// API is not stable. DO NOT USE.
 	GetListener(schema string) Listener
 
+	// GetMigrationDefaultLines gets default migration lines that should be
+	// applied when using this driver. This is mainly used by riverdbtest to
+	// figure out what migration lines should be available by default for new
+	// test schemas.
+	//
+	// API is not stable. DO NOT USE.
+	GetMigrationDefaultLines() []string
+
 	// GetMigrationFS gets a filesystem containing migrations for the driver.
 	//
 	// Each set of migration files is expected to exist within the filesystem as
@@ -71,6 +79,14 @@
 	// API is not stable. DO NOT USE.
 	GetMigrationLines() []string
 
+	// GetMigrationTruncateTables gets the tables that should be truncated
+	// before or after tests for a specific migration line returned by this
+	// driver. Tables to truncate don't need to consider intermediary states,
+	// and should return tables for the latest migration version.
+	//
+	// API is not stable. DO NOT USE.
+	GetMigrationTruncateTables(line string) []string
+
 	// HasPool returns true if the driver is configured with a database pool.
 	//
 	// API is not stable. DO NOT USE.
@@ -86,6 +102,11 @@ type Driver[TTx any] interface {
 	//
 	// API is not stable. DO NOT USE.
 	UnwrapExecutor(tx TTx) ExecutorTx
+
+	// UnwrapTx unwraps an ExecutorTx into the driver's original transaction
+	// type.
+	//
+	// API is not stable. DO NOT USE.
+	UnwrapTx(execTx ExecutorTx) TTx
 }
 
 // Executor provides River operations against a database. It may be a database
@@ -165,6 +183,8 @@ type Executor interface {
 	QueuePause(ctx context.Context, params *QueuePauseParams) error
 	QueueResume(ctx context.Context, params *QueueResumeParams) error
 	QueueUpdate(ctx context.Context, params *QueueUpdateParams) (*rivertype.Queue, error)
+	QueryRow(ctx context.Context, sql string, args ...any) Row
+	SchemaGetExpired(ctx context.Context, params *SchemaGetExpiredParams) ([]string, error)
 
 	// TableExists checks whether a table exists for the schema in the current
 	// search schema.
@@ -363,6 +383,7 @@ type JobSetStateIfRunningParams struct {
 	MetadataDoMerge  bool
 	MetadataUpdates  []byte
 	ScheduledAt      *time.Time
+	Schema           string // added by completer
 	State            rivertype.JobState
 }
 
@@ -622,6 +643,19 @@ type QueueUpdateParams struct {
 	Schema string
 }
 
+type Row interface {
+	Scan(dest ...any) error
+}
+
+type Schema struct {
+	Name string
+}
+
+type SchemaGetExpiredParams struct {
+	BeforeName string
+	Prefix     string
+}
+
 type TableExistsParams struct {
 	Schema string
 	Table  string
diff --git a/riverdriver/river_driver_interface_test.go b/riverdriver/river_driver_interface_test.go
index 5036f81d..0f9ff628 100644
--- a/riverdriver/river_driver_interface_test.go
+++ b/riverdriver/river_driver_interface_test.go
@@ -24,6 +24,7 @@ func TestJobSetStateCancelled(t *testing.T) {
 		require.True(t, result.FinalizedAt.Equal(finalizedAt), "expected FinalizedAt to equal %v, got %v", finalizedAt, result.FinalizedAt)
 		require.Nil(t, result.MetadataUpdates)
 		require.False(t, result.MetadataDoMerge)
+		require.Empty(t, result.Schema)
 		require.Equal(t, rivertype.JobStateCancelled, result.State)
 	})
 
@@ -41,6 +42,7 @@ func TestJobSetStateCancelled(t *testing.T) {
 		require.True(t, result.FinalizedAt.Equal(finalizedAt), "expected FinalizedAt to equal %v, got %v", finalizedAt, result.FinalizedAt)
 		require.Equal(t, metadata, result.MetadataUpdates)
 		require.True(t, result.MetadataDoMerge)
+		require.Empty(t, result.Schema)
 		require.Equal(t, rivertype.JobStateCancelled, result.State)
 	})
 }
@@ -60,6 +62,7 @@ func TestJobSetStateCompleted(t *testing.T) {
 		require.True(t, result.FinalizedAt.Equal(finalizedAt), "expected FinalizedAt to equal %v, got %v", finalizedAt, result.FinalizedAt)
 		require.False(t, result.MetadataDoMerge)
 		require.Nil(t, result.MetadataUpdates)
+		require.Empty(t, result.Schema)
 		require.Equal(t, rivertype.JobStateCompleted, result.State)
 	})
 
@@ -75,6 +78,7 @@ func TestJobSetStateCompleted(t *testing.T) {
 		require.True(t, result.FinalizedAt.Equal(finalizedAt))
 		require.True(t, result.MetadataDoMerge)
 		require.Equal(t, metadata, result.MetadataUpdates)
+		require.Empty(t, result.Schema)
 		require.Equal(t, rivertype.JobStateCompleted, result.State)
 	})
 }
@@ -95,6 +99,7 @@ func TestJobSetStateDiscarded(t *testing.T) {
 		require.True(t, result.FinalizedAt.Equal(finalizedAt))
require.False(t, result.MetadataDoMerge) require.Nil(t, result.MetadataUpdates) + require.Empty(t, result.Schema) require.Equal(t, rivertype.JobStateDiscarded, result.State) }) @@ -112,6 +117,7 @@ func TestJobSetStateDiscarded(t *testing.T) { require.True(t, result.FinalizedAt.Equal(finalizedAt)) require.Equal(t, metadata, result.MetadataUpdates) require.True(t, result.MetadataDoMerge) + require.Empty(t, result.Schema) require.Equal(t, rivertype.JobStateDiscarded, result.State) }) } @@ -132,6 +138,7 @@ func TestJobSetStateErrorAvailable(t *testing.T) { require.Nil(t, result.MetadataUpdates) require.NotNil(t, result.ScheduledAt) require.True(t, result.ScheduledAt.Equal(scheduledAt)) + require.Empty(t, result.Schema) require.Equal(t, rivertype.JobStateAvailable, result.State) }) @@ -148,6 +155,7 @@ func TestJobSetStateErrorAvailable(t *testing.T) { require.Equal(t, metadata, result.MetadataUpdates) require.NotNil(t, result.ScheduledAt) require.True(t, result.ScheduledAt.Equal(scheduledAt)) + require.Empty(t, result.Schema) require.Equal(t, errData, result.ErrData) }) } @@ -168,6 +176,7 @@ func TestJobSetStateErrorRetryable(t *testing.T) { require.NotNil(t, result.ScheduledAt) require.True(t, result.ScheduledAt.Equal(scheduledAt)) require.Equal(t, errData, result.ErrData) + require.Empty(t, result.Schema) require.Equal(t, rivertype.JobStateRetryable, result.State) }) @@ -184,6 +193,7 @@ func TestJobSetStateErrorRetryable(t *testing.T) { require.Equal(t, metadata, result.MetadataUpdates) require.NotNil(t, result.ScheduledAt) require.True(t, result.ScheduledAt.Equal(scheduledAt)) + require.Empty(t, result.Schema) require.Equal(t, errData, result.ErrData) }) } @@ -205,6 +215,7 @@ func TestJobSetStateSnoozed(t *testing.T) { //nolint:dupl require.Nil(t, result.MetadataUpdates) require.NotNil(t, result.ScheduledAt) require.True(t, result.ScheduledAt.Equal(scheduledAt)) + require.Empty(t, result.Schema) require.Equal(t, rivertype.JobStateScheduled, result.State) }) @@ -222,6 +233,7 @@ func TestJobSetStateSnoozed(t *testing.T) { //nolint:dupl require.Equal(t, metadata, result.MetadataUpdates) require.NotNil(t, result.ScheduledAt) require.True(t, result.ScheduledAt.Equal(scheduledAt)) + require.Empty(t, result.Schema) require.Equal(t, rivertype.JobStateScheduled, result.State) }) } @@ -243,6 +255,7 @@ func TestJobSetStateSnoozedAvailable(t *testing.T) { //nolint:dupl require.Nil(t, result.MetadataUpdates) require.NotNil(t, result.ScheduledAt) require.True(t, result.ScheduledAt.Equal(scheduledAt)) + require.Empty(t, result.Schema) require.Equal(t, rivertype.JobStateAvailable, result.State) }) @@ -261,6 +274,7 @@ func TestJobSetStateSnoozedAvailable(t *testing.T) { //nolint:dupl require.Equal(t, metadata, result.MetadataUpdates) require.NotNil(t, result.ScheduledAt) require.True(t, result.ScheduledAt.Equal(scheduledAt)) + require.Empty(t, result.Schema) require.Equal(t, rivertype.JobStateAvailable, result.State) }) } diff --git a/riverdriver/riverdatabasesql/internal/dbsqlc/river_job.sql.go b/riverdriver/riverdatabasesql/internal/dbsqlc/river_job.sql.go index 6037a0dd..b5cf43ae 100644 --- a/riverdriver/riverdatabasesql/internal/dbsqlc/river_job.sql.go +++ b/riverdriver/riverdatabasesql/internal/dbsqlc/river_job.sql.go @@ -206,8 +206,8 @@ WITH locked_jobs AS ( /* TEMPLATE: schema */river_job WHERE state = 'available' - AND queue = $2::text - AND scheduled_at <= coalesce($3::timestamptz, now()) + AND queue = $3::text + AND scheduled_at <= coalesce($1::timestamptz, now()) ORDER BY priority ASC, 
scheduled_at ASC, @@ -221,8 +221,8 @@ UPDATE SET state = 'running', attempt = river_job.attempt + 1, - attempted_at = now(), - attempted_by = array_append(river_job.attempted_by, $1::text) + attempted_at = coalesce($1::timestamptz, now()), + attempted_by = array_append(river_job.attempted_by, $2::text) FROM locked_jobs WHERE @@ -232,17 +232,17 @@ RETURNING ` type JobGetAvailableParams struct { + Now *time.Time AttemptedBy string Queue string - Now *time.Time Max int32 } func (q *Queries) JobGetAvailable(ctx context.Context, db DBTX, arg *JobGetAvailableParams) ([]*RiverJob, error) { rows, err := db.QueryContext(ctx, jobGetAvailable, + arg.Now, arg.AttemptedBy, arg.Queue, - arg.Now, arg.Max, ) if err != nil { @@ -557,7 +557,7 @@ INSERT INTO /* TEMPLATE: schema */river_job( unnest($8::timestamptz[]), -- To avoid requiring pgx users to register the OID of the river_job_state[] -- type, we cast the array to text[] and then to river_job_state. - unnest($9::text[])::river_job_state, + unnest($9::text[])::/* TEMPLATE: schema */river_job_state, -- Unnest on a multi-dimensional array will fully flatten the array, so we -- encode the tag list as a comma-separated string and split it in the -- query. @@ -565,11 +565,10 @@ INSERT INTO /* TEMPLATE: schema */river_job( unnest($11::bytea[]), unnest($12::bit(8)[]) - ON CONFLICT (unique_key) WHERE unique_key IS NOT NULL AND unique_states IS NOT NULL - AND river_job_state_in_bitmask(unique_states, state) + AND /* TEMPLATE: schema */river_job_state_in_bitmask(unique_states, state) -- Something needs to be updated for a row to be returned on a conflict. DO UPDATE SET kind = EXCLUDED.kind RETURNING river_job.id, river_job.args, river_job.attempt, river_job.attempted_at, river_job.attempted_by, river_job.created_at, river_job.errors, river_job.finalized_at, river_job.kind, river_job.max_attempts, river_job.metadata, river_job.priority, river_job.queue, river_job.state, river_job.scheduled_at, river_job.tags, river_job.unique_key, river_job.unique_states, (xmax != 0) AS unique_skipped_as_duplicate @@ -674,7 +673,7 @@ INSERT INTO /* TEMPLATE: schema */river_job( unnest($6::smallint[]), unnest($7::text[]), unnest($8::timestamptz[]), - unnest($9::river_job_state[]), + unnest($9::/* TEMPLATE: schema */river_job_state[]), -- lib/pq really, REALLY does not play nicely with multi-dimensional arrays, -- so instead we pack each set of tags into a string, send them through, @@ -684,11 +683,10 @@ INSERT INTO /* TEMPLATE: schema */river_job( unnest($11::bytea[]), unnest($12::bit(8)[]) - ON CONFLICT (unique_key) WHERE unique_key IS NOT NULL AND unique_states IS NOT NULL - AND river_job_state_in_bitmask(unique_states, state) + AND /* TEMPLATE: schema */river_job_state_in_bitmask(unique_states, state) DO NOTHING ` @@ -761,7 +759,7 @@ INSERT INTO /* TEMPLATE: schema */river_job( $11, $12, coalesce($13::timestamptz, now()), - $14, + $14::/* TEMPLATE: schema */river_job_state, coalesce($15::varchar(255)[], '{}'), $16, $17 @@ -895,7 +893,7 @@ FROM ( unnest($2::jsonb[]) AS error, nullif(unnest($3::timestamptz[]), '0001-01-01 00:00:00 +0000') AS finalized_at, unnest($4::timestamptz[]) AS scheduled_at, - unnest($5::text[])::river_job_state AS state + unnest($5::text[])::/* TEMPLATE: schema */river_job_state AS state ) AS updated_job WHERE river_job.id = updated_job.id ` @@ -1020,7 +1018,7 @@ unique_conflicts AS ( WHERE river_job.unique_key IS NOT NULL AND river_job.unique_states IS NOT NULL - AND river_job_state_in_bitmask(river_job.unique_states, river_job.state) + AND /* TEMPLATE: 
schema */river_job_state_in_bitmask(river_job.unique_states, river_job.state) ), job_updates AS ( SELECT @@ -1028,10 +1026,10 @@ job_updates AS ( job.unique_key, job.unique_states, CASE - WHEN job.row_num IS NULL THEN 'available'::river_job_state - WHEN uc.unique_key IS NOT NULL THEN 'discarded'::river_job_state - WHEN job.row_num = 1 THEN 'available'::river_job_state - ELSE 'discarded'::river_job_state + WHEN job.row_num IS NULL THEN 'available'::/* TEMPLATE: schema */river_job_state + WHEN uc.unique_key IS NOT NULL THEN 'discarded'::/* TEMPLATE: schema */river_job_state + WHEN job.row_num = 1 THEN 'available'::/* TEMPLATE: schema */river_job_state + ELSE 'discarded'::/* TEMPLATE: schema */river_job_state END AS new_state, (job.row_num IS NOT NULL AND (uc.unique_key IS NOT NULL OR job.row_num > 1)) AS finalized_at_do_update, (job.row_num IS NOT NULL AND (uc.unique_key IS NOT NULL OR job.row_num > 1)) AS metadata_do_update @@ -1050,7 +1048,7 @@ updated_jobs AS ( WHERE river_job.id = job_updates.id RETURNING river_job.id, - job_updates.new_state = 'discarded'::river_job_state AS conflict_discarded + job_updates.new_state = 'discarded'::/* TEMPLATE: schema */river_job_state AS conflict_discarded ) SELECT river_job.id, river_job.args, river_job.attempt, river_job.attempted_at, river_job.attempted_by, river_job.created_at, river_job.errors, river_job.finalized_at, river_job.kind, river_job.max_attempts, river_job.metadata, river_job.priority, river_job.queue, river_job.state, river_job.scheduled_at, river_job.tags, river_job.unique_key, river_job.unique_states, @@ -1128,7 +1126,7 @@ WITH job_input AS ( unnest($11::timestamptz[]) AS scheduled_at, -- To avoid requiring pgx users to register the OID of the river_job_state[] -- type, we cast the array to text[] and then to river_job_state. 
- unnest($12::text[])::river_job_state AS state + unnest($12::text[])::/* TEMPLATE: schema */river_job_state AS state ), job_to_update AS ( SELECT @@ -1151,7 +1149,7 @@ job_to_update AS ( FOR UPDATE ), updated_running AS ( - UPDATE river_job + UPDATE /* TEMPLATE: schema */river_job SET attempt = CASE WHEN NOT job_to_update.should_cancel AND job_to_update.attempt_do_update THEN job_to_update.attempt ELSE river_job.attempt END, @@ -1165,7 +1163,7 @@ updated_running AS ( ELSE river_job.metadata END, scheduled_at = CASE WHEN NOT job_to_update.should_cancel AND job_to_update.scheduled_at_do_update THEN job_to_update.scheduled_at ELSE river_job.scheduled_at END, - state = CASE WHEN job_to_update.should_cancel THEN 'cancelled'::river_job_state + state = CASE WHEN job_to_update.should_cancel THEN 'cancelled'::/* TEMPLATE: schema */river_job_state ELSE job_to_update.state END FROM job_to_update WHERE river_job.id = job_to_update.id @@ -1269,7 +1267,7 @@ SET attempted_by = CASE WHEN $5::boolean THEN $6 ELSE attempted_by END, errors = CASE WHEN $7::boolean THEN $8::jsonb[] ELSE errors END, finalized_at = CASE WHEN $9::boolean THEN $10 ELSE finalized_at END, - state = CASE WHEN $11::boolean THEN $12 ELSE state END + state = CASE WHEN $11::boolean THEN $12::/* TEMPLATE: schema */river_job_state ELSE state END WHERE id = $13 RETURNING id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags, unique_key, unique_states ` diff --git a/riverdriver/riverdatabasesql/internal/dbsqlc/schema.sql.go b/riverdriver/riverdatabasesql/internal/dbsqlc/schema.sql.go new file mode 100644 index 00000000..49c4ff29 --- /dev/null +++ b/riverdriver/riverdatabasesql/internal/dbsqlc/schema.sql.go @@ -0,0 +1,46 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.28.0 +// source: schema.sql + +package dbsqlc + +import ( + "context" +) + +const schemaGetExpired = `-- name: SchemaGetExpired :many +SELECT schema_name::text +FROM information_schema.schemata +WHERE schema_name LIKE $1 + AND schema_name < $2 +ORDER BY schema_name +` + +type SchemaGetExpiredParams struct { + Prefix interface{} + BeforeName interface{} +} + +func (q *Queries) SchemaGetExpired(ctx context.Context, db DBTX, arg *SchemaGetExpiredParams) ([]string, error) { + rows, err := db.QueryContext(ctx, schemaGetExpired, arg.Prefix, arg.BeforeName) + if err != nil { + return nil, err + } + defer rows.Close() + var items []string + for rows.Next() { + var schema_name string + if err := rows.Scan(&schema_name); err != nil { + return nil, err + } + items = append(items, schema_name) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/riverdriver/riverdatabasesql/internal/dbsqlc/sqlc.yaml b/riverdriver/riverdatabasesql/internal/dbsqlc/sqlc.yaml index d75cb70b..4cb3c4ec 100644 --- a/riverdriver/riverdatabasesql/internal/dbsqlc/sqlc.yaml +++ b/riverdriver/riverdatabasesql/internal/dbsqlc/sqlc.yaml @@ -9,6 +9,7 @@ sql: - ../../../riverpgxv5/internal/dbsqlc/river_leader.sql - ../../../riverpgxv5/internal/dbsqlc/river_migration.sql - ../../../riverpgxv5/internal/dbsqlc/river_queue.sql + - ../../../riverpgxv5/internal/dbsqlc/schema.sql schema: - ../../../riverpgxv5/internal/dbsqlc/pg_misc.sql - ../../../riverpgxv5/internal/dbsqlc/river_client.sql @@ -17,6 +18,7 @@ sql: - ../../../riverpgxv5/internal/dbsqlc/river_leader.sql - ../../../riverpgxv5/internal/dbsqlc/river_migration.sql - ../../../riverpgxv5/internal/dbsqlc/river_queue.sql + - ../../../riverpgxv5/internal/dbsqlc/schema.sql gen: go: package: "dbsqlc" diff --git a/riverdriver/riverdatabasesql/migration/main/002_initial_schema.up.sql b/riverdriver/riverdatabasesql/migration/main/002_initial_schema.up.sql index 604e82ba..6fb1ce5a 100644 --- a/riverdriver/riverdatabasesql/migration/main/002_initial_schema.up.sql +++ b/riverdriver/riverdatabasesql/migration/main/002_initial_schema.up.sql @@ -18,7 +18,7 @@ CREATE TABLE /* TEMPLATE: schema */river_job( -- looking at jobs with `SELECT *` it'll appear first after ID. The other two -- fields aren't as important but are kept adjacent to `state` for alignment -- to get an 8-byte block. - state river_job_state NOT NULL DEFAULT 'available', + state /* TEMPLATE: schema */river_job_state NOT NULL DEFAULT 'available', attempt smallint NOT NULL DEFAULT 0, max_attempts smallint NOT NULL, diff --git a/riverdriver/riverdatabasesql/migration/main/005_migration_unique_client.up.sql b/riverdriver/riverdatabasesql/migration/main/005_migration_unique_client.up.sql index ff964304..e0f1711e 100644 --- a/riverdriver/riverdatabasesql/migration/main/005_migration_unique_client.up.sql +++ b/riverdriver/riverdatabasesql/migration/main/005_migration_unique_client.up.sql @@ -64,7 +64,7 @@ CREATE UNLOGGED TABLE /* TEMPLATE: schema */river_client ( -- Differs from `river_queue` in that it tracks the queue state for a particular -- active client. 
CREATE UNLOGGED TABLE /* TEMPLATE: schema */river_client_queue ( - river_client_id text NOT NULL REFERENCES river_client (id) ON DELETE CASCADE, + river_client_id text NOT NULL REFERENCES /* TEMPLATE: schema */river_client (id) ON DELETE CASCADE, name text NOT NULL, created_at timestamptz NOT NULL DEFAULT now(), max_workers bigint NOT NULL DEFAULT 0, diff --git a/riverdriver/riverdatabasesql/migration/main/006_bulk_unique.up.sql b/riverdriver/riverdatabasesql/migration/main/006_bulk_unique.up.sql index 15f0ee53..0b9f5e01 100644 --- a/riverdriver/riverdatabasesql/migration/main/006_bulk_unique.up.sql +++ b/riverdriver/riverdatabasesql/migration/main/006_bulk_unique.up.sql @@ -1,5 +1,4 @@ - -CREATE OR REPLACE FUNCTION /* TEMPLATE: schema */river_job_state_in_bitmask(bitmask BIT(8), state river_job_state) +CREATE OR REPLACE FUNCTION /* TEMPLATE: schema */river_job_state_in_bitmask(bitmask BIT(8), state /* TEMPLATE: schema */river_job_state) RETURNS boolean LANGUAGE SQL IMMUTABLE @@ -31,7 +30,7 @@ ALTER TABLE /* TEMPLATE: schema */river_job ADD COLUMN IF NOT EXISTS unique_stat CREATE UNIQUE INDEX IF NOT EXISTS river_job_unique_idx ON /* TEMPLATE: schema */river_job (unique_key) WHERE unique_key IS NOT NULL AND unique_states IS NOT NULL - AND river_job_state_in_bitmask(unique_states, state); + AND /* TEMPLATE: schema */river_job_state_in_bitmask(unique_states, state); -- Remove the old unique index. Users who are actively using the unique jobs -- feature and who wish to avoid deploy downtime may want od drop this in a diff --git a/riverdriver/riverdatabasesql/river_database_sql_driver.go b/riverdriver/riverdatabasesql/river_database_sql_driver.go index f77698cc..a0cbec11 100644 --- a/riverdriver/riverdatabasesql/river_database_sql_driver.go +++ b/riverdriver/riverdatabasesql/river_database_sql_driver.go @@ -58,6 +58,7 @@ func (d *Driver) GetListener(schema string) riverdriver.Listener { panic(riverdriver.ErrNotImplemented) } +func (d *Driver) GetMigrationDefaultLines() []string { return []string{riverdriver.MigrationLineMain} } func (d *Driver) GetMigrationFS(line string) fs.FS { if line == riverdriver.MigrationLineMain { return migrationFS @@ -65,8 +66,20 @@ func (d *Driver) GetMigrationFS(line string) fs.FS { panic("migration line does not exist: " + line) } func (d *Driver) GetMigrationLines() []string { return []string{riverdriver.MigrationLineMain} } -func (d *Driver) HasPool() bool { return d.dbPool != nil } -func (d *Driver) SupportsListener() bool { return false } +func (d *Driver) GetMigrationTruncateTables(line string) []string { + if line == riverdriver.MigrationLineMain { + return []string{ + "river_client", + "river_client_queue", + "river_job", + "river_leader", + "river_queue", + } + } + panic("migration line does not exist: " + line) +} +func (d *Driver) HasPool() bool { return d.dbPool != nil } +func (d *Driver) SupportsListener() bool { return false } func (d *Driver) UnwrapExecutor(tx *sql.Tx) riverdriver.ExecutorTx { // Allows UnwrapExecutor to be invoked even if driver is nil. 
@@ -80,6 +93,8 @@ func (d *Driver) UnwrapExecutor(tx *sql.Tx) riverdriver.ExecutorTx { return &ExecutorTx{Executor: Executor{nil, templateReplaceWrapper{tx, replacer}, d}, tx: tx} } +func (d *Driver) UnwrapTx(execTx riverdriver.ExecutorTx) *sql.Tx { return execTx.(*ExecutorTx).tx } //nolint:forcetypeassert + type Executor struct { dbPool *sql.DB dbtx templateReplaceWrapper @@ -824,6 +839,21 @@ func (e *Executor) QueueUpdate(ctx context.Context, params *riverdriver.QueueUpd return queueFromInternal(queue), nil } +func (e *Executor) QueryRow(ctx context.Context, sql string, args ...any) riverdriver.Row { + return e.dbtx.QueryRowContext(ctx, sql, args...) +} + +func (e *Executor) SchemaGetExpired(ctx context.Context, params *riverdriver.SchemaGetExpiredParams) ([]string, error) { + schemas, err := dbsqlc.New().SchemaGetExpired(ctx, e.dbtx, &dbsqlc.SchemaGetExpiredParams{ + BeforeName: params.BeforeName, + Prefix: params.Prefix, + }) + if err != nil { + return nil, interpretError(err) + } + return schemas, nil +} + func (e *Executor) TableExists(ctx context.Context, params *riverdriver.TableExistsParams) (bool, error) { // Different from other operations because the schemaAndTable name is a parameter. schemaAndTable := params.Table @@ -873,6 +903,7 @@ func (t *ExecutorSubTx) Begin(ctx context.Context) (riverdriver.ExecutorTx, erro if err != nil { return nil, err } + return &ExecutorSubTx{Executor: Executor{nil, templateReplaceWrapper{t.tx, &t.driver.replacer}, t.driver}, savepointNum: nextSavepointNum, single: &singleTransaction{parent: t.single}, tx: t.tx}, nil } diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/river_job.sql b/riverdriver/riverpgxv5/internal/dbsqlc/river_job.sql index 5ef9b8d6..bdf94d8f 100644 --- a/riverdriver/riverpgxv5/internal/dbsqlc/river_job.sql +++ b/riverdriver/riverpgxv5/internal/dbsqlc/river_job.sql @@ -150,7 +150,7 @@ UPDATE SET state = 'running', attempt = river_job.attempt + 1, - attempted_at = now(), + attempted_at = coalesce(sqlc.narg('now')::timestamptz, now()), attempted_by = array_append(river_job.attempted_by, @attempted_by::text) FROM locked_jobs @@ -219,7 +219,7 @@ INSERT INTO /* TEMPLATE: schema */river_job( unnest(@scheduled_at::timestamptz[]), -- To avoid requiring pgx users to register the OID of the river_job_state[] -- type, we cast the array to text[] and then to river_job_state. - unnest(@state::text[])::river_job_state, + unnest(@state::text[])::/* TEMPLATE: schema */river_job_state, -- Unnest on a multi-dimensional array will fully flatten the array, so we -- encode the tag list as a comma-separated string and split it in the -- query. @@ -227,11 +227,10 @@ INSERT INTO /* TEMPLATE: schema */river_job( unnest(@unique_key::bytea[]), unnest(@unique_states::bit(8)[]) - ON CONFLICT (unique_key) WHERE unique_key IS NOT NULL AND unique_states IS NOT NULL - AND river_job_state_in_bitmask(unique_states, state) + AND /* TEMPLATE: schema */river_job_state_in_bitmask(unique_states, state) -- Something needs to be updated for a row to be returned on a conflict. 
DO UPDATE SET kind = EXCLUDED.kind RETURNING sqlc.embed(river_job), (xmax != 0) AS unique_skipped_as_duplicate; @@ -259,7 +258,7 @@ INSERT INTO /* TEMPLATE: schema */river_job( unnest(@priority::smallint[]), unnest(@queue::text[]), unnest(@scheduled_at::timestamptz[]), - unnest(@state::river_job_state[]), + unnest(@state::/* TEMPLATE: schema */river_job_state[]), -- lib/pq really, REALLY does not play nicely with multi-dimensional arrays, -- so instead we pack each set of tags into a string, send them through, @@ -269,11 +268,10 @@ INSERT INTO /* TEMPLATE: schema */river_job( unnest(@unique_key::bytea[]), unnest(@unique_states::bit(8)[]) - ON CONFLICT (unique_key) WHERE unique_key IS NOT NULL AND unique_states IS NOT NULL - AND river_job_state_in_bitmask(unique_states, state) + AND /* TEMPLATE: schema */river_job_state_in_bitmask(unique_states, state) DO NOTHING; -- name: JobInsertFull :one @@ -309,7 +307,7 @@ INSERT INTO /* TEMPLATE: schema */river_job( @priority, @queue, coalesce(sqlc.narg('scheduled_at')::timestamptz, now()), - @state, + @state::/* TEMPLATE: schema */river_job_state, coalesce(@tags::varchar(255)[], '{}'), @unique_key, @unique_states @@ -336,7 +334,7 @@ FROM ( unnest(@error::jsonb[]) AS error, nullif(unnest(@finalized_at::timestamptz[]), '0001-01-01 00:00:00 +0000') AS finalized_at, unnest(@scheduled_at::timestamptz[]) AS scheduled_at, - unnest(@state::text[])::river_job_state AS state + unnest(@state::text[])::/* TEMPLATE: schema */river_job_state AS state ) AS updated_job WHERE river_job.id = updated_job.id; @@ -413,7 +411,7 @@ unique_conflicts AS ( WHERE river_job.unique_key IS NOT NULL AND river_job.unique_states IS NOT NULL - AND river_job_state_in_bitmask(river_job.unique_states, river_job.state) + AND /* TEMPLATE: schema */river_job_state_in_bitmask(river_job.unique_states, river_job.state) ), job_updates AS ( SELECT @@ -421,10 +419,10 @@ job_updates AS ( job.unique_key, job.unique_states, CASE - WHEN job.row_num IS NULL THEN 'available'::river_job_state - WHEN uc.unique_key IS NOT NULL THEN 'discarded'::river_job_state - WHEN job.row_num = 1 THEN 'available'::river_job_state - ELSE 'discarded'::river_job_state + WHEN job.row_num IS NULL THEN 'available'::/* TEMPLATE: schema */river_job_state + WHEN uc.unique_key IS NOT NULL THEN 'discarded'::/* TEMPLATE: schema */river_job_state + WHEN job.row_num = 1 THEN 'available'::/* TEMPLATE: schema */river_job_state + ELSE 'discarded'::/* TEMPLATE: schema */river_job_state END AS new_state, (job.row_num IS NOT NULL AND (uc.unique_key IS NOT NULL OR job.row_num > 1)) AS finalized_at_do_update, (job.row_num IS NOT NULL AND (uc.unique_key IS NOT NULL OR job.row_num > 1)) AS metadata_do_update @@ -443,7 +441,7 @@ updated_jobs AS ( WHERE river_job.id = job_updates.id RETURNING river_job.id, - job_updates.new_state = 'discarded'::river_job_state AS conflict_discarded + job_updates.new_state = 'discarded'::/* TEMPLATE: schema */river_job_state AS conflict_discarded ) SELECT sqlc.embed(river_job), @@ -467,7 +465,7 @@ WITH job_input AS ( unnest(@scheduled_at::timestamptz[]) AS scheduled_at, -- To avoid requiring pgx users to register the OID of the river_job_state[] -- type, we cast the array to text[] and then to river_job_state. 
- unnest(@state::text[])::river_job_state AS state + unnest(@state::text[])::/* TEMPLATE: schema */river_job_state AS state ), job_to_update AS ( SELECT @@ -490,7 +488,7 @@ job_to_update AS ( FOR UPDATE ), updated_running AS ( - UPDATE river_job + UPDATE /* TEMPLATE: schema */river_job SET attempt = CASE WHEN NOT job_to_update.should_cancel AND job_to_update.attempt_do_update THEN job_to_update.attempt ELSE river_job.attempt END, @@ -504,7 +502,7 @@ updated_running AS ( ELSE river_job.metadata END, scheduled_at = CASE WHEN NOT job_to_update.should_cancel AND job_to_update.scheduled_at_do_update THEN job_to_update.scheduled_at ELSE river_job.scheduled_at END, - state = CASE WHEN job_to_update.should_cancel THEN 'cancelled'::river_job_state + state = CASE WHEN job_to_update.should_cancel THEN 'cancelled'::/* TEMPLATE: schema */river_job_state ELSE job_to_update.state END FROM job_to_update WHERE river_job.id = job_to_update.id @@ -539,6 +537,6 @@ SET attempted_by = CASE WHEN @attempted_by_do_update::boolean THEN @attempted_by ELSE attempted_by END, errors = CASE WHEN @errors_do_update::boolean THEN @errors::jsonb[] ELSE errors END, finalized_at = CASE WHEN @finalized_at_do_update::boolean THEN @finalized_at ELSE finalized_at END, - state = CASE WHEN @state_do_update::boolean THEN @state ELSE state END + state = CASE WHEN @state_do_update::boolean THEN @state::/* TEMPLATE: schema */river_job_state ELSE state END WHERE id = @id RETURNING *; diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/river_job.sql.go b/riverdriver/riverpgxv5/internal/dbsqlc/river_job.sql.go index 61ef0419..d32ba66b 100644 --- a/riverdriver/riverpgxv5/internal/dbsqlc/river_job.sql.go +++ b/riverdriver/riverpgxv5/internal/dbsqlc/river_job.sql.go @@ -204,8 +204,8 @@ WITH locked_jobs AS ( /* TEMPLATE: schema */river_job WHERE state = 'available' - AND queue = $2::text - AND scheduled_at <= coalesce($3::timestamptz, now()) + AND queue = $3::text + AND scheduled_at <= coalesce($1::timestamptz, now()) ORDER BY priority ASC, scheduled_at ASC, @@ -219,8 +219,8 @@ UPDATE SET state = 'running', attempt = river_job.attempt + 1, - attempted_at = now(), - attempted_by = array_append(river_job.attempted_by, $1::text) + attempted_at = coalesce($1::timestamptz, now()), + attempted_by = array_append(river_job.attempted_by, $2::text) FROM locked_jobs WHERE @@ -230,17 +230,17 @@ RETURNING ` type JobGetAvailableParams struct { + Now *time.Time AttemptedBy string Queue string - Now *time.Time Max int32 } func (q *Queries) JobGetAvailable(ctx context.Context, db DBTX, arg *JobGetAvailableParams) ([]*RiverJob, error) { rows, err := db.Query(ctx, jobGetAvailable, + arg.Now, arg.AttemptedBy, arg.Queue, - arg.Now, arg.Max, ) if err != nil { @@ -543,7 +543,7 @@ INSERT INTO /* TEMPLATE: schema */river_job( unnest($8::timestamptz[]), -- To avoid requiring pgx users to register the OID of the river_job_state[] -- type, we cast the array to text[] and then to river_job_state. - unnest($9::text[])::river_job_state, + unnest($9::text[])::/* TEMPLATE: schema */river_job_state, -- Unnest on a multi-dimensional array will fully flatten the array, so we -- encode the tag list as a comma-separated string and split it in the -- query. 
@@ -551,11 +551,10 @@ INSERT INTO /* TEMPLATE: schema */river_job( unnest($11::bytea[]), unnest($12::bit(8)[]) - ON CONFLICT (unique_key) WHERE unique_key IS NOT NULL AND unique_states IS NOT NULL - AND river_job_state_in_bitmask(unique_states, state) + AND /* TEMPLATE: schema */river_job_state_in_bitmask(unique_states, state) -- Something needs to be updated for a row to be returned on a conflict. DO UPDATE SET kind = EXCLUDED.kind RETURNING river_job.id, river_job.args, river_job.attempt, river_job.attempted_at, river_job.attempted_by, river_job.created_at, river_job.errors, river_job.finalized_at, river_job.kind, river_job.max_attempts, river_job.metadata, river_job.priority, river_job.queue, river_job.state, river_job.scheduled_at, river_job.tags, river_job.unique_key, river_job.unique_states, (xmax != 0) AS unique_skipped_as_duplicate @@ -657,7 +656,7 @@ INSERT INTO /* TEMPLATE: schema */river_job( unnest($6::smallint[]), unnest($7::text[]), unnest($8::timestamptz[]), - unnest($9::river_job_state[]), + unnest($9::/* TEMPLATE: schema */river_job_state[]), -- lib/pq really, REALLY does not play nicely with multi-dimensional arrays, -- so instead we pack each set of tags into a string, send them through, @@ -667,11 +666,10 @@ INSERT INTO /* TEMPLATE: schema */river_job( unnest($11::bytea[]), unnest($12::bit(8)[]) - ON CONFLICT (unique_key) WHERE unique_key IS NOT NULL AND unique_states IS NOT NULL - AND river_job_state_in_bitmask(unique_states, state) + AND /* TEMPLATE: schema */river_job_state_in_bitmask(unique_states, state) DO NOTHING ` @@ -744,7 +742,7 @@ INSERT INTO /* TEMPLATE: schema */river_job( $11, $12, coalesce($13::timestamptz, now()), - $14, + $14::/* TEMPLATE: schema */river_job_state, coalesce($15::varchar(255)[], '{}'), $16, $17 @@ -875,7 +873,7 @@ FROM ( unnest($2::jsonb[]) AS error, nullif(unnest($3::timestamptz[]), '0001-01-01 00:00:00 +0000') AS finalized_at, unnest($4::timestamptz[]) AS scheduled_at, - unnest($5::text[])::river_job_state AS state + unnest($5::text[])::/* TEMPLATE: schema */river_job_state AS state ) AS updated_job WHERE river_job.id = updated_job.id ` @@ -1000,7 +998,7 @@ unique_conflicts AS ( WHERE river_job.unique_key IS NOT NULL AND river_job.unique_states IS NOT NULL - AND river_job_state_in_bitmask(river_job.unique_states, river_job.state) + AND /* TEMPLATE: schema */river_job_state_in_bitmask(river_job.unique_states, river_job.state) ), job_updates AS ( SELECT @@ -1008,10 +1006,10 @@ job_updates AS ( job.unique_key, job.unique_states, CASE - WHEN job.row_num IS NULL THEN 'available'::river_job_state - WHEN uc.unique_key IS NOT NULL THEN 'discarded'::river_job_state - WHEN job.row_num = 1 THEN 'available'::river_job_state - ELSE 'discarded'::river_job_state + WHEN job.row_num IS NULL THEN 'available'::/* TEMPLATE: schema */river_job_state + WHEN uc.unique_key IS NOT NULL THEN 'discarded'::/* TEMPLATE: schema */river_job_state + WHEN job.row_num = 1 THEN 'available'::/* TEMPLATE: schema */river_job_state + ELSE 'discarded'::/* TEMPLATE: schema */river_job_state END AS new_state, (job.row_num IS NOT NULL AND (uc.unique_key IS NOT NULL OR job.row_num > 1)) AS finalized_at_do_update, (job.row_num IS NOT NULL AND (uc.unique_key IS NOT NULL OR job.row_num > 1)) AS metadata_do_update @@ -1030,7 +1028,7 @@ updated_jobs AS ( WHERE river_job.id = job_updates.id RETURNING river_job.id, - job_updates.new_state = 'discarded'::river_job_state AS conflict_discarded + job_updates.new_state = 'discarded'::/* TEMPLATE: schema */river_job_state AS 
conflict_discarded ) SELECT river_job.id, river_job.args, river_job.attempt, river_job.attempted_at, river_job.attempted_by, river_job.created_at, river_job.errors, river_job.finalized_at, river_job.kind, river_job.max_attempts, river_job.metadata, river_job.priority, river_job.queue, river_job.state, river_job.scheduled_at, river_job.tags, river_job.unique_key, river_job.unique_states, @@ -1105,7 +1103,7 @@ WITH job_input AS ( unnest($11::timestamptz[]) AS scheduled_at, -- To avoid requiring pgx users to register the OID of the river_job_state[] -- type, we cast the array to text[] and then to river_job_state. - unnest($12::text[])::river_job_state AS state + unnest($12::text[])::/* TEMPLATE: schema */river_job_state AS state ), job_to_update AS ( SELECT @@ -1128,7 +1126,7 @@ job_to_update AS ( FOR UPDATE ), updated_running AS ( - UPDATE river_job + UPDATE /* TEMPLATE: schema */river_job SET attempt = CASE WHEN NOT job_to_update.should_cancel AND job_to_update.attempt_do_update THEN job_to_update.attempt ELSE river_job.attempt END, @@ -1142,7 +1140,7 @@ updated_running AS ( ELSE river_job.metadata END, scheduled_at = CASE WHEN NOT job_to_update.should_cancel AND job_to_update.scheduled_at_do_update THEN job_to_update.scheduled_at ELSE river_job.scheduled_at END, - state = CASE WHEN job_to_update.should_cancel THEN 'cancelled'::river_job_state + state = CASE WHEN job_to_update.should_cancel THEN 'cancelled'::/* TEMPLATE: schema */river_job_state ELSE job_to_update.state END FROM job_to_update WHERE river_job.id = job_to_update.id @@ -1243,7 +1241,7 @@ SET attempted_by = CASE WHEN $5::boolean THEN $6 ELSE attempted_by END, errors = CASE WHEN $7::boolean THEN $8::jsonb[] ELSE errors END, finalized_at = CASE WHEN $9::boolean THEN $10 ELSE finalized_at END, - state = CASE WHEN $11::boolean THEN $12 ELSE state END + state = CASE WHEN $11::boolean THEN $12::/* TEMPLATE: schema */river_job_state ELSE state END WHERE id = $13 RETURNING id, args, attempt, attempted_at, attempted_by, created_at, errors, finalized_at, kind, max_attempts, metadata, priority, queue, state, scheduled_at, tags, unique_key, unique_states ` diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/river_job_copyfrom.sql b/riverdriver/riverpgxv5/internal/dbsqlc/river_job_copyfrom.sql index ecb1b18f..54fdbeaa 100644 --- a/riverdriver/riverpgxv5/internal/dbsqlc/river_job_copyfrom.sql +++ b/riverdriver/riverpgxv5/internal/dbsqlc/river_job_copyfrom.sql @@ -1,5 +1,5 @@ -- name: JobInsertFastManyCopyFrom :copyfrom -INSERT INTO /* TEMPLATE: schema */river_job( +INSERT INTO river_job( args, created_at, kind, diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/schema.sql b/riverdriver/riverpgxv5/internal/dbsqlc/schema.sql new file mode 100644 index 00000000..b46a300c --- /dev/null +++ b/riverdriver/riverpgxv5/internal/dbsqlc/schema.sql @@ -0,0 +1,6 @@ +-- name: SchemaGetExpired :many +SELECT schema_name::text +FROM information_schema.schemata +WHERE schema_name LIKE @prefix + AND schema_name < @before_name +ORDER BY schema_name; \ No newline at end of file diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/schema.sql.go b/riverdriver/riverpgxv5/internal/dbsqlc/schema.sql.go new file mode 100644 index 00000000..bf7352e1 --- /dev/null +++ b/riverdriver/riverpgxv5/internal/dbsqlc/schema.sql.go @@ -0,0 +1,43 @@ +// Code generated by sqlc. DO NOT EDIT. 
+// versions: +// sqlc v1.28.0 +// source: schema.sql + +package dbsqlc + +import ( + "context" +) + +const schemaGetExpired = `-- name: SchemaGetExpired :many +SELECT schema_name::text +FROM information_schema.schemata +WHERE schema_name LIKE $1 + AND schema_name < $2 +ORDER BY schema_name +` + +type SchemaGetExpiredParams struct { + Prefix interface{} + BeforeName interface{} +} + +func (q *Queries) SchemaGetExpired(ctx context.Context, db DBTX, arg *SchemaGetExpiredParams) ([]string, error) { + rows, err := db.Query(ctx, schemaGetExpired, arg.Prefix, arg.BeforeName) + if err != nil { + return nil, err + } + defer rows.Close() + var items []string + for rows.Next() { + var schema_name string + if err := rows.Scan(&schema_name); err != nil { + return nil, err + } + items = append(items, schema_name) + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} diff --git a/riverdriver/riverpgxv5/internal/dbsqlc/sqlc.yaml b/riverdriver/riverpgxv5/internal/dbsqlc/sqlc.yaml index 17ff029c..a818dcfa 100644 --- a/riverdriver/riverpgxv5/internal/dbsqlc/sqlc.yaml +++ b/riverdriver/riverpgxv5/internal/dbsqlc/sqlc.yaml @@ -10,6 +10,7 @@ sql: - river_leader.sql - river_migration.sql - river_queue.sql + - schema.sql schema: - pg_misc.sql - river_client.sql @@ -18,6 +19,7 @@ sql: - river_leader.sql - river_migration.sql - river_queue.sql + - schema.sql gen: go: package: "dbsqlc" diff --git a/riverdriver/riverpgxv5/migration/main/002_initial_schema.up.sql b/riverdriver/riverpgxv5/migration/main/002_initial_schema.up.sql index 604e82ba..6fb1ce5a 100644 --- a/riverdriver/riverpgxv5/migration/main/002_initial_schema.up.sql +++ b/riverdriver/riverpgxv5/migration/main/002_initial_schema.up.sql @@ -18,7 +18,7 @@ CREATE TABLE /* TEMPLATE: schema */river_job( -- looking at jobs with `SELECT *` it'll appear first after ID. The other two -- fields aren't as important but are kept adjacent to `state` for alignment -- to get an 8-byte block. - state river_job_state NOT NULL DEFAULT 'available', + state /* TEMPLATE: schema */river_job_state NOT NULL DEFAULT 'available', attempt smallint NOT NULL DEFAULT 0, max_attempts smallint NOT NULL, diff --git a/riverdriver/riverpgxv5/migration/main/005_migration_unique_client.up.sql b/riverdriver/riverpgxv5/migration/main/005_migration_unique_client.up.sql index ff964304..e0f1711e 100644 --- a/riverdriver/riverpgxv5/migration/main/005_migration_unique_client.up.sql +++ b/riverdriver/riverpgxv5/migration/main/005_migration_unique_client.up.sql @@ -64,7 +64,7 @@ CREATE UNLOGGED TABLE /* TEMPLATE: schema */river_client ( -- Differs from `river_queue` in that it tracks the queue state for a particular -- active client. 
CREATE UNLOGGED TABLE /* TEMPLATE: schema */river_client_queue (
-    river_client_id text NOT NULL REFERENCES river_client (id) ON DELETE CASCADE,
+    river_client_id text NOT NULL REFERENCES /* TEMPLATE: schema */river_client (id) ON DELETE CASCADE,
     name text NOT NULL,
     created_at timestamptz NOT NULL DEFAULT now(),
     max_workers bigint NOT NULL DEFAULT 0,
diff --git a/riverdriver/riverpgxv5/migration/main/006_bulk_unique.up.sql b/riverdriver/riverpgxv5/migration/main/006_bulk_unique.up.sql
index 15f0ee53..0b9f5e01 100644
--- a/riverdriver/riverpgxv5/migration/main/006_bulk_unique.up.sql
+++ b/riverdriver/riverpgxv5/migration/main/006_bulk_unique.up.sql
@@ -1,5 +1,4 @@
-
-CREATE OR REPLACE FUNCTION /* TEMPLATE: schema */river_job_state_in_bitmask(bitmask BIT(8), state river_job_state)
+CREATE OR REPLACE FUNCTION /* TEMPLATE: schema */river_job_state_in_bitmask(bitmask BIT(8), state /* TEMPLATE: schema */river_job_state)
 RETURNS boolean
 LANGUAGE SQL
 IMMUTABLE
@@ -31,7 +30,7 @@ ALTER TABLE /* TEMPLATE: schema */river_job ADD COLUMN IF NOT EXISTS unique_stat
 CREATE UNIQUE INDEX IF NOT EXISTS river_job_unique_idx ON /* TEMPLATE: schema */river_job (unique_key)
     WHERE unique_key IS NOT NULL
       AND unique_states IS NOT NULL
-      AND river_job_state_in_bitmask(unique_states, state);
+      AND /* TEMPLATE: schema */river_job_state_in_bitmask(unique_states, state);

-- Remove the old unique index. Users who are actively using the unique jobs
-- feature and who wish to avoid deploy downtime may want to drop this in a
diff --git a/riverdriver/riverpgxv5/river_pgx_v5_driver.go b/riverdriver/riverpgxv5/river_pgx_v5_driver.go
index 504dba60..2808f54a 100644
--- a/riverdriver/riverpgxv5/river_pgx_v5_driver.go
+++ b/riverdriver/riverpgxv5/river_pgx_v5_driver.go
@@ -65,6 +65,7 @@ func (d *Driver) GetListener(schema string) riverdriver.Listener {
 	return &Listener{dbPool: d.dbPool, schema: schema}
 }

+func (d *Driver) GetMigrationDefaultLines() []string { return []string{riverdriver.MigrationLineMain} }
 func (d *Driver) GetMigrationFS(line string) fs.FS {
 	if line == riverdriver.MigrationLineMain {
 		return migrationFS
@@ -72,8 +73,20 @@ func (d *Driver) GetMigrationFS(line string) fs.FS {
 	panic("migration line does not exist: " + line)
 }
 func (d *Driver) GetMigrationLines() []string { return []string{riverdriver.MigrationLineMain} }
-func (d *Driver) HasPool() bool { return d.dbPool != nil }
-func (d *Driver) SupportsListener() bool { return true }
+func (d *Driver) GetMigrationTruncateTables(line string) []string {
+	if line == riverdriver.MigrationLineMain {
+		return []string{
+			"river_client",
+			"river_client_queue",
+			"river_job",
+			"river_leader",
+			"river_queue",
+		}
+	}
+	panic("migration line does not exist: " + line)
+}
+func (d *Driver) HasPool() bool { return d.dbPool != nil }
+func (d *Driver) SupportsListener() bool { return true }
 func (d *Driver) UnwrapExecutor(tx pgx.Tx) riverdriver.ExecutorTx {
 	// Allows UnwrapExecutor to be invoked even if driver is nil.
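The new GetMigrationTruncateTables hook gives test harnesses a list of tables to truncate so they can reset River state without re-running migrations. A minimal sketch of a consumer, assuming a pgx pool and an existing schema name (the truncateRiverTables helper and package name are illustrative, not part of this diff):

package testdbutil

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5/pgxpool"

	"github.com/riverqueue/river/riverdriver"
	"github.com/riverqueue/river/riverdriver/riverpgxv5"
)

// truncateRiverTables is a hypothetical helper showing how the new
// GetMigrationTruncateTables hook might be consumed: truncate every table on
// the main migration line, qualifying each with an explicit schema rather
// than relying on search_path.
func truncateRiverTables(ctx context.Context, dbPool *pgxpool.Pool, schema string) error {
	driver := riverpgxv5.New(dbPool)
	for _, table := range driver.GetMigrationTruncateTables(riverdriver.MigrationLineMain) {
		if _, err := dbPool.Exec(ctx, fmt.Sprintf("TRUNCATE TABLE %s.%s", schema, table)); err != nil {
			return err
		}
	}
	return nil
}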
@@ -87,6 +100,8 @@ func (d *Driver) UnwrapExecutor(tx pgx.Tx) riverdriver.ExecutorTx { return &ExecutorTx{Executor: Executor{templateReplaceWrapper{tx, replacer}, d}, tx: tx} } +func (d *Driver) UnwrapTx(execTx riverdriver.ExecutorTx) pgx.Tx { return execTx.(*ExecutorTx).tx } //nolint:forcetypeassert + type Executor struct { dbtx templateReplaceWrapper driver *Driver @@ -230,7 +245,6 @@ func (e *Executor) JobInsertFastMany(ctx context.Context, params *riverdriver.Jo UniqueStates: make([]pgtype.Bits, len(params.Jobs)), } now := time.Now().UTC() - for i := range len(params.Jobs) { params := params.Jobs[i] @@ -322,7 +336,7 @@ func (e *Executor) JobInsertFastManyNoReturning(ctx context.Context, params *riv } } - numInserted, err := dbsqlc.New().JobInsertFastManyCopyFrom(schemaTemplateParam(ctx, params.Schema), e.dbtx, insertJobsParams) + numInserted, err := dbsqlc.New().JobInsertFastManyCopyFrom(schemaCopyFrom(ctx, params.Schema), e.dbtx, insertJobsParams) if err != nil { return 0, interpretError(err) } @@ -710,6 +724,21 @@ func (e *Executor) QueueUpdate(ctx context.Context, params *riverdriver.QueueUpd return queueFromInternal(queue), nil } +func (e *Executor) QueryRow(ctx context.Context, sql string, args ...any) riverdriver.Row { + return e.dbtx.QueryRow(ctx, sql, args...) +} + +func (e *Executor) SchemaGetExpired(ctx context.Context, params *riverdriver.SchemaGetExpiredParams) ([]string, error) { + schemas, err := dbsqlc.New().SchemaGetExpired(ctx, e.dbtx, &dbsqlc.SchemaGetExpiredParams{ + BeforeName: params.BeforeName, + Prefix: params.Prefix, + }) + if err != nil { + return nil, interpretError(err) + } + return schemas, nil +} + func (e *Executor) TableExists(ctx context.Context, params *riverdriver.TableExistsParams) (bool, error) { // Different from other operations because the schemaAndTable name is a parameter. schemaAndTable := params.Table @@ -780,14 +809,18 @@ func (l *Listener) Connect(ctx context.Context) error { // schema based on `search_path`. schema := l.schema if schema == "" { - if err := poolConn.QueryRow(ctx, "SELECT current_schema();").Scan(&schema); err != nil { + // `current_schema` may be `NULL` if `search_path` is unset completely. + if err := poolConn.QueryRow(ctx, "SELECT coalesce(current_schema(), '');").Scan(&schema); err != nil { poolConn.Release() return err } l.schema = schema } - l.prefix = schema + "." + if schema != "" { + l.prefix = schema + "." + } + // Assume full ownership of the conn so that it doesn't get released back to // the pool or auto-closed by the pool. l.conn = poolConn.Hijack() @@ -868,6 +901,10 @@ func (w templateReplaceWrapper) QueryRow(ctx context.Context, sql string, args . } func (w templateReplaceWrapper) CopyFrom(ctx context.Context, tableName pgx.Identifier, columnNames []string, rowSrc pgx.CopyFromSource) (int64, error) { + if schema, ok := ctx.Value(schemaCopyFromContextKey{}).(string); ok { + tableName = append([]string{schema}, tableName...) + } + return w.dbtx.CopyFrom(ctx, tableName, columnNames, rowSrc) } @@ -980,6 +1017,20 @@ func queueFromInternal(internal *dbsqlc.RiverQueue) *rivertype.Queue { } } +// A special internal context key used only to set a schema for use in CopyFrom. +// If we end up eliminating the use of copyfrom functions (which can't use +// sqlctemplate because no SQL is executed at any time so there's nowhere to +// otherwise do a replacement), we can get rid of this completely. 
+type schemaCopyFromContextKey struct{} + +func schemaCopyFrom(ctx context.Context, schema string) context.Context { + if schema != "" { + ctx = context.WithValue(ctx, schemaCopyFromContextKey{}, schema) + } + + return ctx +} + func schemaTemplateParam(ctx context.Context, schema string) context.Context { if schema != "" { schema += "." diff --git a/riverlog/example_middleware_test.go b/riverlog/example_middleware_test.go index 945d8827..ebb809e7 100644 --- a/riverlog/example_middleware_test.go +++ b/riverlog/example_middleware_test.go @@ -10,10 +10,12 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/riverlog" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" "github.com/riverqueue/river/rivertype" ) @@ -36,17 +38,12 @@ func (w *LoggingWorker) Work(ctx context.Context, job *river.Job[LoggingArgs]) e func Example_middleware() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. - if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &LoggingWorker{}) @@ -65,7 +62,8 @@ func Example_middleware() { return &slogutil.SlogMessageOnlyHandler{Out: w} }, nil), }, - TestOnly: true, // suitable only for use in tests; remove for live environments + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil), // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments Workers: workers, }) if err != nil { diff --git a/riverlog/river_log_test.go b/riverlog/river_log_test.go index 60e2ee38..6dc9e6b6 100644 --- a/riverlog/river_log_test.go +++ b/riverlog/river_log_test.go @@ -12,9 +12,9 @@ import ( "github.com/stretchr/testify/require" "github.com/riverqueue/river" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverpgxv5" - "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" "github.com/riverqueue/river/rivertest" "github.com/riverqueue/river/rivertype" @@ -69,7 +69,7 @@ func TestMiddleware(t *testing.T) { clientConfig = &river.Config{ Middleware: []rivertype.Middleware{middleware}, } - tx = riversharedtest.TestTx(ctx, t) + tx = riverdbtest.TestTxPgx(ctx, t) worker = &loggingWorker{} ) diff --git a/rivermigrate/example_migrate_database_sql_test.go b/rivermigrate/example_migrate_database_sql_test.go index 7751971b..578d2407 100644 --- a/rivermigrate/example_migrate_database_sql_test.go +++ b/rivermigrate/example_migrate_database_sql_test.go @@ -8,9 +8,12 @@ import ( _ "github.com/jackc/pgx/v5/stdlib" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverdatabasesql" "github.com/riverqueue/river/rivermigrate" + "github.com/riverqueue/river/rivershared/riversharedtest" + 
"github.com/riverqueue/river/rivershared/util/testutil" + "github.com/riverqueue/river/rivershared/util/urlutil" ) // Example_migrateDatabaseSQL demonstrates the use of River's Go migration API @@ -18,28 +21,21 @@ import ( func Example_migrateDatabaseSQL() { ctx := context.Background() - // Use a dedicated Postgres schema for this example so we can migrate and drop it at will: - schemaName := "migration_example_dbsql" - url := riverinternaltest.DatabaseURL("river_test_example") + "&search_path=" + schemaName - dbPool, err := sql.Open("pgx", url) + db, err := sql.Open("pgx", urlutil.DatabaseSQLCompatibleURL(riversharedtest.TestDatabaseURL())) if err != nil { panic(err) } - defer dbPool.Close() + defer db.Close() - driver := riverdatabasesql.New(dbPool) - migrator, err := rivermigrate.New(driver, nil) + driver := riverdatabasesql.New(db) + migrator, err := rivermigrate.New(driver, &rivermigrate.Config{ + // Test schema with no migrations for purposes of this test. + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), driver, &riverdbtest.TestSchemaOpts{Lines: []string{}}), + }) if err != nil { panic(err) } - // Create the schema used for this example. Drop it when we're done. - // This isn't necessary outside this test. - if _, err := dbPool.ExecContext(ctx, "CREATE SCHEMA IF NOT EXISTS "+schemaName); err != nil { - panic(err) - } - defer dropRiverSchema(ctx, driver, schemaName) - printVersions := func(res *rivermigrate.MigrateResult) { for _, version := range res.Versions { fmt.Printf("Migrated [%s] version %d\n", strings.ToUpper(string(res.Direction)), version.Version) diff --git a/rivermigrate/example_migrate_test.go b/rivermigrate/example_migrate_test.go index b9cdf6b0..86c968a9 100644 --- a/rivermigrate/example_migrate_test.go +++ b/rivermigrate/example_migrate_test.go @@ -7,10 +7,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" - "github.com/riverqueue/river/internal/riverinternaltest" - "github.com/riverqueue/river/riverdriver" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" "github.com/riverqueue/river/rivermigrate" + "github.com/riverqueue/river/rivershared/riversharedtest" + "github.com/riverqueue/river/rivershared/util/testutil" ) // Example_migrate demonstrates the use of River's Go migration API by migrating @@ -18,30 +19,21 @@ import ( func Example_migrate() { ctx := context.Background() - // Use a dedicated Postgres schema for this example so we can migrate and drop it at will: - schemaName := "migration_example" - poolConfig := riverinternaltest.DatabaseConfig("river_test_example") - poolConfig.ConnConfig.RuntimeParams["search_path"] = schemaName - - dbPool, err := pgxpool.NewWithConfig(ctx, poolConfig) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() driver := riverpgxv5.New(dbPool) - migrator, err := rivermigrate.New(driver, nil) + migrator, err := rivermigrate.New(driver, &rivermigrate.Config{ + // Test schema with no migrations for purposes of this test. + Schema: riverdbtest.TestSchema(ctx, testutil.PanicTB(), driver, &riverdbtest.TestSchemaOpts{Lines: []string{}}), + }) if err != nil { panic(err) } - // Create the schema used for this example. Drop it when we're done. - // This isn't necessary outside this test. 
- if _, err := dbPool.Exec(ctx, "CREATE SCHEMA IF NOT EXISTS "+schemaName); err != nil { - panic(err) - } - defer dropRiverSchema(ctx, driver, schemaName) - printVersions := func(res *rivermigrate.MigrateResult) { for _, version := range res.Versions { fmt.Printf("Migrated [%s] version %d\n", strings.ToUpper(string(res.Direction)), version.Version) @@ -76,10 +68,3 @@ func Example_migrate() { // Migrated [DOWN] version 2 // Migrated [DOWN] version 1 } - -func dropRiverSchema[TTx any](ctx context.Context, driver riverdriver.Driver[TTx], schemaName string) { - _, err := driver.GetExecutor().Exec(ctx, "DROP SCHEMA IF EXISTS "+schemaName+" CASCADE;") - if err != nil { - panic(err) - } -} diff --git a/rivermigrate/river_migrate.go b/rivermigrate/river_migrate.go index 68e8d0a6..fb9939ba 100644 --- a/rivermigrate/river_migrate.go +++ b/rivermigrate/river_migrate.go @@ -61,7 +61,7 @@ type Config struct { // or higher. Logger *slog.Logger - schema string + Schema string } // Migrator is a database migration tool for River which can run up or down @@ -153,7 +153,7 @@ func New[TTx any](driver riverdriver.Driver[TTx], config *Config) (*Migrator[TTx driver: driver, line: line, migrations: validateAndInit(riverMigrations), - schema: config.schema, + schema: config.Schema, }), nil } diff --git a/rivermigrate/river_migrate_test.go b/rivermigrate/river_migrate_test.go index 44e5721f..99037fc8 100644 --- a/rivermigrate/river_migrate_test.go +++ b/rivermigrate/river_migrate_test.go @@ -19,7 +19,6 @@ import ( "github.com/jackc/pgx/v5/stdlib" "github.com/stretchr/testify/require" - "github.com/riverqueue/river/internal/riverinternaltest" "github.com/riverqueue/river/internal/util/dbutil" "github.com/riverqueue/river/riverdriver" "github.com/riverqueue/river/riverdriver/riverdatabasesql" @@ -89,7 +88,7 @@ func TestMigrator(t *testing.T) { // // To make this easier to clean up afterward, we create a new, clean schema // for each test run and then drop it afterward. 
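With Schema now exported on rivermigrate's Config, callers can migrate River into a non-default schema directly, which is what replaces the search_path juggling removed from the examples above. A minimal sketch under assumed connection details (the URL and custom_schema name are placeholders):

package main

import (
	"context"

	"github.com/jackc/pgx/v5/pgxpool"

	"github.com/riverqueue/river/riverdriver/riverpgxv5"
	"github.com/riverqueue/river/rivermigrate"
)

func main() {
	ctx := context.Background()

	dbPool, err := pgxpool.New(ctx, "postgres://localhost:5432/river_dev")
	if err != nil {
		panic(err)
	}
	defer dbPool.Close()

	// The target schema must exist before migrating into it.
	if _, err := dbPool.Exec(ctx, "CREATE SCHEMA IF NOT EXISTS custom_schema"); err != nil {
		panic(err)
	}

	migrator, err := rivermigrate.New(riverpgxv5.New(dbPool), &rivermigrate.Config{
		Schema: "custom_schema",
	})
	if err != nil {
		panic(err)
	}

	if _, err := migrator.Migrate(ctx, rivermigrate.DirectionUp, &rivermigrate.MigrateOpts{}); err != nil {
		panic(err)
	}
}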
- dbPool := riverinternaltest.TestDB(ctx, t) + dbPool := riversharedtest.DBPool(ctx, t) schema := "river_migrate_test_" + randutil.Hex(8) _, err := dbPool.Exec(ctx, "CREATE SCHEMA "+schema) require.NoError(t, err) @@ -108,7 +107,7 @@ func TestMigrator(t *testing.T) { migrator, err := New(bundle.driver, &Config{ Logger: bundle.logger, - schema: schema, + Schema: schema, }) require.NoError(t, err) migrator.migrations = migrationsBundle.WithTestVersionsMap @@ -130,7 +129,7 @@ func TestMigrator(t *testing.T) { driver := riverdatabasesql.New(stdPool) migrator, err := New(driver, &Config{ Logger: bundle.logger, - schema: bundle.schema, + Schema: bundle.schema, }) require.NoError(t, err) migrator.migrations = migrationsBundle.WithTestVersionsMap @@ -741,7 +740,7 @@ func TestMigrator(t *testing.T) { alternateMigrator, err := New(bundle.driver, &Config{ Line: migrationLineAlternate, Logger: bundle.logger, - schema: bundle.schema, + Schema: bundle.schema, }) require.NoError(t, err) @@ -785,7 +784,7 @@ func TestMigrator(t *testing.T) { alternateMigrator, err := New(bundle.driver, &Config{ Line: migrationLineAlternate, Logger: bundle.logger, - schema: bundle.schema, + Schema: bundle.schema, }) require.NoError(t, err) @@ -875,7 +874,7 @@ func TestMigrator(t *testing.T) { commitRequiredMigrator, err := New(bundle.driver, &Config{ Line: migrationLineCommitRequired, Logger: bundle.logger, - schema: bundle.schema, + Schema: bundle.schema, }) require.NoError(t, err) diff --git a/rivershared/riverpilot/pilot.go b/rivershared/riverpilot/pilot.go index 598dcd6b..c91e32e8 100644 --- a/rivershared/riverpilot/pilot.go +++ b/rivershared/riverpilot/pilot.go @@ -40,9 +40,9 @@ type Pilot interface { ProducerKeepAlive(ctx context.Context, exec riverdriver.Executor, params *riverdriver.ProducerKeepAliveParams) error - ProducerShutdown(ctx context.Context, exec riverdriver.Executor, producerID int64, state ProducerState) error + ProducerShutdown(ctx context.Context, exec riverdriver.Executor, params *ProducerShutdownParams) error - QueueMetadataChanged(ctx context.Context, exec riverdriver.Executor, state ProducerState, metadata []byte) error + QueueMetadataChanged(ctx context.Context, exec riverdriver.Executor, params *QueueMetadataChangedParams) error } type ProducerState interface { @@ -56,3 +56,15 @@ type ProducerInitParams struct { QueueMetadata []byte Schema string } + +type ProducerShutdownParams struct { + ProducerID int64 + Schema string + State ProducerState +} + +type QueueMetadataChangedParams struct { + Metadata []byte + Schema string + State ProducerState +} diff --git a/rivershared/riverpilot/standard.go b/rivershared/riverpilot/standard.go index 1ced1dce..30fe773e 100644 --- a/rivershared/riverpilot/standard.go +++ b/rivershared/riverpilot/standard.go @@ -45,11 +45,11 @@ func (p *StandardPilot) ProducerKeepAlive(ctx context.Context, exec riverdriver. 
	return nil
}

-func (p *StandardPilot) ProducerShutdown(ctx context.Context, exec riverdriver.Executor, producerID int64, state ProducerState) error {
+func (p *StandardPilot) ProducerShutdown(ctx context.Context, exec riverdriver.Executor, params *ProducerShutdownParams) error {
 	return nil
 }

-func (p *StandardPilot) QueueMetadataChanged(ctx context.Context, exec riverdriver.Executor, state ProducerState, metadata []byte) error {
+func (p *StandardPilot) QueueMetadataChanged(ctx context.Context, exec riverdriver.Executor, params *QueueMetadataChangedParams) error {
 	return nil
 }
diff --git a/rivershared/riversharedtest/riversharedtest.go b/rivershared/riversharedtest/riversharedtest.go
index 5957350c..e34d93c7 100644
--- a/rivershared/riversharedtest/riversharedtest.go
+++ b/rivershared/riversharedtest/riversharedtest.go
@@ -18,6 +18,7 @@ import (
 	"github.com/riverqueue/river/rivershared/baseservice"
 	"github.com/riverqueue/river/rivershared/slogtest"
+	"github.com/riverqueue/river/rivershared/util/testutil"
 )

 // BaseServiceArchetype returns a new base service suitable for use in tests.
@@ -46,11 +47,27 @@ func DBPool(ctx context.Context, tb testing.TB) *pgxpool.Pool {
 	tb.Helper()

 	dbPoolOnce.Do(func() {
-		var err error
-		dbPool, err = pgxpool.New(ctx, cmp.Or(
-			os.Getenv("TEST_DATABASE_URL"),
-			"postgres://localhost:5432/river_test",
-		))
+		config, err := pgxpool.ParseConfig(TestDatabaseURL())
+		require.NoError(tb, err)
+
+		config.AfterConnect = func(ctx context.Context, conn *pgx.Conn) error {
+			// Empty the search path so that tests using riverdbtest are
+			// forced to pass a schema to clients and any other database
+			// operations they invoke. This ensures calls don't accidentally
+			// fall back to a default schema, which could hide bugs where we
+			// weren't properly referencing a schema explicitly.
+			_, err := conn.Exec(ctx, "SET search_path TO ''")
+
+			// This should not be a `require` because the callback may run long
+			// after the original test has completed.
+			if err != nil && !errors.Is(err, context.Canceled) {
+				panic(err)
+			}
+
+			return nil
+		}
+
+		dbPool, err = pgxpool.NewWithConfig(ctx, config)
 		require.NoError(tb, err)
 	})
 	require.NotNil(tb, dbPool) // die in case initial connect from another test failed
@@ -58,6 +75,28 @@ func DBPool(ctx context.Context, tb testing.TB) *pgxpool.Pool {
 	return dbPool
 }

+// DBPoolClone returns a disposable clone of DBPool. Share resources by using
+// DBPool when possible, but a shared pool is unsuitable for areas like stress
+// tests where context cancellations are likely to end up closing the pool.
+//
+// Unlike DBPool, adds a test cleanup hook that closes the pool after run.
+func DBPoolClone(ctx context.Context, tb testing.TB) *pgxpool.Pool {
+	tb.Helper()
+
+	dbPool := DBPool(ctx, tb)
+
+	config := dbPool.Config()
+	config.MaxConns = 4 // dramatically reduce max allowed conns for clones so they don't clobber the database server
+
+	var err error
+	dbPool, err = pgxpool.NewWithConfig(ctx, config)
+	require.NoError(tb, err)
+
+	tb.Cleanup(dbPool.Close)
+
+	return dbPool
+}
+
 // Logger returns a logger suitable for use in tests.
 //
 // Defaults to informational verbosity. If env is set with `RIVER_DEBUG=true`,
@@ -74,72 +113,32 @@ func Logger(tb testing.TB) *slog.Logger {

 // LoggerWarn returns a logger suitable for use in tests which outputs only at
 // warn or above. Useful in tests where particularly noisy log output is expected.
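Emptying search_path in the pool's AfterConnect hook is what makes missing schema qualification fail loudly instead of silently resolving against a default schema. A small standalone sketch of that behavior, assuming a reachable river_test database (the connection URL is a placeholder):

package main

import (
	"context"
	"fmt"

	"github.com/jackc/pgx/v5"
)

func main() {
	ctx := context.Background()

	conn, err := pgx.Connect(ctx, "postgres://localhost:5432/river_test")
	if err != nil {
		panic(err)
	}
	defer conn.Close(ctx)

	if _, err := conn.Exec(ctx, "SET search_path TO ''"); err != nil {
		panic(err)
	}

	// With no resolvable schemas, an unqualified reference errors out even if
	// the table exists somewhere, surfacing any code path that forgot to
	// inject a schema.
	if _, err := conn.Exec(ctx, "SELECT 1 FROM river_job"); err != nil {
		fmt.Println("unqualified reference correctly rejected:", err)
	}
}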
-func LoggerWarn(tb testing.TB) *slog.Logger {
+func LoggerWarn(tb testutil.TestingTB) *slog.Logger {
 	tb.Helper()
 	return slogtest.NewLogger(tb, &slog.HandlerOptions{Level: slog.LevelWarn})
 }

-// TestTx starts a test transaction that's rolled back automatically as the test
-// case is cleaning itself up.
-//
-// This variant uses the default database pool from DBPool that points to
-// `TEST_DATABASE_URL` or `river_test` if the former wasn't specified.
-func TestTx(ctx context.Context, tb testing.TB) pgx.Tx {
-	tb.Helper()
-	return TestTxPool(ctx, tb, DBPool(ctx, tb))
-}
-
-// TestTxPool starts a test transaction that's rolled back automatically as the
-// test case is cleaning itself up.
-//
-// This variant starts the test transaction on the specified database pool.
-func TestTxPool(ctx context.Context, tb testing.TB, dbPool *pgxpool.Pool) pgx.Tx {
-	tb.Helper()
-
-	tx, err := dbPool.Begin(ctx)
-	require.NoError(tb, err)
-
-	tb.Cleanup(func() {
-		// Tests may inherit context from `t.Context()` which is cancelled after
-		// tests run and before calling clean up. We need a non-cancelled
-		// context to issue rollback here, so use a bit of a bludgeon to do so
-		// with `context.WithoutCancel()`.
-		ctx := context.WithoutCancel(ctx)
-
-		err := tx.Rollback(ctx)
-
-		if err == nil {
-			return
-		}
-
-		// Try to look for an error on rollback because it does occasionally
-		// reveal a real problem in the way a test is written. However, allow
-		// tests to roll back their transaction early if they like, so ignore
-		// `ErrTxClosed`.
-		if errors.Is(err, pgx.ErrTxClosed) {
-			return
-		}
-
-		// In case of a cancelled context during a database operation, which
-		// happens in many tests, pgx seems to not only roll back the
-		// transaction, but closes the connection, and returns this error on
-		// rollback. Allow this error since it's hard to prevent it in our flows
-		// that use contexts heavily.
-		if err.Error() == "conn closed" {
-			return
-		}
-
-		// Similar to the above, but a newly appeared error that wraps the
-		// above. As far as I can tell, no error variables are available to use
-		// with `errors.Is`.
-		if err.Error() == "failed to deallocate cached statement(s): conn closed" {
-			return
-		}
-
-		require.NoError(tb, err)
-	})
-
-	return tx
+// TestDatabaseURL returns `TEST_DATABASE_URL` or a default URL pointing to
+// `river_test` and with suitable connection configuration defaults.
+func TestDatabaseURL() string {
+	return cmp.Or(
+		os.Getenv("TEST_DATABASE_URL"),
+
+		// 100 conns is the default maximum for Homebrew.
+		//
+		// It'd be nice to be able to set this number really high because it'd
+		// mean less waiting time acquiring connections in tests, but with
+		// default settings, contention between tests/test packages leading to
+		// exhaustion on the Postgres server is definitely a problem. At numbers
+		// >75 I started seeing a lot of errors between tests within a single
+		// package, and worse yet, at numbers >=20 I saw major problems between
+		// packages (i.e. as parallel packages run at the same time).
+		//
+		// 15 is about as high as I found I could set it while keeping test runs
+		// stable. This could be much higher in areas where we know Postgres is
+		// configured with more allowed max connections.
+ "postgres://localhost:5432/river_test?pool_max_conns=15&sslmode=disable", + ) } // TimeStub implements baseservice.TimeGeneratorWithStub to allow time to be diff --git a/rivershared/riversharedtest/riversharedtest_test.go b/rivershared/riversharedtest/riversharedtest_test.go index aca77ef4..db4270d2 100644 --- a/rivershared/riversharedtest/riversharedtest_test.go +++ b/rivershared/riversharedtest/riversharedtest_test.go @@ -5,8 +5,6 @@ import ( "testing" "time" - "github.com/jackc/pgerrcode" - "github.com/jackc/pgx/v5/pgconn" "github.com/stretchr/testify/require" ) @@ -24,48 +22,6 @@ func TestDBPool(t *testing.T) { require.Equal(t, pool1, pool2) } -func TestTestTx(t *testing.T) { - t.Parallel() - - ctx := context.Background() - - type PoolOrTx interface { - Exec(ctx context.Context, sql string, arguments ...any) (commandTag pgconn.CommandTag, err error) - } - - checkTestTable := func(ctx context.Context, poolOrTx PoolOrTx) error { - _, err := poolOrTx.Exec(ctx, "SELECT * FROM river_shared_test_tx_table") - return err - } - - // Test cleanups are invoked in the order of last added, first called. When - // TestTx is called below it adds a cleanup, so we want to make sure that - // this cleanup, which checks that the database remains pristine, is invoked - // after the TestTx cleanup, so we add it first. - t.Cleanup(func() { - // Tests may inherit context from `t.Context()` which is cancelled after - // tests run and before calling clean up. We need a non-cancelled - // context to issue rollback here, so use a bit of a bludgeon to do so - // with `context.WithoutCancel()`. - ctx := context.WithoutCancel(ctx) - - err := checkTestTable(ctx, DBPool(ctx, t)) - require.Error(t, err) - - var pgErr *pgconn.PgError - require.ErrorAs(t, err, &pgErr) - require.Equal(t, pgerrcode.UndefinedTable, pgErr.Code) - }) - - tx := TestTx(ctx, t) - - _, err := tx.Exec(ctx, "CREATE TABLE river_shared_test_tx_table (id bigint)") - require.NoError(t, err) - - err = checkTestTable(ctx, tx) - require.NoError(t, err) -} - func TestWaitOrTimeout(t *testing.T) { t.Parallel() diff --git a/rivershared/slogtest/slog_test_handler.go b/rivershared/slogtest/slog_test_handler.go index 7c1cfed4..c28fc2fe 100644 --- a/rivershared/slogtest/slog_test_handler.go +++ b/rivershared/slogtest/slog_test_handler.go @@ -6,13 +6,14 @@ import ( "io" "log/slog" "sync" - "testing" + + "github.com/riverqueue/river/rivershared/util/testutil" ) // NewLogger returns a new slog text logger that outputs to `t.Log`. This helps // keep test output better formatted, and allows it to be differentiated in case // of a failure during a parallel test suite run. -func NewLogger(tb testing.TB, opts *slog.HandlerOptions) *slog.Logger { +func NewLogger(tb testutil.TestingTB, opts *slog.HandlerOptions) *slog.Logger { tb.Helper() var buf bytes.Buffer @@ -31,7 +32,7 @@ type slogTestHandler struct { buf *bytes.Buffer inner slog.Handler mu *sync.Mutex - tb testing.TB + tb testutil.TestingTB } func (b *slogTestHandler) Enabled(ctx context.Context, level slog.Level) bool { diff --git a/rivershared/startstoptest/startstoptest.go b/rivershared/startstoptest/startstoptest.go index b7b5ae7d..1c11fe4d 100644 --- a/rivershared/startstoptest/startstoptest.go +++ b/rivershared/startstoptest/startstoptest.go @@ -23,7 +23,7 @@ func Stress(ctx context.Context, tb testingT, svc startstop.Service) { // tolerated on start (either no error or an error that is allowedStartErr is // allowed). 
This is useful for services that may want to return an error if // they're shut down as they're still starting up. -func StressErr(ctx context.Context, tb testingT, svc startstop.Service, allowedStartErr error) { //nolint:varnamelen +func StressErr(ctx context.Context, tb testingT, svc startstop.Service, allowedStartErr error) { tb.Helper() var wg sync.WaitGroup diff --git a/rivershared/testfactory/test_factory.go b/rivershared/testfactory/test_factory.go index c034f0bf..7dba1005 100644 --- a/rivershared/testfactory/test_factory.go +++ b/rivershared/testfactory/test_factory.go @@ -31,6 +31,7 @@ type JobOpts struct { Priority *int Queue *string ScheduledAt *time.Time + Schema string State *rivertype.JobState Tags []string UniqueKey []byte @@ -42,6 +43,7 @@ func Job(ctx context.Context, tb testing.TB, exec riverdriver.Executor, opts *Jo job, err := exec.JobInsertFull(ctx, Job_Build(tb, opts)) require.NoError(tb, err) + return job } @@ -84,6 +86,7 @@ func Job_Build(tb testing.TB, opts *JobOpts) *riverdriver.JobInsertFullParams { Priority: ptrutil.ValOrDefault(opts.Priority, rivercommon.PriorityDefault), Queue: ptrutil.ValOrDefault(opts.Queue, rivercommon.QueueDefault), ScheduledAt: opts.ScheduledAt, + Schema: opts.Schema, State: ptrutil.ValOrDefault(opts.State, rivertype.JobStateAvailable), Tags: tags, UniqueKey: opts.UniqueKey, @@ -95,6 +98,7 @@ type LeaderOpts struct { ElectedAt *time.Time ExpiresAt *time.Time LeaderID *string + Schema string } func Leader(ctx context.Context, tb testing.TB, exec riverdriver.Executor, opts *LeaderOpts) *riverdriver.Leader { @@ -104,6 +108,7 @@ func Leader(ctx context.Context, tb testing.TB, exec riverdriver.Executor, opts ElectedAt: opts.ElectedAt, ExpiresAt: opts.ExpiresAt, LeaderID: ptrutil.ValOrDefault(opts.LeaderID, "test-client-id"), + Schema: opts.Schema, TTL: 10 * time.Second, }) require.NoError(tb, err) @@ -112,6 +117,7 @@ func Leader(ctx context.Context, tb testing.TB, exec riverdriver.Executor, opts type MigrationOpts struct { Line *string + Schema string Version *int } @@ -120,7 +126,7 @@ func Migration(ctx context.Context, tb testing.TB, exec riverdriver.Executor, op migration, err := exec.MigrationInsertMany(ctx, &riverdriver.MigrationInsertManyParams{ Line: ptrutil.ValOrDefault(opts.Line, riverdriver.MigrationLineMain), - Schema: "", + Schema: opts.Schema, Versions: []int{ptrutil.ValOrDefaultFunc(opts.Version, nextSeq)}, }) require.NoError(tb, err) @@ -137,6 +143,7 @@ type QueueOpts struct { Metadata []byte Name *string PausedAt *time.Time + Schema string UpdatedAt *time.Time } @@ -156,6 +163,7 @@ func Queue(ctx context.Context, tb testing.TB, exec riverdriver.Executor, opts * Metadata: metadata, Name: ptrutil.ValOrDefaultFunc(opts.Name, func() string { return fmt.Sprintf("queue_%05d", nextSeq()) }), PausedAt: opts.PausedAt, + Schema: opts.Schema, UpdatedAt: opts.UpdatedAt, }) require.NoError(tb, err) diff --git a/rivershared/util/testutil/test_util.go b/rivershared/util/testutil/test_util.go new file mode 100644 index 00000000..cbb6c339 --- /dev/null +++ b/rivershared/util/testutil/test_util.go @@ -0,0 +1,53 @@ +package testutil + +import "fmt" + +// See docs on PanicTB. +type panicTB struct { + SuppressOutput bool +} + +// PanicTB is an implementation for testing.TB that panics when an error is +// logged or FailNow is called. This is useful to inject into test helpers in +// example tests where no *testing.T is available. +// +// Doesn't fully implement testing.TB. 
Functions where it's used should take the
+// more streamlined TestingTB instead.
+func PanicTB() *panicTB {
+	return &panicTB{SuppressOutput: true}
+}
+
+func (tb *panicTB) Errorf(format string, args ...any) {
+	panic(fmt.Sprintf(format, args...))
+}
+
+func (tb *panicTB) FailNow() {
+	panic("FailNow invoked")
+}
+
+func (tb *panicTB) Helper() {}
+
+func (tb *panicTB) Log(args ...any) {
+	if !tb.SuppressOutput {
+		fmt.Println(args...)
+	}
+}
+
+func (tb *panicTB) Logf(format string, args ...any) {
+	if !tb.SuppressOutput {
+		fmt.Printf(format+"\n", args...)
+	}
+}
+
+// TestingTB is an interface wrapper around *testing.T that's implemented by all
+// of *testing.T, *testing.F, and *testing.B.
+//
+// It's used internally to verify that River's test assertions are working as
+// expected.
+type TestingTB interface {
+	Errorf(format string, args ...any)
+	FailNow()
+	Helper()
+	Log(args ...any)
+	Logf(format string, args ...any)
+}
diff --git a/rivershared/util/testutil/test_util_test.go b/rivershared/util/testutil/test_util_test.go
new file mode 100644
index 00000000..73274f9d
--- /dev/null
+++ b/rivershared/util/testutil/test_util_test.go
@@ -0,0 +1,3 @@
+package testutil
+
+var _ TestingTB = &panicTB{}
diff --git a/rivershared/util/urlutil/url_util.go b/rivershared/util/urlutil/url_util.go
new file mode 100644
index 00000000..5e6478d6
--- /dev/null
+++ b/rivershared/util/urlutil/url_util.go
@@ -0,0 +1,19 @@
+package urlutil
+
+import "net/url"
+
+// DatabaseSQLCompatibleURL returns databaseURL, but with any known pgx-specific
+// URL parameters removed so that it'll be parseable by `database/sql`.
+func DatabaseSQLCompatibleURL(databaseURL string) string {
+	parsedURL, err := url.Parse(databaseURL)
+	if err != nil {
+		panic(err)
+	}
+
+	query := parsedURL.Query()
+	query.Del("pool_max_conns")
+
+	parsedURL.RawQuery = query.Encode()
+
+	return parsedURL.String()
+}
diff --git a/rivertest/example_require_inserted_test.go b/rivertest/example_require_inserted_test.go
index 40b5b25e..28be7a14 100644
--- a/rivertest/example_require_inserted_test.go
+++ b/rivertest/example_require_inserted_test.go
@@ -9,9 +9,11 @@ import (
 	"github.com/jackc/pgx/v5/pgxpool"

 	"github.com/riverqueue/river"
-	"github.com/riverqueue/river/internal/riverinternaltest"
+	"github.com/riverqueue/river/riverdbtest"
 	"github.com/riverqueue/river/riverdriver/riverpgxv5"
+	"github.com/riverqueue/river/rivershared/riversharedtest"
 	"github.com/riverqueue/river/rivershared/util/slogutil"
+	"github.com/riverqueue/river/rivershared/util/testutil"
 	"github.com/riverqueue/river/rivertest"
 )
@@ -32,23 +34,22 @@ func (w *RequiredWorker) Work(ctx context.Context, job *river.Job[RequiredArgs])
 func Example_requireInserted() {
 	ctx := context.Background()

-	dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example"))
+	dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL())
 	if err != nil {
 		panic(err)
 	}
 	defer dbPool.Close()

-	// Required for the purpose of this test, but not necessary in real usage.
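The new urlutil helper addresses the pool_max_conns parameter added to the test database URL, which pgx understands but the database/sql path does not. A quick sketch of the round trip:

package main

import (
	"database/sql"
	"fmt"

	_ "github.com/jackc/pgx/v5/stdlib"

	"github.com/riverqueue/river/rivershared/util/urlutil"
)

func main() {
	// Strips the pgx-only pool_max_conns parameter; sslmode is left intact.
	url := urlutil.DatabaseSQLCompatibleURL("postgres://localhost:5432/river_test?pool_max_conns=15&sslmode=disable")
	fmt.Println(url) // postgres://localhost:5432/river_test?sslmode=disable

	db, err := sql.Open("pgx", url)
	if err != nil {
		panic(err)
	}
	defer db.Close()
}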
- if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil { - panic(err) - } - workers := river.NewWorkers() river.AddWorker(workers, &RequiredWorker{}) + schema := riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil) + riverClient, err := river.NewClient(riverpgxv5.New(dbPool), &river.Config{ - Logger: slog.New(&slogutil.SlogMessageOnlyHandler{Level: slog.LevelWarn}), - Workers: workers, + Logger: slog.New(&slogutil.SlogMessageOnlyHandler{Level: slog.LevelWarn}), + Schema: schema, // only necessary for the example test + TestOnly: true, // suitable only for use in tests; remove for live environments + Workers: workers, }) if err != nil { panic(err) @@ -71,6 +72,11 @@ func Example_requireInserted() { // *testing.T that comes from a test's argument. t := &testing.T{} + // This is needed because rivertest does not yet support an injected schema. + if _, err := tx.Exec(ctx, "SET search_path TO "+schema); err != nil { + panic(err) + } + job := rivertest.RequireInsertedTx[*riverpgxv5.Driver](ctx, t, tx, &RequiredArgs{}, nil) fmt.Printf("Test passed with message: %s\n", job.Args.Message) @@ -81,12 +87,17 @@ func Example_requireInserted() { Queue: river.QueueDefault, }) - // Insert and verify one on a pool instead of transaction. - _, err = riverClient.Insert(ctx, &RequiredArgs{Message: "Hello from pool."}, nil) - if err != nil { - panic(err) - } - _ = rivertest.RequireInserted(ctx, t, riverpgxv5.New(dbPool), &RequiredArgs{}, nil) + // Due to some refactoring to make schemas injectable, we don't yet have a + // way of injecting a schema at the pool level. The rivertest API will need + // to be expanded to allow it. + /* + // Insert and verify one on a pool instead of transaction. + _, err = riverClient.Insert(ctx, &RequiredArgs{Message: "Hello from pool."}, nil) + if err != nil { + panic(err) + } + _ = rivertest.RequireInserted(ctx, t, riverpgxv5.New(dbPool), &RequiredArgs{}, nil) + */ // Output: // Test passed with message: Hello. diff --git a/rivertest/example_require_many_inserted_test.go b/rivertest/example_require_many_inserted_test.go index d6827eb0..d295595c 100644 --- a/rivertest/example_require_many_inserted_test.go +++ b/rivertest/example_require_many_inserted_test.go @@ -9,9 +9,11 @@ import ( "github.com/jackc/pgx/v5/pgxpool" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivershared/util/slogutil" + "github.com/riverqueue/river/rivershared/util/testutil" "github.com/riverqueue/river/rivertest" ) @@ -49,23 +51,21 @@ func (w *SecondRequiredWorker) Work(ctx context.Context, job *river.Job[SecondRe func Example_requireManyInserted() { ctx := context.Background() - dbPool, err := pgxpool.NewWithConfig(ctx, riverinternaltest.DatabaseConfig("river_test_example")) + dbPool, err := pgxpool.New(ctx, riversharedtest.TestDatabaseURL()) if err != nil { panic(err) } defer dbPool.Close() - // Required for the purpose of this test, but not necessary in real usage. 
-	if err := riverinternaltest.TruncateRiverTables(ctx, dbPool); err != nil {
-		panic(err)
-	}
-
 	workers := river.NewWorkers()
 	river.AddWorker(workers, &FirstRequiredWorker{})
 	river.AddWorker(workers, &SecondRequiredWorker{})

+	schema := riverdbtest.TestSchema(ctx, testutil.PanicTB(), riverpgxv5.New(dbPool), nil)
+
 	riverClient, err := river.NewClient(riverpgxv5.New(dbPool), &river.Config{
 		Logger: slog.New(&slogutil.SlogMessageOnlyHandler{Level: slog.LevelWarn}),
+		Schema: schema, // only necessary for the example test
 		Workers: workers,
 	})
 	if err != nil {
@@ -97,6 +97,11 @@ func Example_requireManyInserted() {
 	// *testing.T that comes from a test's argument.
 	t := &testing.T{}

+	// This is needed because rivertest does not yet support an injected schema.
+	if _, err := tx.Exec(ctx, "SET search_path TO "+schema); err != nil {
+		panic(err)
+	}
+
 	jobs := rivertest.RequireManyInsertedTx[*riverpgxv5.Driver](ctx, t, tx, []rivertest.ExpectedJob{
 		{Args: &FirstRequiredArgs{}},
 		{Args: &SecondRequiredArgs{}},
@@ -115,14 +120,19 @@
 		}},
 	})

-	// Insert and verify one on a pool instead of transaction.
-	_, err = riverClient.Insert(ctx, &FirstRequiredArgs{Message: "Hello from pool."}, nil)
-	if err != nil {
-		panic(err)
-	}
-	_ = rivertest.RequireManyInserted(ctx, t, riverpgxv5.New(dbPool), []rivertest.ExpectedJob{
-		{Args: &FirstRequiredArgs{}},
-	})
+	// Due to some refactoring to make schemas injectable, we don't yet have a
+	// way of injecting a schema at the pool level. The rivertest API will need
+	// to be expanded to allow it.
+	/*
+		// Insert and verify one on a pool instead of transaction.
+		_, err = riverClient.Insert(ctx, &FirstRequiredArgs{Message: "Hello from pool."}, nil)
+		if err != nil {
+			panic(err)
+		}
+		_ = rivertest.RequireManyInserted(ctx, t, riverpgxv5.New(dbPool), []rivertest.ExpectedJob{
+			{Args: &FirstRequiredArgs{}},
+		})
+	*/

 	// Output:
 	// Job 0 args: {"message": "Hello from first."}
diff --git a/rivertest/rivertest.go b/rivertest/rivertest.go
index cad33459..5170534f 100644
--- a/rivertest/rivertest.go
+++ b/rivertest/rivertest.go
@@ -18,6 +18,10 @@ import (
 	"github.com/riverqueue/river/rivertype"
 )

+// A placeholder for an empty schema that'll need to be replaced with
+// something better at some point.
+const emptySchema = ""
+
 // testingT is an interface wrapper around *testing.T that's implemented by all
 // of *testing.T, *testing.F, and *testing.B.
 //
@@ -97,12 +101,12 @@ type RequireInsertedOpts struct {
 // to cover that case instead.
func RequireInserted[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, tb testing.TB, driver TDriver, expectedJob TArgs, opts *RequireInsertedOpts) *river.Job[TArgs] { tb.Helper() - return requireInserted(ctx, tb, driver, expectedJob, opts) + return requireInserted(ctx, tb, driver, emptySchema, expectedJob, opts) } -func requireInserted[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, driver TDriver, expectedJob TArgs, opts *RequireInsertedOpts) *river.Job[TArgs] { +func requireInserted[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, driver TDriver, schema string, expectedJob TArgs, opts *RequireInsertedOpts) *river.Job[TArgs] { t.Helper() - actualArgs, err := requireInsertedErr[TDriver](ctx, t, driver.GetExecutor(), expectedJob, opts) + actualArgs, err := requireInsertedErr[TDriver](ctx, t, driver.GetExecutor(), schema, expectedJob, opts) if err != nil { failure(t, "Internal failure: %s", err) } @@ -127,28 +131,31 @@ func requireInserted[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobAr // to cover that case instead. func RequireInsertedTx[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, tb testing.TB, tx TTx, expectedJob TArgs, opts *RequireInsertedOpts) *river.Job[TArgs] { tb.Helper() - return requireInsertedTx[TDriver](ctx, tb, tx, expectedJob, opts) + return requireInsertedTx[TDriver](ctx, tb, tx, emptySchema, expectedJob, opts) } // Internal function used by the tests so that the exported version can take // `testing.TB` instead of `testing.T`. -func requireInsertedTx[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, tx TTx, expectedJob TArgs, opts *RequireInsertedOpts) *river.Job[TArgs] { +// +// Also takes a schema for testing purposes, which I haven't quite figured out +// how to get into the public API yet. +func requireInsertedTx[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, tx TTx, schema string, expectedJob TArgs, opts *RequireInsertedOpts) *river.Job[TArgs] { t.Helper() var driver TDriver - actualArgs, err := requireInsertedErr[TDriver](ctx, t, driver.UnwrapExecutor(tx), expectedJob, opts) + actualArgs, err := requireInsertedErr[TDriver](ctx, t, driver.UnwrapExecutor(tx), schema, expectedJob, opts) if err != nil { failure(t, "Internal failure: %s", err) } return actualArgs } -func requireInsertedErr[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, exec riverdriver.Executor, expectedJob TArgs, opts *RequireInsertedOpts) (*river.Job[TArgs], error) { +func requireInsertedErr[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, exec riverdriver.Executor, schema string, expectedJob TArgs, opts *RequireInsertedOpts) (*river.Job[TArgs], error) { t.Helper() // Returned ordered by ID. jobRows, err := exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{expectedJob.Kind()}, - Schema: "", + Schema: schema, }) if err != nil { return nil, fmt.Errorf("error querying jobs: %w", err) @@ -198,12 +205,12 @@ func requireInsertedErr[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.Jo // the given opts. 
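Until the exported rivertest helpers accept a schema directly, the example tests above bridge the gap by pointing search_path at the injected schema inside the test transaction, after which the helpers work unchanged. That workaround condenses into a small wrapper; the sketch below is hypothetical and not part of this diff:

package rivertestexample

import (
	"context"
	"testing"

	"github.com/jackc/pgx/v5"

	"github.com/riverqueue/river"
	"github.com/riverqueue/river/riverdriver/riverpgxv5"
	"github.com/riverqueue/river/rivertest"
)

// requireInsertedInSchema sets search_path on the test transaction so the
// schema-unaware exported helper can find River's tables, then delegates to
// RequireInsertedTx.
func requireInsertedInSchema[TArgs river.JobArgs](ctx context.Context, t *testing.T, tx pgx.Tx, schema string, expectedJob TArgs) *river.Job[TArgs] {
	t.Helper()

	if _, err := tx.Exec(ctx, "SET search_path TO "+schema); err != nil {
		t.Fatal(err)
	}

	return rivertest.RequireInsertedTx[*riverpgxv5.Driver](ctx, t, tx, expectedJob, nil)
}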
func RequireNotInserted[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, tb testing.TB, driver TDriver, expectedJob TArgs, opts *RequireInsertedOpts) { tb.Helper() - requireNotInserted(ctx, tb, driver, expectedJob, opts) + requireNotInserted(ctx, tb, driver, emptySchema, expectedJob, opts) } -func requireNotInserted[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, driver TDriver, expectedJob TArgs, opts *RequireInsertedOpts) { +func requireNotInserted[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, driver TDriver, schema string, expectedJob TArgs, opts *RequireInsertedOpts) { t.Helper() - err := requireNotInsertedErr[TDriver](ctx, t, driver.GetExecutor(), expectedJob, opts) + err := requireNotInsertedErr[TDriver](ctx, t, driver.GetExecutor(), schema, expectedJob, opts) if err != nil { failure(t, "Internal failure: %s", err) } @@ -227,27 +234,30 @@ func requireNotInserted[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.Jo // the given opts. func RequireNotInsertedTx[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, tb testing.TB, tx TTx, expectedJob TArgs, opts *RequireInsertedOpts) { tb.Helper() - requireNotInsertedTx[TDriver](ctx, tb, tx, expectedJob, opts) + requireNotInsertedTx[TDriver](ctx, tb, tx, emptySchema, expectedJob, opts) } // Internal function used by the tests so that the exported version can take // `testing.TB` instead of `testing.T`. -func requireNotInsertedTx[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, tx TTx, expectedJob TArgs, opts *RequireInsertedOpts) { +// +// Also takes a schema for testing purposes, which I haven't quite figured out +// how to get into the public API yet. +func requireNotInsertedTx[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, tx TTx, schema string, expectedJob TArgs, opts *RequireInsertedOpts) { t.Helper() var driver TDriver - err := requireNotInsertedErr[TDriver](ctx, t, driver.UnwrapExecutor(tx), expectedJob, opts) + err := requireNotInsertedErr[TDriver](ctx, t, driver.UnwrapExecutor(tx), schema, expectedJob, opts) if err != nil { failure(t, "Internal failure: %s", err) } } -func requireNotInsertedErr[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, exec riverdriver.Executor, expectedJob TArgs, opts *RequireInsertedOpts) error { +func requireNotInsertedErr[TDriver riverdriver.Driver[TTx], TTx any, TArgs river.JobArgs](ctx context.Context, t testingT, exec riverdriver.Executor, schema string, expectedJob TArgs, opts *RequireInsertedOpts) error { t.Helper() // Returned ordered by ID. jobRows, err := exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: []string{expectedJob.Kind()}, - Schema: "", + Schema: schema, }) if err != nil { return fmt.Errorf("error querying jobs: %w", err) @@ -314,12 +324,12 @@ type ExpectedJob struct { // multiple times. 
func RequireManyInserted[TDriver riverdriver.Driver[TTx], TTx any](ctx context.Context, tb testing.TB, driver TDriver, expectedJobs []ExpectedJob) []*rivertype.JobRow { tb.Helper() - return requireManyInserted(ctx, tb, driver, expectedJobs) + return requireManyInserted(ctx, tb, driver, string(emptySchema), expectedJobs) } -func requireManyInserted[TDriver riverdriver.Driver[TTx], TTx any](ctx context.Context, t testingT, driver TDriver, expectedJobs []ExpectedJob) []*rivertype.JobRow { +func requireManyInserted[TDriver riverdriver.Driver[TTx], TTx any](ctx context.Context, t testingT, driver TDriver, schema string, expectedJobs []ExpectedJob) []*rivertype.JobRow { t.Helper() - actualArgs, err := requireManyInsertedErr[TDriver](ctx, t, driver.GetExecutor(), expectedJobs) + actualArgs, err := requireManyInsertedErr[TDriver](ctx, t, driver.GetExecutor(), schema, expectedJobs) if err != nil { failure(t, "Internal failure: %s", err) } @@ -348,22 +358,25 @@ func requireManyInserted[TDriver riverdriver.Driver[TTx], TTx any](ctx context.C // multiple times. func RequireManyInsertedTx[TDriver riverdriver.Driver[TTx], TTx any](ctx context.Context, tb testing.TB, tx TTx, expectedJobs []ExpectedJob) []*rivertype.JobRow { tb.Helper() - return requireManyInsertedTx[TDriver](ctx, tb, tx, expectedJobs) + return requireManyInsertedTx[TDriver](ctx, tb, tx, emptySchema, expectedJobs) } // Internal function used by the tests so that the exported version can take // `testing.TB` instead of `testing.T`. -func requireManyInsertedTx[TDriver riverdriver.Driver[TTx], TTx any](ctx context.Context, t testingT, tx TTx, expectedJobs []ExpectedJob) []*rivertype.JobRow { +// +// Also takes a schema for testing purposes, which I haven't quite figured out +// how to get into the public API yet. +func requireManyInsertedTx[TDriver riverdriver.Driver[TTx], TTx any](ctx context.Context, t testingT, tx TTx, schema string, expectedJobs []ExpectedJob) []*rivertype.JobRow { t.Helper() var driver TDriver - actualArgs, err := requireManyInsertedErr[TDriver](ctx, t, driver.UnwrapExecutor(tx), expectedJobs) + actualArgs, err := requireManyInsertedErr[TDriver](ctx, t, driver.UnwrapExecutor(tx), schema, expectedJobs) if err != nil { failure(t, "Internal failure: %s", err) } return actualArgs } -func requireManyInsertedErr[TDriver riverdriver.Driver[TTx], TTx any](ctx context.Context, t testingT, exec riverdriver.Executor, expectedJobs []ExpectedJob) ([]*rivertype.JobRow, error) { +func requireManyInsertedErr[TDriver riverdriver.Driver[TTx], TTx any](ctx context.Context, t testingT, exec riverdriver.Executor, schema string, expectedJobs []ExpectedJob) ([]*rivertype.JobRow, error) { t.Helper() expectedArgsKinds := sliceutil.Map(expectedJobs, func(j ExpectedJob) string { return j.Args.Kind() }) @@ -371,7 +384,7 @@ func requireManyInsertedErr[TDriver riverdriver.Driver[TTx], TTx any](ctx contex // Returned ordered by ID. 
jobRows, err := exec.JobGetByKindMany(ctx, &riverdriver.JobGetByKindManyParams{ Kind: expectedArgsKinds, - Schema: "", + Schema: schema, }) if err != nil { return nil, fmt.Errorf("error querying jobs: %w", err) diff --git a/rivertest/rivertest_test.go b/rivertest/rivertest_test.go index c51a59db..4bc6d043 100644 --- a/rivertest/rivertest_test.go +++ b/rivertest/rivertest_test.go @@ -12,8 +12,9 @@ import ( "github.com/stretchr/testify/require" "github.com/riverqueue/river" - "github.com/riverqueue/river/internal/riverinternaltest" + "github.com/riverqueue/river/riverdbtest" "github.com/riverqueue/river/riverdriver/riverpgxv5" + "github.com/riverqueue/river/rivershared/riversharedtest" "github.com/riverqueue/river/rivertype" ) @@ -41,20 +42,30 @@ func TestRequireInserted(t *testing.T) { type testBundle struct { dbPool *pgxpool.Pool + driver *riverpgxv5.Driver mockT *MockT + schema string } setup := func(t *testing.T) (*river.Client[pgx.Tx], *testBundle) { t.Helper() - dbPool := riverinternaltest.TestDB(ctx, t) + var ( + dbPool = riversharedtest.DBPool(ctx, t) + driver = riverpgxv5.New(dbPool) + schema = riverdbtest.TestSchema(ctx, t, driver, nil) + ) - riverClient, err := river.NewClient(riverpgxv5.New(dbPool), &river.Config{}) + riverClient, err := river.NewClient(driver, &river.Config{ + Schema: schema, + }) require.NoError(t, err) return riverClient, &testBundle{ dbPool: dbPool, + driver: driver, mockT: NewMockT(t), + schema: schema, } } @@ -66,7 +77,7 @@ func TestRequireInserted(t *testing.T) { _, err := riverClient.Insert(ctx, Job1Args{String: "foo"}, nil) require.NoError(t, err) - job := requireInserted(ctx, t, riverpgxv5.New(bundle.dbPool), &Job1Args{}, nil) + job := requireInserted(ctx, t, bundle.driver, bundle.schema, &Job1Args{}, nil) require.False(t, bundle.mockT.Failed) require.Equal(t, "foo", job.Args.String) }) @@ -90,7 +101,7 @@ func TestRequireInsertedTx(t *testing.T) { return riverClient, &testBundle{ mockT: NewMockT(t), - tx: riverinternaltest.TestTx(ctx, t), + tx: riverdbtest.TestTxPgx(ctx, t), } } @@ -102,7 +113,7 @@ func TestRequireInsertedTx(t *testing.T) { _, err := riverClient.InsertTx(ctx, bundle.tx, Job1Args{String: "foo"}, nil) require.NoError(t, err) - job := requireInsertedTx[*riverpgxv5.Driver](ctx, t, bundle.tx, &Job1Args{}, nil) + job := requireInsertedTx[*riverpgxv5.Driver](ctx, t, bundle.tx, emptySchema, &Job1Args{}, nil) require.False(t, bundle.mockT.Failed) require.Equal(t, "foo", job.Args.String) }) @@ -118,11 +129,11 @@ func TestRequireInsertedTx(t *testing.T) { _, err = riverClient.InsertTx(ctx, bundle.tx, Job2Args{Int: 123}, nil) require.NoError(t, err) - job1 := requireInsertedTx[*riverpgxv5.Driver](ctx, t, bundle.tx, &Job1Args{}, nil) + job1 := requireInsertedTx[*riverpgxv5.Driver](ctx, t, bundle.tx, emptySchema, &Job1Args{}, nil) require.False(t, bundle.mockT.Failed) require.Equal(t, "foo", job1.Args.String) - job2 := requireInsertedTx[*riverpgxv5.Driver](ctx, t, bundle.tx, &Job2Args{}, nil) + job2 := requireInsertedTx[*riverpgxv5.Driver](ctx, t, bundle.tx, emptySchema, &Job2Args{}, nil) require.False(t, bundle.mockT.Failed) require.Equal(t, 123, job2.Args.Int) }) @@ -133,18 +144,18 @@ func TestRequireInsertedTx(t *testing.T) { riverClient, bundle := setup(t) // Start a second transaction with different visibility. - otherTx := riverinternaltest.TestTx(ctx, t) + otherTx := riverdbtest.TestTxPgx(ctx, t) _, err := riverClient.InsertTx(ctx, bundle.tx, Job1Args{String: "foo"}, nil) require.NoError(t, err) // Visible in the original transaction. 
-	job := requireInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, &Job1Args{}, nil)
+	job := requireInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, &Job1Args{}, nil)
 	require.False(t, bundle.mockT.Failed)
 	require.Equal(t, "foo", job.Args.String)
 
 	// Not visible in the second transaction.
-	_ = requireInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, otherTx, &Job1Args{}, nil)
+	_ = requireInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, otherTx, emptySchema, &Job1Args{}, nil)
 	require.True(t, bundle.mockT.Failed)
 })
@@ -156,7 +167,7 @@ func TestRequireInsertedTx(t *testing.T) {
 		_, err := riverClient.InsertTx(ctx, bundle.tx, Job1Args{String: "foo"}, nil)
 		require.NoError(t, err)
 
-		_ = requireInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, &Job2Args{}, nil)
+		_ = requireInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, &Job2Args{}, nil)
 		require.True(t, bundle.mockT.Failed)
 		require.Equal(t,
 			failureString("No jobs found with kind: job2")+"\n",
@@ -174,7 +185,7 @@ func TestRequireInsertedTx(t *testing.T) {
 		})
 		require.NoError(t, err)
 
-		_ = requireInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, &Job1Args{}, nil)
+		_ = requireInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, &Job1Args{}, nil)
 		require.True(t, bundle.mockT.Failed)
 		require.Equal(t,
 			failureString("More than one job found with kind: job1 (you might want RequireManyInserted instead)")+"\n",
@@ -189,7 +200,7 @@ func TestRequireInsertedTx(t *testing.T) {
 		_, err := riverClient.InsertTx(ctx, bundle.tx, Job2Args{Int: 123}, nil)
 		require.NoError(t, err)
 
-		_ = requireInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, &Job1Args{}, nil)
+		_ = requireInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, &Job1Args{}, nil)
 		require.True(t, bundle.mockT.Failed)
 		require.Equal(t,
 			failureString("No jobs found with kind: job1")+"\n",
@@ -229,7 +240,7 @@ func TestRequireInsertedTx(t *testing.T) {
 			mockT := NewMockT(t)
 			opts := sameOpts()
 			opts.MaxAttempts = 77
-			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' max attempts 78 not equal to expected 77")+"\n",
@@ -240,7 +251,7 @@ func TestRequireInsertedTx(t *testing.T) {
 			mockT := NewMockT(t)
 			opts := sameOpts()
 			opts.Priority = 3
-			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' priority 2 not equal to expected 3")+"\n",
@@ -251,7 +262,7 @@ func TestRequireInsertedTx(t *testing.T) {
 			mockT := NewMockT(t)
 			opts := sameOpts()
 			opts.Queue = "wrong_queue"
-			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' queue 'another_queue' not equal to expected 'wrong_queue'")+"\n",
@@ -262,7 +273,7 @@ func TestRequireInsertedTx(t *testing.T) {
 			mockT := NewMockT(t)
 			opts := sameOpts()
 			opts.ScheduledAt = testTime.Add(3*time.Minute + 23*time.Second + 123*time.Microsecond)
-			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' scheduled at 2023-10-30T10:45:23.000123Z not equal to expected 2023-10-30T10:48:46.000246Z")+"\n",
@@ -273,7 +284,7 @@ func TestRequireInsertedTx(t *testing.T) {
 			mockT := NewMockT(t)
 			opts := sameOpts()
 			opts.State = rivertype.JobStateCancelled
-			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' state 'scheduled' not equal to expected 'cancelled'")+"\n",
@@ -284,7 +295,7 @@ func TestRequireInsertedTx(t *testing.T) {
 			mockT := NewMockT(t)
 			opts := sameOpts()
 			opts.Tags = []string{"tag2"}
-			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' tags [tag1] not equal to expected [tag2]")+"\n",
@@ -296,7 +307,7 @@ func TestRequireInsertedTx(t *testing.T) {
 			opts := emptyOpts()
 			opts.MaxAttempts = job.MaxAttempts
 			opts.Priority = job.Priority
-			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.False(t, mockT.Failed, "Should have succeeded, but failed with: "+mockT.LogOutput())
 		})
@@ -305,7 +316,7 @@ func TestRequireInsertedTx(t *testing.T) {
 			opts := sameOpts()
 			opts.MaxAttempts = 77
 			opts.Priority = 3
-			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			_ = requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' max attempts 78 not equal to expected 77, priority 2 not equal to expected 3")+"\n",
@@ -315,7 +326,7 @@ func TestRequireInsertedTx(t *testing.T) {
 		t.Run("AllSameSucceeds", func(t *testing.T) {
 			mockT := NewMockT(t)
 			opts := sameOpts()
-			requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			requireInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.False(t, mockT.Failed)
 		})
 	})
@@ -330,20 +341,30 @@ func TestRequireNotInserted(t *testing.T) {
 	type testBundle struct {
 		dbPool *pgxpool.Pool
+		driver *riverpgxv5.Driver
 		mockT  *MockT
+		schema string
 	}
 
 	setup := func(t *testing.T) (*river.Client[pgx.Tx], *testBundle) {
 		t.Helper()
 
-		dbPool := riverinternaltest.TestDB(ctx, t)
+		var (
+			dbPool = riversharedtest.DBPool(ctx, t)
+			driver = riverpgxv5.New(dbPool)
+			schema = riverdbtest.TestSchema(ctx, t, driver, nil)
+		)
 
-		riverClient, err := river.NewClient(riverpgxv5.New(dbPool), &river.Config{})
+		riverClient, err := river.NewClient(driver, &river.Config{
+			Schema: schema,
+		})
 		require.NoError(t, err)
 
 		return riverClient, &testBundle{
 			dbPool: dbPool,
+			driver: driver,
 			mockT:  NewMockT(t),
+			schema: schema,
 		}
 	}
@@ -355,7 +376,7 @@ func TestRequireNotInserted(t *testing.T) {
 		_, err := riverClient.Insert(ctx, Job2Args{Int: 123}, nil)
 		require.NoError(t, err)
 
-		requireNotInserted(ctx, t, riverpgxv5.New(bundle.dbPool), &Job1Args{}, nil)
+		requireNotInserted(ctx, t, bundle.driver, bundle.schema, &Job1Args{}, nil)
 		require.False(t, bundle.mockT.Failed)
 	})
 }
@@ -378,7 +399,7 @@ func TestRequireNotInsertedTx(t *testing.T) {
 		return riverClient, &testBundle{
 			mockT: NewMockT(t),
-			tx:    riverinternaltest.TestTx(ctx, t),
+			tx:    riverdbtest.TestTxPgx(ctx, t),
 		}
 	}
@@ -390,7 +411,7 @@ func TestRequireNotInsertedTx(t *testing.T) {
 		_, err := riverClient.InsertTx(ctx, bundle.tx, Job2Args{Int: 123}, nil)
 		require.NoError(t, err)
 
-		requireNotInsertedTx[*riverpgxv5.Driver](ctx, t, bundle.tx, &Job1Args{}, nil)
+		requireNotInsertedTx[*riverpgxv5.Driver](ctx, t, bundle.tx, emptySchema, &Job1Args{}, nil)
 		require.False(t, bundle.mockT.Failed)
 	})
@@ -399,10 +420,10 @@ func TestRequireNotInsertedTx(t *testing.T) {
 		_, bundle := setup(t)
 
-		requireNotInsertedTx[*riverpgxv5.Driver](ctx, t, bundle.tx, &Job1Args{}, nil)
+		requireNotInsertedTx[*riverpgxv5.Driver](ctx, t, bundle.tx, emptySchema, &Job1Args{}, nil)
 		require.False(t, bundle.mockT.Failed)
 
-		requireNotInsertedTx[*riverpgxv5.Driver](ctx, t, bundle.tx, &Job2Args{}, nil)
+		requireNotInsertedTx[*riverpgxv5.Driver](ctx, t, bundle.tx, emptySchema, &Job2Args{}, nil)
 		require.False(t, bundle.mockT.Failed)
 	})
@@ -412,17 +433,17 @@ func TestRequireNotInsertedTx(t *testing.T) {
 		riverClient, bundle := setup(t)
 
 		// Start a second transaction with different visibility.
-		otherTx := riverinternaltest.TestTx(ctx, t)
+		otherTx := riverdbtest.TestTxPgx(ctx, t)
 
 		_, err := riverClient.InsertTx(ctx, bundle.tx, Job1Args{String: "foo"}, nil)
 		require.NoError(t, err)
 
 		// Not visible in the second transaction.
-		requireNotInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, otherTx, &Job1Args{}, nil)
+		requireNotInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, otherTx, emptySchema, &Job1Args{}, nil)
 		require.False(t, bundle.mockT.Failed)
 
 		// Visible in the original transaction.
-		requireNotInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, &Job1Args{}, nil)
+		requireNotInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, &Job1Args{}, nil)
 		require.True(t, bundle.mockT.Failed)
 	})
@@ -431,7 +452,7 @@ func TestRequireNotInsertedTx(t *testing.T) {
 		_, bundle := setup(t)
 
-		requireNotInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, &Job2Args{}, nil)
+		requireNotInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, &Job2Args{}, nil)
 		require.False(t, bundle.mockT.Failed)
 	})
@@ -446,7 +467,7 @@ func TestRequireNotInsertedTx(t *testing.T) {
 		})
 		require.NoError(t, err)
 
-		requireNotInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, &Job1Args{}, nil)
+		requireNotInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, &Job1Args{}, nil)
 		require.True(t, bundle.mockT.Failed)
 		require.Equal(t,
 			failureString("2 jobs found with kind, but expected to find none: job1")+"\n",
@@ -461,7 +482,7 @@ func TestRequireNotInsertedTx(t *testing.T) {
 		_, err := riverClient.InsertTx(ctx, bundle.tx, Job2Args{Int: 123}, nil)
 		require.NoError(t, err)
 
-		requireNotInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, &Job1Args{}, nil)
+		requireNotInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, &Job1Args{}, nil)
 		require.False(t, bundle.mockT.Failed)
 	})
@@ -498,7 +519,7 @@ func TestRequireNotInsertedTx(t *testing.T) {
 			mockT := NewMockT(t)
 			opts := emptyOpts()
 			opts.MaxAttempts = job.MaxAttempts
-			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' max attempts equal to excluded %d", job.MaxAttempts)+"\n",
@@ -509,7 +530,7 @@ func TestRequireNotInsertedTx(t *testing.T) {
 			mockT := NewMockT(t)
 			opts := emptyOpts()
 			opts.Priority = job.Priority
-			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' priority equal to excluded %d", job.Priority)+"\n",
@@ -520,7 +541,7 @@ func TestRequireNotInsertedTx(t *testing.T) {
 			mockT := NewMockT(t)
 			opts := emptyOpts()
 			opts.Queue = job.Queue
-			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' queue equal to excluded '%s'", job.Queue)+"\n",
@@ -531,7 +552,7 @@ func TestRequireNotInsertedTx(t *testing.T) {
 			mockT := NewMockT(t)
 			opts := emptyOpts()
 			opts.ScheduledAt = job.ScheduledAt
-			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' scheduled at equal to excluded %s", opts.ScheduledAt.Format(rfc3339Micro))+"\n",
@@ -542,7 +563,7 @@ func TestRequireNotInsertedTx(t *testing.T) {
 			mockT := NewMockT(t)
 			opts := emptyOpts()
 			opts.State = job.State
-			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' state equal to excluded '%s'", job.State)+"\n",
@@ -553,7 +574,7 @@ func TestRequireNotInsertedTx(t *testing.T) {
 			mockT := NewMockT(t)
 			opts := emptyOpts()
 			opts.Tags = job.Tags
-			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' tags equal to excluded %+v", job.Tags)+"\n",
@@ -565,7 +586,7 @@ func TestRequireNotInsertedTx(t *testing.T) {
 			opts := emptyOpts()
 			opts.MaxAttempts = job.MaxAttempts // one property matches job, but the other does not
 			opts.Priority = 3
-			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.False(t, mockT.Failed, "Should have succeeded, but failed with: "+mockT.LogOutput())
 		})
@@ -574,7 +595,7 @@ func TestRequireNotInsertedTx(t *testing.T) {
 			opts := emptyOpts()
 			opts.MaxAttempts = job.MaxAttempts
 			opts.Priority = job.Priority
-			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' max attempts equal to excluded %d, priority equal to excluded %d", job.MaxAttempts, job.Priority)+"\n",
@@ -584,7 +605,7 @@ func TestRequireNotInsertedTx(t *testing.T) {
 		t.Run("AllSameFails", func(t *testing.T) {
 			mockT := NewMockT(t)
 			opts := sameOpts()
-			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' max attempts equal to excluded %d, priority equal to excluded %d, queue equal to excluded '%s', scheduled at equal to excluded %s, state equal to excluded '%s', tags equal to excluded %+v", job.MaxAttempts, job.Priority, job.Queue, job.ScheduledAt.Format(rfc3339Micro), job.State, job.Tags)+"\n",
@@ -600,7 +621,7 @@ func TestRequireNotInsertedTx(t *testing.T) {
 			mockT := NewMockT(t)
 			opts := emptyOpts()
 			opts.Priority = 3
-			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, &Job2Args{}, opts)
+			requireNotInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, &Job2Args{}, opts)
 			require.True(t, mockT.Failed)
 			require.Equal(t,
 				failureString("Job with kind 'job2' priority equal to excluded %d", 3)+"\n",
@@ -618,20 +639,30 @@ func TestRequireManyInserted(t *testing.T) {
 	type testBundle struct {
 		dbPool *pgxpool.Pool
+		driver *riverpgxv5.Driver
 		mockT  *MockT
+		schema string
 	}
 
 	setup := func(t *testing.T) (*river.Client[pgx.Tx], *testBundle) {
 		t.Helper()
 
-		dbPool := riverinternaltest.TestDB(ctx, t)
+		var (
+			dbPool = riversharedtest.DBPool(ctx, t)
+			driver = riverpgxv5.New(dbPool)
+			schema = riverdbtest.TestSchema(ctx, t, driver, nil)
+		)
 
-		riverClient, err := river.NewClient(riverpgxv5.New(dbPool), &river.Config{})
+		riverClient, err := river.NewClient(driver, &river.Config{
+			Schema: schema,
+		})
 		require.NoError(t, err)
 
 		return riverClient, &testBundle{
 			dbPool: dbPool,
+			driver: driver,
 			mockT:  NewMockT(t),
+			schema: schema,
 		}
 	}
@@ -643,7 +674,7 @@ func TestRequireManyInserted(t *testing.T) {
 		_, err := riverClient.Insert(ctx, Job1Args{String: "foo"}, nil)
 		require.NoError(t, err)
 
-		jobs := requireManyInserted(ctx, bundle.mockT, riverpgxv5.New(bundle.dbPool), []ExpectedJob{
+		jobs := requireManyInserted(ctx, bundle.mockT, bundle.driver, bundle.schema, []ExpectedJob{
 			{Args: &Job1Args{}},
 		})
 		require.False(t, bundle.mockT.Failed)
@@ -669,7 +700,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		return riverClient, &testBundle{
 			mockT: NewMockT(t),
-			tx:    riverinternaltest.TestTx(ctx, t),
+			tx:    riverdbtest.TestTxPgx(ctx, t),
 		}
 	}
@@ -681,7 +712,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		_, err := riverClient.InsertTx(ctx, bundle.tx, Job1Args{String: "foo"}, nil)
 		require.NoError(t, err)
 
-		jobs := requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, []ExpectedJob{
+		jobs := requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, []ExpectedJob{
 			{Args: &Job1Args{}},
 		})
 		require.False(t, bundle.mockT.Failed)
@@ -694,20 +725,20 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		riverClient, bundle := setup(t)
 
 		// Start a second transaction with different visibility.
-		otherTx := riverinternaltest.TestTx(ctx, t)
+		otherTx := riverdbtest.TestTxPgx(ctx, t)
 
 		_, err := riverClient.InsertTx(ctx, bundle.tx, Job1Args{String: "foo"}, nil)
 		require.NoError(t, err)
 
 		// Visible in the original transaction.
-		jobs := requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, []ExpectedJob{
+		jobs := requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, []ExpectedJob{
 			{Args: &Job1Args{}},
 		})
 		require.False(t, bundle.mockT.Failed)
 		require.Equal(t, "job1", jobs[0].Kind)
 
 		// Not visible in the second transaction.
-		_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, otherTx, []ExpectedJob{
+		_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, otherTx, emptySchema, []ExpectedJob{
 			{Args: &Job1Args{}},
 		})
 		require.True(t, bundle.mockT.Failed)
@@ -724,7 +755,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		_, err = riverClient.InsertTx(ctx, bundle.tx, Job2Args{Int: 123}, nil)
 		require.NoError(t, err)
 
-		jobs := requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, []ExpectedJob{
+		jobs := requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, []ExpectedJob{
 			{Args: &Job1Args{}},
 			{Args: &Job2Args{}},
 		})
@@ -744,7 +775,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		})
 		require.NoError(t, err)
 
-		jobs := requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, []ExpectedJob{
+		jobs := requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, []ExpectedJob{
 			{Args: &Job1Args{}},
 			{Args: &Job1Args{}},
 		})
@@ -767,7 +798,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		})
 		require.NoError(t, err)
 
-		jobs := requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, []ExpectedJob{
+		jobs := requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, []ExpectedJob{
 			{Args: &Job1Args{}},
 			{Args: &Job1Args{}},
 			{Args: &Job2Args{}},
@@ -791,7 +822,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		_, err := riverClient.InsertTx(ctx, bundle.tx, Job1Args{String: "foo"}, nil)
 		require.NoError(t, err)
 
-		_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, []ExpectedJob{
+		_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, []ExpectedJob{
 			{
 				Args: &Job1Args{},
 				Opts: &RequireInsertedOpts{
@@ -813,7 +844,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		})
 		require.NoError(t, err)
 
-		_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, []ExpectedJob{
+		_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, []ExpectedJob{
 			{
 				Args: &Job2Args{},
 				Opts: &RequireInsertedOpts{
@@ -833,7 +864,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		_, bundle := setup(t)
 
-		_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, []ExpectedJob{
+		_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, []ExpectedJob{
 			{Args: &Job1Args{}},
 		})
 		require.True(t, bundle.mockT.Failed)
@@ -853,7 +884,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		})
 		require.NoError(t, err)
 
-		_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, []ExpectedJob{
+		_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, []ExpectedJob{
 			{Args: &Job1Args{}},
 		})
 		require.True(t, bundle.mockT.Failed)
@@ -873,7 +904,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		})
 		require.NoError(t, err)
 
-		_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, []ExpectedJob{
+		_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, []ExpectedJob{
 			{Args: &Job1Args{}},
 			{Args: &Job2Args{}},
 		})
@@ -897,7 +928,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		})
 		require.NoError(t, err)
 
-		_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, []ExpectedJob{
+		_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, bundle.mockT, bundle.tx, emptySchema, []ExpectedJob{
 			{Args: &Job1Args{}},
 			{Args: &Job1Args{}},
 			{Args: &Job2Args{}},
@@ -928,7 +959,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		// Max attempts
 		{
 			mockT := NewMockT(t)
-			_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, []ExpectedJob{
+			_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, []ExpectedJob{
 				{
 					Args: &Job2Args{},
 					Opts: &RequireInsertedOpts{
@@ -950,7 +981,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		// Priority
 		{
 			mockT := NewMockT(t)
-			_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, []ExpectedJob{
+			_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, []ExpectedJob{
 				{
 					Args: &Job2Args{},
 					Opts: &RequireInsertedOpts{
@@ -972,7 +1003,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		// Queue
 		{
 			mockT := NewMockT(t)
-			_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, []ExpectedJob{
+			_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, []ExpectedJob{
 				{
 					Args: &Job2Args{},
 					Opts: &RequireInsertedOpts{
@@ -994,7 +1025,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		// Scheduled at
 		{
 			mockT := NewMockT(t)
-			_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, []ExpectedJob{
+			_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, []ExpectedJob{
 				{
 					Args: &Job2Args{},
 					Opts: &RequireInsertedOpts{
@@ -1016,7 +1047,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		// State
 		{
 			mockT := NewMockT(t)
-			_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, []ExpectedJob{
+			_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, []ExpectedJob{
				{
 					Args: &Job2Args{},
 					Opts: &RequireInsertedOpts{
@@ -1038,7 +1069,7 @@ func TestRequireManyInsertedTx(t *testing.T) {
 		// Tags
 		{
 			mockT := NewMockT(t)
-			_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, []ExpectedJob{
+			_ = requireManyInsertedTx[*riverpgxv5.Driver](ctx, mockT, bundle.tx, emptySchema, []ExpectedJob{
 				{
 					Args: &Job2Args{},
 					Opts: &RequireInsertedOpts{
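The hunks above converge on two related patterns: pool-based tests now build a client from a shared pool (riversharedtest.DBPool) plus a dedicated schema (riverdbtest.TestSchema) injected via Config.Schema, while the transaction-bound require helpers take the schema as an explicit argument, with emptySchema passed where a riverdbtest.TestTxPgx transaction presumably already provides the isolation. As a minimal standalone sketch of the pool-based setup, assembled from these hunks rather than excerpted from any single file (imports and the `Job2Args` worker wiring are assumed from the surrounding tests):

```go
func TestExampleSchemaIsolation(t *testing.T) {
	ctx := context.Background()

	var (
		dbPool = riversharedtest.DBPool(ctx, t)              // pool shared between tests
		driver = riverpgxv5.New(dbPool)
		schema = riverdbtest.TestSchema(ctx, t, driver, nil) // schema owned by this test
	)

	// Injecting the schema through Config.Schema scopes the client's
	// database operations to this test's schema, so many tests can share
	// one database (and one pool) without interfering with each other.
	riverClient, err := river.NewClient(driver, &river.Config{
		Schema: schema,
	})
	require.NoError(t, err)

	_, err = riverClient.Insert(ctx, Job2Args{Int: 123}, nil)
	require.NoError(t, err)
}
```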
diff --git a/rivertest/worker.go b/rivertest/worker.go
index 4a8c3400..fc377987 100644
--- a/rivertest/worker.go
+++ b/rivertest/worker.go
@@ -144,7 +144,7 @@ func (w *Worker[T, TTx]) workJob(ctx context.Context, tb testing.TB, tx TTx, job
 	} else {
 		archetype.Time = &baseservice.TimeGeneratorWithStubWrapper{TimeGenerator: timeGen}
 	}
-	completer := jobcompleter.NewInlineCompleter(archetype, exec, w.client.Pilot(), subscribeCh)
+	completer := jobcompleter.NewInlineCompleter(archetype, w.config.Schema, exec, w.client.Pilot(), subscribeCh)
 
 	for _, hook := range w.config.Hooks {
 		if withBaseService, ok := hook.(baseservice.WithBaseService); ok {
diff --git a/rivertest/worker_test.go b/rivertest/worker_test.go
index d8fc4724..77816bf6 100644
--- a/rivertest/worker_test.go
+++ b/rivertest/worker_test.go
@@ -11,7 +11,7 @@ import (
 
 	"github.com/riverqueue/river"
 	"github.com/riverqueue/river/internal/execution"
-	"github.com/riverqueue/river/internal/riverinternaltest"
+	"github.com/riverqueue/river/riverdbtest"
 	"github.com/riverqueue/river/riverdriver"
 	"github.com/riverqueue/river/riverdriver/riverpgxv5"
 	"github.com/riverqueue/river/rivershared/baseservice"
@@ -51,7 +51,7 @@ func TestWorker_NewWorker(t *testing.T) {
 		return &testBundle{
 			config: &river.Config{ID: "rivertest-worker"},
 			driver: riverpgxv5.New(nil),
-			tx:     riverinternaltest.TestTx(ctx, t),
+			tx:     riverdbtest.TestTxPgx(ctx, t),
 		}
 	}
@@ -85,7 +85,7 @@ func TestWorker_Work(t *testing.T) {
 		var (
 			config = &river.Config{ID: "rivertest-worker"}
 			driver = riverpgxv5.New(nil)
-			tx     = riverinternaltest.TestTx(ctx, t)
+			tx     = riverdbtest.TestTxPgx(ctx, t)
 		)
 
 		return &testBundle{
@@ -440,7 +440,7 @@ func TestWorker_WorkJob(t *testing.T) {
 			client:   client,
 			config:   config,
 			driver:   driver,
-			tx:       riverinternaltest.TestTx(ctx, t),
+			tx:       riverdbtest.TestTxPgx(ctx, t),
 			workFunc: func(ctx context.Context, job *river.Job[testArgs]) error { return nil },
 		}
 	}
diff --git a/rivertype/river_type.go b/rivertype/river_type.go
index d1bbda48..6cd29718 100644
--- a/rivertype/river_type.go
+++ b/rivertype/river_type.go
@@ -397,8 +397,8 @@ type WorkerMiddleware interface {
 	// Work is invoked after a job's JSON args being unmarshaled and before the
 	// job is worked. Implementations must always include a call to doInner to
-	// call down the middleware stack and perform the batch insertion, and may run
-	// custom code before and after.
+	// call down the middleware stack and perform the batch insertion, and may
+	// run custom code before and after.
 	//
 	// Returning an error from this function will fail the overarching work
 	// operation, even if the inner work originally succeeded.
diff --git a/subscription_manager_test.go b/subscription_manager_test.go
index 8761d79f..7ac732ef 100644
--- a/subscription_manager_test.go
+++ b/subscription_manager_test.go
@@ -10,7 +10,7 @@ import (
 
 	"github.com/riverqueue/river/internal/jobcompleter"
 	"github.com/riverqueue/river/internal/jobstats"
-	"github.com/riverqueue/river/internal/riverinternaltest"
+	"github.com/riverqueue/river/riverdbtest"
 	"github.com/riverqueue/river/riverdriver"
 	"github.com/riverqueue/river/riverdriver/riverpgxv5"
 	"github.com/riverqueue/river/rivershared/riversharedtest"
@@ -34,7 +34,7 @@ func Test_SubscriptionManager(t *testing.T) {
 	setup := func(t *testing.T) (*subscriptionManager, *testBundle) {
 		t.Helper()
 
-		tx := riverinternaltest.TestTx(ctx, t)
+		tx := riverdbtest.TestTxPgx(ctx, t)
 		exec := riverpgxv5.New(nil).UnwrapExecutor(tx)
 		subscribeCh := make(chan []jobcompleter.CompleterJobUpdated, 1)
diff --git a/worker_test.go b/worker_test.go
index a6cbc0bd..93dac89a 100644
--- a/worker_test.go
+++ b/worker_test.go
@@ -7,7 +7,8 @@ import (
 	"github.com/jackc/pgx/v5"
 	"github.com/stretchr/testify/require"
 
-	"github.com/riverqueue/river/internal/riverinternaltest"
+	"github.com/riverqueue/river/riverdbtest"
+	"github.com/riverqueue/river/riverdriver/riverpgxv5"
 	"github.com/riverqueue/river/rivershared/riversharedtest"
 )
@@ -87,9 +88,15 @@ func TestWorkFunc(t *testing.T) {
 	setup := func(t *testing.T) (*Client[pgx.Tx], *testBundle) {
 		t.Helper()
 
-		dbPool := riverinternaltest.TestDB(ctx, t)
+		var (
+			dbPool = riversharedtest.DBPool(ctx, t)
+			driver = riverpgxv5.New(dbPool)
+			schema = riverdbtest.TestSchema(ctx, t, driver, nil)
+			config = newTestConfig(t, nil)
+		)
+		config.Schema = schema
 
-		client := newTestClient(t, dbPool, newTestConfig(t, nil))
+		client := newTestClient(t, dbPool, config)
 		startClient(ctx, t, client)
 
 		return client, &testBundle{}
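On the WorkerMiddleware contract whose doc comment is rewrapped in the rivertype/river_type.go hunk above, a conforming implementation might look roughly like the following. This is a hypothetical sketch: the exact Work signature and the river.MiddlewareDefaults embed are assumptions about River's public API, not taken from this diff.

```go
package example

import (
	"context"
	"log/slog"
	"time"

	"github.com/riverqueue/river"
	"github.com/riverqueue/river/rivertype"
)

type timingMiddleware struct {
	river.MiddlewareDefaults // assumed embeddable defaults
}

func (m *timingMiddleware) Work(ctx context.Context, job *rivertype.JobRow, doInner func(ctx context.Context) error) error {
	start := time.Now()

	// Custom code may run before and after, but doInner must always be
	// called to continue down the middleware stack and do the actual work.
	err := doInner(ctx)

	slog.InfoContext(ctx, "job worked", "kind", job.Kind, "duration", time.Since(start))

	// Per the comment above, a non-nil error returned here fails the
	// overarching work operation even if the inner work succeeded.
	return err
}
```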