Skip to content

Commit bfe5ba0

Browse files
committed
Convert SQLite JSON columns to JSONB
Alright, so this one's been some cleanup that I've been intending for a while. Previously, sqlc made using JSONB in SQLite completely unusable because it sent back the binary format which you're explicitly not supposed to parse yourself. I ended up fixing this like a year ago in [1], but getting a sqlc release out the door took so long that I ended up forgetting about it. Here, modify our SQLite definitions so that JSON becomes JSONB, and add a migration to convert existing installations' values to the same. Luckily, this doesn't require a table rewrite because both JSON and JSONB are SQLite BLOB types. I didn't want to add a migration version just for SQLite, so to keep everything in sync I also added a version 7 for Postgres that's just a no-op with a comment. [1] sqlc-dev/sqlc#3968
1 parent a4143a0 commit bfe5ba0

17 files changed

Lines changed: 245 additions & 86 deletions

CHANGELOG.md

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,27 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
77

88
## [Unreleased]
99

10+
⚠️ Version 0.36.0 contains a new database migration, version 7, but running it is **only necessary if you're using SQLite**. It converts all uses of `json` to `jsonb`. If using Postgres, version 7 is a no-op. You can run it now or wait for another migration in the future and run the two together.
11+
12+
See [documentation on running River migrations](https://riverqueue.com/docs/migrations). If migrating with the CLI, make sure to update it to its latest version:
13+
14+
```shell
15+
go install github.com/riverqueue/river/cmd/river@latest
16+
river migrate-up --database-url "$DATABASE_URL"
17+
```
18+
19+
If not using River's internal migration system, the raw SQL can alternatively be dumped with:
20+
21+
```shell
22+
go install github.com/riverqueue/river/cmd/river@latest
23+
river migrate-get --database-url sqlite:// --version 7 --up > river7.up.sql
24+
river migrate-get --database-url sqlite:// --version 7 --down > river7.down.sql
25+
```
26+
27+
### Changed
28+
29+
- Convert SQLite JSON columns to JSONB (including migration). [PR #1224](https://github.com/riverqueue/river/pull/1224).
30+
1031
## [0.35.0] - 2026-04-18
1132

1233
### Changed

riverdriver/river_driver_interface.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -883,7 +883,7 @@ func MigrationLineMainTruncateTables(version int) []string {
883883
return []string{"river_job", "river_leader"}
884884
case 4:
885885
return []string{"river_job", "river_leader", "river_queue"}
886-
case 0, 5, 6:
886+
case 0, 5, 6, 7:
887887
return []string{"river_job", "river_leader", "river_queue", "river_client", "river_client_queue"}
888888
}
889889

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
-- No-op migration to keep version numbers in sync across all drivers.
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
-- No-op migration to keep version numbers in sync across all drivers.
2+
-- The SQLite driver uses this version for a JSONB conversion.

riverdriver/riverdrivertest/migration.go

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -70,6 +70,8 @@ func exerciseMigration[TTx any](ctx context.Context, t *testing.T,
7070
driver.GetMigrationTruncateTables(riverdriver.MigrationLineMain, 5))
7171
require.Equal(t, []string{"river_job", "river_leader", "river_queue", "river_client", "river_client_queue"},
7272
driver.GetMigrationTruncateTables(riverdriver.MigrationLineMain, 6))
73+
require.Equal(t, []string{"river_job", "river_leader", "river_queue", "river_client", "river_client_queue"},
74+
driver.GetMigrationTruncateTables(riverdriver.MigrationLineMain, 7))
7375
require.Equal(t, []string{"river_job", "river_leader", "river_queue", "river_client", "river_client_queue"},
7476
driver.GetMigrationTruncateTables(riverdriver.MigrationLineMain, 0))
7577
})
Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
-- No-op migration to keep version numbers in sync across all drivers.
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
-- No-op migration to keep version numbers in sync across all drivers.
2+
-- The SQLite driver uses this version for a JSONB conversion.

riverdriver/riversqlite/internal/dbsqlc/river_client.sql

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
CREATE TABLE river_client (
22
id text PRIMARY KEY NOT NULL,
33
created_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
4-
metadata blob NOT NULL DEFAULT (json('{}')),
4+
metadata jsonb NOT NULL DEFAULT (jsonb('{}')),
55
paused_at timestamp,
66
updated_at timestamp NOT NULL,
77
CONSTRAINT name_length CHECK (length(id) > 0 AND length(id) < 128)

riverdriver/riversqlite/internal/dbsqlc/river_client_queue.sql

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ CREATE TABLE river_client_queue (
33
name text NOT NULL,
44
created_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
55
max_workers integer NOT NULL DEFAULT 0,
6-
metadata blob NOT NULL DEFAULT (json('{}')),
6+
metadata jsonb NOT NULL DEFAULT (jsonb('{}')),
77
num_jobs_completed integer NOT NULL DEFAULT 0,
88
num_jobs_running integer NOT NULL DEFAULT 0,
99
updated_at timestamp NOT NULL,

riverdriver/riversqlite/internal/dbsqlc/river_job.sql

Lines changed: 21 additions & 21 deletions
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,20 @@
11
CREATE TABLE river_job (
22
id integer PRIMARY KEY, -- SQLite makes this autoincrementing automatically
3-
args blob NOT NULL DEFAULT '{}',
3+
args jsonb NOT NULL DEFAULT (jsonb('{}')),
44
attempt integer NOT NULL DEFAULT 0,
55
attempted_at timestamp,
6-
attempted_by blob, -- JSON array of strings
6+
attempted_by jsonb, -- JSON array of strings
77
created_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
8-
errors blob, -- JSON array of error objects
8+
errors jsonb, -- JSON array of error objects
99
finalized_at timestamp,
1010
kind text NOT NULL,
1111
max_attempts integer NOT NULL,
12-
metadata blob NOT NULL DEFAULT (json('{}')),
12+
metadata jsonb NOT NULL DEFAULT (jsonb('{}')),
1313
priority integer NOT NULL DEFAULT 1,
1414
queue text NOT NULL DEFAULT 'default',
1515
state text NOT NULL DEFAULT 'available',
1616
scheduled_at timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP,
17-
tags blob NOT NULL DEFAULT (json('[]')), -- JSON array of strings
17+
tags jsonb NOT NULL DEFAULT (jsonb('[]')), -- JSON array of strings
1818
unique_key blob,
1919
unique_states integer,
2020
CONSTRAINT finalized_or_finalized_at_null CHECK (
@@ -44,7 +44,7 @@ SET
4444
finalized_at = CASE WHEN state = 'running' THEN finalized_at ELSE coalesce(cast(sqlc.narg('now') AS text), datetime('now', 'subsec')) END,
4545
-- Mark the job as cancelled by query so that the rescuer knows not to
4646
-- rescue it, even if it gets stuck in the running state:
47-
metadata = json_set(metadata, '$.cancel_attempted_at', cast(@cancel_attempted_at AS text))
47+
metadata = jsonb_set(metadata, '$.cancel_attempted_at', cast(@cancel_attempted_at AS text))
4848
WHERE id = @id
4949
AND state NOT IN ('cancelled', 'completed', 'discarded')
5050
AND finalized_at IS NULL
@@ -219,12 +219,12 @@ INSERT INTO /* TEMPLATE: schema */river_job(
219219
coalesce(cast(sqlc.narg('created_at') AS text), datetime('now', 'subsec')),
220220
@kind,
221221
@max_attempts,
222-
json(cast(@metadata AS blob)),
222+
jsonb(@metadata),
223223
@priority,
224224
@queue,
225225
coalesce(cast(sqlc.narg('scheduled_at') AS text), datetime('now', 'subsec')),
226226
@state,
227-
json(cast(@tags AS blob)),
227+
jsonb(@tags),
228228
CASE WHEN length(cast(@unique_key AS blob)) = 0 THEN NULL ELSE @unique_key END,
229229
@unique_states
230230
)
@@ -265,12 +265,12 @@ INSERT INTO /* TEMPLATE: schema */river_job(
265265
coalesce(cast(sqlc.narg('created_at') AS text), datetime('now', 'subsec')),
266266
@kind,
267267
@max_attempts,
268-
json(cast(@metadata AS blob)),
268+
jsonb(@metadata),
269269
@priority,
270270
@queue,
271271
coalesce(cast(sqlc.narg('scheduled_at') AS text), datetime('now', 'subsec')),
272272
@state,
273-
json(cast(@tags AS blob)),
273+
jsonb(@tags),
274274
CASE WHEN length(cast(@unique_key AS blob)) = 0 THEN NULL ELSE @unique_key END,
275275
@unique_states
276276
)
@@ -313,18 +313,18 @@ INSERT INTO /* TEMPLATE: schema */river_job(
313313
@args,
314314
@attempt,
315315
cast(sqlc.narg('attempted_at') as text),
316-
CASE WHEN length(cast(@attempted_by AS blob)) = 0 THEN NULL ELSE json(@attempted_by) END,
316+
CASE WHEN length(cast(@attempted_by AS blob)) = 0 THEN NULL ELSE jsonb(@attempted_by) END,
317317
coalesce(cast(sqlc.narg('created_at') AS text), datetime('now', 'subsec')),
318318
CASE WHEN length(cast(@errors AS blob)) = 0 THEN NULL ELSE @errors END,
319319
cast(sqlc.narg('finalized_at') as text),
320320
@kind,
321321
@max_attempts,
322-
json(cast(@metadata AS blob)),
322+
jsonb(@metadata),
323323
@priority,
324324
@queue,
325325
coalesce(cast(sqlc.narg('scheduled_at') AS text), datetime('now', 'subsec')),
326326
@state,
327-
json(cast(@tags AS blob)),
327+
jsonb(@tags),
328328
CASE WHEN length(cast(@unique_key AS blob)) = 0 THEN NULL ELSE @unique_key END,
329329
@unique_states
330330
) RETURNING *;
@@ -358,10 +358,10 @@ LIMIT @max;
358358
-- name: JobRescue :exec
359359
UPDATE /* TEMPLATE: schema */river_job
360360
SET
361-
errors = json_insert(coalesce(errors, json('[]')), '$[#]', json(cast(@error AS blob))),
361+
errors = jsonb_insert(coalesce(errors, jsonb('[]')), '$[#]', jsonb(@error)),
362362
finalized_at = cast(sqlc.narg('finalized_at') as text),
363363
scheduled_at = @scheduled_at,
364-
metadata = json_set(
364+
metadata = jsonb_set(
365365
metadata,
366366
'$."river:rescue_count"',
367367
coalesce(
@@ -444,7 +444,7 @@ RETURNING *;
444444

445445
-- name: JobScheduleSetDiscarded :many
446446
UPDATE /* TEMPLATE: schema */river_job
447-
SET metadata = json_patch(metadata, json('{"unique_key_conflict": "scheduler_discarded"}')),
447+
SET metadata = jsonb_patch(metadata, jsonb('{"unique_key_conflict": "scheduler_discarded"}')),
448448
finalized_at = coalesce(cast(sqlc.narg('now') AS text), datetime('now', 'subsec')),
449449
state = 'discarded'
450450
WHERE id IN (sqlc.slice('id'))
@@ -454,7 +454,7 @@ RETURNING *;
454454
-- for JobSetStateIfRunning to use when falling back to non-running jobs.
455455
-- name: JobSetMetadataIfNotRunning :one
456456
UPDATE /* TEMPLATE: schema */river_job
457-
SET metadata = json_patch(metadata, json(cast(@metadata_updates AS blob)))
457+
SET metadata = jsonb_patch(metadata, jsonb(@metadata_updates))
458458
WHERE id = @id
459459
AND state != 'running'
460460
RETURNING *;
@@ -472,15 +472,15 @@ SET
472472
THEN @attempt
473473
ELSE attempt END,
474474
errors = CASE WHEN cast(@errors_do_update AS boolean)
475-
THEN json_insert(coalesce(errors, json('[]')), '$[#]', json(cast(@error AS blob)))
475+
THEN jsonb_insert(coalesce(errors, jsonb('[]')), '$[#]', jsonb(@error))
476476
ELSE errors END,
477477
finalized_at = CASE WHEN /* should_cancel */((@state = 'retryable' OR @state = 'scheduled') AND (metadata -> 'cancel_attempted_at') iS NOT NULL)
478478
THEN coalesce(cast(sqlc.narg('now') AS text), datetime('now', 'subsec'))
479479
WHEN cast(@finalized_at_do_update AS boolean)
480480
THEN @finalized_at
481481
ELSE finalized_at END,
482482
metadata = CASE WHEN cast(@metadata_do_merge AS boolean)
483-
THEN json_patch(metadata, json(cast(@metadata_updates AS blob)))
483+
THEN jsonb_patch(metadata, jsonb(@metadata_updates))
484484
ELSE metadata END,
485485
scheduled_at = CASE WHEN /* NOT should_cancel */(cast(@state AS text) <> 'retryable' AND @state <> 'scheduled' OR (metadata -> 'cancel_attempted_at') IS NULL) AND cast(@scheduled_at_do_update AS boolean)
486486
THEN @scheduled_at
@@ -495,7 +495,7 @@ RETURNING *;
495495
-- name: JobUpdate :one
496496
UPDATE /* TEMPLATE: schema */river_job
497497
SET
498-
metadata = CASE WHEN cast(@metadata_do_merge AS boolean) THEN json_patch(metadata, json(cast(@metadata AS blob))) ELSE metadata END
498+
metadata = CASE WHEN cast(@metadata_do_merge AS boolean) THEN jsonb_patch(metadata, jsonb(@metadata)) ELSE metadata END
499499
WHERE id = @id
500500
RETURNING *;
501501

@@ -510,7 +510,7 @@ SET
510510
errors = CASE WHEN cast(@errors_do_update AS boolean) THEN @errors ELSE errors END,
511511
finalized_at = CASE WHEN cast(@finalized_at_do_update AS boolean) THEN @finalized_at ELSE finalized_at END,
512512
max_attempts = CASE WHEN cast(@max_attempts_do_update AS boolean) THEN @max_attempts ELSE max_attempts END,
513-
metadata = CASE WHEN cast(@metadata_do_update AS boolean) THEN json(cast(@metadata AS blob)) ELSE metadata END,
513+
metadata = CASE WHEN cast(@metadata_do_update AS boolean) THEN jsonb(@metadata) ELSE metadata END,
514514
state = CASE WHEN cast(@state_do_update AS boolean) THEN @state ELSE state END
515515
WHERE id = @id
516516
RETURNING *;

0 commit comments

Comments
 (0)