All checks were successful
CI / test (pull_request) Successful in 9m32s
Root cause of the 500 on create/update/delete: the non-root app user in
the Docker container lacked write permission to the volume mount point.
Docker volume mounts are owned by root by default; the app user (added
in a previous commit) could read the database but not write to it.
Fixes:
1. Dockerfile — RUN mkdir -p /app/data before chown so the directory
exists in the image with correct ownership. Docker uses this as a
seed when initialising a new named volume, ensuring the app user
owns the mount point from the start.
NOTE: existing volumes from before the non-root user was introduced
will still be root-owned. Fix with:
docker run --rm -v catalyst-data:/data alpine chown -R 1000:1000 /data
2. server/routes.js — replace bare `throw e` in POST/PUT catch blocks
with console.error (route context + error) + explicit 500 response.
Add try-catch to DELETE handler which previously had none. Unexpected
DB errors now log the route they came from and return a clean JSON
body instead of relying on the generic Express error handler.
3. server/db.js — wrap the boot init() call in try-catch. Fatal startup
errors (e.g. data directory not writable) now print a clear message
pointing to the cause before exiting, instead of a raw stack trace.
TDD: tests written first (RED), then fixed (GREEN). Six new tests in
tests/api.test.js verify that unexpected DB errors on POST, PUT, and
DELETE return 500 with { error: 'internal server error' } and call
console.error with the route context string.
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
146 lines
7.3 KiB
JavaScript
146 lines
7.3 KiB
JavaScript
import { DatabaseSync } from 'node:sqlite';
|
|
import { mkdirSync } from 'fs';
|
|
import { dirname, join } from 'path';
|
|
import { fileURLToPath } from 'url';
|
|
|
|
// ESM has no built-in __dirname; reconstruct it from import.meta.url.
const __dirname = dirname(fileURLToPath(import.meta.url));
// Default on-disk database location: ../data/catalyst.db relative to this file.
const DEFAULT_PATH = join(__dirname, '../data/catalyst.db');

// Module-wide database handle. Assigned by init() in the boot block at the
// bottom of this file (or by _resetForTest() in tests) before any query runs.
let db;
|
|
|
|
/**
 * Open (or create) the SQLite database at `path` and prepare it for use:
 * applies connection pragmas, creates the schema, and seeds demo rows.
 *
 * @param {string} path - Filesystem path, or ':memory:' for an in-memory DB.
 */
function init(path) {
  const onDisk = path !== ':memory:';
  if (onDisk) {
    // Make sure the parent data directory exists before opening the file.
    mkdirSync(dirname(path), { recursive: true });
  }

  db = new DatabaseSync(path);

  // WAL for concurrent reads, FK enforcement on, NORMAL sync as a
  // durability/throughput trade-off appropriate for WAL mode.
  for (const pragma of ['journal_mode = WAL', 'foreign_keys = ON', 'synchronous = NORMAL']) {
    db.exec(`PRAGMA ${pragma}`);
  }

  createSchema();
  // Only seed persistent databases; in-memory ones are used by tests.
  if (onDisk) seed();
}
|
|
|
|
/**
 * Create the `instances` table and its indexes if they do not already exist.
 * Idempotent: safe to call on every boot.
 */
function createSchema() {
  const ddl = `
    CREATE TABLE IF NOT EXISTS instances (
      id INTEGER PRIMARY KEY AUTOINCREMENT,
      name TEXT NOT NULL CHECK(length(name) BETWEEN 1 AND 100),
      state TEXT NOT NULL DEFAULT 'deployed'
        CHECK(state IN ('deployed','testing','degraded')),
      stack TEXT NOT NULL DEFAULT 'development'
        CHECK(stack IN ('production','development')),
      vmid INTEGER NOT NULL UNIQUE CHECK(vmid > 0),
      atlas INTEGER NOT NULL DEFAULT 0 CHECK(atlas IN (0,1)),
      argus INTEGER NOT NULL DEFAULT 0 CHECK(argus IN (0,1)),
      semaphore INTEGER NOT NULL DEFAULT 0 CHECK(semaphore IN (0,1)),
      patchmon INTEGER NOT NULL DEFAULT 0 CHECK(patchmon IN (0,1)),
      tailscale INTEGER NOT NULL DEFAULT 0 CHECK(tailscale IN (0,1)),
      andromeda INTEGER NOT NULL DEFAULT 0 CHECK(andromeda IN (0,1)),
      tailscale_ip TEXT NOT NULL DEFAULT '',
      hardware_acceleration INTEGER NOT NULL DEFAULT 0 CHECK(hardware_acceleration IN (0,1)),
      created_at TEXT NOT NULL DEFAULT (datetime('now')),
      updated_at TEXT NOT NULL DEFAULT (datetime('now'))
    );
    CREATE INDEX IF NOT EXISTS idx_instances_state ON instances(state);
    CREATE INDEX IF NOT EXISTS idx_instances_stack ON instances(stack);
  `;
  db.exec(ddl);
}
|
|
|
|
// Demo rows inserted into an empty on-disk database by seed(). Boolean
// feature flags are stored as 0/1 integers to match the schema CHECKs;
// vmid values are unique per the table's UNIQUE constraint.
const SEED = [
  { name: 'plex', state: 'deployed', stack: 'production', vmid: 117, atlas: 1, argus: 1, semaphore: 0, patchmon: 1, tailscale: 1, andromeda: 0, tailscale_ip: '100.64.0.1', hardware_acceleration: 1 },
  { name: 'foldergram', state: 'testing', stack: 'development', vmid: 137, atlas: 0, argus: 0, semaphore: 0, patchmon: 0, tailscale: 0, andromeda: 0, tailscale_ip: '', hardware_acceleration: 0 },
  { name: 'homeassistant', state: 'deployed', stack: 'production', vmid: 102, atlas: 1, argus: 1, semaphore: 1, patchmon: 1, tailscale: 1, andromeda: 0, tailscale_ip: '100.64.0.5', hardware_acceleration: 0 },
  { name: 'gitea', state: 'deployed', stack: 'production', vmid: 110, atlas: 1, argus: 0, semaphore: 1, patchmon: 1, tailscale: 1, andromeda: 0, tailscale_ip: '100.64.0.8', hardware_acceleration: 0 },
  { name: 'postgres-primary', state: 'degraded', stack: 'production', vmid: 201, atlas: 1, argus: 1, semaphore: 0, patchmon: 1, tailscale: 0, andromeda: 1, tailscale_ip: '', hardware_acceleration: 0 },
  { name: 'nextcloud', state: 'testing', stack: 'development', vmid: 144, atlas: 0, argus: 0, semaphore: 0, patchmon: 0, tailscale: 1, andromeda: 0, tailscale_ip: '100.64.0.12', hardware_acceleration: 0 },
  { name: 'traefik', state: 'deployed', stack: 'production', vmid: 100, atlas: 1, argus: 1, semaphore: 0, patchmon: 1, tailscale: 1, andromeda: 0, tailscale_ip: '100.64.0.2', hardware_acceleration: 0 },
  { name: 'monitoring-stack', state: 'testing', stack: 'development', vmid: 155, atlas: 0, argus: 0, semaphore: 1, patchmon: 0, tailscale: 0, andromeda: 0, tailscale_ip: '', hardware_acceleration: 0 },
];
|
|
|
|
/**
 * Insert the SEED rows into an empty `instances` table.
 * No-op when the table already contains rows.
 *
 * Fix: the original ran BEGIN … COMMIT with no error path, so a failing
 * insert (e.g. a CHECK or UNIQUE violation) left the connection stuck
 * inside an open transaction. The transaction is now rolled back before
 * the error is rethrown to the caller (init / the boot try-catch).
 *
 * @throws Propagates any SQLite error after rolling back.
 */
function seed() {
  const count = db.prepare('SELECT COUNT(*) as n FROM instances').get().n;
  if (count > 0) return;

  const insert = db.prepare(`
    INSERT INTO instances
      (name, state, stack, vmid, atlas, argus, semaphore, patchmon,
       tailscale, andromeda, tailscale_ip, hardware_acceleration)
    VALUES
      (@name, @state, @stack, @vmid, @atlas, @argus, @semaphore, @patchmon,
       @tailscale, @andromeda, @tailscale_ip, @hardware_acceleration)
  `);

  // One transaction for all rows: all-or-nothing, and much faster than
  // eight autocommit inserts.
  db.exec('BEGIN');
  try {
    for (const s of SEED) insert.run(s);
    db.exec('COMMIT');
  } catch (e) {
    db.exec('ROLLBACK');
    throw e;
  }
}
|
|
|
|
// ── Queries ───────────────────────────────────────────────────────────────────
|
|
|
|
/**
 * List instances, optionally filtered, ordered by name.
 *
 * @param {{search?: string, state?: string, stack?: string}} [filters]
 *   - search: substring match against name, vmid (as text), or stack
 *   - state / stack: exact match
 * @returns {object[]} Matching rows.
 */
export function getInstances(filters = {}) {
  const { search, state, stack } = filters;
  const clauses = ['SELECT * FROM instances WHERE 1=1'];
  const params = {};

  if (search) {
    clauses.push('AND (name LIKE @search OR CAST(vmid AS TEXT) LIKE @search OR stack LIKE @search)');
    params.search = `%${search}%`;
  }
  if (state) {
    clauses.push('AND state = @state');
    params.state = state;
  }
  if (stack) {
    clauses.push('AND stack = @stack');
    params.stack = stack;
  }
  clauses.push('ORDER BY name ASC');

  const sql = clauses.join(' ');
  return db.prepare(sql).all(params);
}
|
|
|
|
/**
 * Fetch a single instance by its vmid.
 *
 * @param {number} vmid
 * @returns {object|null} The row, or null when no instance matches.
 */
export function getInstance(vmid) {
  const row = db.prepare('SELECT * FROM instances WHERE vmid = ?').get(vmid);
  if (row === undefined) return null;
  return row;
}
|
|
|
|
/**
 * List the distinct non-empty stack names currently in use, sorted.
 *
 * @returns {string[]}
 */
export function getDistinctStacks() {
  const rows = db
    .prepare(`SELECT DISTINCT stack FROM instances WHERE stack != '' ORDER BY stack`)
    .all();
  return rows.map((row) => row.stack);
}
|
|
|
|
// ── Mutations ─────────────────────────────────────────────────────────────────
|
|
|
|
/**
 * Insert a new instance row.
 *
 * @param {object} data - Named fields matching the INSERT column list;
 *   schema CHECK/UNIQUE constraints are enforced by SQLite and will throw.
 * @returns {object} The statement result (lastInsertRowid, changes).
 */
export function createInstance(data) {
  const sql = `
    INSERT INTO instances
      (name, state, stack, vmid, atlas, argus, semaphore, patchmon,
       tailscale, andromeda, tailscale_ip, hardware_acceleration)
    VALUES
      (@name, @state, @stack, @vmid, @atlas, @argus, @semaphore, @patchmon,
       @tailscale, @andromeda, @tailscale_ip, @hardware_acceleration)
  `;
  return db.prepare(sql).run(data);
}
|
|
|
|
/**
 * Update the instance currently identified by `vmid`.
 *
 * `data.vmid` may differ from the lookup `vmid` (renumbering): the row is
 * matched on the old vmid and written with the new one (@newVmid), and
 * updated_at is refreshed.
 *
 * @param {number} vmid - Current vmid of the row to update.
 * @param {object} data - Full replacement field set (including new vmid).
 * @returns {object} The statement result (changes = 0 when no row matched).
 */
export function updateInstance(vmid, data) {
  const sql = `
    UPDATE instances SET
      name=@name, state=@state, stack=@stack, vmid=@newVmid,
      atlas=@atlas, argus=@argus, semaphore=@semaphore, patchmon=@patchmon,
      tailscale=@tailscale, andromeda=@andromeda, tailscale_ip=@tailscale_ip,
      hardware_acceleration=@hardware_acceleration, updated_at=datetime('now')
    WHERE vmid=@vmid
  `;
  const params = { ...data, newVmid: data.vmid, vmid };
  return db.prepare(sql).run(params);
}
|
|
|
|
/**
 * Delete the instance with the given vmid.
 *
 * @param {number} vmid
 * @returns {object} The statement result (changes = 0 when no row matched).
 */
export function deleteInstance(vmid) {
  const stmt = db.prepare('DELETE FROM instances WHERE vmid = ?');
  return stmt.run(vmid);
}
|
|
|
|
// ── Test helpers ──────────────────────────────────────────────────────────────
|
|
|
|
/**
 * Test helper: close any open database and reinitialise against a fresh
 * in-memory database (schema created, no seed data).
 */
export function _resetForTest() {
  db?.close();
  init(':memory:');
}
|
|
|
|
// ── Boot ──────────────────────────────────────────────────────────────────────
|
|
|
|
// Open the database at import time so route modules can use the exported
// query functions immediately. DB_PATH is overridable for deployments/tests.
const DB_PATH = process.env.DB_PATH ?? DEFAULT_PATH;
try {
  init(DB_PATH);
} catch (e) {
  // Fatal: most likely the data directory is missing or not writable by the
  // server process (e.g. a root-owned Docker volume mount). Print an
  // actionable message before the raw error, then exit non-zero.
  console.error('[catalyst] fatal: could not open database at', DB_PATH);
  console.error('[catalyst] ensure the data directory exists and is writable by the server process.');
  console.error(e);
  process.exit(1);
}
|