Files & Blobs
Chunked browser Blob and stream storage using conventional files and file_parts tables.
Use plain s.bytes() when the binary value is small and should always load with its row.
For larger files, use the chunked storage described below.
Add the conventional tables
db.createFileFromBlob, db.createFileFromStream, db.loadFileAsBlob, and
db.loadFileAsStream expect these exact table and column names on app:
// Conventional tables the file helpers expect — exact table and column names required.
file_parts: s.table({
// Raw bytes of one chunk; one row per chunk of a stored file.
data: s.bytes(),
}),
files: s.table({
// Optional display name; createFileFromBlob(...) fills it from File.name when available.
name: s.string().optional(),
mimeType: s.string(),
// Ordered chunk row ids — order defines the byte order of the reassembled file.
partIds: s.array(s.ref("file_parts")),
// Byte length of each chunk, in the same order as partIds.
partSizes: s.array(s.int()),
}),
// Example app-owned table holding the reference to a stored file.
uploads: s.table({
owner_id: s.string(),
label: s.string(),
fileId: s.ref("files"),
}),

file_parts.data stores the raw chunk bytes. files.partIds keeps the ordered chunk ids, and
files.partSizes stores each chunk's byte length in the same order.
In the example above, uploads.fileId is your app-owned reference to a stored file. files.name
is optional metadata; createFileFromBlob(...) fills it from File.name when available.
Add permissions
// uploads owns access; files and file_parts inherit read/delete through the references.
export const fileBlobPermissions = s.definePermissions(app, ({ policy, allowedTo, session }) => {
// The signed-in user can read and fully manage their own upload rows.
policy.uploads.allowRead.where({ owner_id: session.user_id });
policy.uploads.allowInsert.where({ owner_id: session.user_id });
policy.uploads.allowUpdate.where({ owner_id: session.user_id });
policy.uploads.allowDelete.where({ owner_id: session.user_id });
// Files are created before the parent upload row exists, so inserts are direct for now.
policy.files.allowInsert.where({});
policy.file_parts.allowInsert.where({});
// Read access flows uploads -> files (via fileId) -> file_parts (via partIds).
policy.files.allowRead.where(allowedTo.readReferencing(policy.uploads, "fileId"));
policy.file_parts.allowRead.where(allowedTo.readReferencing(policy.files, "partIds"));
// Delete access inherits along the same reference chain.
policy.files.allowDelete.where(allowedTo.deleteReferencing(policy.uploads, "fileId"));
policy.file_parts.allowDelete.where(allowedTo.deleteReferencing(policy.files, "partIds"));
});

uploads owns access. files inherit read and delete access from uploads.fileId, and
file_parts inherit from files.partIds.
Insert permissions are direct for now
File parts and files are created before the parent upload row exists, so insert inheritance does not naturally apply yet. Currently, grant direct insert access to the clients that may upload, or perform uploads in a trusted backend context.
Files are write-once, so files and file_parts normally do not need update
policies.
Create from a blob
/**
 * Stores `blob` as a chunked file, then inserts the app-owned `uploads` row
 * that references it. Waits for the insert at the edge durability tier.
 */
export async function createUploadFromBlob(db: Db, blob: Blob | File) {
// Create the files/file_parts rows first; the upload row references the result.
const file = await db.createFileFromBlob(app, blob, { tier: "edge" });
return db
.insert(app.uploads, {
owner_id: EXAMPLE_OWNER_ID,
label: "Profile photo",
fileId: file.id,
})
.wait({ tier: "edge" });
}

pub async fn create_file_from_bytes(
client: &JazzClient,
data: &[u8],
// Optional file name stored on the `files` row when present.
name: Option<&str>,
mime_type: &str,
) -> jazz_tools::Result<ObjectId> {
// Ordered chunk ids and byte lengths; order defines reassembly order.
let mut part_ids = Vec::new();
let mut part_sizes = Vec::new();
for chunk in data.chunks(CHUNK_SIZE) {
// One file_parts row per CHUNK_SIZE-byte slice (last chunk may be shorter).
let (part_id, _) = client
.create(
"file_parts",
jazz_tools::row_input!("data" => chunk.to_vec()),
)
.await?;
part_ids.push(Value::Uuid(part_id));
// NOTE(review): `as i32` would truncate for chunks over i32::MAX bytes;
// safe while CHUNK_SIZE stays well below that — confirm.
part_sizes.push(Value::Integer(chunk.len() as i32));
}
let mut file_values = jazz_tools::row_input!(
"mimeType" => mime_type,
"partIds" => part_ids,
"partSizes" => part_sizes,
);
// `name` is optional metadata; only set the column when a name was given.
if let Some(name) = name {
file_values.insert("name".to_string(), name.into());
}
let (file_id, _) = client.create("files", file_values).await?;
Ok(file_id)
}

pub async fn create_upload_from_bytes(
client: &JazzClient,
data: &[u8],
owner_id: &str,
) -> jazz_tools::Result<ObjectId> {
// Store the chunked file first; this example hard-codes the name and MIME type.
let file_id = create_file_from_bytes(client, data, Some("photo.jpg"), "image/jpeg").await?;
// Then insert the app-owned uploads row that references the stored file.
let (upload_id, _) = client
.create(
"uploads",
jazz_tools::row_input!(
"owner_id" => owner_id,
"label" => "Profile photo",
"fileId" => file_id,
),
)
.await?;
Ok(upload_id)
}

Returns the file row; store its id on your own table.
Create from a stream
/**
 * Stores the stream's bytes as a chunked file, then inserts the app-owned
 * `uploads` row that references it. Waits for the insert at the edge tier.
 */
export async function createUploadFromStream(db: Db, stream: ReadableStream<Uint8Array>) {
// Name and MIME type are supplied explicitly — there is no File object to read them from.
const file = await db.createFileFromStream(app, stream, {
tier: "edge",
name: "camera.raw",
mimeType: "application/octet-stream",
});
return db
.insert(app.uploads, {
owner_id: EXAMPLE_OWNER_ID,
label: "Camera import",
fileId: file.id,
})
.wait({ tier: "edge" });
}

Rust does not have a separate stream helper. Read your stream into chunks and create
file_parts rows as they arrive, using the same approach shown in Create from a
blob above.
Load as a blob
/**
 * Loads the file referenced by an uploads row and reassembles it as a Blob.
 * Returns null when the uploads row does not exist.
 */
export async function loadUploadBlob(db: Db, uploadId: string) {
const upload = await db.one(app.uploads.where({ id: uploadId }), { tier: "edge" });
if (!upload) {
return null;
}
// The helper follows files.partIds and concatenates the chunks for us.
const blob = await db.loadFileAsBlob(app, upload.fileId, { tier: "edge" });
return blob;
}

pub async fn load_file_bytes(
client: &JazzClient,
upload_id: ObjectId,
) -> jazz_tools::Result<Option<Vec<u8>>> {
// Resolve the uploads row to its file reference.
let uploads = client
.query(
QueryBuilder::new("uploads")
.select(&["fileId"])
.filter_eq("_id", Value::Uuid(upload_id))
.build(),
Some(DurabilityTier::EdgeServer),
)
.await?;
// No such upload: report "not found" rather than an error.
let Some((_, row)) = uploads.first() else {
return Ok(None);
};
let Value::Uuid(file_id) = &row[0] else {
return Ok(None);
};
// Fetch the ordered chunk id list from the files row.
let files = client
.query(
QueryBuilder::new("files")
.select(&["partIds"])
.filter_eq("_id", Value::Uuid(*file_id))
.build(),
Some(DurabilityTier::EdgeServer),
)
.await?;
let Some((_, row)) = files.first() else {
return Ok(None);
};
let Value::Array(part_ids) = &row[0] else {
return Ok(None);
};
// Fetch chunks one by one, in partIds order, and concatenate.
let mut data = Vec::new();
for part_ref in part_ids {
// NOTE(review): non-UUID entries are skipped silently — confirm that is intended.
let Value::Uuid(part_id) = part_ref else {
continue;
};
let parts = client
.query(
QueryBuilder::new("file_parts")
.select(&["data"])
.filter_eq("_id", Value::Uuid(*part_id))
.build(),
Some(DurabilityTier::EdgeServer),
)
.await?;
// NOTE(review): a part row that is absent or non-Bytea is skipped rather than
// treated as an error, which would yield truncated bytes — verify against the
// "load fails at the missing part" behavior described for the tier semantics.
if let Some((_, row)) = parts.first()
&& let Value::Bytea(chunk) = &row[0]
{
data.extend_from_slice(chunk);
}
}
Ok(Some(data))
}

Load as a stream
/**
 * Loads the file referenced by an uploads row as a ReadableStream.
 * Returns null when the uploads row does not exist.
 */
export async function loadUploadStream(db: Db, uploadId: string) {
const upload = await db.one(app.uploads.where({ id: uploadId }), { tier: "edge" });
if (!upload) {
return null;
}
// Streams chunks as they arrive instead of buffering the whole file.
const stream = await db.loadFileAsStream(app, upload.fileId, { tier: "edge" });
return stream;
}

To stream chunks rather than collecting all bytes, walk files.partIds and query each
file_parts row individually, yielding chunks as they arrive instead of concatenating
into a single Vec<u8>.
Both approaches fetch each chunk sequentially rather than eager-loading every part in one query.
Durability and incomplete local data
The file helpers forward normal Jazz durability options:
- TypeScript: pass tier to createFileFromBlob(...)/createFileFromStream(...), or query options like { tier: "edge" } to loadFileAsBlob(...)/loadFileAsStream(...).
- Rust: pass Some(DurabilityTier::EdgeServer) or Some(DurabilityTier::GlobalServer) to client.query(...).
If the requested tier does not have the full file yet, the load fails when it reaches the missing
part. Use edge or global when you need the read to wait for a more complete remote snapshot
than the local store currently has.
No automatic cascade yet
Until file cascade delete lands, delete the chunk rows and the files row before deleting your
parent app row so the inherited delete policies still match:
/**
 * Deletes an upload and its stored file without cascade support: chunks first,
 * then the files row, then the parent uploads row, so the inherited delete
 * policies still match at each step.
 */
export async function deleteUploadWithFile(db: Db, uploadId: string) {
const upload = await db.one(app.uploads.where({ id: uploadId }), { tier: "edge" });
if (!upload) {
return;
}
const file = await db.one(app.files.where({ id: upload.fileId }), { tier: "edge" });
if (file) {
// Delete chunks and the file while the parent upload row still exists.
for (const partId of file.partIds) {
db.delete(app.file_parts, partId);
}
db.delete(app.files, file.id);
}
// Finally remove the app-owned row itself.
db.delete(app.uploads, uploadId);
}

pub async fn delete_upload_with_file(
client: &JazzClient,
upload_id: ObjectId,
) -> jazz_tools::Result<()> {
// Resolve the uploads row to its file reference.
let uploads = client
.query(
QueryBuilder::new("uploads")
.select(&["fileId"])
.filter_eq("_id", Value::Uuid(upload_id))
.build(),
Some(DurabilityTier::EdgeServer),
)
.await?;
// Nothing to delete if the upload row is gone already.
let Some((_, row)) = uploads.first() else {
return Ok(());
};
let Value::Uuid(file_id) = &row[0] else {
return Ok(());
};
// Fetch the chunk id list so the chunks can be removed first.
let files = client
.query(
QueryBuilder::new("files")
.select(&["partIds"])
.filter_eq("_id", Value::Uuid(*file_id))
.build(),
Some(DurabilityTier::EdgeServer),
)
.await?;
if let Some((file_row_id, row)) = files.first() {
if let Value::Array(part_ids) = &row[0] {
// Delete chunks while the parent file row still exists.
for part_ref in part_ids {
if let Value::Uuid(part_id) = part_ref {
client.delete(*part_id).await?;
}
}
}
// Then the files row, while the uploads row still grants inherited delete.
client.delete(*file_row_id).await?;
}
// Finally the app-owned uploads row itself.
client.delete(upload_id).await?;
Ok(())
}

Example app
See the file upload example for a complete React app with image upload, rendering, and deletion.