diff --git a/examples/src/dzi/double.tsx b/examples/src/dzi/double.tsx index 19bb3eb..f6d90c4 100644 --- a/examples/src/dzi/double.tsx +++ b/examples/src/dzi/double.tsx @@ -32,8 +32,19 @@ const exampleSettings: DziRenderSettings = { view: Box2D.create([0, 0], [1, 1]), }, }; - +/** + * HEY!!! + * this is an example React Component for rendering two DZI images which share a camera. + * Additionally, both images have an SVG overlay. + * This example is as bare-bones as possible! It is NOT the recommended way to do anything, it's just trying to show + * one way of: + * 1. using our rendering utilities for DZI data, specifically in a react component. Your needs for state-management, + * SVG overlays, etc may all be different! + * + */ export function TwoClientsPOC() { + // the DZI renderer expects a "relative" camera - that means a box, from 0 to 1. 0 is the bottom or left of the image, + // and 1 is the top or right of the image, regardless of the aspect ratio of that image. const [view, setView] = useState(Box2D.create([0, 0], [1, 1])); const zoom = (e: React.WheelEvent) => { const scale = e.deltaY > 0 ? 1.1 : 0.9; diff --git a/examples/src/omezarr/app.tsx b/examples/src/omezarr/app.tsx index 4a93ecb..333dd2f 100644 --- a/examples/src/omezarr/app.tsx +++ b/examples/src/omezarr/app.tsx @@ -7,7 +7,15 @@ const demo_versa = 'https://neuroglancer-vis-prototype.s3.amazonaws.com/VERSA/sc export function AppUi() { return ; } - +/** + * HEY!!! + * this is an example React Component for rendering a single slice of an OMEZARR image in a react component + * This example is as bare-bones as possible! It is NOT the recommended way to do anything, it's just trying to show + * one way of: + * 1. using our rendering utilities for OmeZarr data, specifically in a react component. Your needs for state-management, + * slicing logic, etc might all be different! 
+ * + */ function DataPlease() { // load our canned data for now: const [omezarr, setfile] = useState(undefined); diff --git a/examples/src/omezarr/sliceview.tsx b/examples/src/omezarr/sliceview.tsx index de31c28..c662665 100644 --- a/examples/src/omezarr/sliceview.tsx +++ b/examples/src/omezarr/sliceview.tsx @@ -15,6 +15,8 @@ type Props = { }; const settings: RenderSettings = { tileSize: 256, + // in a "real" app, you'd most likely expose sliders to control how the data in the file + // gets mapped to pixel/color intensity on the screen. for now, we just use hardcoded data gamut: { R: { gamut: { min: 0, max: 80 }, index: 0 }, G: { gamut: { min: 0, max: 100 }, index: 1 }, @@ -23,10 +25,17 @@ const settings: RenderSettings = { plane: 'xy', planeIndex: 3, camera: { + // the omezarr renderer expects a box in whatever space is given by the omezarr file itself in its + // axes metadata - for example, millimeters. if you load a volume that says it's 30mm X 30mm X 10mm, + // and you want to view XY slices and have them fit perfectly on your screen, then a box + // like [0,0],[30,30] would be appropriate! view: Box2D.create([0, 0], [250, 120]), screenSize: [500, 500], }, }; +// this example uses the RenderServer utility - this lets you render to canvas elements without having to +// initialize WebGL on that canvas itself, at a small cost to performance. the compose function is the configurable +// step used to get the pixels from WebGL to the target canvas. 
function compose(ctx: CanvasRenderingContext2D, image: ImageData) { ctx.putImageData(image, 0, 0); } @@ -50,7 +59,7 @@ export function SliceView(props: Props) { useEffect(() => { if (server && renderer.current && cnvs.current && omezarr) { - const hey: RenderFrameFn = (target, cache, callback) => { + const renderFn: RenderFrameFn = (target, cache, callback) => { if (renderer.current) { return renderer.current( omezarr, @@ -63,17 +72,18 @@ export function SliceView(props: Props) { return null; }; server.beginRendering( - hey, + renderFn, + // here's where we handle lifecycle events in that rendering function (its async and slow because it may have to fetch data from far away) (e) => { switch (e.status) { case 'begin': server.regl?.clear({ framebuffer: e.target, color: [0, 0, 0, 0], depth: 1 }); break; case 'progress': - // wanna see the tiles as they arrive? e.server.copyToClient(compose); break; case 'finished': { + // the bare minimum event handling would be this: copy webGL's work to the target canvas using the compose function e.server.copyToClient(compose); } } diff --git a/packages/dzi/package.json b/packages/dzi/package.json index 62baf91..e7281dd 100644 --- a/packages/dzi/package.json +++ b/packages/dzi/package.json @@ -1,6 +1,6 @@ { "name": "@alleninstitute/vis-dzi", - "version": "0.0.5", + "version": "0.0.6", "contributors": [ { "name": "Lane Sawyer", diff --git a/packages/dzi/src/loader.ts b/packages/dzi/src/loader.ts index 995730a..46bd883 100644 --- a/packages/dzi/src/loader.ts +++ b/packages/dzi/src/loader.ts @@ -30,7 +30,7 @@ export type DziTile = { function tileUrl(dzi: DziImage, level: number, tile: TileIndex): string { return `${dzi.imagesUrl}${level.toFixed(0)}/${tile.col.toFixed(0)}_${tile.row.toFixed(0)}.${dzi.format}`; } -// some quick notes on this deepzoom image format: +// some quick notes on this deep zoom image format: // 1. image / tile names are given by {column}_{row}.{format} // 2. 
a layer (which may contain multiple tiles) is a folder // 2.1 that folder contains all the tiles for that layer. diff --git a/packages/dzi/src/renderer.ts b/packages/dzi/src/renderer.ts index 1937ad0..5e0d393 100644 --- a/packages/dzi/src/renderer.ts +++ b/packages/dzi/src/renderer.ts @@ -11,7 +11,13 @@ import { buildTileRenderer } from './tile-renderer'; export type RenderSettings = { camera: { + /** + * a region of a dzi image, expressed as a relative parameter (eg. [0,0],[1,1] means the whole image) + */ view: box2D; + /** + * the resolution of the output screen on which to project the region of source pixels given by view + */ screenSize: vec2; }; }; @@ -19,6 +25,12 @@ export type RenderSettings = { type GpuProps = { pixels: CachedTexture; }; +/** + * + * @param regl a valid REGL context (https://github.com/regl-project/regl) + * @returns an object which can fetch tiles from a DeepZoomImage, determine the visibility of those tiles given a simple camera, and render said tiles + * using regl (which uses webGL) + */ export function buildDziRenderer(regl: REGL.Regl): Renderer { const renderCmd = buildTileRenderer(regl, { enable: false }); const fetchDziTile = ( @@ -68,7 +80,13 @@ export function buildDziRenderer(regl: REGL.Regl): Renderer { expect(visible[0].bounds).toEqual(Box2D.create([0, 0], [x, y])); }); }); + describe('sizeInUnits', () => { + it('respects scale transformations', () => { + const pyramid = exampleOmeZarr.multiscales[0]; + const { axes, datasets } = pyramid; + + const layer9xy = sizeInUnits('xy', axes, datasets[9]); + const layer0xy = sizeInUnits('xy', axes, datasets[0]); + + const layer9yz = sizeInUnits('yz', axes, datasets[9]); + const layer0yz = sizeInUnits('yz', axes, datasets[0]); + // we're looking at the highest resolution and lowest resolution layers. 
+ // I think in an ideal world, we'd expect each layer to end up having an exactly equal size, + // however I think that isn't happening here for floating-point reasons - so the small differences are acceptable. + expect(layer9xy).toEqual([13.9776, 10.3936]); + expect(layer0xy).toEqual([13.9993, 10.4993]); + // note the Y coordinate (last above, first below) is as expected: + expect(layer9yz).toEqual([10.3936, 14.200000000000001]); + expect(layer0yz).toEqual([10.4993, 14.200000000000001]); + }); + }); }); diff --git a/packages/omezarr/src/sliceview/loader.ts b/packages/omezarr/src/sliceview/loader.ts index 3112815..1256d71 100644 --- a/packages/omezarr/src/sliceview/loader.ts +++ b/packages/omezarr/src/sliceview/loader.ts @@ -8,7 +8,7 @@ export type VoxelTile = { plane: AxisAlignedPlane; // the plane in which the tile sits realBounds: box2D; // in the space given by the axis descriptions of the omezarr dataset bounds: box2D; // in voxels, in the plane - planeIndex: number; // the index of this slice along the axis being sliced (orthoganal to plane) + planeIndex: number; // the index of this slice along the axis being sliced (orthogonal to plane) layerIndex: number; // the index in the resolution pyramid of the omezarr dataset }; @@ -67,6 +67,19 @@ function getVisibleTilesInLayer( }); return visibleTiles; } +/** + * get tiles of the omezarr image which are visible (intersect with @param camera.view). + * @param camera an object describing the current view: the region of the omezarr, and the resolution at which it + * will be displayed. + * @param plane the plane (eg. 'xy') from which to draw tiles + * @param planeIndex the index of the plane along the orthogonal axis (if plane is xy, then the planes are slices along the Z axis) + * note that not all ome-zarr LOD layers can be expected to have the same number of slices! an index which exists at a high LOD may not + * exist at a low LOD. 
+ * @param dataset the omezarr image to pull tiles from + * @param tileSize the size of the tiles, in pixels. it is recommended to use a size that agrees with the chunking used in the dataset, however, + * other utilities in this library will stitch together chunks to satisfy the requested tile size. + * @returns an array of objects representing tiles (bounding information, etc) which are visible from the given dataset. + */ export function getVisibleTiles( camera: { view: box2D; @@ -98,7 +111,15 @@ export function getVisibleTiles( } return getVisibleTilesInLayer(camera, plane, planeIndex, dataset, tileSize, layerIndex); } - +/** + * a function which returns a promise of float32 data from the requested region of an omezarr dataset. + * Note that omezarr decoding can be slow - consider wrapping this function in a web-worker (or a pool of them) + * to improve performance (note also that the webworker message passing will need to itself be wrapped in promises) + * @param metadata an omezarr object + * @param r a slice request @see getSlice + * @param layerIndex an index into the LOD pyramid of the given ZarrDataset. + * @returns the requested voxel information from the given layer of the given dataset. 
+ */ export const defaultDecoder = (metadata: ZarrDataset, r: ZarrRequest, layerIndex: number): Promise => { return getSlice(metadata, r, layerIndex).then((result: { shape: number[]; buffer: Chunk<'float32'> }) => { const { shape, buffer } = result; diff --git a/packages/omezarr/src/sliceview/tile-renderer.ts b/packages/omezarr/src/sliceview/tile-renderer.ts index e23b1a8..b12356d 100644 --- a/packages/omezarr/src/sliceview/tile-renderer.ts +++ b/packages/omezarr/src/sliceview/tile-renderer.ts @@ -7,16 +7,22 @@ import REGL, { type Framebuffer2D } from 'regl'; type Props = { target: Framebuffer2D | null; - tile: vec4; - view: vec4; - Rgamut: vec2; - Ggamut: vec2; - Bgamut: vec2; + tile: vec4; // [minx,miny,maxx,maxy] representing the bounding box of the tile we're rendering + view: vec4; // [minx,miny,maxx,maxy] representing the camera in the same space as the tile's bounding box + Rgamut: vec2; // [min,max] RedOut = (RedChannelValue-Rgamut.min)/(Rgamut.max-Rgamut.min) + Ggamut: vec2; // [min,max] GreenOut = (GreenChannelValue-Ggamut.min)/(Ggamut.max-Ggamut.min) + Bgamut: vec2; // [min,max] BlueOut = (BlueChannelValue-Bgamut.min)/(Bgamut.max-Bgamut.min) R: REGL.Texture2D; G: REGL.Texture2D; B: REGL.Texture2D; }; - +/** + * + * @param regl an active REGL context + * @returns a function (regl command) which renders 3 individual channels as the RGB + * components of an image. Each channel is mapped to the output RGB space via the given Gamut. + * the rendering is done in the given target buffer (or null for the screen). 
+ */ export function buildTileRenderer(regl: REGL.Regl) { const cmd = regl< { diff --git a/packages/omezarr/src/zarr-data.ts b/packages/omezarr/src/zarr-data.ts index 42fae66..a23680a 100644 --- a/packages/omezarr/src/zarr-data.ts +++ b/packages/omezarr/src/zarr-data.ts @@ -51,7 +51,14 @@ async function getRawInfo(store: zarr.FetchStore) { async function mapAsync(arr: ReadonlyArray, fn: (t: T, index: number) => Promise) { return Promise.all(arr.map((v, i) => fn(v, i))); } -// return the mapping from path (aka resolution group???) to the dimensional shape of the data +/** + * + * @param url a url which resolves to an omezarr dataset + * @returns a structure describing the omezarr dataset. See + * https://ngff.openmicroscopy.org/latest/#multiscale-md for the specification. + * The object returned from this function can be passed to most of the other utilities for ome-zarr data + * manipulation. + */ export async function loadMetadata(url: string) { const store = new zarr.FetchStore(url); const root = zarr.root(store); @@ -83,6 +90,13 @@ const sliceDimension = { xz: 'y', yz: 'x', } as const; + +/** + * a simple utility that maps canonical plane names to a more flexible way of dealing with + * planes in a volume + * @param plane a friendly name for a plane in an omezarr volume (eg. 'xy') + * @returns a more flexible mapping for the same information, eg: {u:'x',v:'y'} + */ export function uvForPlane(plane: T) { return uvTable[plane]; } @@ -90,6 +104,18 @@ export function sliceDimensionForPlane(plane: AxisAlignedPlane) { return sliceDimension[plane]; } export type ZarrRequest = Record; +/** + * given a region of a volume to view at a certain output resolution, find the layer in the ome-zarr dataset which + * is most appropriate - that is to say, as close to 1:1 relation between voxels and display pixels as possible. 
+ * @param dataset an object representing an omezarr file - see @function loadMetadata + * @param plane a plane in the volume - the dimensions of this plane will be matched to the displayResolution + * when choosing an appropriate LOD layer + * @param relativeView a region of the selected plane which is the "screen" - the screen has resolution @param displayResolution. + * an example relative view of [0,0],[1,1] would suggest we're trying to view the entire slice at the given resolution. + * @param displayResolution + * @returns an LOD (level-of-detail) layer from the given dataset, that is appropriate for viewing at the given + * displayResolution. + */ export function pickBestScale( dataset: ZarrDataset, plane: { @@ -130,7 +156,18 @@ export function pickBestScale( function indexFor(dim: OmeDimension, axes: readonly AxisDesc[]) { return axes.findIndex((axe) => axe.name === dim); } - +/** + * determine the size of a slice of the volume, in the units specified by the axes metadata + * as described in the ome-zarr spec (https://ngff.openmicroscopy.org/latest/#axes-md) + * NOTE that only scale transformations (https://ngff.openmicroscopy.org/latest/#trafo-md) are supported at present - other types will be ignored. + * @param plane the plane to measure (eg. 'xy') + * @param axes the axes metadata from the omezarr file in question + * @param dataset one of the "datasets" in the omezarr layer pyramid (https://ngff.openmicroscopy.org/latest/#multiscale-md) + * @returns the size, with respect to the coordinateTransformations present on the given dataset, of the requested plane. + * @example imagine a layer that is 29998 voxels wide in the X dimension, and a scale transformation of 0.00035 for that dimension. + * this function would return (29998*0.00035 = 10.4993) for the size of that dimension, which you would interpret to be in whatever unit + * is given by the axes metadata for that dimension (eg. 
millimeters) + */ export function sizeInUnits( plane: | AxisAlignedPlane @@ -158,12 +195,28 @@ export function sizeInUnits( }); return size; } +/** + * get the size in voxels of a layer of an omezarr on a given dimension + * @param dim the dimension to measure + * @param axes the axes metadata for the zarr dataset + * @param dataset an entry in the datasets list in the multiscales list in a ZarrDataset object + * @returns the size, in voxels, of the given dimension of the given layer + * @example (pseudocode of course) return omezarr.multiscales[0].datasets[LAYER].shape[DIMENSION] + */ export function sizeInVoxels(dim: OmeDimension, axes: readonly AxisDesc[], dataset: DatasetWithShape) { const uI = indexFor(dim, axes); if (uI === -1) return undefined; return dataset.shape[uI]; } +/** + * get the size of a plane of a volume (given a specific layer) in voxels + * see @function sizeInVoxels + * @param plane the plane to measure (eg. 'xy') + * @param axes the axes metadata of an omezarr object + * @param dataset a layer of the ome-zarr resolution pyramid + * @returns a vec2 containing the requested sizes, or undefined if the requested plane is malformed, or not present in the dataset + */ export function planeSizeInVoxels( plane: { u: OmeDimension; @@ -215,6 +268,16 @@ export async function explain(z: ZarrDataset) { export function indexOfDimension(axes: readonly AxisDesc[], dim: OmeDimension) { return axes.findIndex((ax) => ax.name === dim); } +/** + * get voxels / pixels from a region of a layer of an omezarr dataset + * @param metadata a zarrDataset from which to request a slice of voxels + * @param r a slice object, describing the requested region of data - note that it is quite possible to request + * data that is not "just" a slice. 
The semantics of this slice object should match up with conventions in numpy or other multidimensional array tools: + * @see https://zarrita.dev/slicing.html + * @param layerIndex an index into the layer pyramid of the ome-zarr dataset. + * @returns the requested chunk of image data from the given layer of the omezarr LOD pyramid. Note that if the given layerIndex is invalid, it will be treated as though it is the highest index possible. + * @throws an error if the request results in anything of lower-or-equal dimensionality than a single value + */ export async function getSlice(metadata: ZarrDataset, r: ZarrRequest, layerIndex: number) { // put the request in native order const root = zarr.root(new zarr.FetchStore(metadata.url)); diff --git a/packages/scatterbrain/package.json b/packages/scatterbrain/package.json index 2b27a38..720ff4c 100644 --- a/packages/scatterbrain/package.json +++ b/packages/scatterbrain/package.json @@ -1,6 +1,6 @@ { "name": "@alleninstitute/vis-scatterbrain", - "version": "0.0.6", + "version": "0.0.7", "contributors": [ { "name": "Lane Sawyer", diff --git a/packages/scatterbrain/src/abstract/types.ts b/packages/scatterbrain/src/abstract/types.ts index fcb3996..6149e08 100644 --- a/packages/scatterbrain/src/abstract/types.ts +++ b/packages/scatterbrain/src/abstract/types.ts @@ -13,14 +13,45 @@ export type CachedVertexBuffer = { export type ReglCacheEntry = CachedTexture | CachedVertexBuffer; export type Renderer> = { + /** + * a function which returns items from the given dataset - this is the place to express spatial indexing + * or any other filtering that may be appropriate + * @param data the dataset to pull items from + * @param settings the settings that determine what items are appropriate + * @returns a list of the requested items, whatever they may be + */ getVisibleItems: (data: Dataset, settings: Settings) => Array; + /** + * fetch raw, expensive-to-load content (an "Item" is a placeholder for that content) + * @param item An 
item to fetch content for + * @param dataset the dataset which owns the given item + * @param settings + * @param signal an AbortSignal that allows the fetching of content to be cancelled + * @returns a map of meaningful names (eg. position, color, amplitude, etc) to functions that promise raw content, like pixels or other raw, renderable information. + * expect that the functions returned in this way have closures over the other arguments to this function - + * that is to say, DONT mutate them (make them Readonly if possible) + */ fetchItemContent: ( item: Item, dataset: Dataset, settings: Settings, signal?: AbortSignal ) => Record Promise>; + /** + * + * @param cacheData the results of fetching all the content for an Item + * @returns true if the content matches the expectations of our rendering function + */ isPrepared: (cacheData: Record) => cacheData is GpuData; + /** + * actually render the content of an item + * @param target REGL framebuffer to render to (null is the canvas to which regl is bound - it is shared and mutable!) + * @param item the item describing the content to render + * @param data the dataset which owns the item + * @param settings the configuration of the current rendering task + * @param gpuData the data as fetched and uploaded to the GPU @see fetchItemContent and validated by @see isPrepared + * @returns void - this function will render (mutate!) the content (pixels!) of the target + */ renderItem: ( target: REGL.Framebuffer2D | null, item: Item, @@ -28,6 +59,21 @@ export type Renderer void; + /** + * compute a unique (but please not random!) 
string that the cache system can use to identify the content + * associated with this {item, settings, data} + * @param item the item we're caching the data for + * @param requestKey a key of gpuData (TODO: make this fact official via Typescript if possible) + * @param data the dataset that owns the given item + * @param settings the configuration of the current rendering task + * @returns a string, suitable for use in a cache + */ cacheKey: (item: Item, requestKey: string, data: Dataset, settings: Settings) => string; + /** + * in some cases, rendering may rely on non-item-specific rendering resources (lookup tables, buffers, etc) + * this function is the place to release those + * @param regl the regl context (the same that was used to create this renderer) + * @returns + */ destroy: (regl: REGL.Regl) => void; };