diff --git a/brainchop-mainthread.js b/brainchop-mainthread.js
index 14906a0..dcab2b4 100644
--- a/brainchop-mainthread.js
+++ b/brainchop-mainthread.js
@@ -1,664 +1,25 @@
 import * as tf from '@tensorflow/tfjs'
-import { BWLabeler } from './bwlabels.js'
 import { inferenceModelsList } from './brainchop-parameters.js'
-export { runInference }
-
-async function getModelNumParameters(modelObj) {
-  let numParameters = 0
-  for (let layerIdx = 0; layerIdx < modelObj.layers.length; layerIdx++) {
-    numParameters += modelObj.layers[layerIdx].countParams()
-  }
-  return numParameters
-}
-
-async function getModelNumLayers(modelObj) {
-  return modelObj.layers.length
-}
-
-async function isModelChnlLast(modelObj) {
-  for (let layerIdx = 0; layerIdx < modelObj.layers.length; layerIdx++) {
-    if (modelObj.layersByDepth[layerIdx][0].dataFormat) {
-      return modelObj.layersByDepth[layerIdx][0].dataFormat === 'channelsLast'
-    }
-  }
-}
-
-async function load_model(modelUrl) {
-  console.log('main thread load_model', modelUrl)
-  return await tf.loadLayersModel(modelUrl)
-}
-
-async function getAllSlicesDataAsTF3D(num_of_slices, niftiHeader, niftiImage) {
-  // Get nifti dimensions
-  const cols = niftiHeader.dims[1] // Slice width
-  const rows = niftiHeader.dims[2] // Slice height
-  let typedData
-  if (niftiHeader.datatypeCode === 2) {
-    // enum from nvimage/utils DT_UINT8 = 2
-    typedData = new Uint8Array(niftiImage)
-  } else if (niftiHeader.datatypeCode === 4) {
-    // DT_INT16 = 4
-    typedData = new Int16Array(niftiImage)
-  } else if (niftiHeader.datatypeCode === 8) {
-    // DT_INT32 = 8
-    typedData = new Int32Array(niftiImage)
-  } else if (niftiHeader.datatypeCode === 16) {
-    // DT_FLOAT32 = 16
-    typedData = new Float32Array(niftiImage)
-  } else if (niftiHeader.datatypeCode === 64) {
-    // DT_FLOAT64 = 64
-    typedData = new Float64Array(niftiImage)
-  } else if (niftiHeader.datatypeCode === 256) {
-    // DT_INT8 = 256
-    typedData = new Int8Array(niftiImage)
-  } else if (niftiHeader.datatypeCode === 512) {
-    // DT_UINT16 = 512
-    typedData = new Uint16Array(niftiImage)
-  } else if (niftiHeader.datatypeCode === 768) {
-    // DT_UINT32 = 768
-    typedData = new Uint32Array(niftiImage)
-  } else {
-    return
-  }
-  const allSlices_2D = []
-  let offset3D = 0
-  // Draw pixels
-  for (let slice = 0; slice < num_of_slices; slice++) {
-    const slice = new Array(rows * cols)
-    let offset2D = 0
-    for (let row = 0; row < rows; row++) {
-      for (let col = 0; col < cols; col++) {
-        const value = typedData[offset3D++]
-        // Create 1Dim Array of pixel value, this 1 dim represents one channel
-        slice[offset2D++] = value & 0xff
-      }
-    }
-    allSlices_2D.push(tf.tensor(slice, [rows, cols])) // slice_height, slice_width
-  }
-  const allSlices_3D = tf.stack(allSlices_2D)
-  tf.dispose(allSlices_2D)
-  return allSlices_3D
-}
-
-async function calculateQuantiles(tensor, lowerQuantile = 0.01, upperQuantile = 0.99) {
-  // Flatten the tensor
-  const flatTensor = tensor.flatten()
-
-  // Convert the flattened tensor to an array to sort it
-  const flatArray = await flatTensor.array()
-  flatArray.sort((a, b) => a - b) // Sort the array in ascending order
-
-  // Convert the sorted array back to a tensor
-  const sortedTensor = tf.tensor1d(flatArray)
-
-  // Calculate the indices for the quantiles
-  const numElements = sortedTensor.shape[0]
-  const lowIndex = Math.floor(numElements * lowerQuantile)
-  const highIndex = Math.ceil(numElements * upperQuantile) - 1 // Subtract 1 because indices are 0-based
-
-  // Slice the sorted tensor to get qmin and qmax
-  const qmin = sortedTensor.slice(lowIndex, 1) // Get the value at the low index
-  const qmax = sortedTensor.slice(highIndex, 1) // Get the value at the high index
-
-  // Get the actual values from the tensors
-  const qminValue = (await qmin.array())[0]
-  const qmaxValue = (await qmax.array())[0]
-
-  // Clean up tensors to free memory
-  flatTensor.dispose()
-  sortedTensor.dispose()
-  qmin.dispose()
-  qmax.dispose()
-
-  return { qmin: qminValue, qmax: qmaxValue }
-}
-
-async function quantileNormalizeVolumeData(tensor, lowerQuantile = 0.05, upperQuantile = 0.95) {
-  // Call calculateQuantiles and wait for the result
-  const { qmin, qmax } = await calculateQuantiles(tensor, lowerQuantile, upperQuantile)
-
-  // Convert qmin and qmax back to scalars
-  const qminScalar = tf.scalar(qmin)
-  const qmaxScalar = tf.scalar(qmax)
-
-  // Perform the operation: (tensor - qmin) / (qmax - qmin)
-  const resultTensor = tensor.sub(qminScalar).div(qmaxScalar.sub(qminScalar))
-
-  // Dispose of the created scalars to free memory
-  qminScalar.dispose()
-  qmaxScalar.dispose()
-
-  // Return the resulting tensor
-  return resultTensor
-}
-
-async function minMaxNormalizeVolumeData(volumeData) {
-  // Normalize the data to the range 0 - 1 using min-max scaling
-  const volumeData_Max = volumeData.max()
-  const volumeData_Min = volumeData.min()
-  const normalizedSlices_3d = await volumeData.sub(volumeData_Min).div(volumeData_Max.sub(volumeData_Min))
-  return normalizedSlices_3d
-}
-
-async function inferenceFullVolumeSeqCovLayer(
-  _model,
-  _slices_3d,
-  _input_shape,
-  _isChannelLast,
-  _num_of_slices,
-  _slice_height,
-  _slice_width
-) {
-  window.alert('inferenceFullVolumeSeqCovLayer() is not dead code?')
-}
-
-async function inferenceFullVolume(
-  _model,
-  _slices_3d,
-  _input_shape,
-  _isChannelLast,
-  _num_of_slices,
-  _slice_height,
-  _slice_width
-) {
-  window.alert('inferenceFullVolume() is not dead code?')
-}
-
-async function inferenceSubVolumes(
-  _model,
-  _slices_3d,
-  _num_of_slices,
-  _slice_height,
-  _slice_width,
-  _pipeline1_out = null
-) {
-  window.alert('inferenceSubVolumes() is not dead code?')
-}
-
-async function tensor2LightBuffer(_tensor, _dtype) {
-  window.alert('tensor2LightBuffer() is not dead code?')
-}
-
-async function draw3dObjBoundingVolume(_unstackOutVolumeTensor) {
-  window.alert('draw3dObjBoundingVolume() is not dead code?')
-}
-
-async function argMaxLarge(
-  _outVolumeBuffer,
-  _num_of_slices,
-  _slice_height,
-  _slice_width,
-  _numOfClasses,
-  _dtype = 'float32'
-) {
-  window.alert('argMaxLarge() is not dead code?')
-}
-
-async function addZeroPaddingTo3dTensor(tensor3d, rowPadArr = [1, 1], colPadArr = [1, 1], depthPadArr = [1, 1]) {
-  if (tensor3d.rank !== 3) {
-    throw new Error('Tensor must be 3D')
-  }
-  return tensor3d.pad([rowPadArr, colPadArr, depthPadArr])
-}
-
-async function removeZeroPaddingFrom3dTensor(tensor3d, rowPad = 1, colPad = 1, depthPad = 1) {
-  if (tensor3d.rank !== 3) {
-    throw new Error('Tensor must be 3D')
-  }
-  const [h, w, d] = tensor3d.shape
-  return tensor3d.slice([rowPad, colPad, depthPad], [h - 2 * rowPad, w - 2 * colPad, d - 2 * depthPad])
-}
-
-async function resizeWithZeroPadding(croppedTensor3d, newDepth, newHeight, newWidth, refVoxel, boundVolSizeArr) {
-  const row_pad_befor = refVoxel[0]
-  const col_pad_befor = refVoxel[1]
-  const depth_pad_befor = refVoxel[2]
-  // last and lower volume voxel
-  const row_max = row_pad_befor + boundVolSizeArr[0] - 1 // size [2, 2, 2] means 2 voxels total in each dim
-  const col_max = col_pad_befor + boundVolSizeArr[1] - 1
-  const depth_max = depth_pad_befor + boundVolSizeArr[2] - 1
-
-  const row_pad_after = newHeight - row_max - 1 > 0 ? newHeight - row_max - 1 : 0
-  const col_pad_after = newWidth - col_max - 1 > 0 ? newWidth - col_max - 1 : 0
-  const depth_pad_after = newDepth - depth_max - 1 > 0 ? newDepth - depth_max - 1 : 0
-
-  return croppedTensor3d.pad([
-    [row_pad_befor, row_pad_after],
-    [col_pad_befor, col_pad_after],
-    [depth_pad_befor, depth_pad_after]
-  ])
-}
-
-async function applyMriThreshold(tensor, percentage) {
-  // Perform asynchronous operations outside of tf.tidy
-  const maxTensor = tensor.max()
-  const thresholdTensor = maxTensor.mul(percentage)
-  const threshold = await thresholdTensor.data() // Extracts the threshold value
-
-  // Dispose tensors not needed anymore
-  maxTensor.dispose()
-  thresholdTensor.dispose()
-
-  // Use tf.tidy for synchronous operations
-  return tf.tidy(() => {
-    const dataForProcessing = tensor.clone()
-
-    // Thresholding (assuming background has very low values compared to the head)
-    const mask = dataForProcessing.greater(threshold[0])
-    // -- const denoisedMriData = dataForProcessing.mul(mask);
-
-    // No need to manually dispose dataForProcessing and mask, as tf.tidy() will dispose them auto.
-    return mask
-  })
-
-  // -- return denoisedMriData;
-}
-
-async function binarizeVolumeDataTensor(volumeDataTensor) {
-  const alpha = 0
-  // element-wise: (x > 0 ? 1 : alpha * x ); e.g. Tenosr [0, 0.9, 0.8, -3] => Tensor [0, 1, 1, 0]
-  return volumeDataTensor.step(alpha)
-}
-
-async function generateBrainMask(
-  unstackOutVolumeTensor,
-  num_of_slices,
-  slice_height,
-  slice_width,
-  modelEntry,
-  opts,
-  callbackUI,
-  callbackImg,
-  isFinalImage = true
-) {
-  if (unstackOutVolumeTensor[0].dtype !== 'int32') {
-    callbackUI('', -1, 'generateBrainMask assumes int32')
-  }
-  if (modelEntry.preModelPostProcess) {
-    callbackUI('', -1, 'generateBrainMask assumes BWLabeler instead of preModelPostProcess')
-  }
-  const numSlices = unstackOutVolumeTensor.length
-  const numPixels2D = unstackOutVolumeTensor[0].size
-  const numVox3D = numSlices * numPixels2D
-  // preallocate to reduce heap usage
-  const brainOut = new Int32Array(numVox3D)
-  let offset = 0
-  for (let i = 0; i < numSlices; i++) {
-    brainOut.set(unstackOutVolumeTensor[i].dataSync(), offset)
-    offset += numPixels2D
-  }
-  for (let i = 0; i < numVox3D; i++) {
-    brainOut[i] = brainOut[i] !== 0 ? 1 : 0
-  }
-  if (isFinalImage || opts.showPhase1Output) {
-    // all done
-    callbackImg(brainOut, opts, modelEntry)
-    callbackUI('Segmentation finished', 0)
-  }
-  return tf.tensor(brainOut, [num_of_slices, slice_height, slice_width])
-}
-
-async function convByOutputChannelAndInputSlicing(input, filter, biases, stride, pad, dilationRate, sliceSize) {
-  // const batchSize = input.shape[0]
-  // const depth = input.shape[1]
-  // const height = input.shape[2]
-  // const width = input.shape[3]
-  const inChannels = input.shape[4]
-  const outChannels = filter.shape[4]
-
-  // Create an empty array to hold the output channels
-  let outputChannels = null
-
-  // Slice the input tensor and process one output channel at a time
-  for (let channel = 0; channel < outChannels; channel++) {
-    const numSlices = Math.ceil(inChannels / sliceSize)
-    const biasesSlice = biases.slice([channel], [1])
-    let outputChannel = null
-
-    for (let i = 0; i < numSlices; i++) {
-      const startChannel = i * sliceSize
-      const endChannel = Math.min((i + 1) * sliceSize, inChannels)
-
-      // Only proceed if there are channels to process
-      if (startChannel < inChannels) {
-        const resultSlice = tf.tidy(() => {
-          const inputSlice = input.slice([0, 0, 0, 0, startChannel], [-1, -1, -1, -1, endChannel - startChannel])
-          const filterSlice = filter.slice([0, 0, 0, startChannel, channel], [-1, -1, -1, endChannel - startChannel, 1])
-          // Perform the convolution for the current slice and output channel
-          return tf.conv3d(inputSlice, filterSlice, stride, pad, 'NDHWC', dilationRate)
-        })
-
-        if (outputChannel === null) {
-          outputChannel = resultSlice
-        } else {
-          const updatedOutputChannel = outputChannel.add(resultSlice)
-          outputChannel.dispose()
-          resultSlice.dispose()
-          outputChannel = updatedOutputChannel
-        }
-      }
-    }
-
-    // Add the biases to the accumulated convolutions for this channel
-    const biasedOutputChannel = outputChannel.add(biasesSlice)
-    outputChannel.dispose()
-    biasesSlice.dispose()
-
-    // Accumulate the channel to the output array
-    if (outputChannels == null) {
-      outputChannels = biasedOutputChannel
-    } else {
-      const updatedOutputChannels = await tf.concat([outputChannels, biasedOutputChannel], 4)
-      biasedOutputChannel.dispose()
-      outputChannels.dispose()
-      outputChannels = updatedOutputChannels
-    }
-  }
-
-  return outputChannels
-}
-
-function processTensorInChunks(inputTensor, filterWeights, chunkSize) {
-  // Assuming inputTensor's shape: [batch, depth, height, width, inChannels]
-  // and filterWeights's shape: [filterDepth, filterHeight, filterWidth, inChannels, outChannels]
-  const stride = 1
-  const pad = 0
-  const dilationRate = 1
-  const inChannels = inputTensor.shape[4]
-  const numSlices = Math.ceil(inChannels / chunkSize)
-
-  let accumulatedResult = null
-
-  for (let i = 0; i < numSlices; i++) {
-    const startChannel = i * chunkSize
-    const endChannel = Math.min((i + 1) * chunkSize, inChannels)
-    const channels = endChannel - startChannel
-
-    const inputSlice = tf.tidy(() => {
-      // Slice the input tensor to get the current chunk
-      return inputTensor.slice([0, 0, 0, 0, startChannel], [-1, -1, -1, -1, channels])
-    })
-
-    const filterSlice = tf.tidy(() => {
-      // Slice the filter weights to match the input tensor's current chunk
-      return filterWeights.slice([0, 0, 0, startChannel, 0], [-1, -1, -1, channels, -1])
-    })
-
-    const resultSlice = tf.conv3d(inputSlice, filterSlice, stride, pad, 'NDHWC', dilationRate)
-    // Clean up the slices to free memory
-    inputSlice.dispose()
-    filterSlice.dispose()
-
-    // Squeeze the result slice to remove dimensions of size 1
-    const squeezedResultSlice = tf.squeeze(resultSlice)
-    resultSlice.dispose() // Dispose of the original resultSlice after squeezing
-
-    if (accumulatedResult === null) {
-      accumulatedResult = squeezedResultSlice
-    } else {
-      // Accumulate the result by adding the new result slice to it
-      const newAccumulatedResult = accumulatedResult.add(squeezedResultSlice)
-
-      // Dispose of the previous accumulatedResult and squeezedResultSlice
-      accumulatedResult.dispose()
-      // Dispose of squeezedResultSlice only if it wasn't assigned to accumulatedResult
-      if (accumulatedResult !== squeezedResultSlice) {
-        squeezedResultSlice.dispose()
-      }
-      // Update accumulatedResult with the new result
-      accumulatedResult = newAccumulatedResult
-    }
-
-    tf.tidy(() => {
-      tf.matMul(tf.zeros([1, 1]), tf.zeros([1, 1]))
-    })
-  }
-
-  return accumulatedResult
-}
-
-class SequentialConvLayer {
-  constructor(model, chunkSize, isChannelLast, callbackUI) {
-    this.model = model
-    this.outChannels = model.outputLayers[0].kernel.shape[4]
-    this.chunkSize = chunkSize
-    this.isChannelLast = isChannelLast
-    this.callbackUI = callbackUI
-  }
-
-  /**
-   * Apply sequential convolution layer
-   * @since 3.0.0
-   * @member SequentialConvLayer
-   * @param {tf.Tensor}  inputTensor  e.g. [ 1, 256, 256, 256, 5 ]
-   * @return {promise}
-   *
-   * convLayer.rank -> 3
-   * typeof(convLayer) -> "object"
-   * convLayer: Object { dataFormat: "channelsLast", dilationRate: Array(3) [ 1, 1, 1 ], inputSpec: Array [ {…} ],
-   *  name: "output", padding: "same", strides: Array(3) [ 1, 1, 1 ], ...}
-   *
-   * weights.shape -> Array(5) [ 1, 1, 1, 5, 3 ]
-   * weights.print()
-   * //=> Tensor
-   *     [[[[[0.146999 , -1.4474995, -2.8961499],
-   *         [1.1067894, 0.6897876 , -0.7573005],
-   *         [-0.38512 , -0.2812168, -0.8637539],
-   *         [0.9341159, -0.0344299, -2.3668685],
-   *         [0.1052373, 1.266812  , 0.6542516 ]]]]]
-   *
-   * biases.shape -> Array [ 3 ]
-   * biases.print()
-   * //=> Tensor
-   *     [-0.7850812, -2.3238883, 2.1639345]
-   *
-   * for idx = 0 -> filterWeights.shape -> Array(5) [ 1, 1, 1, 5, 1 ]
-   * filterWeights.print()
-   * //=> Tensor
-   *     [[[[[0.146999 ],
-   *         [1.1067894],
-   *         [-0.38512 ],
-   *         [0.9341159],
-   *         [0.1052373]]]]]
-   *
-   * for idx = 0 -> filterBiases.shape -> Array [1]
-   * filterBiases.print()
-   * //=> Tensor
-   *     [-0.7850812]
-
-   */
-
-  async apply(inputTensor) {
-    const oldDeleteTextureThreshold = tf.ENV.get('WEBGL_DELETE_TEXTURE_THRESHOLD')
-    tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', 0)
-    // eslint-disable-next-line @typescript-eslint/no-this-alias
-    const self = this
-    // Important to avoid "undefined" class var members inside the timer.
-    // "this" has another meaning inside the timer.
-
-    // document.getElementById("progressBarChild").parentElement.style.visibility = "visible";
-
-    return new Promise((resolve) => {
-      const startTime = performance.now()
-
-      const convLayer = self.model.layers[self.model.layers.length - 1]
-      const weights = convLayer.getWeights()[0] //
-      const biases = convLayer.getWeights()[1]
-      const outputShape = self.isChannelLast ? inputTensor.shape.slice(1, -1) : inputTensor.shape.slice(2)
-      // -- e.g. outputShape : [256,256,256] or cropped Dim
-      // -- if inputTensor [ 1, D, H, W, 50 ], channelLast true -> outputShape : outputShape [D, H, W]
-      // -- if inputTensor [ 1, 50, D, H, W ], channelLast false -> outputShape : outputShape [D, H, W]
-
-      let outB = tf.mul(tf.ones(outputShape), -10000)
-      // -- e.g. outB.shape [256,256,256]
-      let outC = tf.zeros(outputShape)
-      // -- e.g. outC.shape [256,256,256]
-      let chIdx = 0
-
-      // console.log("---------------------------------------------------------");
-      console.log(' channel loop')
-
-      const seqTimer = window.setInterval(async function () {
-        tf.engine().startScope() // Start TensorFlow.js scope
-        /* console.log('=======================')
-        const memoryInfo0 = await tf.memory()
-        console.log(`| Number of Tensors: ${memoryInfo0.numTensors}`)
-        console.log(`| Number of Data Buffers: ${memoryInfo0.numDataBuffers}`) */
-
-        const result = await tf.tidy(() => {
-          const filterWeights = weights.slice([0, 0, 0, 0, chIdx], [-1, -1, -1, -1, 1])
-          // -- e.g. filterWeights.shape [ 1, 1, 1, 5, 1 ]
-          const filterBiases = biases.slice([chIdx], [1])
-          // -- e.g. filterBiases.shape [1] -> Tensor [-0.7850812]
-          const outA = processTensorInChunks(
-            inputTensor,
-            filterWeights,
-            Math.min(self.chunkSize, self.outChannels)
-          ).add(filterBiases)
-          const greater = tf.greater(outA, outB)
-          const newoutB = tf.where(greater, outA, outB)
-          const newoutC = tf.where(greater, tf.fill(outC.shape, chIdx), outC)
-          // Dispose the old tensors before reassigning
-          tf.dispose([outB, outC, filterWeights, filterBiases, outA, greater])
-          // Dummy operation to trigger cleanup
-          tf.tidy(() => tf.matMul(tf.ones([1, 1]), tf.ones([1, 1])))
-          return [newoutC, newoutB]
-        })
-
-        console.log('=======================')
-        self.callbackUI(`Iteration ${chIdx}`, chIdx / self.outChannels)
-        const memoryInfo = await tf.memory()
-        console.log(`Number of Tensors: ${memoryInfo.numTensors}`)
-        console.log(`Number of Data Buffers: ${memoryInfo.numDataBuffers}`)
-        console.log(`Megabytes In Use: ${(memoryInfo.numBytes / 1048576).toFixed(3)} MB`)
-        if (memoryInfo.unreliable) {
-          console.log(`Unreliable: ${memoryInfo.unreliable}`)
-        }
-        // Dispose of previous values before assigning new tensors to outC and outB
-        if (typeof outC !== 'undefined') {
-          outC.dispose()
-        }
-        if (typeof outB !== 'undefined') {
-          outB.dispose()
-        }
-        // Assign the new values to outC and outB
-        outC = tf.keep(result[0])
-        outB = tf.keep(result[1])
-        // // Assign the new values to outC and outB
-        // outC = result[0];
-        // outB = result[1];
-        tf.engine().endScope()
-
-        if (chIdx === self.outChannels - 1) {
-          window.clearInterval(seqTimer)
-          // document.getElementById("progressBarChild").style.width = 0 + "%";
-          tf.dispose(outB)
-          const endTime = performance.now()
-          const executionTime = endTime - startTime
-          console.log(`Execution time for output layer: ${executionTime} milliseconds`)
-          tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', oldDeleteTextureThreshold)
-          resolve(outC)
-        } else {
-          chIdx++
-
-          // the seemingly strange sequence of operations
-          // below prevents tfjs from uncontrolably
-          // grabbing buffers, even when all tensors have
-          // already been disposed
-
-          const outCShape = outC.shape
-          const outCdata = outC.dataSync()
-          const outBShape = outC.shape
-          const outBdata = outB.dataSync()
-          outC.dispose()
-          outB.dispose()
-          // tf.disposeVariables()
-          outC = tf.tensor(outCdata, outCShape)
-          outB = tf.tensor(outBdata, outBShape)
-
-          // document.getElementById("progressBarChild").style.width = (chIdx + 1) * 100 / self.outChannels + "%";
-        }
-
-        // Artificially introduce a pause to allow for garbage collection to catch up
-        await new Promise((resolve) => setTimeout(resolve, 300))
-      }, 0)
-    })
-  }
-} // <<<< End of class
-
-async function generateOutputSlicesV2(
-  img,
-  OutVolumeTensorShape,
-  OutVolumeTensorType,
-  num_of_slices,
-  numSegClasses,
-  slice_height,
-  slice_width,
-  modelEntry,
-  opts,
-  niftiImage
-) {
-  // Convert all slices into 1 Dim array
-  // const allOutputSlices3DContours = []
-  if (opts.isPostProcessEnable) {
-    const BWInstance = new BWLabeler()
-    const dim = new Uint32Array(OutVolumeTensorShape)
-    const conn = 26 // Example connectivity
-    const binarize = true
-    const onlyLargestClusterPerClass = true
-    const [_labelCount, labeledImage] = BWInstance.bwlabel(img, dim, conn, binarize, onlyLargestClusterPerClass)
-    for (let i = 0; i < img.length; i++) {
-      img[i] *= labeledImage[i]
-    }
-  } // if isPostProcessEnable
-  const typedArrayConstructor = {
-    float32: Float32Array,
-    int32: Int32Array
-    // Add other cases as needed for different dtypes
-  }[OutVolumeTensorType]
-  // Create a new TypedArray from img with the same type as outLabelVolume
-  const allOutputSlices3DCC1DimArray = new Uint8Array(img)
-
-  const modelType = modelEntry.type
-
-  // return img
-  switch (modelType) {
-    case 'Brain_Masking': {
-      const brainMask = new Uint8Array(allOutputSlices3DCC1DimArray.length)
-      for (let i = 0; i < allOutputSlices3DCC1DimArray.length; i++) {
-        brainMask[i] = allOutputSlices3DCC1DimArray[i] !== 0 ? 1 : 0
-      }
-      // labelArrayBuffer = createNiftiOutArrayBuffer(rawNiftiData, brainMask);
-      // allOutputSlices3DCC1DimArray = brainMask;
-      // --labelsHistogramMap = null;
-      // maskBrainExtraction = true;
-      return brainMask
-      // break;
-    }
-    case 'Brain_Extraction': {
-      const maskedData = new Uint8Array(allOutputSlices3DCC1DimArray.length)
-      // const brainData = nifti2data(rawNiftiData);
-
-      for (let i = 0; i < allOutputSlices3DCC1DimArray.length; i++) {
-        // Create the mask - 1 where the value is non-zero, 0 where it is zero.
-        const maskValue = allOutputSlices3DCC1DimArray[i] !== 0 ? 1 : 0
-        // Apply the mask to the data - multiply by the mask value.
-        maskedData[i] = niftiImage[i] * maskValue
-      }
-      // labelArrayBuffer = createNiftiOutArrayBuffer(rawNiftiData, maskedData);
-
-      // Update `allOutputSlices3DCC1DimArray` if needed.
-      // allOutputSlices3DCC1DimArray = maskedData;
-
-      // Other operations...
-      // maskBrainExtraction = true;
-      return maskedData
-      // break;
-    }
-  }
-
-  return img
-}
+import {
+  addZeroPaddingTo3dTensor,
+  applyMriThreshold,
+  binarizeVolumeDataTensor,
+  convByOutputChannelAndInputSlicing,
+  draw3dObjBoundingVolume,
+  firstLastNonZero3D,
+  generateBrainMask,
+  generateOutputSlicesV2,
+  getAllSlicesDataAsTF3D,
+  getModelNumLayers,
+  getModelNumParameters,
+  isModelChnlLast,
+  load_model,
+  minMaxNormalizeVolumeData,
+  quantileNormalizeVolumeData,
+  removeZeroPaddingFrom3dTensor,
+  resizeWithZeroPadding,
+  SequentialConvLayer
+} from './tensor-utils.js'
 
 async function inferenceFullVolumeSeqCovLayerPhase2(
   opts,
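Note on the hunk above: the helpers that were duplicated across the main thread and the web worker now live in one shared tensor-utils.js module. A minimal usage sketch of the normalization path those imports enable; the module path and signatures come from the import block above, while prepareVolume itself is a hypothetical wrapper, not a function in this patch:

import { getAllSlicesDataAsTF3D, quantileNormalizeVolumeData, minMaxNormalizeVolumeData } from './tensor-utils.js'

// Build a [slices, height, width] tensor from a parsed NIfTI header/image pair,
// then scale intensities to [0, 1] before inference.
async function prepareVolume(niftiHeader, niftiImage, useQuantiles = true) {
  const slices3d = await getAllSlicesDataAsTF3D(niftiHeader.dims[3], niftiHeader, niftiImage)
  return useQuantiles
    ? quantileNormalizeVolumeData(slices3d, 0.05, 0.95) // robust to bright outlier voxels
    : minMaxNormalizeVolumeData(slices3d)
}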
@@ -709,54 +70,14 @@ async function inferenceFullVolumeSeqCovLayerPhase2(
     mask_3d = await pipeline1_out.greater([0]).asType('bool')
     // -- pipeline1_out.dispose();
   }
-  console.log(' mask_3d shape : ', mask_3d.shape)
-
-  const coords = await tf.whereAsync(mask_3d)
-  // -- Get each voxel coords (x, y, z)
-
+  const [row_min, row_max, col_min, col_max, depth_min, depth_max] = await firstLastNonZero3D(mask_3d)
   mask_3d.dispose()
-
-  const coordsArr = coords.arraySync()
-
-  let row_min = slice_height
-  let row_max = 0
-  let col_min = slice_width
-  let col_max = 0
-  let depth_min = num_of_slices
-  let depth_max = 0
-
-  for (let i = 0; i < coordsArr.length; i++) {
-    if (row_min > coordsArr[i][0]) {
-      row_min = coordsArr[i][0]
-    } else if (row_max < coordsArr[i][0]) {
-      row_max = coordsArr[i][0]
-    }
-
-    if (col_min > coordsArr[i][1]) {
-      col_min = coordsArr[i][1]
-    } else if (col_max < coordsArr[i][1]) {
-      col_max = coordsArr[i][1]
-    }
-
-    if (depth_min > coordsArr[i][2]) {
-      depth_min = coordsArr[i][2]
-    } else if (depth_max < coordsArr[i][2]) {
-      depth_max = coordsArr[i][2]
-    }
-  }
-
-  console.log('row min and max :', row_min, row_max)
-  console.log('col min and max :', col_min, col_max)
-  console.log('depth min and max :', depth_min, depth_max)
-
   // -- Reference voxel that cropped volume started slice with it
   const refVoxel = [row_min, col_min, depth_min]
   // -- Starting form refVoxel, size of bounding volume
   const boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1]
-
-  coords.dispose()
-
   // -- Extract 3d object (e.g. brain)
   const cropped_slices_3d = await slices_3d.slice(
     [row_min, col_min, depth_min],
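The body of firstLastNonZero3D lives in tensor-utils.js and is not shown in this patch; from the call site above it returns [row_min, row_max, col_min, col_max, depth_min, depth_max] for a boolean 3D mask, replacing the O(voxels) whereAsync coordinate dump with a bounding-box scan. A sketch consistent with that contract (an assumption, not necessarily the shipped implementation) collapses the mask into three axis profiles:

// Hypothetical sketch of the helper imported from tensor-utils.js.
async function firstLastNonZero3D(mask3D) {
  // Collapse the other two axes: profile[i] > 0 iff any voxel at index i is set.
  const [rowProfile, colProfile, depthProfile] = tf.tidy(() => {
    const asInt = mask3D.toInt()
    return [asInt.max([1, 2]).dataSync(), asInt.max([0, 2]).dataSync(), asInt.max([0, 1]).dataSync()]
  })
  // First/last nonzero entry of a 1D profile.
  const firstLast = (p) => {
    let lo = 0
    let hi = p.length - 1
    while (lo < p.length && p[lo] === 0) lo++
    while (hi >= 0 && p[hi] === 0) hi--
    return [lo, hi]
  }
  const [row_min, row_max] = firstLast(rowProfile)
  const [col_min, col_max] = firstLast(colProfile)
  const [depth_min, depth_max] = firstLast(depthProfile)
  return [row_min, row_max, col_min, col_max, depth_min, depth_max]
}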
@@ -780,7 +101,7 @@ async function inferenceFullVolumeSeqCovLayerPhase2(
     testVol = await resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr)
     console.log(' outLabelVolume final shape after resizing : ', testVol.shape)
 
-    draw3dObjBoundingVolume(tf.unstack(testVol))
+    draw3dObjBoundingVolume(tf.unstack(testVol), opts, modelEntry, callbackImg)
     testVol.dispose()
 
     return 0
@@ -865,10 +186,6 @@ async function inferenceFullVolumeSeqCovLayerPhase2(
 
   const curTensor = []
   curTensor[0] = await cropped_slices_3d_w_pad.reshape(adjusted_input_shape)
-  // console.log("curTensor[0] :", curTensor[0].dataSync());
-
-  // let curProgBar = parseInt(document.getElementById("progressBar").style.width);
-
   const timer = window.setInterval(async function () {
     try {
       if (res.layers[i].activation.getClassName() !== 'linear') {
@@ -924,7 +241,7 @@ async function inferenceFullVolumeSeqCovLayerPhase2(
             // the larger it is, the more memory it uses
             // it was 8, but I set it to 3, got a different error
             // let seqConvLayer = new SequentialConvLayer(res, 10, isChannelLast);
-            const seqConvLayer = await new SequentialConvLayer(res, 10, isChannelLast, callbackUI)
+            const seqConvLayer = await new SequentialConvLayer(res, 10, isChannelLast, callbackUI, false)
             // Apply the last output tensor to the seq. instance
             const outputTensor = await seqConvLayer.apply(curTensor[i])
             callbackUI('seqConvLayer Done')
@@ -1071,7 +388,6 @@ async function inferenceFullVolumeSeqCovLayerPhase2(
       callbackUI(unreliableReasons, NaN, unreliableReasons)
     }
   }
-  // });
 }
 
 async function inferenceFullVolumePhase2(
@@ -1121,42 +437,8 @@ async function inferenceFullVolumePhase2(
     // -- pipeline1_out.dispose()
   }
   console.log(' mask_3d shape : ', mask_3d.shape)
-  const coords = await tf.whereAsync(mask_3d)
-  // -- Get each voxel coords (x, y, z)
+  const [row_min, row_max, col_min, col_max, depth_min, depth_max] = await firstLastNonZero3D(mask_3d)
   mask_3d.dispose()
-  const coordsArr = coords.arraySync()
-
-  let row_min = slice_height
-  let row_max = 0
-  let col_min = slice_width
-  let col_max = 0
-  let depth_min = num_of_slices
-  let depth_max = 0
-
-  for (let i = 0; i < coordsArr.length; i++) {
-    if (row_min > coordsArr[i][0]) {
-      row_min = coordsArr[i][0]
-    } else if (row_max < coordsArr[i][0]) {
-      row_max = coordsArr[i][0]
-    }
-
-    if (col_min > coordsArr[i][1]) {
-      col_min = coordsArr[i][1]
-    } else if (col_max < coordsArr[i][1]) {
-      col_max = coordsArr[i][1]
-    }
-
-    if (depth_min > coordsArr[i][2]) {
-      depth_min = coordsArr[i][2]
-    } else if (depth_max < coordsArr[i][2]) {
-      depth_max = coordsArr[i][2]
-    }
-  }
-
-  console.log('row min and max :', row_min, row_max)
-  console.log('col min and max :', col_min, col_max)
-  console.log('depth min and max :', depth_min, depth_max)
-
   // -- Reference voxel that cropped volume started slice with it
   const refVoxel = [row_min, col_min, depth_min]
   console.log('refVoxel :', refVoxel)
 
   // -- Starting form refVoxel, size of bounding volume
   const boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1]
 
   console.log('boundVolSizeArr :', boundVolSizeArr)
 
-  coords.dispose()
-
   // -- Extract 3d object (e.g. brain)
   const cropped_slices_3d = slices_3d.slice(
     [row_min, col_min, depth_min],
@@ -1199,8 +479,8 @@ async function inferenceFullVolumePhase2(
     testVol = await resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr)
     console.log(' outLabelVolume final shape after resizing : ', testVol.shape)
 
-    // todo draw3dObjBoundingVolume()
-    draw3dObjBoundingVolume(tf.unstack(testVol))
+    draw3dObjBoundingVolume(tf.unstack(testVol), opts, modelEntry, callbackImg)
+
     testVol.dispose()
 
     return 0
@@ -1325,25 +605,8 @@
           try {
             const argMaxLargeTime = performance.now()
             console.log(' tf.argMax failed .. try argMaxLarge ..')
-            // todo tensor2LightBuffer()
-            const modelOutBuffer = tensor2LightBuffer(
-              curTensor[i].reshape([
-                cropped_slices_3d_w_pad.shape[0],
-                cropped_slices_3d_w_pad.shape[1],
-                cropped_slices_3d_w_pad.shape[2],
-                expected_Num_labels
-              ]),
-              'float16'
-            )
-            // todo argMaxLarge()
-            prediction_argmax = argMaxLarge(
-              modelOutBuffer,
-              cropped_slices_3d_w_pad.shape[0],
-              cropped_slices_3d_w_pad.shape[1],
-              cropped_slices_3d_w_pad.shape[2],
-              expected_Num_labels,
-              'float16'
-            )
+            window.alert('tensor2LightBuffer() is not dead code?')
+            window.alert('argMaxLarge() is not dead code?')
             console.log(
               'argMaxLarge for fullVolume takes : ',
               ((performance.now() - argMaxLargeTime) / 1000).toFixed(4)
@@ -1695,18 +958,8 @@
         try {
           const argMaxLargeTime = performance.now()
           console.log(' tf.argMax failed .. try argMaxLarge ..')
-          const modelOutBuffer = tensor2LightBuffer(
-            curTensor[i].reshape([num_of_slices, slice_height, slice_width, expected_Num_labels]),
-            'float16'
-          )
-          prediction_argmax = argMaxLarge(
-            modelOutBuffer,
-            num_of_slices,
-            slice_height,
-            slice_width,
-            expected_Num_labels,
-            'float16'
-          )
+          window.alert('tensor2LightBuffer() is not dead code?')
+          window.alert('argMaxLarge() is not dead code?')
          console.log(
            'argMaxLarge for fullVolume takes : ',
            ((performance.now() - argMaxLargeTime) / 1000).toFixed(4)
@@ -1884,8 +1137,7 @@ async function inferenceFullVolumePhase1(
       }
     } else {
       // -- In version 3.0.0 this function not used
-      await inferenceSubVolumes(model, slices_3d, num_of_slices, slice_height, slice_width, slices_3d_mask)
-      // inferenceSubVolumes(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask)
+      window.alert('inferenceSubVolumes() is not dead code?')
     }
   }
 }
@@ -1953,7 +1205,7 @@ async function inferenceFullVolumePhase1(
      }
    } else {
      // -- In version 3.0.0 this function not used
-      inferenceSubVolumes(model, slices_3d, num_of_slices, slice_height, slice_width, null)
+      window.alert('inferenceSubVolumes() is not dead code?')
    }
  }
}
@@ -1978,7 +1230,7 @@ async function enableProductionMode(textureF16Flag = true) {
   console.log(tf.getBackend())
 }
 
-async function runInference(opts, modelEntry, niftiHeader, niftiImage, callbackImg, callbackUI) {
+export async function runInference(opts, modelEntry, niftiHeader, niftiImage, callbackImg, callbackUI) {
   const statData = []
   statData.startTime = Date.now() // for common webworker/mainthread do not use performance.now()
   callbackUI('Segmentation started', 0)
@@ -2016,7 +1268,6 @@ async function runInference(opts, modelEntry, niftiHeader, niftiImage, callbackI
     return 0
   }
   let batch_D, batch_H, batch_W
-  let input_shape
   const slice_width = niftiHeader.dims[1]
   const slice_height = niftiHeader.dims[2]
   const num_of_slices = niftiHeader.dims[3]
@@ -2031,8 +1282,6 @@ async function runInference(opts, modelEntry, niftiHeader, niftiImage, callbackI
     batch_D = batchInputShape[1]
     batch_H = batchInputShape[2]
     batch_W = batchInputShape[3]
-
-    input_shape = [batchSize, batch_D, batch_H, batch_W, numOfChan]
   } else {
     console.log('Model Channel First')
     if (isNaN(batchInputShape[1]) || batchInputShape[1] !== 1) {
@@ -2043,8 +1292,8 @@ async function runInference(opts, modelEntry, niftiHeader, niftiImage, callbackI
     batch_D = batchInputShape[2]
     batch_H = batchInputShape[3]
     batch_W = batchInputShape[4]
-    input_shape = [batchSize, numOfChan, batch_D, batch_H, batch_W]
   }
+  // const input_shape = [batchSize, numOfChan, batch_D, batch_H, batch_W]
   // //-- Atlas version check
   // if ( (batch_D > 30) && (batch_H == 256) && (batch_W == 256) ) {
   //   const errTxt = "The subvolume dimension in z-axis shouldn't exceed 30 number of slices for browser limitation"
@@ -2096,26 +1345,10 @@ async function runInference(opts, modelEntry, niftiHeader, niftiImage, callbackI
       if (enableSeqConv) {
         console.log('Seq Convoluton Enabled')
-        await inferenceFullVolumeSeqCovLayer(
-          model,
-          slices_3d,
-          input_shape,
-          isChannelLast,
-          num_of_slices,
-          slice_height,
-          slice_width
-        )
+        window.alert('inferenceFullVolumeSeqCovLayer() is not dead code?')
       } else {
         console.log('Seq Convoluton Disabled')
-        await inferenceFullVolume(
-          model,
-          slices_3d,
-          input_shape,
-          isChannelLast,
-          num_of_slices,
-          slice_height,
-          slice_width
-        )
+        window.alert('inferenceFullVolume() is not dead code?')
      }
    }
  }
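The SequentialConvLayer changes above (the extra constructor argument here, and the web worker variant later in this patch) are easier to follow with the core trick isolated: the final 1x1x1 convolution plus argmax is evaluated one class channel at a time, so only a running best-value volume and best-index volume stay resident instead of one logit volume per class. A stripped-down sketch of that running argmax; runningArgMax and computeClassLogits are illustrative names standing in for the class machinery, not identifiers from this patch:

// Running argmax over class channels: peak memory stays at two volumes.
function runningArgMax(computeClassLogits, outputShape, numClasses) {
  let bestVal = tf.fill(outputShape, -10000) // same sentinel role as outB in the class
  let bestIdx = tf.zeros(outputShape) // same role as outC in the class
  for (let ch = 0; ch < numClasses; ch++) {
    const [newVal, newIdx] = tf.tidy(() => {
      const logits = computeClassLogits(ch) // e.g. conv3d with a single output channel
      const better = tf.greater(logits, bestVal)
      return [tf.where(better, logits, bestVal), tf.where(better, tf.fill(bestIdx.shape, ch), bestIdx)]
    })
    tf.dispose([bestVal, bestIdx])
    bestVal = newVal
    bestIdx = newIdx
  }
  bestVal.dispose()
  return bestIdx // per-voxel class labels
}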
diff --git a/brainchop-parameters.js b/brainchop-parameters.js
index cd226e3..c39c880 100644
--- a/brainchop-parameters.js
+++ b/brainchop-parameters.js
@@ -27,8 +27,6 @@ const inferenceModelsList = [
     type: 'Segmentation',
     path: '/models/model5_gw_ae/model.json',
     modelName: '\u26A1 Tissue GWM (light)',
-    labelsPath: './models/model5_gw_ae/labels.json',
-    colorsPath: './models/model5_gw_ae/colorLUT.json',
     colormapPath: './models/model5_gw_ae/colormap3.json',
     preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. }
     preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output.
@@ -52,8 +50,6 @@ const inferenceModelsList = [
     type: 'Segmentation',
     path: '/models/model20chan3cls/model.json',
     modelName: '\u{1F52A} Tissue GWM (High Acc)',
-    labelsPath: './models/model20chan3cls/labels.json',
-    colorsPath: './models/model20chan3cls/colorLUT.json',
     colormapPath: './models/model20chan3cls/colormap.json',
     preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. }
     preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output.
@@ -78,8 +74,6 @@ const inferenceModelsList = [
     type: 'Segmentation',
     path: '/models/model20chan3cls/model.json',
     modelName: '\u{1F52A} Tissue GWM (High Acc, Low Mem)',
-    labelsPath: './models/model20chan3cls/labels.json',
-    colorsPath: './models/model20chan3cls/colorLUT.json',
     colormapPath: './models/model20chan3cls/colormap.json',
     preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. }
     preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output.
@@ -104,8 +98,6 @@ const inferenceModelsList = [
     type: 'Atlas',
     path: '/models/model30chan18cls/model.json',
     modelName: '\u{1FA93} Subcortical + GWM (High Mem, Fast)',
-    labelsPath: './models/model30chan18cls/labels.json',
-    colorsPath: './models/model30chan18cls/colorLUT.json',
     colormapPath: './models/model30chan18cls/colormap.json',
     preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. }
     preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output.
@@ -130,8 +122,6 @@ const inferenceModelsList = [
     type: 'Atlas',
     path: '/models/model30chan18cls/model.json',
     modelName: '\u{1FA93} Subcortical + GWM (Low Mem, Slow)',
-    labelsPath: './models/model30chan18cls/labels.json',
-    colorsPath: './models/model30chan18cls/colorLUT.json',
     colormapPath: './models/model30chan18cls/colormap.json',
     preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. }
     preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output.
@@ -156,8 +146,6 @@ const inferenceModelsList = [
     type: 'Atlas',
     path: '/models/model18cls/model.json',
     modelName: '\u{1FA93} Subcortical + GWM (Low Mem, Faster)',
-    labelsPath: './models/model18cls/labels.json',
-    colorsPath: './models/model18cls/colorLUT.json',
     colormapPath: './models/model18cls/colormap.json',
     preModelId: null, // model run first e.g. Brain_Extraction { null, 1, 2, .. }
     preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output.
@@ -182,8 +170,6 @@ const inferenceModelsList = [
     type: 'Atlas',
     path: '/models/model30chan18cls/model.json',
     modelName: '\u{1F52A}\u{1FA93} Subcortical + GWM (Failsafe, Less Acc)',
-    labelsPath: './models/model30chan18cls/labels.json',
-    colorsPath: './models/model30chan18cls/colorLUT.json',
     colormapPath: './models/model30chan18cls/colormap.json',
     preModelId: 1, // model run first e.g. Brain_Extraction { null, 1, 2, .. }
     preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output.
@@ -208,8 +194,6 @@ const inferenceModelsList = [
     type: 'Atlas',
     path: '/models/model30chan50cls/model.json',
     modelName: '\u{1F52A} Aparc+Aseg 50 (High Mem, Fast)',
-    labelsPath: './models/model30chan50cls/labels.json',
-    colorsPath: './models/model30chan50cls/colorLUT.json',
     colormapPath: './models/model30chan50cls/colormap.json',
     preModelId: 1, // Model run first e.g. crop the brain { null, 1, 2, .. }
     preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output.
@@ -234,8 +218,6 @@ const inferenceModelsList = [
     type: 'Atlas',
     path: '/models/model30chan50cls/model.json',
     modelName: '\u{1F52A} Aparc+Aseg 50 (Low Mem, Slow)',
-    labelsPath: './models/model30chan50cls/labels.json',
-    colorsPath: './models/model30chan50cls/colorLUT.json',
     colormapPath: './models/model30chan50cls/colormap.json',
     preModelId: 1, // Model run first e.g. crop the brain { null, 1, 2, .. }
     preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output.
@@ -261,8 +243,6 @@ const inferenceModelsList = [
     type: 'Brain_Extraction',
     path: '/models/model5_gw_ae/model.json',
     modelName: '\u26A1 Extract the Brain (FAST)',
-    labelsPath: null,
-    colorsPath: null,
     preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. }
     preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output.
     isBatchOverlapEnable: false, // create extra overlap batches for inference
@@ -285,8 +265,6 @@ const inferenceModelsList = [
     type: 'Brain_Extraction',
     path: '/models/model11_gw_ae/model.json',
     modelName: '\u{1F52A} Extract the Brain (High Acc, Slow)',
-    labelsPath: null,
-    colorsPath: null,
     preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. }
     preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output.
     isBatchOverlapEnable: false, // create extra overlap batches for inference
@@ -310,8 +288,6 @@ const inferenceModelsList = [
     type: 'Brain_Masking',
     path: '/models/model5_gw_ae/model.json',
     modelName: '\u26A1 Brain Mask (FAST)',
-    labelsPath: null,
-    colorsPath: null,
     colormapPath: './models/model5_gw_ae/colormap.json',
     preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. }
     preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output.
@@ -335,8 +311,6 @@ const inferenceModelsList = [
     type: 'Brain_Masking',
     path: '/models/model11_gw_ae/model.json',
     modelName: '\u{1F52A} Brain Mask (High Acc, Low Mem)',
-    labelsPath: null,
-    colorsPath: null,
     preModelId: null, // Model run first e.g. crop the brain { null, 1, 2, .. }
     preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output.
     isBatchOverlapEnable: false, // create extra overlap batches for inference
@@ -360,10 +334,8 @@ const inferenceModelsList = [
     type: 'Atlas',
     path: '/models/model21_104class/model.json',
     modelName: '\u{1F52A} Aparc+Aseg 104 (High Mem, Fast)',
-    labelsPath: './models/model21_104class/labels.json',
-    colorsPath: './models/model21_104class/colorLUT.json',
     colormapPath: './models/model21_104class/colormap.json',
-    preModelId: 1, // model run first e.g. Brain_Extraction { null, 1, 2, .. }
+    preModelId: 0, // model run first e.g. Brain_Extraction { null, 1, 2, .. }
     preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output.
     isBatchOverlapEnable: false, // create extra overlap batches for inference
     numOverlapBatches: 200, // Number of extra overlap batches for inference
@@ -386,10 +358,8 @@ const inferenceModelsList = [
     type: 'Atlas',
     path: '/models/model21_104class/model.json',
     modelName: '\u{1F52A} Aparc+Aseg 104 (Low Mem, Slow)',
-    labelsPath: './models/model21_104class/labels.json',
-    colorsPath: './models/model21_104class/colorLUT.json',
     colormapPath: './models/model21_104class/colormap.json',
-    preModelId: 1, // model run first e.g. Brain_Extraction { null, 1, 2, .. }
+    preModelId: 0, // model run first e.g. Brain_Extraction { null, 1, 2, .. }
     preModelPostProcess: false, // If true, perform postprocessing to remove noisy regions after preModel inference generate output.
     isBatchOverlapEnable: false, // create extra overlap batches for inference
     numOverlapBatches: 200, // Number of extra overlap batches for inference
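Two things change in every entry above: labelsPath/colorsPath are dropped in favor of the single colormapPath, and the two 104-class entries switch from preModelId: 1 to preModelId: 0, which reads as the brain-masking model now being addressed by a 0-based index into inferenceModelsList (an inference from the diff, not stated in it). A minimal consumer sketch under those assumptions; resolveEntry is hypothetical and load of the colormap via fetch is illustrative:

// Resolve a model entry, its optional pre-model, and its colormap.
async function resolveEntry(index) {
  const entry = inferenceModelsList[index]
  const preEntry = entry.preModelId === null ? null : inferenceModelsList[entry.preModelId]
  const colormap = entry.colormapPath ? await (await fetch(entry.colormapPath)).json() : null
  return { entry, preEntry, colormap }
}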
allSlices_3D = tf.stack(allSlices_2D) - tf.dispose(allSlices_2D) - return allSlices_3D -} - -async function calculateQuantiles(tensor, lowerQuantile = 0.01, upperQuantile = 0.99) { - // Flatten the tensor - const flatTensor = tensor.flatten() - - // Convert the flattened tensor to an array to sort it - const flatArray = await flatTensor.array() - flatArray.sort((a, b) => a - b) // Sort the array in ascending order - - // Convert the sorted array back to a tensor - const sortedTensor = tf.tensor1d(flatArray) - - // Calculate the indices for the quantiles - const numElements = sortedTensor.shape[0] - const lowIndex = Math.floor(numElements * lowerQuantile) - const highIndex = Math.ceil(numElements * upperQuantile) - 1 // Subtract 1 because indices are 0-based - - // Slice the sorted tensor to get qmin and qmax - const qmin = sortedTensor.slice(lowIndex, 1) // Get the value at the low index - const qmax = sortedTensor.slice(highIndex, 1) // Get the value at the high index - - // Get the actual values from the tensors - const qminValue = (await qmin.array())[0] - const qmaxValue = (await qmax.array())[0] - - // Clean up tensors to free memory - flatTensor.dispose() - sortedTensor.dispose() - qmin.dispose() - qmax.dispose() - - return { qmin: qminValue, qmax: qmaxValue } -} - -async function quantileNormalizeVolumeData(tensor, lowerQuantile = 0.05, upperQuantile = 0.95) { - // Call calculateQuantiles and wait for the result - const { qmin, qmax } = await calculateQuantiles(tensor, lowerQuantile, upperQuantile) - - // Convert qmin and qmax back to scalars - const qminScalar = tf.scalar(qmin) - const qmaxScalar = tf.scalar(qmax) - - // Perform the operation: (tensor - qmin) / (qmax - qmin) - const resultTensor = tensor.sub(qminScalar).div(qmaxScalar.sub(qminScalar)) - - // Dispose of the created scalars to free memory - qminScalar.dispose() - qmaxScalar.dispose() - - // Return the resulting tensor - return resultTensor -} - -async function minMaxNormalizeVolumeData(volumeData) { - // Normalize the data to the range 0 - 1 using min-max scaling - const volumeData_Max = volumeData.max() - const volumeData_Min = volumeData.min() - const normalizedSlices_3d = await volumeData.sub(volumeData_Min).div(volumeData_Max.sub(volumeData_Min)) - return normalizedSlices_3d -} - -async function inferenceFullVolumeSeqCovLayer( - _model, - _slices_3d, - _input_shape, - _isChannelLast, - _num_of_slices, - _slice_height, - _slice_width -) { - callbackUI('', -1, 'inferenceFullVolumeSeqCovLayer() is not dead code?') -} - -async function inferenceFullVolume( - _model, - _slices_3d, - _input_shape, - _isChannelLast, - _num_of_slices, - _slice_height, - _slice_width -) { - callbackUI('', -1, 'inferenceFullVolume() is not dead code?') -} - -async function inferenceSubVolumes( - _model, - _slices_3d, - _num_of_slices, - _slice_height, - _slice_width, - _pipeline1_out = null -) { - callbackUI('', -1, 'inferenceSubVolumes() is not dead code?') -} - -async function tensor2LightBuffer(_tensor, _dtype) { - callbackUI('', -1, 'tensor2LightBuffer() is not dead code?') -} - -async function argMaxLarge( - _outVolumeBuffer, - _num_of_slices, - _slice_height, - _slice_width, - _numOfClasses, - _dtype = 'float32' -) { - callbackUI('', -1, 'argMaxLarge() is not dead code?') -} - -async function binarizeVolumeDataTensor(volumeDataTensor) { - const alpha = 0 - // element-wise: (x > 0 ? 1 : alpha * x ); e.g. 
Tenosr [0, 0.9, 0.8, -3] => Tensor [0, 1, 1, 0] - return volumeDataTensor.step(alpha) -} - -async function draw3dObjBoundingVolume(unstackOutVolumeTensor, opts, modelEntry) { - const allOutputSlices3DCC = [] - - // dataSync() using to flatten array. Takes around 1.5 s - for (let sliceTensorIdx = 0; sliceTensorIdx < unstackOutVolumeTensor.length; sliceTensorIdx++) { - allOutputSlices3DCC[sliceTensorIdx] = Array.from(unstackOutVolumeTensor[sliceTensorIdx].dataSync()) - } - - // Use this conversion to download output slices as nii file. Takes around 30 ms - // does not use `push` to avoid stack overflows. In future: consider .set() with typed arrays - const allOutputSlices3DCC1DimArray = new Array(allOutputSlices3DCC[0].length * allOutputSlices3DCC.length) - let index = 0 - for (let sliceIdx = 0; sliceIdx < allOutputSlices3DCC.length; sliceIdx++) { - for (let i = 0; i < allOutputSlices3DCC[sliceIdx].length; i++) { - allOutputSlices3DCC1DimArray[index++] = allOutputSlices3DCC[sliceIdx][i] - } - } - console.log('Done with allOutputSlices3DCC1DimArray ') - const brainMaskTensor1d = await binarizeVolumeDataTensor(tf.tensor1d(allOutputSlices3DCC1DimArray)) - const brainOut = Array.from(brainMaskTensor1d.dataSync()) - callbackImg(brainOut, opts, modelEntry) -} - -async function addZeroPaddingTo3dTensor(tensor3d, rowPadArr = [1, 1], colPadArr = [1, 1], depthPadArr = [1, 1]) { - if (tensor3d.rank !== 3) { - throw new Error('Tensor must be 3D') - } - return tensor3d.pad([rowPadArr, colPadArr, depthPadArr]) -} - -async function removeZeroPaddingFrom3dTensor(tensor3d, rowPad = 1, colPad = 1, depthPad = 1) { - if (tensor3d.rank !== 3) { - throw new Error('Tensor must be 3D') - } - const [h, w, d] = tensor3d.shape - return tensor3d.slice([rowPad, colPad, depthPad], [h - 2 * rowPad, w - 2 * colPad, d - 2 * depthPad]) -} - -async function resizeWithZeroPadding(croppedTensor3d, newDepth, newHeight, newWidth, refVoxel, boundVolSizeArr) { - const row_pad_befor = refVoxel[0] - const col_pad_befor = refVoxel[1] - const depth_pad_befor = refVoxel[2] - // last and lower volume voxel - const row_max = row_pad_befor + boundVolSizeArr[0] - 1 // size [2, 2, 2] means 2 voxels total in each dim - const col_max = col_pad_befor + boundVolSizeArr[1] - 1 - const depth_max = depth_pad_befor + boundVolSizeArr[2] - 1 - - const row_pad_after = newHeight - row_max - 1 > 0 ? newHeight - row_max - 1 : 0 - const col_pad_after = newWidth - col_max - 1 > 0 ? newWidth - col_max - 1 : 0 - const depth_pad_after = newDepth - depth_max - 1 > 0 ? newDepth - depth_max - 1 : 0 - - return croppedTensor3d.pad([ - [row_pad_befor, row_pad_after], - [col_pad_befor, col_pad_after], - [depth_pad_befor, depth_pad_after] - ]) -} - -async function applyMriThreshold(tensor, percentage) { - // Perform asynchronous operations outside of tf.tidy - const maxTensor = tensor.max() - const thresholdTensor = maxTensor.mul(percentage) - const threshold = await thresholdTensor.data() // Extracts the threshold value - - // Dispose tensors not needed anymore - maxTensor.dispose() - thresholdTensor.dispose() - - // Use tf.tidy for synchronous operations - return tf.tidy(() => { - const dataForProcessing = tensor.clone() - - // Thresholding (assuming background has very low values compared to the head) - const mask = dataForProcessing.greater(threshold[0]) - // -- const denoisedMriData = dataForProcessing.mul(mask) - - // No need to manually dispose dataForProcessing and mask, as tf.tidy() will dispose them auto. 
- return mask - }) - - // -- return denoisedMriData -} - -async function generateBrainMask( - unstackOutVolumeTensor, - num_of_slices, - slice_height, - slice_width, - modelEntry, - opts, - callbackUI, - callbackImg, - isFinalImage = true -) { - if (unstackOutVolumeTensor[0].dtype !== 'int32') { - callbackUI('', -1, 'generateBrainMask assumes int32') - } - if (modelEntry.preModelPostProcess) { - callbackUI('', -1, 'generateBrainMask assumes BWLabeler instead of preModelPostProcess') - } - const numSlices = unstackOutVolumeTensor.length - const numPixels2D = unstackOutVolumeTensor[0].size - const numVox3D = numSlices * numPixels2D - // preallocate to reduce heap usage - const brainOut = new Int32Array(numVox3D) - let offset = 0 - for (let i = 0; i < numSlices; i++) { - brainOut.set(unstackOutVolumeTensor[i].dataSync(), offset) - offset += numPixels2D - } - for (let i = 0; i < numVox3D; i++) { - brainOut[i] = brainOut[i] !== 0 ? 1 : 0 - } - if (isFinalImage || opts.showPhase1Output) { - // all done - callbackImg(brainOut, opts, modelEntry) - callbackUI('Segmentation finished', 0) - } - return tf.tensor(brainOut, [num_of_slices, slice_height, slice_width]) -} - -async function convByOutputChannelAndInputSlicing(input, filter, biases, stride, pad, dilationRate, sliceSize) { - const inChannels = input.shape[4] - const outChannels = filter.shape[4] - - // Create an empty array to hold the output channels - let outputChannels = null - - // Slice the input tensor and process one output channel at a time - for (let channel = 0; channel < outChannels; channel++) { - const numSlices = Math.ceil(inChannels / sliceSize) - const biasesSlice = biases.slice([channel], [1]) - let outputChannel = null - - for (let i = 0; i < numSlices; i++) { - const startChannel = i * sliceSize - const endChannel = Math.min((i + 1) * sliceSize, inChannels) - - // Only proceed if there are channels to process - if (startChannel < inChannels) { - const resultSlice = tf.tidy(() => { - const inputSlice = input.slice([0, 0, 0, 0, startChannel], [-1, -1, -1, -1, endChannel - startChannel]) - const filterSlice = filter.slice([0, 0, 0, startChannel, channel], [-1, -1, -1, endChannel - startChannel, 1]) - // Perform the convolution for the current slice and output channel - return tf.conv3d(inputSlice, filterSlice, stride, pad, 'NDHWC', dilationRate) - }) - - if (outputChannel === null) { - outputChannel = resultSlice - } else { - const updatedOutputChannel = outputChannel.add(resultSlice) - outputChannel.dispose() - resultSlice.dispose() - outputChannel = updatedOutputChannel - } - } - } - - // Add the biases to the accumulated convolutions for this channel - const biasedOutputChannel = outputChannel.add(biasesSlice) - outputChannel.dispose() - biasesSlice.dispose() - - // Accumulate the channel to the output array - if (outputChannels == null) { - outputChannels = biasedOutputChannel - } else { - const updatedOutputChannels = await tf.concat([outputChannels, biasedOutputChannel], 4) - biasedOutputChannel.dispose() - outputChannels.dispose() - outputChannels = updatedOutputChannels - } - } - - return outputChannels -} - -function processTensorInChunks(inputTensor, filterWeights, chunkSize) { - // Assuming inputTensor's shape: [batch, depth, height, width, inChannels] - // and filterWeights's shape: [filterDepth, filterHeight, filterWidth, inChannels, outChannels] - const stride = 1 - const pad = 0 - const dilationRate = 1 - const inChannels = inputTensor.shape[4] - const numSlices = Math.ceil(inChannels / chunkSize) - - let 
accumulatedResult = null - for (let i = 0; i < numSlices; i++) { - const startChannel = i * chunkSize - const endChannel = Math.min((i + 1) * chunkSize, inChannels) - const channels = endChannel - startChannel - - const inputSlice = tf.tidy(() => { - // Slice the input tensor to get the current chunk - return inputTensor.slice([0, 0, 0, 0, startChannel], [-1, -1, -1, -1, channels]) - }) - const filterSlice = tf.tidy(() => { - // Slice the filter weights to match the input tensor's current chunk - return filterWeights.slice([0, 0, 0, startChannel, 0], [-1, -1, -1, channels, -1]) - }) - - const resultSlice = tf.conv3d(inputSlice, filterSlice, stride, pad, 'NDHWC', dilationRate) - // Clean up the slices to free memory - inputSlice.dispose() - filterSlice.dispose() - - // Squeeze the result slice to remove dimensions of size 1 - const squeezedResultSlice = tf.squeeze(resultSlice) - resultSlice.dispose() // Dispose of the original resultSlice after squeezing - - if (accumulatedResult === null) { - accumulatedResult = squeezedResultSlice - } else { - // Accumulate the result by adding the new result slice to it - const newAccumulatedResult = accumulatedResult.add(squeezedResultSlice) - - // Dispose of the previous accumulatedResult and squeezedResultSlice - accumulatedResult.dispose() - // Dispose of squeezedResultSlice only if it wasn't assigned to accumulatedResult - if (accumulatedResult !== squeezedResultSlice) { - squeezedResultSlice.dispose() - } - // Update accumulatedResult with the new result - accumulatedResult = newAccumulatedResult - } - - tf.tidy(() => { - tf.matMul(tf.zeros([1, 1]), tf.zeros([1, 1])) - }) - } - - return accumulatedResult -} - -class SequentialConvLayer { - constructor(model, chunkSize, isChannelLast) { - this.model = model - this.outChannels = model.outputLayers[0].kernel.shape[4] - this.chunkSize = chunkSize - this.isChannelLast = isChannelLast - } - - /** - * Apply sequential convolution layer - * @since 3.0.0 - * @member SequentialConvLayer - * @param {tf.Tensor} inputTensor e.g. [ 1, 256, 256, 256, 5 ] - * @return {outC} - * - * convLayer.rank -> 3 - * typeof(convLayer) -> "object" - * convLayer: Object { dataFormat: "channelsLast", dilationRate: Array(3) [ 1, 1, 1 ], inputSpec: Array [ {…} ], - * name: "output", padding: "same", strides: Array(3) [ 1, 1, 1 ], ...} - * - * weights.shape -> Array(5) [ 1, 1, 1, 5, 3 ] - * weights.print() - * //=> Tensor - * [[[[[0.146999 , -1.4474995, -2.8961499], - * [1.1067894, 0.6897876 , -0.7573005], - * [-0.38512 , -0.2812168, -0.8637539], - * [0.9341159, -0.0344299, -2.3668685], - * [0.1052373, 1.266812 , 0.6542516 ]]]]] - * - * biases.shape -> Array [ 3 ] - * biases.print() - * //=> Tensor - * [-0.7850812, -2.3238883, 2.1639345] - * - * for idx = 0 -> filterWeights.shape -> Array(5) [ 1, 1, 1, 5, 1 ] - * filterWeights.print() - * //=> Tensor - * [[[[[0.146999 ], - * [1.1067894], - * [-0.38512 ], - * [0.9341159], - * [0.1052373]]]]] - * - * for idx = 0 -> filterBiases.shape -> Array [1] - * filterBiases.print() - * //=> Tensor - * [-0.7850812] - - */ - - async apply(inputTensor) { - const oldDeleteTextureThreshold = tf.ENV.get('WEBGL_DELETE_TEXTURE_THRESHOLD') - tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', 0) - - // eslint-disable-next-line @typescript-eslint/no-this-alias - const self = this - // Important to avoid "undefined" class var members inside the timer. - // "this" has another meaning inside the timer. 
-
-class SequentialConvLayer {
-  constructor(model, chunkSize, isChannelLast) {
-    this.model = model
-    this.outChannels = model.outputLayers[0].kernel.shape[4]
-    this.chunkSize = chunkSize
-    this.isChannelLast = isChannelLast
-  }
-
-  /**
-   * Apply sequential convolution layer
-   * @since 3.0.0
-   * @member SequentialConvLayer
-   * @param {tf.Tensor} inputTensor e.g. [ 1, 256, 256, 256, 5 ]
-   * @return {outC}
-   *
-   * convLayer.rank -> 3
-   * typeof(convLayer) -> "object"
-   * convLayer: Object { dataFormat: "channelsLast", dilationRate: Array(3) [ 1, 1, 1 ], inputSpec: Array [ {…} ],
-   *   name: "output", padding: "same", strides: Array(3) [ 1, 1, 1 ], ...}
-   *
-   * weights.shape -> Array(5) [ 1, 1, 1, 5, 3 ]
-   * weights.print()
-   * //=> Tensor
-   *   [[[[[0.146999 , -1.4474995, -2.8961499],
-   *       [1.1067894, 0.6897876 , -0.7573005],
-   *       [-0.38512 , -0.2812168, -0.8637539],
-   *       [0.9341159, -0.0344299, -2.3668685],
-   *       [0.1052373, 1.266812 , 0.6542516 ]]]]]
-   *
-   * biases.shape -> Array [ 3 ]
-   * biases.print()
-   * //=> Tensor
-   *   [-0.7850812, -2.3238883, 2.1639345]
-   *
-   * for idx = 0 -> filterWeights.shape -> Array(5) [ 1, 1, 1, 5, 1 ]
-   * filterWeights.print()
-   * //=> Tensor
-   *   [[[[[0.146999 ],
-   *       [1.1067894],
-   *       [-0.38512 ],
-   *       [0.9341159],
-   *       [0.1052373]]]]]
-   *
-   * for idx = 0 -> filterBiases.shape -> Array [1]
-   * filterBiases.print()
-   * //=> Tensor
-   *   [-0.7850812]
-   */
-
-  async apply(inputTensor) {
-    const oldDeleteTextureThreshold = tf.ENV.get('WEBGL_DELETE_TEXTURE_THRESHOLD')
-    tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', 0)
-
-    // eslint-disable-next-line @typescript-eslint/no-this-alias
-    const self = this
-    // Important to avoid "undefined" class var members inside the timer.
-    // "this" has another meaning inside the timer.
-
-    // document.getElementById("progressBarChild").parentElement.style.visibility = "visible"
-    const startTime = performance.now()
-
-    const convLayer = self.model.layers[self.model.layers.length - 1]
-    const weights = convLayer.getWeights()[0] //
-    const biases = convLayer.getWeights()[1]
-    const outputShape = self.isChannelLast ? inputTensor.shape.slice(1, -1) : inputTensor.shape.slice(2)
-    // -- e.g. outputShape : [256,256,256] or cropped Dim
-    // -- if inputTensor [ 1, D, H, W, 50 ], channelLast true -> outputShape : outputShape [D, H, W]
-    // -- if inputTensor [ 1, 50, D, H, W ], channelLast false -> outputShape : outputShape [D, H, W]
-
-    let outB = tf.mul(tf.ones(outputShape), -10000)
-    // -- e.g. outB.shape [256,256,256]
-    let outC = tf.zeros(outputShape)
-    // -- e.g. outC.shape [256,256,256]
-    let chIdx = 0
-
-    // console.log("---------------------------------------------------------")
-    console.log(' channel loop')
-
-    while (true) {
-      tf.engine().startScope() // Start TensorFlow.js scope
-      /* console.log('=======================')
-      const memoryInfo0 = await tf.memory()
-      console.log(`| Number of Tensors: ${memoryInfo0.numTensors}`)
-      console.log(`| Number of Data Buffers: ${memoryInfo0.numDataBuffers}`) */
-
-      const result = await tf.tidy(() => {
-        const filterWeights = weights.slice([0, 0, 0, 0, chIdx], [-1, -1, -1, -1, 1])
-        // -- e.g. filterWeights.shape [ 1, 1, 1, 5, 1 ]
-        const filterBiases = biases.slice([chIdx], [1])
-        // -- e.g. filterBiases.shape [1] -> Tensor [-0.7850812]
-        const outA = processTensorInChunks(inputTensor, filterWeights, Math.min(self.chunkSize, self.outChannels)).add(
-          filterBiases
-        )
-        const greater = tf.greater(outA, outB)
-        const newoutB = tf.where(greater, outA, outB)
-        const newoutC = tf.where(greater, tf.fill(outC.shape, chIdx), outC)
-        // Dispose the old tensors before reassigning
-        tf.dispose([outB, outC, filterWeights, filterBiases, outA, greater])
-        // Dummy operation to trigger cleanup
-        tf.tidy(() => tf.matMul(tf.ones([1, 1]), tf.ones([1, 1])))
-        return [newoutC, newoutB]
-      })
-      console.log('=======================')
-      callbackUI(`Iteration ${chIdx}`, chIdx / self.outChannels)
-      const memoryInfo = await tf.memory()
-      console.log(`Number of Tensors: ${memoryInfo.numTensors}`)
-      console.log(`Number of Data Buffers: ${memoryInfo.numDataBuffers}`)
-      console.log(`Megabytes In Use: ${(memoryInfo.numBytes / 1048576).toFixed(3)} MB`)
-      if (memoryInfo.unreliable) {
-        console.log(`Unreliable: ${memoryInfo.unreliable}`)
-      }
-      // Dispose of previous values before assigning new tensors to outC and outB
-      if (typeof outC !== 'undefined') {
-        outC.dispose()
-      }
-      if (typeof outB !== 'undefined') {
-        outB.dispose()
-      }
-      // Assign the new values to outC and outB
-      outC = tf.keep(result[0])
-      outB = tf.keep(result[1])
-      // // Assign the new values to outC and outB
-      // outC = result[0]
-      // outB = result[1]
-      tf.engine().endScope()
-
-      if (chIdx === self.outChannels - 1) {
-        // document.getElementById("progressBarChild").style.width = 0 + "%"
-        tf.dispose(outB)
-        const endTime = performance.now()
-        const executionTime = endTime - startTime
-        console.log(`Execution time for output layer: ${executionTime} milliseconds`)
-        tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', oldDeleteTextureThreshold)
-        return outC
-      } else {
-        chIdx++
-
-        // the seemingly strange sequence of operations
-        // below prevents tfjs from uncontrollably
-        // grabbing buffers, even when all tensors have
-        // already been disposed
-
-        const outCShape = outC.shape
-        const outCdata = outC.dataSync()
-        const outBShape = outB.shape
-        const outBdata = outB.dataSync()
-        outC.dispose()
-        outB.dispose()
-        // tf.disposeVariables()
-        outC = tf.tensor(outCdata, outCShape)
-        outB = tf.tensor(outBdata, outBShape)
-
-        // document.getElementById("progressBarChild").style.width = (chIdx + 1) * 100 / self.outChannels + "%"
-      }
-    }
-  }
-} // <<<< End of class
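`SequentialConvLayer.apply` computes the final conv layer's argmax one output channel at a time, so the full `[D, H, W, numClasses]` logits volume is never materialized; it also rebuilds `outB`/`outC` from `dataSync()` each iteration and pins `WEBGL_DELETE_TEXTURE_THRESHOLD` to zero as WebGL memory workarounds, and reports per-channel progress through `callbackUI` (which is why a later hunk passes `callbackUI` into the constructor). Stripped of those backend workarounds, the streaming-argmax idea reduces to the following sketch (`streamingArgMax` and `scoreForClass` are illustrative stand-ins, not part of the patch):

```js
// Running argmax over channels delivered one at a time: track the best score
// and the channel index that achieved it, voxel-wise.
async function streamingArgMax(scoreForClass, numClasses, shape) {
  let best = tf.fill(shape, -1e9) // running per-voxel maximum
  let bestIdx = tf.zeros(shape) // channel index achieving that maximum
  for (let i = 0; i < numClasses; i++) {
    const score = await scoreForClass(i) // one [D, H, W] tensor of class-i scores
    const [nextBest, nextIdx] = tf.tidy(() => {
      const improved = tf.greater(score, best)
      return [tf.where(improved, score, best), tf.where(improved, tf.fill(shape, i), bestIdx)]
    })
    tf.dispose([best, bestIdx, score])
    best = nextBest
    bestIdx = nextIdx
  }
  best.dispose()
  return bestIdx // per-voxel winning class; only one channel was ever resident
}
```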
-
-async function generateOutputSlicesV2(
-  img,
-  OutVolumeTensorShape,
-  OutVolumeTensorType,
-  num_of_slices,
-  numSegClasses,
-  slice_height,
-  slice_width,
-  modelEntry,
-  opts,
-  niftiImage
-) {
-  // Convert all slices into 1 Dim array
-  if (opts.isPostProcessEnable) {
-    const BWInstance = new BWLabeler()
-    const dim = new Uint32Array(OutVolumeTensorShape)
-    const conn = 26 // Example connectivity
-    const binarize = true
-    const onlyLargestClusterPerClass = true
-    const [_labelCount, labeledImage] = BWInstance.bwlabel(img, dim, conn, binarize, onlyLargestClusterPerClass)
-    for (let i = 0; i < img.length; i++) {
-      img[i] *= labeledImage[i]
-    }
-  } // if isPostProcessEnable
-  const typedArrayConstructor = {
-    float32: Float32Array,
-    int32: Int32Array
-    // Add other cases as needed for different dtypes
-  }[OutVolumeTensorType]
-  // Create a new TypedArray from img with the same type as outLabelVolume
-  const allOutputSlices3DCC1DimArray = new Uint8Array(img)
-
-  const modelType = modelEntry.type
-
-  // return img
-  switch (modelType) {
-    case 'Brain_Masking': {
-      const brainMask = new Uint8Array(allOutputSlices3DCC1DimArray.length)
-      for (let i = 0; i < allOutputSlices3DCC1DimArray.length; i++) {
-        brainMask[i] = allOutputSlices3DCC1DimArray[i] !== 0 ? 1 : 0
-      }
-      // labelArrayBuffer = createNiftiOutArrayBuffer(rawNiftiData, brainMask)
-      // allOutputSlices3DCC1DimArray = brainMask
-      // --labelsHistogramMap = null
-      // maskBrainExtraction = true
-      return brainMask
-      // break
-    }
-    case 'Brain_Extraction': {
-      const maskedData = new Uint8Array(allOutputSlices3DCC1DimArray.length)
-      // const brainData = nifti2data(rawNiftiData)
-
-      for (let i = 0; i < allOutputSlices3DCC1DimArray.length; i++) {
-        // Create the mask - 1 where the value is non-zero, 0 where it is zero.
-        const maskValue = allOutputSlices3DCC1DimArray[i] !== 0 ? 1 : 0
-        // Apply the mask to the data - multiply by the mask value.
-        maskedData[i] = niftiImage[i] * maskValue
-      }
-      // labelArrayBuffer = createNiftiOutArrayBuffer(rawNiftiData, maskedData)
-
-      // Update `allOutputSlices3DCC1DimArray` if needed.
-      // allOutputSlices3DCC1DimArray = maskedData
-
-      // Other operations...
-      // maskBrainExtraction = true
-      return maskedData
-      // break
-    }
-  }
-
-  return img
-}
-
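The removed `generateOutputSlicesV2` applies its `Brain_Extraction` branch by multiplying raw NIfTI intensities with a 0/1 label mask, but it writes the product into a `Uint8Array`, which silently truncates intensities above 255. The masking itself reduces to the sketch below (`applyBrainMask` is illustrative, not part of the patch; `Float32Array` is an assumption about the desired lossless output type):

```js
// Keep the original intensity wherever the predicted label is non-background,
// zero elsewhere. `labels` and `intensities` are equal-length typed arrays.
function applyBrainMask(labels, intensities) {
  const out = new Float32Array(intensities.length)
  for (let i = 0; i < intensities.length; i++) {
    out[i] = labels[i] !== 0 ? intensities[i] : 0
  }
  return out
}
```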
 async function inferenceFullVolumeSeqCovLayerPhase2(
   opts,
   modelEntry,
@@ -734,28 +94,13 @@ async function inferenceFullVolumeSeqCovLayerPhase2(
   }
   console.log(' mask_3d shape : ', mask_3d.shape)
-  const coords = await tf.whereAsync(mask_3d)
-  // -- Get each voxel coords (x, y, z)
-
+  const [row_min, row_max, col_min, col_max, depth_min, depth_max] = await firstLastNonZero3D(mask_3d)
   mask_3d.dispose()
-  const row_min = coords.min(0).arraySync()[0]
-  const row_max = coords.max(0).arraySync()[0]
-  const col_min = coords.min(0).arraySync()[1]
-  const col_max = coords.max(0).arraySync()[1]
-  const depth_min = coords.min(0).arraySync()[2]
-  const depth_max = coords.max(0).arraySync()[2]
-
-  console.log('row min and max :', row_min, row_max)
-  console.log('col min and max :', col_min, col_max)
-  console.log('depth min and max :', depth_min, depth_max)
-
   // -- Reference voxel where the cropped volume's slicing starts
   const refVoxel = [row_min, col_min, depth_min]
   // -- Starting from refVoxel, size of bounding volume
   const boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1]
-  coords.dispose()
-
   // -- Extract 3d object (e.g. brain)
   const cropped_slices_3d = await slices_3d.slice(
     [row_min, col_min, depth_min],
@@ -778,8 +123,7 @@ async function inferenceFullVolumeSeqCovLayerPhase2(
   testVol = await resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr)
   console.log(' outLabelVolume final shape after resizing : ', testVol.shape)
-
-  draw3dObjBoundingVolume(tf.unstack(testVol), opts, modelEntry)
+  draw3dObjBoundingVolume(tf.unstack(testVol), opts, modelEntry, callbackImg)
   testVol.dispose()

   return 0
@@ -914,7 +258,7 @@ async function inferenceFullVolumeSeqCovLayerPhase2(
   // the larger it is, the more memory it uses
   // it was 8, but I set it to 3, got a different error
   // let seqConvLayer = new SequentialConvLayer(res, 10, isChannelLast)
-  const seqConvLayer = await new SequentialConvLayer(res, 10, isChannelLast)
+  const seqConvLayer = await new SequentialConvLayer(res, 10, isChannelLast, callbackUI)
   // Apply the last output tensor to the seq. instance
   let outputTensor = null
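The bounding-box hunk above (and its twin in `inferenceFullVolumePhase2` below) replaces a `tf.whereAsync` coordinate dump with a single call to `firstLastNonZero3D`, avoiding an N×3 coordinate tensor holding every nonzero voxel. The helper's definition lies outside this excerpt; the sketch below is only an assumed-equivalent implementation (the name suffix and the max-reduction strategy are guesses), returning `[row_min, row_max, col_min, col_max, depth_min, depth_max]` as the call sites expect:

```js
// Per-axis bounds of the nonzero voxels of a [rows, cols, depth] mask.
async function firstLastNonZero3DSketch(mask3d) {
  const axisMax = async (axes) => {
    const t = mask3d.max(axes) // 1-D: is any voxel nonzero along these axes?
    const vals = await t.data()
    t.dispose()
    return vals
  }
  const rowHits = await axisMax([1, 2])
  const colHits = await axisMax([0, 2])
  const depthHits = await axisMax([0, 1])
  const bounds = (hits) => {
    let lo = 0
    while (lo < hits.length && hits[lo] === 0) lo++
    let hi = hits.length - 1
    while (hi > 0 && hits[hi] === 0) hi--
    return [lo, hi]
  }
  const [row_min, row_max] = bounds(rowHits)
  const [col_min, col_max] = bounds(colHits)
  const [depth_min, depth_max] = bounds(depthHits)
  return [row_min, row_max, col_min, col_max, depth_min, depth_max]
}
```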
@@ -1114,42 +458,8 @@ async function inferenceFullVolumePhase2(
     // -- pipeline1_out.dispose()
   }
   console.log(' mask_3d shape : ', mask_3d.shape)
-  const coords = await tf.whereAsync(mask_3d)
-  // -- Get each voxel coords (x, y, z)
+  const [row_min, row_max, col_min, col_max, depth_min, depth_max] = await firstLastNonZero3D(mask_3d)
   mask_3d.dispose()
-  const coordsArr = coords.arraySync()
-
-  let row_min = slice_height
-  let row_max = 0
-  let col_min = slice_width
-  let col_max = 0
-  let depth_min = num_of_slices
-  let depth_max = 0
-
-  for (let i = 0; i < coordsArr.length; i++) {
-    if (row_min > coordsArr[i][0]) {
-      row_min = coordsArr[i][0]
-    } else if (row_max < coordsArr[i][0]) {
-      row_max = coordsArr[i][0]
-    }
-
-    if (col_min > coordsArr[i][1]) {
-      col_min = coordsArr[i][1]
-    } else if (col_max < coordsArr[i][1]) {
-      col_max = coordsArr[i][1]
-    }
-
-    if (depth_min > coordsArr[i][2]) {
-      depth_min = coordsArr[i][2]
-    } else if (depth_max < coordsArr[i][2]) {
-      depth_max = coordsArr[i][2]
-    }
-  }
-
-  console.log('row min and max :', row_min, row_max)
-  console.log('col min and max :', col_min, col_max)
-  console.log('depth min and max :', depth_min, depth_max)
-
   // -- Reference voxel where the cropped volume's slicing starts
   const refVoxel = [row_min, col_min, depth_min]
   console.log('refVoxel :', refVoxel)
@@ -1158,9 +468,6 @@ async function inferenceFullVolumePhase2(
   // -- Starting from refVoxel, size of bounding volume
   const boundVolSizeArr = [row_max - row_min + 1, col_max - col_min + 1, depth_max - depth_min + 1]
   console.log('boundVolSizeArr :', boundVolSizeArr)
-
-  coords.dispose()
-
   // -- Extract 3d object (e.g. brain)
   const cropped_slices_3d = slices_3d.slice(
     [row_min, col_min, depth_min],
@@ -1192,8 +499,7 @@ async function inferenceFullVolumePhase2(
   testVol = await resizeWithZeroPadding(testVol, num_of_slices, slice_height, slice_width, refVoxel, boundVolSizeArr)
   console.log(' outLabelVolume final shape after resizing : ', testVol.shape)
-  // todo draw3dObjBoundingVolume()
-  draw3dObjBoundingVolume(tf.unstack(testVol), opts, modelEntry)
+  draw3dObjBoundingVolume(tf.unstack(testVol), opts, modelEntry, callbackImg)
   testVol.dispose()

   return 0
@@ -1321,25 +627,8 @@ async function inferenceFullVolumePhase2(
           try {
             const argMaxLargeTime = performance.now()
             console.log(' tf.argMax failed .. try argMaxLarge ..')
-            // todo tensor2LightBuffer()
-            const modelOutBuffer = tensor2LightBuffer(
-              curTensor[i].reshape([
-                cropped_slices_3d_w_pad.shape[0],
-                cropped_slices_3d_w_pad.shape[1],
-                cropped_slices_3d_w_pad.shape[2],
-                expected_Num_labels
-              ]),
-              'float16'
-            )
-            // todo argMaxLarge()
-            prediction_argmax = argMaxLarge(
-              modelOutBuffer,
-              cropped_slices_3d_w_pad.shape[0],
-              cropped_slices_3d_w_pad.shape[1],
-              cropped_slices_3d_w_pad.shape[2],
-              expected_Num_labels,
-              'float16'
-            )
+            callbackUI('', -1, 'tensor2LightBuffer() is not dead code?')
+            callbackUI('', -1, 'argMaxLarge() is not dead code?')
             console.log(
               'argMaxLarge for fullVolume takes : ',
              ((performance.now() - argMaxLargeTime) / 1000).toFixed(4)
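This hunk and the matching one in `inferenceFullVolumePhase1` below replace the `tensor2LightBuffer`/`argMaxLarge` out-of-memory fallback with `callbackUI` error reports, since those helpers are believed dead. If a real fallback is ever needed again, a depth-slice-wise argmax is one memory-bounded option; the sketch below is a hedged alternative, not what this patch does (`argMaxBySlice` is hypothetical):

```js
// When tf.argMax over a whole [D, H, W, L] logits volume exhausts GPU memory,
// computing it one depth slice at a time caps the peak allocation at a single
// [1, H, W, L] plane.
async function argMaxBySlice(logits4d) {
  const depth = logits4d.shape[0]
  const perSlice = []
  for (let d = 0; d < depth; d++) {
    // slice -> [1, H, W, L]; argMax over the label axis -> [1, H, W]
    perSlice.push(tf.tidy(() => logits4d.slice([d, 0, 0, 0], [1, -1, -1, -1]).argMax(-1)))
  }
  const out = tf.concat(perSlice, 0) // reassemble to [D, H, W]
  tf.dispose(perSlice)
  return out
}
```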
@@ -1680,18 +969,8 @@ async function inferenceFullVolumePhase1(
           try {
             const argMaxLargeTime = performance.now()
             console.log(' tf.argMax failed .. try argMaxLarge ..')
-            const modelOutBuffer = tensor2LightBuffer(
-              curTensor[i].reshape([num_of_slices, slice_height, slice_width, expected_Num_labels]),
-              'float16'
-            )
-            prediction_argmax = argMaxLarge(
-              modelOutBuffer,
-              num_of_slices,
-              slice_height,
-              slice_width,
-              expected_Num_labels,
-              'float16'
-            )
+            callbackUI('', -1, 'tensor2LightBuffer() is not dead code?')
+            callbackUI('', -1, 'argMaxLarge() is not dead code?')
             console.log(
               'argMaxLarge for fullVolume takes : ',
               ((performance.now() - argMaxLargeTime) / 1000).toFixed(4)
@@ -1865,8 +1144,7 @@ async function inferenceFullVolumePhase1(
       }
     } else {
       // -- In version 3.0.0 this function not used
-      await inferenceSubVolumes(model, slices_3d, num_of_slices, slice_height, slice_width, slices_3d_mask)
-      // inferenceSubVolumes(model, slices_3d.transpose(), num_of_slices, slice_height, slice_width, slices_3d_mask)
+      callbackUI('', -1, 'inferenceSubVolumes() is not dead code?')
     }
   }
 }
@@ -1929,7 +1207,7 @@ async function inferenceFullVolumePhase1(
       }
     } else {
       // -- In version 3.0.0 this function not used
-      inferenceSubVolumes(model, slices_3d, num_of_slices, slice_height, slice_width, null)
+      callbackUI('', -1, 'inferenceSubVolumes() is not dead code?')
     }
   }
 }
@@ -1992,7 +1270,6 @@ async function runInferenceWW(opts, modelEntry, niftiHeader, niftiImage) {
     return 0
   }
   let batch_D, batch_H, batch_W
-  let input_shape
   const slice_width = niftiHeader.dims[1]
   const slice_height = niftiHeader.dims[2]
   const num_of_slices = niftiHeader.dims[3]
@@ -2007,8 +1284,6 @@ async function runInferenceWW(opts, modelEntry, niftiHeader, niftiImage) {
     batch_D = batchInputShape[1]
     batch_H = batchInputShape[2]
     batch_W = batchInputShape[3]
-
-    input_shape = [batchSize, batch_D, batch_H, batch_W, numOfChan]
   } else {
     console.log('Model Channel First')
     if (isNaN(batchInputShape[1]) || batchInputShape[1] !== 1) {
@@ -2019,8 +1294,8 @@ async function runInferenceWW(opts, modelEntry, niftiHeader, niftiImage) {
     batch_D = batchInputShape[2]
     batch_H = batchInputShape[3]
     batch_W = batchInputShape[4]
-    input_shape = [batchSize, numOfChan, batch_D, batch_H, batch_W]
   }
+  // const input_shape = [batchSize, numOfChan, batch_D, batch_H, batch_W]
   // --Check whether the model will make inference at once as FullVolumeModel
   let isModelFullVol
   if (batch_D === 256 && batch_H === 256 && batch_W === 256) {
@@ -2064,27 +1339,9 @@ async function runInferenceWW(opts, modelEntry, niftiHeader, niftiImage) {
     const enableSeqConv = modelEntry.enableSeqConv
     if (enableSeqConv) {
-      console.log('Seq Convolution Enabled')
-      await inferenceFullVolumeSeqCovLayer(
-        model,
-        slices_3d,
-        input_shape,
-        isChannelLast,
-        num_of_slices,
-        slice_height,
-        slice_width
-      )
+      callbackUI('', -1, 'inferenceFullVolumeSeqCovLayer() is not dead code?')
     } else {
-      console.log('Seq Convolution Disabled')
-      await inferenceFullVolume(
-        model,
-        slices_3d,
-        input_shape,
-        isChannelLast,
-        num_of_slices,
-        slice_height,
-        slice_width
-      )
+      callbackUI('', -1, 'inferenceFullVolume() is not dead code?')
    }
  }
 }
diff --git a/public/models/GT/labels.json b/public/models/GT/labels.json
deleted file mode 100644
index 35ee231..0000000
--- a/public/models/GT/labels.json
+++ /dev/null
@@ -1 +0,0 @@
-{"0": "background", "1": "Grey Matter", "2": "White Matter"}
diff --git a/public/models/mnm_tfjs_me_test/colorLUT.json b/public/models/mnm_tfjs_me_test/colorLUT.json
deleted file mode 100644
index 6da374a..0000000
--- a/public/models/mnm_tfjs_me_test/colorLUT.json
+++ /dev/null
@@ -1 +0,0 @@
-{"0": "rgb(0,0,0)", "1": "rgb(255,255,255)", "2": 
"rgb(205,62,78)"} \ No newline at end of file diff --git a/public/models/mnm_tfjs_me_test/group1-shard1of1.bin b/public/models/mnm_tfjs_me_test/group1-shard1of1.bin deleted file mode 100644 index 210906a..0000000 Binary files a/public/models/mnm_tfjs_me_test/group1-shard1of1.bin and /dev/null differ diff --git a/public/models/mnm_tfjs_me_test/labels.json b/public/models/mnm_tfjs_me_test/labels.json deleted file mode 100644 index 4885a94..0000000 --- a/public/models/mnm_tfjs_me_test/labels.json +++ /dev/null @@ -1 +0,0 @@ -{"0": "background", "1": "White Matter", "2": "Grey Matter"} diff --git a/public/models/mnm_tfjs_me_test/model.json b/public/models/mnm_tfjs_me_test/model.json deleted file mode 100644 index e288b78..0000000 --- a/public/models/mnm_tfjs_me_test/model.json +++ /dev/null @@ -1 +0,0 @@ -{"format": "layers-model", "generatedBy": "keras v2.4.0", "convertedBy": "TensorFlow.js Converter v3.2.0", "modelTopology": {"keras_version": "2.4.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 38, 38, 38, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "17", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "17", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "18", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "18", "inbound_nodes": [[["17", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "19", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "19", "inbound_nodes": [[["18", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "20", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "20", "inbound_nodes": [[["19", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "21", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "21", "inbound_nodes": [[["20", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": 
[[["21", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "23", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "23", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["23", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "25", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "25", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["25", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "27", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "27", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", "inbound_nodes": [[["27", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "29", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "29", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["29", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 3, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, 
"activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["30", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "17/kernel", "shape": [3, 3, 3, 1, 21], "dtype": "float32"}, {"name": "17/bias", "shape": [21], "dtype": "float32"}, {"name": "19/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "19/bias", "shape": [21], "dtype": "float32"}, {"name": "21/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "21/bias", "shape": [21], "dtype": "float32"}, {"name": "23/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "23/bias", "shape": [21], "dtype": "float32"}, {"name": "25/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "25/bias", "shape": [21], "dtype": "float32"}, {"name": "27/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "27/bias", "shape": [21], "dtype": "float32"}, {"name": "29/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "29/bias", "shape": [21], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 21, 3], "dtype": "float32"}, {"name": "output/bias", "shape": [3], "dtype": "float32"}]}]} \ No newline at end of file diff --git a/public/models/model11_50class/colorLUT.json b/public/models/model11_50class/colorLUT.json deleted file mode 100644 index 99c0f7e..0000000 --- a/public/models/model11_50class/colorLUT.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "0": "rgb(0,0,0)", - "1": "rgb(245,245,245)", - "2": "rgb(196,58,250)", - "3": "rgb(220,248,164)", - "4": "rgb(230,148,34)", - "5": "rgb(0,118,14)", - "6": "rgb(122,186,220)", - "7": "rgb(236,13,176)", - "8": "rgb(12,48,255)", - "9": "rgb(119,159,176)", - "10": "rgb(220,216,20)", - "11": "rgb(103,255,255)", - "12": "rgb(60,60,60)", - "13": "rgb(255,165,0)", - "14": "rgb(165,42,42)", - "15": "rgb(0,0,208)", - "16": "rgb(25,100,40)", - "17": "rgb(125,100,160)", - "18": "rgb(100,25,0)", - "19": "rgb(220,20,100)", - "20": "rgb(220,20,10)", - "21": "rgb(180,220,140)", - "22": "rgb(220,60,220)", - "23": "rgb(180,40,120)", - "24": "rgb(140,20,140)", - "25": "rgb(20,30,140)", - "26": "rgb(35,75,50)", - "27": "rgb(225,140,140)", - "28": "rgb(200,35,75)", - "29": "rgb(160,100,50)", - "30": "rgb(20,220,60)", - "31": "rgb(60,220,60)", - "32": "rgb(220,180,140)", - "33": "rgb(20,100,50)", - "34": "rgb(220,60,20)", - "35": "rgb(120,100,60)", - "36": "rgb(220,20,20)", - "37": "rgb(220,180,220)", - "38": "rgb(60,20,220)", - "39": "rgb(160,140,180)", - "40": "rgb(80,20,140)", - "41": "rgb(75,50,125)", - "42": "rgb(20,220,160)", - "43": "rgb(20,180,140)", - "44": "rgb(140,220,220)", - "45": "rgb(80,160,20)", - "46": "rgb(100,0,100)", - "47": "rgb(70,70,70)", - "48": "rgb(150,150,200)", - "49": "rgb(255,192,32)" -} \ No newline at end of file diff --git a/public/models/model11_50class/group1-shard1of1.bin b/public/models/model11_50class/group1-shard1of1.bin deleted file mode 100644 index 216fc84..0000000 Binary files a/public/models/model11_50class/group1-shard1of1.bin and /dev/null differ diff --git a/public/models/model11_50class/labels.json b/public/models/model11_50class/labels.json deleted file mode 100644 index 58541ce..0000000 --- a/public/models/model11_50class/labels.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "0": "BG", - "1": "Cerebral-White-Matter", - "2": "Ventricle", - "3": "Cerebellum-White-Matter", - "4": "Cerebellum", - "5": "Thalamus-Proper*", - "6": 
"Caudate", - "7": "Putamen", - "8": "Pallidum", - "9": "Brain-Stem", - "10": "Hippocampus", - "11": "Amygdala", - "12": "CSF", - "13": "Accumbens-area", - "14": "VentralDC", - "15": "CC_Posterior / CC_Mid_Posterior / CC_Central / CC_Mid_Anterior / CC_Anterior", - "16": "ctx-bankssts", - "17": "ctx-caudalanteriorcingulate", - "18": "ctx-caudalmiddlefrontal", - "19": "ctx-cuneus", - "20": "ctx-entorhinal", - "21": "ctx-fusiform", - "22": "ctx-inferiorparietal", - "23": "ctx-inferiortemporal", - "24": "ctx-isthmuscingulate", - "25": "ctx-lateraloccipital", - "26": "ctx-lateralorbitofrontal", - "27": "ctx-lingual", - "28": "ctx-medialorbitofrontal", - "29": "ctx-middletemporal", - "30": "ctx-parahippocampal", - "31": "ctx-paracentral", - "32": "ctx-parsopercularis", - "33": "ctx-parsorbitalis", - "34": "ctx-parstriangularis", - "35": "ctx-pericalcarine", - "36": "ctx-postcentral", - "37": "ctx-posteriorcingulate", - "38": "ctx-precentral", - "39": "ctx-precuneus", - "40": "ctx-rostralanteriorcingulate", - "41": "ctx-rostralmiddlefrontal", - "42": "ctx-superiorfrontal", - "43": "ctx-superiorparietal", - "44": "ctx-superiortemporal", - "45": "ctx-supramarginal", - "46": "ctx-frontalpole", - "47": "ctx-temporalpole", - "48": "ctx-transversetemporal", - "49": "ctx-insula" -} \ No newline at end of file diff --git a/public/models/model11_50class/labels.zip b/public/models/model11_50class/labels.zip deleted file mode 100644 index 824f07b..0000000 Binary files a/public/models/model11_50class/labels.zip and /dev/null differ diff --git a/public/models/model11_50class/labelsWithCompleteAnnot.json b/public/models/model11_50class/labelsWithCompleteAnnot.json deleted file mode 100644 index 8735db8..0000000 --- a/public/models/model11_50class/labelsWithCompleteAnnot.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "0": "BG", - "1": "Left-Cerebral-White-Matter / Right-Cerebral-White-Matter", - "2": "Left-Lateral-Ventricle / Left-Inf-Lat-Vent / Right-Lateral-Ventricle / Right-Inf-Lat-Vent / 3rd-Ventricle / 4th-Ventricle", - "3": "Left-Cerebellum-White-Matter / Right-Cerebellum-White-Matter", - "4": "Left-Cerebellum-Cortex / Right-Cerebellum-Cortex", - "5": "Left-Thalamus-Proper* / Right-Thalamus-Proper*", - "6": "Left-Caudate / Right-Caudate", - "7": "Left-Putamen / Right-Putamen", - "8": "Left-Pallidum / Right-Pallidum", - "9": "Brain-Stem", - "10": "Left-Hippocampus / Right-Hippocampus", - "11": "Left-Amygdala / Right-Amygdala", - "12": "CSF", - "13": "Left-Accumbens-area / Right-Accumbens-area", - "14": "Left-VentralDC / Right-VentralDC", - "15": "CC_Posterior / CC_Mid_Posterior / CC_Central / CC_Mid_Anterior / CC_Anterior", - "16": "ctx-lh-bankssts / ctx-rh-bankssts", - "17": "ctx-lh-caudalanteriorcingulate / ctx-rh-caudalanteriorcingulate", - "18": "ctx-lh-caudalmiddlefrontal / ctx-rh-caudalmiddlefrontal", - "19": "ctx-lh-cuneus / ctx-rh-cuneus", - "20": "ctx-lh-entorhinal / ctx-rh-entorhinal", - "21": "ctx-lh-fusiform / ctx-rh-fusiform", - "22": "ctx-lh-inferiorparietal / ctx-rh-inferiorparietal", - "23": "ctx-lh-inferiortemporal / ctx-rh-inferiortemporal", - "24": "ctx-lh-isthmuscingulate / ctx-rh-isthmuscingulate", - "25": "ctx-lh-lateraloccipital / ctx-rh-lateraloccipital", - "26": "ctx-lh-lateralorbitofrontal / ctx-rh-lateralorbitofrontal", - "27": "ctx-lh-lingual / ctx-rh-lingual", - "28": "ctx-lh-medialorbitofrontal / ctx-rh-medialorbitofrontal", - "29": "ctx-lh-middletemporal / ctx-rh-middletemporal", - "30": "ctx-lh-parahippocampal / ctx-rh-parahippocampal", - "31": "ctx-lh-paracentral / 
ctx-rh-paracentral", - "32": "ctx-lh-parsopercularis / ctx-rh-parsopercularis", - "33": "ctx-lh-parsorbitalis / ctx-rh-parsorbitalis", - "34": "ctx-lh-parstriangularis / ctx-rh-parstriangularis", - "35": "ctx-lh-pericalcarine / ctx-rh-pericalcarine", - "36": "ctx-lh-postcentral / ctx-rh-postcentral", - "37": "ctx-lh-posteriorcingulate / ctx-rh-posteriorcingulate", - "38": "ctx-lh-precentral / ctx-rh-precentral", - "39": "ctx-lh-precuneus / ctx-rh-precuneus", - "40": "ctx-lh-rostralanteriorcingulate / ctx-rh-rostralanteriorcingulate", - "41": "ctx-lh-rostralmiddlefrontal / ctx-rh-rostralmiddlefrontal", - "42": "ctx-lh-superiorfrontal / ctx-rh-superiorfrontal", - "43": "ctx-lh-superiorparietal / ctx-rh-superiorparietal", - "44": "ctx-lh-superiortemporal / ctx-rh-superiortemporal", - "45": "ctx-lh-supramarginal / ctx-rh-supramarginal", - "46": "ctx-lh-frontalpole / ctx-rh-frontalpole", - "47": "ctx-lh-temporalpole / ctx-rh-temporalpole", - "48": "ctx-lh-transversetemporal / ctx-rh-transversetemporal", - "49": "ctx-lh-insula / ctx-rh-insula" -} \ No newline at end of file diff --git a/public/models/model11_50class/model.json b/public/models/model11_50class/model.json deleted file mode 100644 index 37f3f21..0000000 --- a/public/models/model11_50class/model.json +++ /dev/null @@ -1 +0,0 @@ -{"format": "layers-model", "generatedBy": "keras v2.7.0", "convertedBy": "TensorFlow.js Converter v3.9.0", "modelTopology": {"keras_version": "2.7.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 256, 256, 256, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "input.1", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.1", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["input.1", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.4", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.4", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["input.4", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.8", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", 
"dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.8", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["input.8", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.12", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.12", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", "inbound_nodes": [[["input.12", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.16", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [16, 16, 16], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.16", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["input.16", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.20", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.20", "inbound_nodes": [[["30", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "32", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "32", "inbound_nodes": [[["input.20", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.24", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.24", "inbound_nodes": [[["32", 0, 0, {}]]]}, 
{"class_name": "Activation", "config": {"name": "34", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "34", "inbound_nodes": [[["input.24", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.28", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.28", "inbound_nodes": [[["34", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "36", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "36", "inbound_nodes": [[["input.28", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.32", "trainable": true, "dtype": "float32", "filters": 11, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.32", "inbound_nodes": [[["36", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "38", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "38", "inbound_nodes": [[["input.32", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 50, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["38", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "input.1/kernel", "shape": [3, 3, 3, 1, 11], "dtype": "float32"}, {"name": "input.1/bias", "shape": [11], "dtype": "float32"}, {"name": "input.12/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.12/bias", "shape": [11], "dtype": "float32"}, {"name": "input.16/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.16/bias", "shape": [11], "dtype": "float32"}, {"name": "input.20/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.20/bias", "shape": [11], "dtype": "float32"}, {"name": "input.24/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.24/bias", "shape": [11], "dtype": "float32"}, {"name": "input.28/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.28/bias", "shape": [11], "dtype": "float32"}, {"name": "input.32/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.32/bias", "shape": [11], "dtype": "float32"}, {"name": "input.4/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": 
"input.4/bias", "shape": [11], "dtype": "float32"}, {"name": "input.8/kernel", "shape": [3, 3, 3, 11, 11], "dtype": "float32"}, {"name": "input.8/bias", "shape": [11], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 11, 50], "dtype": "float32"}, {"name": "output/bias", "shape": [50], "dtype": "float32"}]}]} \ No newline at end of file diff --git a/public/models/model11_gw_ae/colorLUT.json b/public/models/model11_gw_ae/colorLUT.json deleted file mode 100644 index 6da374a..0000000 --- a/public/models/model11_gw_ae/colorLUT.json +++ /dev/null @@ -1 +0,0 @@ -{"0": "rgb(0,0,0)", "1": "rgb(255,255,255)", "2": "rgb(205,62,78)"} \ No newline at end of file diff --git a/public/models/model11_gw_ae/labels.json b/public/models/model11_gw_ae/labels.json deleted file mode 100644 index 4885a94..0000000 --- a/public/models/model11_gw_ae/labels.json +++ /dev/null @@ -1 +0,0 @@ -{"0": "background", "1": "White Matter", "2": "Grey Matter"} diff --git a/public/models/model18cls/colorLUT.json b/public/models/model18cls/colorLUT.json deleted file mode 100644 index 27d12d1..0000000 --- a/public/models/model18cls/colorLUT.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "0": "rgb(0,0,0)", - "1": "rgb(245,245,245)", - "2": "rgb(205,62,78)", - "3": "rgb(120,18,134)", - "4": "rgb(196,58,250)", - "5": "rgb(220,248,164)", - "6": "rgb(230,148,34)", - "7": "rgb(0,118,14)", - "8": "rgb(122,186,220)", - "9": "rgb(236,13,176)", - "10": "rgb(12,48,255)", - "11": "rgb(204,182,142)", - "12": "rgb(42,204,164)", - "13": "rgb(119,159,176)", - "14": "rgb(220,216,20)", - "15": "rgb(103,255,255)", - "16": "rgb(255,165,0)", - "17": "rgb(165,42,42)" -} - diff --git a/public/models/model18cls/labels.json b/public/models/model18cls/labels.json deleted file mode 100644 index d022502..0000000 --- a/public/models/model18cls/labels.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "0": "Unknown", - "1": "Cerebral-White-Matter", - "2": "Cerebral-Cortex", - "3": "Lateral-Ventricle", - "4": "Inferior-Lateral-Ventricle", - "5": "Cerebellum-White-Matter", - "6": "Cerebellum-Cortex", - "7": "Thalamus", - "8": "Caudate", - "9": "Putamen", - "10": "Pallidum", - "11": "3rd-Ventricle", - "12": "4th-Ventricle", - "13": "Brain-Stem", - "14": "Hippocampus", - "15": "Amygdala", - "16": "Accumbens-area", - "17": "VentralDC" -} diff --git a/public/models/model20chan3cls/colorLUT.json b/public/models/model20chan3cls/colorLUT.json deleted file mode 100644 index 6da374a..0000000 --- a/public/models/model20chan3cls/colorLUT.json +++ /dev/null @@ -1 +0,0 @@ -{"0": "rgb(0,0,0)", "1": "rgb(255,255,255)", "2": "rgb(205,62,78)"} \ No newline at end of file diff --git a/public/models/model20chan3cls/labels.json b/public/models/model20chan3cls/labels.json deleted file mode 100644 index 4885a94..0000000 --- a/public/models/model20chan3cls/labels.json +++ /dev/null @@ -1 +0,0 @@ -{"0": "background", "1": "White Matter", "2": "Grey Matter"} diff --git a/public/models/model21_104class/colorLUT.json b/public/models/model21_104class/colorLUT.json deleted file mode 100644 index b321ef3..0000000 --- a/public/models/model21_104class/colorLUT.json +++ /dev/null @@ -1,106 +0,0 @@ -{ - "0": "rgb(0,0,0)", - "1": "rgb(25,100,40)", - "2": "rgb(125,100,160)", - "3": "rgb(100,25,0)", - "4": "rgb(220,20,100)", - "5": "rgb(220,20,10)", - "6": "rgb(180,220,140)", - "7": "rgb(220,60,220)", - "8": "rgb(180,40,120)", - "9": "rgb(140,20,140)", - "10": "rgb(20,30,140)", - "11": "rgb(35,75,50)", - "12": "rgb(225,140,140)", - "13": "rgb(200,35,75)", - "14": "rgb(160,100,50)", - "15": 
"rgb(20,220,60)", - "16": "rgb(60,220,60)", - "17": "rgb(220,180,140)", - "18": "rgb(20,100,50)", - "19": "rgb(220,60,20)", - "20": "rgb(120,100,60)", - "21": "rgb(220,20,20)", - "22": "rgb(220,180,220)", - "23": "rgb(60,20,220)", - "24": "rgb(160,140,180)", - "25": "rgb(80,20,140)", - "26": "rgb(75,50,125)", - "27": "rgb(20,220,160)", - "28": "rgb(20,180,140)", - "29": "rgb(140,220,220)", - "30": "rgb(80,160,20)", - "31": "rgb(100,0,100)", - "32": "rgb(70,70,70)", - "33": "rgb(150,150,200)", - "34": "rgb(255,192,32)", - "35": "rgb(25,100,40)", - "36": "rgb(125,100,160)", - "37": "rgb(100,25,0)", - "38": "rgb(220,20,100)", - "39": "rgb(220,20,10)", - "40": "rgb(180,220,140)", - "41": "rgb(220,60,220)", - "42": "rgb(180,40,120)", - "43": "rgb(140,20,140)", - "44": "rgb(20,30,140)", - "45": "rgb(35,75,50)", - "46": "rgb(225,140,140)", - "47": "rgb(200,35,75)", - "48": "rgb(160,100,50)", - "49": "rgb(20,220,60)", - "50": "rgb(60,220,60)", - "51": "rgb(220,180,140)", - "52": "rgb(20,100,50)", - "53": "rgb(220,60,20)", - "54": "rgb(120,100,60)", - "55": "rgb(220,20,20)", - "56": "rgb(220,180,220)", - "57": "rgb(60,20,220)", - "58": "rgb(160,140,180)", - "59": "rgb(80,20,140)", - "60": "rgb(75,50,125)", - "61": "rgb(20,220,160)", - "62": "rgb(20,180,140)", - "63": "rgb(140,220,220)", - "64": "rgb(80,160,20)", - "65": "rgb(100,0,100)", - "66": "rgb(70,70,70)", - "67": "rgb(150,150,200)", - "68": "rgb(255,192,32)", - "69": "rgb(0,118,14)", - "70": "rgb(0,118,14)", - "71": "rgb(122,186,220)", - "72": "rgb(122,186,220)", - "73": "rgb(236,13,176)", - "74": "rgb(236,13,176)", - "75": "rgb(12,48,255)", - "76": "rgb(13,48,255)", - "77": "rgb(220,216,20)", - "78": "rgb(220,216,20)", - "79": "rgb(103,255,255)", - "80": "rgb(103,255,255)", - "81": "rgb(255,165,0)", - "82": "rgb(255,165,0)", - "83": "rgb(165,42,42)", - "84": "rgb(165,42,42)", - "85": "rgb(245,245,245)", - "86": "rgb(245,245,245)", - "87": "rgb(120,18,134)", - "88": "rgb(196,58,250)", - "89": "rgb(120,18,134)", - "90": "rgb(196,58,250)", - "91": "rgb(204,182,142)", - "92": "rgb(42,204,164)", - "93": "rgb(60,60,60)", - "94": "rgb(119,159,176)", - "95": "rgb(220,248,164)", - "96": "rgb(220,248,164)", - "97": "rgb(230,148,34)", - "98": "rgb(230,148,34)", - "99": "rgb(0,0,64)", - "100": "rgb(0,0,112)", - "101": "rgb(0,0,160)", - "102": "rgb(0,0,208)", - "103": "rgb(0,0,255)" -} \ No newline at end of file diff --git a/public/models/model21_104class/labels.json b/public/models/model21_104class/labels.json deleted file mode 100644 index 1b69b32..0000000 --- a/public/models/model21_104class/labels.json +++ /dev/null @@ -1,106 +0,0 @@ -{ - "0": "BG", - "1": "ctx-lh-bankssts", - "2": "ctx-lh-caudalanteriorcingulate", - "3": "ctx-lh-caudalmiddlefrontal", - "4": "ctx-lh-cuneus", - "5": "ctx-lh-entorhinal", - "6": "ctx-lh-fusiform", - "7": "ctx-lh-inferiorparietal", - "8": "ctx-lh-inferiortemporal", - "9": "ctx-lh-isthmuscingulate", - "10": "ctx-lh-lateraloccipital", - "11": "ctx-lh-lateralorbitofrontal", - "12": "ctx-lh-lingual", - "13": "ctx-lh-medialorbitofrontal", - "14": "ctx-lh-middletemporal", - "15": "ctx-lh-parahippocampal", - "16": "ctx-lh-paracentral", - "17": "ctx-lh-parsopercularis", - "18": "ctx-lh-parsorbitalis", - "19": "ctx-lh-parstriangularis", - "20": "ctx-lh-pericalcarine", - "21": "ctx-lh-postcentral", - "22": "ctx-lh-posteriorcingulate", - "23": "ctx-lh-precentral", - "24": "ctx-lh-precuneus", - "25": "ctx-lh-rostralanteriorcingulate", - "26": "ctx-lh-rostralmiddlefrontal", - "27": "ctx-lh-superiorfrontal", - "28": 
"ctx-lh-superiorparietal", - "29": "ctx-lh-superiortemporal", - "30": "ctx-lh-supramarginal", - "31": "ctx-lh-frontalpole", - "32": "ctx-lh-temporalpole", - "33": "ctx-lh-transversetemporal", - "34": "ctx-lh-insula", - "35": "ctx-rh-bankssts", - "36": "ctx-rh-caudalanteriorcingulate", - "37": "ctx-rh-caudalmiddlefrontal", - "38": "ctx-rh-cuneus", - "39": "ctx-rh-entorhinal", - "40": "ctx-rh-fusiform", - "41": "ctx-rh-inferiorparietal", - "42": "ctx-rh-inferiortemporal", - "43": "ctx-rh-isthmuscingulate", - "44": "ctx-rh-lateraloccipital", - "45": "ctx-rh-lateralorbitofrontal", - "46": "ctx-rh-lingual", - "47": "ctx-rh-medialorbitofrontal", - "48": "ctx-rh-middletemporal", - "49": "ctx-rh-parahippocampal", - "50": "ctx-rh-paracentral", - "51": "ctx-rh-parsopercularis", - "52": "ctx-rh-parsorbitalis", - "53": "ctx-rh-parstriangularis", - "54": "ctx-rh-pericalcarine", - "55": "ctx-rh-postcentral", - "56": "ctx-rh-posteriorcingulate", - "57": "ctx-rh-precentral", - "58": "ctx-rh-precuneus", - "59": "ctx-rh-rostralanteriorcingulate", - "60": "ctx-rh-rostralmiddlefrontal", - "61": "ctx-rh-superiorfrontal", - "62": "ctx-rh-superiorparietal", - "63": "ctx-rh-superiortemporal", - "64": "ctx-rh-supramarginal", - "65": "ctx-rh-frontalpole", - "66": "ctx-rh-temporalpole", - "67": "ctx-rh-transversetemporal", - "68": "ctx-rh-insula", - "69": "Left-Thalamus-Proper*", - "70": "Right-Thalamus-Proper*", - "71": "Left-Caudate", - "72": "Right-Caudate", - "73": "Left-Putamen", - "74": "Right-Putamen", - "75": "Left-Pallidum", - "76": "Right-Pallidum", - "77": "Left-Hippocampus", - "78": "Right-Hippocampus", - "79": "Left-Amygdala", - "80": "Right-Amygdala", - "81": "Left-Accumbens-area", - "82": "Right-Accumbens-area", - "83": "Left-VentralDC", - "84": "Right-VentralDC", - "85": "Left-Cerebral-White-Matter", - "86": "Right-Cerebral-White-Matter", - "87": "Left-Lateral-Ventricle", - "88": "Left-Inf-Lat-Vent", - "89": "Right-Lateral-Ventricle", - "90": "Right-Inf-Lat-Vent", - "91": "3rd-Ventricle", - "92": "4th-Ventricle", - "93": "CSF", - "94": "Brain-Stem", - "95": "Left-Cerebellum-White-Matter", - "96": "Right-Cerebellum-White-Matter", - "97": "Left-Cerebellum-Cortex", - "98": "Right-Cerebellum-Cortex", - "99": "CC_Posterior", - "100": "CC_Mid_Posterior", - "101": "CC_Central", - "102": "CC_Mid_Anterior", - "103": "CC_Anterior" -} \ No newline at end of file diff --git a/public/models/model21_3class/colorLUT.json b/public/models/model21_3class/colorLUT.json deleted file mode 100644 index 6da374a..0000000 --- a/public/models/model21_3class/colorLUT.json +++ /dev/null @@ -1 +0,0 @@ -{"0": "rgb(0,0,0)", "1": "rgb(255,255,255)", "2": "rgb(205,62,78)"} \ No newline at end of file diff --git a/public/models/model21_3class/group1-shard1of1.bin b/public/models/model21_3class/group1-shard1of1.bin deleted file mode 100644 index 2ebba53..0000000 Binary files a/public/models/model21_3class/group1-shard1of1.bin and /dev/null differ diff --git a/public/models/model21_3class/labels.json b/public/models/model21_3class/labels.json deleted file mode 100644 index 4885a94..0000000 --- a/public/models/model21_3class/labels.json +++ /dev/null @@ -1 +0,0 @@ -{"0": "background", "1": "White Matter", "2": "Grey Matter"} diff --git a/public/models/model21_3class/model.json b/public/models/model21_3class/model.json deleted file mode 100644 index 4f6e028..0000000 --- a/public/models/model21_3class/model.json +++ /dev/null @@ -1 +0,0 @@ -{"format": "layers-model", "generatedBy": "keras v2.7.0", "convertedBy": "TensorFlow.js Converter 
v3.9.0", "modelTopology": {"keras_version": "2.7.0", "backend": "tensorflow", "model_config": {"class_name": "Functional", "config": {"name": "model", "layers": [{"class_name": "InputLayer", "config": {"batch_input_shape": [null, 64, 64, 64, 1], "dtype": "float32", "sparse": false, "ragged": false, "name": "input"}, "name": "input", "inbound_nodes": []}, {"class_name": "Conv3D", "config": {"name": "input.1", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.1", "inbound_nodes": [[["input", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "22", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "22", "inbound_nodes": [[["input.1", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.4", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.4", "inbound_nodes": [[["22", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "24", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "24", "inbound_nodes": [[["input.4", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.8", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.8", "inbound_nodes": [[["24", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "26", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "26", "inbound_nodes": [[["input.8", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.12", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.12", "inbound_nodes": [[["26", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "28", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "28", "inbound_nodes": [[["input.12", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.16", 
"trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [16, 16, 16], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.16", "inbound_nodes": [[["28", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "30", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "30", "inbound_nodes": [[["input.16", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.20", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [8, 8, 8], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.20", "inbound_nodes": [[["30", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "32", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "32", "inbound_nodes": [[["input.20", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.24", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [4, 4, 4], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.24", "inbound_nodes": [[["32", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "34", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "34", "inbound_nodes": [[["input.24", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.28", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [2, 2, 2], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.28", "inbound_nodes": [[["34", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "36", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "36", "inbound_nodes": [[["input.28", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "input.32", "trainable": true, "dtype": "float32", "filters": 21, "kernel_size": [3, 3, 3], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, 
"activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "input.32", "inbound_nodes": [[["36", 0, 0, {}]]]}, {"class_name": "Activation", "config": {"name": "38", "trainable": true, "dtype": "float32", "activation": "relu"}, "name": "38", "inbound_nodes": [[["input.32", 0, 0, {}]]]}, {"class_name": "Conv3D", "config": {"name": "output", "trainable": true, "dtype": "float32", "filters": 3, "kernel_size": [1, 1, 1], "strides": [1, 1, 1], "padding": "same", "data_format": "channels_last", "dilation_rate": [1, 1, 1], "groups": 1, "activation": "linear", "use_bias": true, "kernel_initializer": {"class_name": "Zeros", "config": {}}, "bias_initializer": {"class_name": "Zeros", "config": {}}, "kernel_regularizer": null, "bias_regularizer": null, "activity_regularizer": null, "kernel_constraint": null, "bias_constraint": null}, "name": "output", "inbound_nodes": [[["38", 0, 0, {}]]]}], "input_layers": [["input", 0, 0]], "output_layers": [["output", 0, 0]]}}}, "weightsManifest": [{"paths": ["group1-shard1of1.bin"], "weights": [{"name": "input.1/kernel", "shape": [3, 3, 3, 1, 21], "dtype": "float32"}, {"name": "input.1/bias", "shape": [21], "dtype": "float32"}, {"name": "input.12/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.12/bias", "shape": [21], "dtype": "float32"}, {"name": "input.16/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.16/bias", "shape": [21], "dtype": "float32"}, {"name": "input.20/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.20/bias", "shape": [21], "dtype": "float32"}, {"name": "input.24/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.24/bias", "shape": [21], "dtype": "float32"}, {"name": "input.28/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.28/bias", "shape": [21], "dtype": "float32"}, {"name": "input.32/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.32/bias", "shape": [21], "dtype": "float32"}, {"name": "input.4/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.4/bias", "shape": [21], "dtype": "float32"}, {"name": "input.8/kernel", "shape": [3, 3, 3, 21, 21], "dtype": "float32"}, {"name": "input.8/bias", "shape": [21], "dtype": "float32"}, {"name": "output/kernel", "shape": [1, 1, 1, 21, 3], "dtype": "float32"}, {"name": "output/bias", "shape": [3], "dtype": "float32"}]}]} \ No newline at end of file diff --git a/public/models/model30chan18cls/colorLUT.json b/public/models/model30chan18cls/colorLUT.json deleted file mode 100644 index 27d12d1..0000000 --- a/public/models/model30chan18cls/colorLUT.json +++ /dev/null @@ -1,21 +0,0 @@ -{ - "0": "rgb(0,0,0)", - "1": "rgb(245,245,245)", - "2": "rgb(205,62,78)", - "3": "rgb(120,18,134)", - "4": "rgb(196,58,250)", - "5": "rgb(220,248,164)", - "6": "rgb(230,148,34)", - "7": "rgb(0,118,14)", - "8": "rgb(122,186,220)", - "9": "rgb(236,13,176)", - "10": "rgb(12,48,255)", - "11": "rgb(204,182,142)", - "12": "rgb(42,204,164)", - "13": "rgb(119,159,176)", - "14": "rgb(220,216,20)", - "15": "rgb(103,255,255)", - "16": "rgb(255,165,0)", - "17": "rgb(165,42,42)" -} - diff --git a/public/models/model30chan18cls/labels.json b/public/models/model30chan18cls/labels.json deleted file mode 100644 index d022502..0000000 --- a/public/models/model30chan18cls/labels.json +++ /dev/null @@ -1,20 +0,0 @@ -{ - "0": "Unknown", - "1": "Cerebral-White-Matter", - "2": "Cerebral-Cortex", - "3": "Lateral-Ventricle", - "4": 
"Inferior-Lateral-Ventricle", - "5": "Cerebellum-White-Matter", - "6": "Cerebellum-Cortex", - "7": "Thalamus", - "8": "Caudate", - "9": "Putamen", - "10": "Pallidum", - "11": "3rd-Ventricle", - "12": "4th-Ventricle", - "13": "Brain-Stem", - "14": "Hippocampus", - "15": "Amygdala", - "16": "Accumbens-area", - "17": "VentralDC" -} diff --git a/public/models/model30chan50cls/colorLUT.json b/public/models/model30chan50cls/colorLUT.json deleted file mode 100644 index 99c0f7e..0000000 --- a/public/models/model30chan50cls/colorLUT.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "0": "rgb(0,0,0)", - "1": "rgb(245,245,245)", - "2": "rgb(196,58,250)", - "3": "rgb(220,248,164)", - "4": "rgb(230,148,34)", - "5": "rgb(0,118,14)", - "6": "rgb(122,186,220)", - "7": "rgb(236,13,176)", - "8": "rgb(12,48,255)", - "9": "rgb(119,159,176)", - "10": "rgb(220,216,20)", - "11": "rgb(103,255,255)", - "12": "rgb(60,60,60)", - "13": "rgb(255,165,0)", - "14": "rgb(165,42,42)", - "15": "rgb(0,0,208)", - "16": "rgb(25,100,40)", - "17": "rgb(125,100,160)", - "18": "rgb(100,25,0)", - "19": "rgb(220,20,100)", - "20": "rgb(220,20,10)", - "21": "rgb(180,220,140)", - "22": "rgb(220,60,220)", - "23": "rgb(180,40,120)", - "24": "rgb(140,20,140)", - "25": "rgb(20,30,140)", - "26": "rgb(35,75,50)", - "27": "rgb(225,140,140)", - "28": "rgb(200,35,75)", - "29": "rgb(160,100,50)", - "30": "rgb(20,220,60)", - "31": "rgb(60,220,60)", - "32": "rgb(220,180,140)", - "33": "rgb(20,100,50)", - "34": "rgb(220,60,20)", - "35": "rgb(120,100,60)", - "36": "rgb(220,20,20)", - "37": "rgb(220,180,220)", - "38": "rgb(60,20,220)", - "39": "rgb(160,140,180)", - "40": "rgb(80,20,140)", - "41": "rgb(75,50,125)", - "42": "rgb(20,220,160)", - "43": "rgb(20,180,140)", - "44": "rgb(140,220,220)", - "45": "rgb(80,160,20)", - "46": "rgb(100,0,100)", - "47": "rgb(70,70,70)", - "48": "rgb(150,150,200)", - "49": "rgb(255,192,32)" -} \ No newline at end of file diff --git a/public/models/model30chan50cls/labels.json b/public/models/model30chan50cls/labels.json deleted file mode 100644 index 2d48813..0000000 --- a/public/models/model30chan50cls/labels.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "0": "BG", - "1": "Cerebral-White-Matter", - "2": "Ventricle", - "3": "Cerebellum-White-Matter", - "4": "Cerebellum", - "5": "Thalamus-Proper*", - "6": "Caudate", - "7": "Putamen", - "8": "Pallidum", - "9": "Brain-Stem", - "10": "Hippocampus", - "11": "Amygdala", - "12": "CSF", - "13": "Accumbens-area", - "14": "VentralDC", - "15": "Corpus callosum", - "16": "ctx-bankssts", - "17": "ctx-caudalanteriorcingulate", - "18": "ctx-caudalmiddlefrontal", - "19": "ctx-cuneus", - "20": "ctx-entorhinal", - "21": "ctx-fusiform", - "22": "ctx-inferiorparietal", - "23": "ctx-inferiortemporal", - "24": "ctx-isthmuscingulate", - "25": "ctx-lateraloccipital", - "26": "ctx-lateralorbitofrontal", - "27": "ctx-lingual", - "28": "ctx-medialorbitofrontal", - "29": "ctx-middletemporal", - "30": "ctx-parahippocampal", - "31": "ctx-paracentral", - "32": "ctx-parsopercularis", - "33": "ctx-parsorbitalis", - "34": "ctx-parstriangularis", - "35": "ctx-pericalcarine", - "36": "ctx-postcentral", - "37": "ctx-posteriorcingulate", - "38": "ctx-precentral", - "39": "ctx-precuneus", - "40": "ctx-rostralanteriorcingulate", - "41": "ctx-rostralmiddlefrontal", - "42": "ctx-superiorfrontal", - "43": "ctx-superiorparietal", - "44": "ctx-superiortemporal", - "45": "ctx-supramarginal", - "46": "ctx-frontalpole", - "47": "ctx-temporalpole", - "48": "ctx-transversetemporal", - "49": "ctx-insula" -} diff --git 
a/public/models/model5_gw_ae/colorLUT.json b/public/models/model5_gw_ae/colorLUT.json deleted file mode 100644 index 6da374a..0000000 --- a/public/models/model5_gw_ae/colorLUT.json +++ /dev/null @@ -1 +0,0 @@ -{"0": "rgb(0,0,0)", "1": "rgb(255,255,255)", "2": "rgb(205,62,78)"} \ No newline at end of file diff --git a/public/models/model5_gw_ae/labels.json b/public/models/model5_gw_ae/labels.json deleted file mode 100644 index 4885a94..0000000 --- a/public/models/model5_gw_ae/labels.json +++ /dev/null @@ -1 +0,0 @@ -{"0": "background", "1": "White Matter", "2": "Grey Matter"} diff --git a/tensor-utils.js b/tensor-utils.js new file mode 100644 index 0000000..4286c93 --- /dev/null +++ b/tensor-utils.js @@ -0,0 +1,613 @@
+import * as tf from '@tensorflow/tfjs'
+import { BWLabeler } from './bwlabels.js'
+
+export async function addZeroPaddingTo3dTensor(tensor3d, rowPadArr = [1, 1], colPadArr = [1, 1], depthPadArr = [1, 1]) {
+  if (tensor3d.rank !== 3) {
+    throw new Error('Tensor must be 3D')
+  }
+  return tensor3d.pad([rowPadArr, colPadArr, depthPadArr])
+}
+
+export async function applyMriThreshold(tensor, percentage) {
+  // Perform asynchronous operations outside of tf.tidy
+  const maxTensor = tensor.max()
+  const thresholdTensor = maxTensor.mul(percentage)
+  const threshold = await thresholdTensor.data() // Extracts the threshold value
+
+  // Dispose tensors not needed anymore
+  maxTensor.dispose()
+  thresholdTensor.dispose()
+
+  // Use tf.tidy for synchronous operations
+  return tf.tidy(() => {
+    const dataForProcessing = tensor.clone()
+
+    // Thresholding (assuming background has very low values compared to the head)
+    const mask = dataForProcessing.greater(threshold[0])
+    // -- const denoisedMriData = dataForProcessing.mul(mask)
+    // -- return denoisedMriData
+
+    // No need to manually dispose dataForProcessing and mask; tf.tidy() disposes them automatically.
+    return mask
+  })
+}
+
+export async function binarizeVolumeDataTensor(volumeDataTensor) {
+  const alpha = 0
+  // element-wise: (x > 0 ? 1 : alpha * x ); e.g. Tensor [0, 0.9, 0.8, -3] => Tensor [0, 1, 1, 0]
+  return volumeDataTensor.step(alpha)
+}
+
+async function calculateQuantiles(tensor, lowerQuantile = 0.01, upperQuantile = 0.99) {
+  // Flatten the tensor
+  const flatTensor = tensor.flatten()
+
+  // Convert the flattened tensor to an array to sort it
+  const flatArray = await flatTensor.array()
+  flatArray.sort((a, b) => a - b) // Sort the array in ascending order
+
+  // Convert the sorted array back to a tensor
+  const sortedTensor = tf.tensor1d(flatArray)
+
+  // Calculate the indices for the quantiles
+  const numElements = sortedTensor.shape[0]
+  const lowIndex = Math.floor(numElements * lowerQuantile)
+  const highIndex = Math.ceil(numElements * upperQuantile) - 1 // Subtract 1 because indices are 0-based
+
+  // Slice the sorted tensor to get qmin and qmax
+  const qmin = sortedTensor.slice(lowIndex, 1) // Get the value at the low index
+  const qmax = sortedTensor.slice(highIndex, 1) // Get the value at the high index
+
+  // Get the actual values from the tensors
+  const qminValue = (await qmin.array())[0]
+  const qmaxValue = (await qmax.array())[0]
+
+  // Clean up tensors to free memory
+  flatTensor.dispose()
+  sortedTensor.dispose()
+  qmin.dispose()
+  qmax.dispose()
+
+  return { qmin: qminValue, qmax: qmaxValue }
+}
+
+export async function convByOutputChannelAndInputSlicing(input, filter, biases, stride, pad, dilationRate, sliceSize) {
+  const inChannels = input.shape[4]
+  const outChannels = filter.shape[4]
+
+  // Create an empty array to hold the output channels
+  let outputChannels = null
+
+  // Slice the input tensor and process one output channel at a time
+  for (let channel = 0; channel < outChannels; channel++) {
+    const numSlices = Math.ceil(inChannels / sliceSize)
+    const biasesSlice = biases.slice([channel], [1])
+    let outputChannel = null
+
+    for (let i = 0; i < numSlices; i++) {
+      const startChannel = i * sliceSize
+      const endChannel = Math.min((i + 1) * sliceSize, inChannels)
+
+      // Only proceed if there are channels to process
+      if (startChannel < inChannels) {
+        const resultSlice = tf.tidy(() => {
+          const inputSlice = input.slice([0, 0, 0, 0, startChannel], [-1, -1, -1, -1, endChannel - startChannel])
+          const filterSlice = filter.slice([0, 0, 0, startChannel, channel], [-1, -1, -1, endChannel - startChannel, 1])
+          // Perform the convolution for the current slice and output channel
+          return tf.conv3d(inputSlice, filterSlice, stride, pad, 'NDHWC', dilationRate)
+        })
+
+        if (outputChannel === null) {
+          outputChannel = resultSlice
+        } else {
+          const updatedOutputChannel = outputChannel.add(resultSlice)
+          outputChannel.dispose()
+          resultSlice.dispose()
+          outputChannel = updatedOutputChannel
+        }
+      }
+    }
+
+    // Add the biases to the accumulated convolutions for this channel
+    const biasedOutputChannel = outputChannel.add(biasesSlice)
+    outputChannel.dispose()
+    biasesSlice.dispose()
+
+    // Accumulate the channel to the output array
+    if (outputChannels === null) {
+      outputChannels = biasedOutputChannel
+    } else {
+      const updatedOutputChannels = await tf.concat([outputChannels, biasedOutputChannel], 4)
+      biasedOutputChannel.dispose()
+      outputChannels.dispose()
+      outputChannels = updatedOutputChannels
+    }
+  }
+
+  return outputChannels
+}
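+
+// A minimal usage sketch with hypothetical values: convolving a [1, D, H, W, 5]
+// activation volume with a [3, 3, 3, 5, 8] kernel two input channels at a time,
+// so only small slices of the input are resident on the GPU at once:
+//   const out = await convByOutputChannelAndInputSlicing(x, w, b, 1, 'same', [1, 1, 1], 2)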
+
+export async function draw3dObjBoundingVolume(unstackOutVolumeTensor, opts, modelEntry, callbackImg) {
+  const allOutputSlices3DCC = []
+
+  // dataSync() is used to flatten each slice tensor. Takes around 1.5 s
+  for (let sliceTensorIdx = 0; sliceTensorIdx < unstackOutVolumeTensor.length; sliceTensorIdx++) {
+    allOutputSlices3DCC[sliceTensorIdx] = Array.from(unstackOutVolumeTensor[sliceTensorIdx].dataSync())
+  }
+
+  // Use this conversion to download output slices as nii file. Takes around 30 ms
+  // does not use `push` to avoid stack overflows. In future: consider .set() with typed arrays
+  const allOutputSlices3DCC1DimArray = new Array(allOutputSlices3DCC[0].length * allOutputSlices3DCC.length)
+  let index = 0
+  for (let sliceIdx = 0; sliceIdx < allOutputSlices3DCC.length; sliceIdx++) {
+    for (let i = 0; i < allOutputSlices3DCC[sliceIdx].length; i++) {
+      allOutputSlices3DCC1DimArray[index++] = allOutputSlices3DCC[sliceIdx][i]
+    }
+  }
+  console.log('Done with allOutputSlices3DCC1DimArray')
+  const brainMaskTensor1d = await binarizeVolumeDataTensor(tf.tensor1d(allOutputSlices3DCC1DimArray))
+  const brainOut = Array.from(brainMaskTensor1d.dataSync())
+  brainMaskTensor1d.dispose()
+  callbackImg(brainOut, opts, modelEntry)
+}
+// return first and last non-zero voxel in row (dim = 0), column (1) or slice (2) dimension
+async function firstLastNonZero(tensor3D, dim = 0) {
+  // tf.tidy() disposes the intermediate max() tensors once the plain array is extracted
+  let mxs = []
+  if (dim === 0) {
+    mxs = tf.tidy(() => tensor3D.max(2).max(1).arraySync())
+  } else if (dim === 1) {
+    mxs = tf.tidy(() => tensor3D.max(2).max(0).arraySync())
+  } else {
+    mxs = tf.tidy(() => tensor3D.max(1).max(0).arraySync())
+  }
+  let mn = mxs.length
+  let mx = 0
+  for (let i = 0; i < mxs.length; i++) {
+    if (mxs[i] > 0) {
+      mn = i
+      break
+    }
+  }
+  for (let i = mxs.length - 1; i >= 0; i--) {
+    if (mxs[i] > 0) {
+      mx = i
+      break
+    }
+  }
+  return [mn, mx]
+}
+
+export async function firstLastNonZero3D(tensor3D) {
+  const [row_min, row_max] = await firstLastNonZero(tensor3D, 0)
+  const [col_min, col_max] = await firstLastNonZero(tensor3D, 1)
+  const [depth_min, depth_max] = await firstLastNonZero(tensor3D, 2)
+  console.log('row min and max :', row_min, row_max)
+  console.log('col min and max :', col_min, col_max)
+  console.log('depth min and max :', depth_min, depth_max)
+  return [row_min, row_max, col_min, col_max, depth_min, depth_max]
+}
+
+/*
+// simpler function, but x4 slower
+export async function firstLastNonZero3D(tensor3D) {
+  const coords = await tf.whereAsync(tensor3D)
+  const row_min = coords.min(0).arraySync()[0]
+  const row_max = coords.max(0).arraySync()[0]
+  const col_min = coords.min(0).arraySync()[1]
+  const col_max = coords.max(0).arraySync()[1]
+  const depth_min = coords.min(0).arraySync()[2]
+  const depth_max = coords.max(0).arraySync()[2]
+  coords.dispose()
+  return [row_min, row_max, col_min, col_max, depth_min, depth_max]
+}
+*/
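+
+// Cropping sketch (hypothetical variable names): shrink a volume to its
+// non-zero bounding box before inference to reduce memory pressure:
+//   const [rMin, rMax, cMin, cMax, dMin, dMax] = await firstLastNonZero3D(vol)
+//   const cropped = vol.slice([rMin, cMin, dMin], [rMax - rMin + 1, cMax - cMin + 1, dMax - dMin + 1])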
+
+export async function generateBrainMask(
+  unstackOutVolumeTensor,
+  num_of_slices,
+  slice_height,
+  slice_width,
+  modelEntry,
+  opts,
+  callbackUI,
+  callbackImg,
+  isFinalImage = true
+) {
+  if (unstackOutVolumeTensor[0].dtype !== 'int32') {
+    callbackUI('', -1, 'generateBrainMask assumes int32')
+  }
+  if (modelEntry.preModelPostProcess) {
+    callbackUI('', -1, 'generateBrainMask assumes BWLabeler instead of preModelPostProcess')
+  }
+  const numSlices = unstackOutVolumeTensor.length
+  const numPixels2D = unstackOutVolumeTensor[0].size
+  const numVox3D = numSlices * numPixels2D
+  // preallocate to reduce heap usage
+  const brainOut = new Int32Array(numVox3D)
+  let offset = 0
+  for (let i = 0; i < numSlices; i++) {
+    brainOut.set(unstackOutVolumeTensor[i].dataSync(), offset)
+    offset += numPixels2D
+  }
+  // binarize: any non-zero class label becomes part of the mask
+  for (let i = 0; i < numVox3D; i++) {
+    brainOut[i] = brainOut[i] !== 0 ? 1 : 0
+  }
+  if (isFinalImage || opts.showPhase1Output) {
+    // all done
+    callbackImg(brainOut, opts, modelEntry)
+    callbackUI('Segmentation finished', 0)
+  }
+  return tf.tensor(brainOut, [num_of_slices, slice_height, slice_width])
+}
+
+export async function generateOutputSlicesV2(
+  img,
+  OutVolumeTensorShape,
+  OutVolumeTensorType,
+  num_of_slices,
+  numSegClasses,
+  slice_height,
+  slice_width,
+  modelEntry,
+  opts,
+  niftiImage
+) {
+  // Convert all slices into 1 Dim array
+  if (opts.isPostProcessEnable) {
+    const BWInstance = new BWLabeler()
+    const dim = new Uint32Array(OutVolumeTensorShape)
+    const conn = 26 // 26-neighbor connectivity (faces, edges and corners)
+    const binarize = true
+    const onlyLargestClusterPerClass = true
+    const [_labelCount, labeledImage] = BWInstance.bwlabel(img, dim, conn, binarize, onlyLargestClusterPerClass)
+    // zero out voxels that fall outside the retained clusters
+    for (let i = 0; i < img.length; i++) {
+      img[i] *= labeledImage[i]
+    }
+  } // if isPostProcessEnable
+  // NOTE: this dtype mapping is currently unused; the output below is coerced to Uint8Array
+  const typedArrayConstructor = {
+    float32: Float32Array,
+    int32: Int32Array
+    // Add other cases as needed for different dtypes
+  }[OutVolumeTensorType]
+  // Create a new Uint8Array from img (segmentation labels fit in 8 bits)
+  const allOutputSlices3DCC1DimArray = new Uint8Array(img)
+  switch (modelEntry.type) {
+    case 'Brain_Masking': {
+      const brainMask = new Uint8Array(allOutputSlices3DCC1DimArray.length)
+      for (let i = 0; i < allOutputSlices3DCC1DimArray.length; i++) {
+        brainMask[i] = allOutputSlices3DCC1DimArray[i] !== 0 ? 1 : 0
+      }
+      return brainMask
+    }
+    case 'Brain_Extraction': {
+      const maskedData = new Uint8Array(allOutputSlices3DCC1DimArray.length)
+      for (let i = 0; i < allOutputSlices3DCC1DimArray.length; i++) {
+        // Create the mask - 1 where the value is non-zero, 0 where it is zero.
+        const maskValue = allOutputSlices3DCC1DimArray[i] !== 0 ? 1 : 0
+        // Apply the mask to the data - multiply by the mask value.
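+        // (keeps the raw NIfTI intensity where the voxel was classified as brain, zeroes it elsewhere)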
+        maskedData[i] = niftiImage[i] * maskValue
+      }
+      return maskedData
+    }
+  }
+  return img
+}
+
+export async function getAllSlicesDataAsTF3D(num_of_slices, niftiHeader, niftiImage) {
+  // Get nifti dimensions
+  const cols = niftiHeader.dims[1] // Slice width
+  const rows = niftiHeader.dims[2] // Slice height
+  let typedData
+  if (niftiHeader.datatypeCode === 2) {
+    // enum from nvimage/utils DT_UINT8 = 2
+    typedData = new Uint8Array(niftiImage)
+  } else if (niftiHeader.datatypeCode === 4) {
+    // DT_INT16 = 4
+    typedData = new Int16Array(niftiImage)
+  } else if (niftiHeader.datatypeCode === 8) {
+    // DT_INT32 = 8
+    typedData = new Int32Array(niftiImage)
+  } else if (niftiHeader.datatypeCode === 16) {
+    // DT_FLOAT32 = 16
+    typedData = new Float32Array(niftiImage)
+  } else if (niftiHeader.datatypeCode === 64) {
+    // DT_FLOAT64 = 64
+    typedData = new Float64Array(niftiImage)
+  } else if (niftiHeader.datatypeCode === 256) {
+    // DT_INT8 = 256
+    typedData = new Int8Array(niftiImage)
+  } else if (niftiHeader.datatypeCode === 512) {
+    // DT_UINT16 = 512
+    typedData = new Uint16Array(niftiImage)
+  } else if (niftiHeader.datatypeCode === 768) {
+    // DT_UINT32 = 768
+    typedData = new Uint32Array(niftiImage)
+  } else {
+    // unsupported datatype: bail out (callers receive undefined)
+    return
+  }
+  const allSlices_2D = []
+  let offset3D = 0
+  // Draw pixels
+  for (let slice = 0; slice < num_of_slices; slice++) {
+    const sliceData = new Array(rows * cols)
+    let offset2D = 0
+    for (let row = 0; row < rows; row++) {
+      for (let col = 0; col < cols; col++) {
+        const value = typedData[offset3D++]
+        // Fill a 1-D array of pixel values; this single dimension represents one channel
+        sliceData[offset2D++] = value & 0xff
+      }
+    }
+    allSlices_2D.push(tf.tensor(sliceData, [rows, cols])) // slice_height, slice_width
+  }
+  const allSlices_3D = tf.stack(allSlices_2D)
+  tf.dispose(allSlices_2D)
+  return allSlices_3D
+}
+
+export async function getModelNumLayers(modelObj) {
+  return modelObj.layers.length
+}
+
+export async function getModelNumParameters(modelObj) {
+  let numParameters = 0
+  for (let layerIdx = 0; layerIdx < modelObj.layers.length; layerIdx++) {
+    numParameters += modelObj.layers[layerIdx].countParams()
+  }
+  return numParameters
+}
+
+export async function isModelChnlLast(modelObj) {
+  for (let layerIdx = 0; layerIdx < modelObj.layers.length; layerIdx++) {
+    if (modelObj.layersByDepth[layerIdx][0].dataFormat) {
+      return modelObj.layersByDepth[layerIdx][0].dataFormat === 'channelsLast'
+    }
+  }
+}
+
+export async function load_model(modelUrl) {
+  return await tf.loadLayersModel(modelUrl)
+}
+
+export async function minMaxNormalizeVolumeData(volumeData) {
+  // Normalize the data to the range 0 - 1 using min-max scaling
+  const volumeData_Max = volumeData.max()
+  const volumeData_Min = volumeData.min()
+  const normalizedSlices_3d = await volumeData.sub(volumeData_Min).div(volumeData_Max.sub(volumeData_Min))
+  return normalizedSlices_3d
+}
+
+function processTensorInChunks(inputTensor, filterWeights, chunkSize) {
+  // Assuming inputTensor's shape: [batch, depth, height, width, inChannels]
+  // and filterWeights's shape: [filterDepth, filterHeight, filterWidth, inChannels, outChannels]
+  const stride = 1
+  const pad = 0
+  const dilationRate = 1
+  const inChannels = inputTensor.shape[4]
+  const numSlices = Math.ceil(inChannels / chunkSize)
+
+  let accumulatedResult = null
+  for (let i = 0; i < numSlices; i++) {
+    const startChannel = i * chunkSize
+    const endChannel = Math.min((i + 1) * chunkSize, inChannels)
+    const channels = endChannel - startChannel
+
+    const inputSlice = tf.tidy(() => {
+      // Slice the input tensor to get the current chunk
+      return inputTensor.slice([0, 0, 0, 0, startChannel], [-1, -1, -1, -1, channels])
+    })
+    const filterSlice = tf.tidy(() => {
+      // Slice the filter weights to match the input tensor's current chunk
+      return filterWeights.slice([0, 0, 0, startChannel, 0], [-1, -1, -1, channels, -1])
+    })
+
+    const resultSlice = tf.conv3d(inputSlice, filterSlice, stride, pad, 'NDHWC', dilationRate)
+    // Clean up the slices to free memory
+    inputSlice.dispose()
+    filterSlice.dispose()
+
+    // Squeeze the result slice to remove dimensions of size 1
+    const squeezedResultSlice = tf.squeeze(resultSlice)
+    resultSlice.dispose() // Dispose of the original resultSlice after squeezing
+
+    if (accumulatedResult === null) {
+      accumulatedResult = squeezedResultSlice
+    } else {
+      // Accumulate the result by adding the new result slice to it
+      const newAccumulatedResult = accumulatedResult.add(squeezedResultSlice)
+
+      // Dispose of the previous accumulatedResult and squeezedResultSlice
+      accumulatedResult.dispose()
+      // Dispose of squeezedResultSlice only if it wasn't assigned to accumulatedResult
+      if (accumulatedResult !== squeezedResultSlice) {
+        squeezedResultSlice.dispose()
+      }
+      // Update accumulatedResult with the new result
+      accumulatedResult = newAccumulatedResult
+    }
+
+    // Dummy operation to trigger cleanup of freed buffers
+    tf.tidy(() => {
+      tf.matMul(tf.zeros([1, 1]), tf.zeros([1, 1]))
+    })
+  }
+
+  return accumulatedResult
+}
+
+export async function quantileNormalizeVolumeData(tensor, lowerQuantile = 0.05, upperQuantile = 0.95) {
+  // Call calculateQuantiles and wait for the result
+  const { qmin, qmax } = await calculateQuantiles(tensor, lowerQuantile, upperQuantile)
+
+  // Convert qmin and qmax back to scalars
+  const qminScalar = tf.scalar(qmin)
+  const qmaxScalar = tf.scalar(qmax)
+
+  // Perform the operation: (tensor - qmin) / (qmax - qmin)
+  const resultTensor = tensor.sub(qminScalar).div(qmaxScalar.sub(qminScalar))
+
+  // Dispose of the created scalars to free memory
+  qminScalar.dispose()
+  qmaxScalar.dispose()
+
+  // Return the resulting tensor
+  return resultTensor
+}
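+
+// Normalization sketch: robust scaling that is insensitive to outlier
+// intensities; with the defaults, values are mapped so that the 5th
+// percentile lands at 0 and the 95th percentile at 1:
+//   const normVol = await quantileNormalizeVolumeData(rawVol)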
+
+export async function removeZeroPaddingFrom3dTensor(tensor3d, rowPad = 1, colPad = 1, depthPad = 1) {
+  if (tensor3d.rank !== 3) {
+    throw new Error('Tensor must be 3D')
+  }
+  const [h, w, d] = tensor3d.shape
+  return tensor3d.slice([rowPad, colPad, depthPad], [h - 2 * rowPad, w - 2 * colPad, d - 2 * depthPad])
+}
+
+export async function resizeWithZeroPadding(croppedTensor3d, newDepth, newHeight, newWidth, refVoxel, boundVolSizeArr) {
+  const row_pad_before = refVoxel[0]
+  const col_pad_before = refVoxel[1]
+  const depth_pad_before = refVoxel[2]
+  // index of the last voxel of the bounding volume in each dim
+  const row_max = row_pad_before + boundVolSizeArr[0] - 1 // size [2, 2, 2] means 2 voxels total in each dim
+  const col_max = col_pad_before + boundVolSizeArr[1] - 1
+  const depth_max = depth_pad_before + boundVolSizeArr[2] - 1
+
+  const row_pad_after = newHeight - row_max - 1 > 0 ? newHeight - row_max - 1 : 0
+  const col_pad_after = newWidth - col_max - 1 > 0 ? newWidth - col_max - 1 : 0
+  const depth_pad_after = newDepth - depth_max - 1 > 0 ? newDepth - depth_max - 1 : 0
+
+  return croppedTensor3d.pad([
+    [row_pad_before, row_pad_after],
+    [col_pad_before, col_pad_after],
+    [depth_pad_before, depth_pad_after]
+  ])
+}
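+
+// Usage sketch (hypothetical names): restore a bounding-box-cropped volume
+// to the full 256^3 grid, anchored at the reference voxel of the crop:
+//   const fullVol = await resizeWithZeroPadding(cropped, 256, 256, 256, refVoxel, boundVolSize)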
+
+export class SequentialConvLayer {
+  constructor(model, chunkSize, isChannelLast, callbackUI, isWebWorker = true) {
+    this.model = model
+    this.outChannels = model.outputLayers[0].kernel.shape[4]
+    this.chunkSize = chunkSize
+    this.isChannelLast = isChannelLast
+    this.callbackUI = callbackUI
+    this.isWebWorker = isWebWorker
+  }
+
+  /**
+   * Apply sequential convolution layer
+   * @since 3.0.0
+   * @member SequentialConvLayer
+   * @param {tf.Tensor} inputTensor e.g. [ 1, 256, 256, 256, 5 ]
+   * @return {Promise<tf.Tensor>} outC, the per-voxel argmax volume, e.g. [256, 256, 256]
+   */
+
+  async apply(inputTensor) {
+    const oldDeleteTextureThreshold = tf.ENV.get('WEBGL_DELETE_TEXTURE_THRESHOLD')
+    tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', 0)
+
+    // eslint-disable-next-line @typescript-eslint/no-this-alias
+    const self = this
+    // Important to avoid "undefined" class var members inside the timer.
+    // "this" has another meaning inside the timer.
+
+    // document.getElementById("progressBarChild").parentElement.style.visibility = "visible"
+    const startTime = performance.now()
+
+    const convLayer = self.model.layers[self.model.layers.length - 1]
+    const weights = convLayer.getWeights()[0]
+    const biases = convLayer.getWeights()[1]
+    const outputShape = self.isChannelLast ? inputTensor.shape.slice(1, -1) : inputTensor.shape.slice(2)
+    // -- e.g. outputShape : [256,256,256] or cropped Dim
+    // -- if inputTensor [ 1, D, H, W, 50 ], channelLast true -> outputShape : outputShape [D, H, W]
+    // -- if inputTensor [ 1, 50, D, H, W ], channelLast false -> outputShape : outputShape [D, H, W]
+
+    let outB = tf.mul(tf.ones(outputShape), -10000)
+    // -- e.g. outB.shape [256,256,256]
+    let outC = tf.zeros(outputShape)
+    // -- e.g. outC.shape [256,256,256]
+    let chIdx = 0
+
+    // console.log("---------------------------------------------------------")
+    console.log('channel loop')
+
+    while (true) {
+      tf.engine().startScope() // Start TensorFlow.js scope
+      /* console.log('=======================')
+      const memoryInfo0 = await tf.memory()
+      console.log(`| Number of Tensors: ${memoryInfo0.numTensors}`)
+      console.log(`| Number of Data Buffers: ${memoryInfo0.numDataBuffers}`) */
+
+      const result = await tf.tidy(() => {
+        const filterWeights = weights.slice([0, 0, 0, 0, chIdx], [-1, -1, -1, -1, 1])
+        // -- e.g. filterWeights.shape [ 1, 1, 1, 5, 1 ]
+        const filterBiases = biases.slice([chIdx], [1])
+        // -- e.g. filterBiases.shape [1] -> Tensor [-0.7850812]
+        const outA = processTensorInChunks(inputTensor, filterWeights, Math.min(self.chunkSize, self.outChannels)).add(
+          filterBiases
+        )
+        const greater = tf.greater(outA, outB)
+        const newoutB = tf.where(greater, outA, outB)
+        const newoutC = tf.where(greater, tf.fill(outC.shape, chIdx), outC)
+        // Dispose the old tensors before reassigning
+        tf.dispose([outB, outC, filterWeights, filterBiases, outA, greater])
+        // Dummy operation to trigger cleanup
+        tf.tidy(() => tf.matMul(tf.ones([1, 1]), tf.ones([1, 1])))
+        return [newoutC, newoutB]
+      })
+      console.log('=======================')
+      self.callbackUI(`Iteration ${chIdx}`, chIdx / self.outChannels)
+      if (!self.isWebWorker) {
+        // allow user interface to refresh
+        await new Promise((resolve) => setTimeout(resolve, 17))
+      }
+      const memoryInfo = await tf.memory()
+      console.log(`Number of Tensors: ${memoryInfo.numTensors}`)
+      console.log(`Number of Data Buffers: ${memoryInfo.numDataBuffers}`)
+      console.log(`Megabytes In Use: ${(memoryInfo.numBytes / 1048576).toFixed(3)} MB`)
+      if (memoryInfo.unreliable) {
+        console.log(`Unreliable: ${memoryInfo.unreliable}`)
+      }
+      // Dispose of previous values before assigning new tensors to outC and outB
+      if (typeof outC !== 'undefined') {
+        outC.dispose()
+      }
+      if (typeof outB !== 'undefined') {
+        outB.dispose()
+      }
+      // Assign the new values to outC and outB
+      outC = tf.keep(result[0])
+      outB = tf.keep(result[1])
+      // // Assign the new values to outC and outB
+      // outC = result[0]
+      // outB = result[1]
+      tf.engine().endScope()
+
+      if (chIdx === self.outChannels - 1) {
+        // document.getElementById("progressBarChild").style.width = 0 + "%"
+        tf.dispose(outB)
+        const endTime = performance.now()
+        const executionTime = endTime - startTime
+        console.log(`Execution time for output layer: ${executionTime} milliseconds`)
+        tf.ENV.set('WEBGL_DELETE_TEXTURE_THRESHOLD', oldDeleteTextureThreshold)
+        return outC
+      } else {
+        chIdx++
+
+        // the seemingly strange sequence of operations
+        // below prevents tfjs from uncontrollably
+        // grabbing buffers, even when all tensors have
+        // already been disposed
+
+        const outCShape = outC.shape
+        const outCdata = outC.dataSync()
+        const outBShape = outB.shape
+        const outBdata = outB.dataSync()
+        outC.dispose()
+        outB.dispose()
+        // tf.disposeVariables()
+        outC = tf.tensor(outCdata, outCShape)
+        outB = tf.tensor(outBdata, outBShape)
+
+        // document.getElementById("progressBarChild").style.width = (chIdx + 1) * 100 / self.outChannels + "%"
+      }
+    }
+  }
+} // <<<< End of class
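+
+// Usage sketch (hypothetical wiring): run the model's final conv layer
+// sequentially, materializing one output channel at a time instead of the
+// full logits volume:
+//   const seqLayer = new SequentialConvLayer(model, 10, isChannelLast, callbackUI, false)
+//   const argmaxVolume = await seqLayer.apply(penultimateActivations) // [D, H, W] class indices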