// brainchop-parameters.js
export { inferenceModelsList, brainChopOpts }
const brainChopOpts = {
// General settings for input shape [batchSize, batch_D, batch_H, batch_W, numOfChan]
batchSize: 1, // How many batches are used during each inference iteration
numOfChan: 1, // Number of channels in the input shape
isColorEnable: true, // If false, grayscale will be used
isAutoColors: true, // If false, manualColorsRange will be used
bgLabelValue: 0, // Semantic segmentation background label value
drawBoundingVolume: false, // plot bounding volume used to crop the brain
isGPU: true, // Use WebGL/GPU (faster) or CPU (compatibility)
isBrainCropMaskBased: true, // If true, use the brain mask for cropping (and optional display); otherwise use the extracted brain tissue
showPhase1Output: false, // If true, load the phase-1 output (i.e. brain mask or brain tissue) into Papaya
isPostProcessEnable: true, // If true, apply a 3D connected-components filter
isContoursViewEnable: false, // If true, show 3D contours of the labeled regions
browserArrayBufferMaxZDim: 30, // This value depends on the memory available
telemetryFlag: false, // Ethical and transparent collection of browser usage while adhering to security and privacy standards
chartXaxisStepPercent: 10, // Percent of total labels shown on the chart X axis
uiSampleName: 'BC_UI_Sample', // Sample name used by interface
atlasSelectedColorTable: 'Fire' // Select from ["Hot-and-Cold", "Fire", "Grayscale", "Gold", "Spectrum"]
}
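// Usage sketch (a minimal, hypothetical consumer; the spread-based override below is just one
// possible pattern and is not part of this file):
//   import { brainChopOpts } from './brainchop-parameters.js'
//   const opts = { ...brainChopOpts, isGPU: false } // e.g. force the CPU path for compatibility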
// Inference models; ids must start at 1 and increase sequentially
const inferenceModelsList = [
{
id: 1,
type: 'Lesion',
path: '/models/model_11ch_ACR/model.json',
modelName: 'T2w Lesion',
colormapPath: './models/model_11ch_ACR/colormap.json',
preModelId: null, // Id of a model to run first, e.g. to crop the brain { null, 1, 2, .. }
preModelPostProcess: false, // If true, postprocess the preModel output to remove noisy regions.
isBatchOverlapEnable: false, // create extra overlap batches for inference
numOverlapBatches: 0, // Number of extra overlap batches for inference
enableTranspose: true, // Keras and tfjs input orientations may need a transpose step to match
enableCrop: true, // To speed up inference, crop the brain from the background before feeding it to the model, lowering memory use.
cropPadding: 18, // Padding size added to the cropped brain
autoThreshold: 0, // Threshold between 0 and 1; applies when no preModel is set and the tensor is normalized (min-max or by quantiles). Removes noisy voxels around the brain.
enableQuantileNorm: false, // Some models need quantile normalization.
filterOutWithPreMask: false, // Multiply the final output by the preModel mask to clean noisy areas
enableSeqConv: false, // For low-memory, low-spec systems, apply the last layer as a sequential convolution
textureSize: 0, // Requested texture size for the model; use 0 if unknown.
warning: null, // Warning message to show when the model is selected.
inferenceDelay: 100, // Delay in ms between layers while looping through them.
description: 'Detects lesions in a T2-weighted image.'
},
] // inferenceModelsList
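// Usage sketch (hypothetical consumer code; runInference and niftiVolume are assumed names,
// not defined in this file, shown only to illustrate selecting a model by id):
//   import { inferenceModelsList, brainChopOpts } from './brainchop-parameters.js'
//   const model = inferenceModelsList.find((m) => m.id === 1) // 'T2w Lesion'
//   // if model.preModelId were non-null, that model (e.g. brain cropping) would run first
//   await runInference(model, brainChopOpts, niftiVolume)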