diff --git a/assets/index-CkRQFQ50.js b/assets/index-D4okLGuZ.js
similarity index 99%
rename from assets/index-CkRQFQ50.js
rename to assets/index-D4okLGuZ.js
index aeff907..9f4cf03 100644
--- a/assets/index-CkRQFQ50.js
+++ b/assets/index-D4okLGuZ.js
@@ -21956,7 +21956,7 @@ return a / b;`,DIV_PACKED=`
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
- */const kernelConfigs=[_fusedMatMulConfig,absConfig,acosConfig,acoshConfig,addConfig,addNConfig,allConfig,anyConfig,argMaxConfig,argMinConfig,asinConfig,asinhConfig,atanConfig,atan2Config,atanhConfig,avgPoolConfig,avgPool3DConfig,avgPool3DGradConfig,avgPoolGradConfig,batchMatMulConfig,batchNormConfig,batchToSpaceNDConfig,bincountConfig,bitwiseAndConfig,broadcastArgsConfig,castConfig,ceilConfig,clipByValueConfig,complexConfig,complexAbsConfig,concatConfig,conv2DConfig,conv2DBackpropFilterConfig,conv2DBackpropInputConfig,conv3DConfig,conv3DBackpropFilterV2Config,conv3DBackpropInputConfig,cosConfig,coshConfig,cropAndResizeConfig,cumprodConfig,cumsumConfig,denseBincountConfig,depthToSpaceConfig,depthwiseConv2dNativeConfig,depthwiseConv2dNativeBackpropFilterConfig,depthwiseConv2dNativeBackpropInputConfig,diagConfig,dilation2DConfig,einsumConfig,eluConfig,eluGradConfig,equalConfig,erfConfig,expConfig,expandDimsConfig,expm1Config,fftConfig,fillConfig,flipLeftRightConfig,floorConfig,floorDivConfig,fromPixelsConfig,fusedConv2DConfig,fusedDepthwiseConv2DConfig,gatherNdConfig,gatherV2Config,greaterConfig,greaterEqualConfig,identityConfig,ifftConfig,imagConfig,isFiniteConfig,isInfConfig,isNaNConfig,leakyReluConfig,lessConfig,lessEqualConfig,linSpaceConfig,logConfig,log1pConfig,logicalAndConfig,logicalNotConfig,logicalOrConfig,LRNConfig,LRNGradConfig,maxConfig,maximumConfig,maxPoolConfig,maxPool3DConfig,maxPool3DGradConfig,maxPoolGradConfig,maxPoolWithArgmaxConfig,meanConfig,minConfig,minimumConfig,mirrorPadConfig,modConfig,multinomialConfig,multiplyConfig,negConfig,nonMaxSuppressionV3Config,nonMaxSuppressionV4Config,nonMaxSuppressionV5Config,notEqualConfig,oneHotConfig,onesLikeConfig,packConfig,padV2Config,powConfig,preluConfig,prodConfig,raggedGatherConfig,raggedRangeConfig,raggedTensorToTensorConfig,rangeConfig,realConfig,realDivConfig,reciprocalConfig,reluConfig,relu6Config,reshapeConfig,resizeBilinearConfig,resizeBilinearGradConfig,resizeNearestNeighborConfig,resizeNearestNeighborGradConfig,reverseConfig,rotateWithOffsetConfig,roundConfig,rsqrtConfig,scatterNdConfig,searchSortedConfig,selectConfig,seluConfig,sigmoidConfig,signConfig,sinConfig,sinhConfig,sliceConfig,softmaxConfig,softplusConfig,spaceToBatchNDConfig,sparseFillEmptyRowsConfig,sparseReshapeConfig,sparseSegmentMeanConfig,sparseSegmentSumConfig,sparseToDenseConfig,splitVConfig,sqrtConfig,squareConfig,squaredDifferenceConfig,staticRegexReplaceConfig,stepConfig,stridedSliceConfig,stringNGramsConfig,stringSplitConfig,stringToHashBucketFastConfig,subConfig,sumConfig,tanConfig,tanhConfig,tensorScatterUpdateConfig,tileConfig,topKConfig,transformConfig,transposeConfig,uniqueConfig,unpackConfig,unsortedSegmentSumConfig,zerosLikeConfig];for(const a of kernelConfigs)registerKernel(a);class BWLabeler{idx(n,s,u,f){return u*f[0]*f[1]+s*f[0]+n}check_previous_slice(n,s,u,f,m,A,v,y,T,E){let S=0;if(!m)return 0;const I=n[this.idx(u,f,m,A)];if(v>=6){const e=this.idx(u,f,m-1,A);I===n[e]&&(T[S++]=s[e])}if(v>=18){if(u){const e=this.idx(u-1,f,m-1,A);I===n[e]&&(T[S++]=s[e])}if(f){const e=this.idx(u,f-1,m-1,A);I===n[e]&&(T[S++]=s[e])}if(u=6){if(F){const V=this.idx(F-1,e,I,s);L===n[V]&&(S[M++]=E[V])}if(e){const V=this.idx(F,e-1,I,s);L===n[V]&&(S[M++]=E[V])}}if(u>=18){if(e&&F){const V=this.idx(F-1,e-1,I,s);L===n[V]&&(S[M++]=E[V])}if(e&&F=y){y+=v;const V=new Uint32Array(y);V.set(T),T=V}T[A-1]=A,A++}}}for(let I=0;I.",inferenceDelay:100,description:"Gray and white matter segmentation model. 
Operates on the full T1 image in a single pass but needs a dedicated graphics card to operate. Provides the best accuracy with hard cropping for better speed"},{id:3,type:"Segmentation",path:"/models/model20chan3cls/model.json",modelName:"🔪 Tissue GWM (High Acc, Low Mem)",labelsPath:"./models/model20chan3cls/labels.json",colorsPath:"./models/model20chan3cls/colorLUT.json",colormapPath:"./models/model20chan3cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Gray and white matter segmentation model. Operates on the full T1 image in a single pass but needs a dedicated graphics card to operate. Provides high accuracy and fits in low available memory, but is slower"},{id:4,type:"Atlas",path:"/models/model30chan18cls/model.json",modelName:"🪓 Subcortical + GWM (High Mem, Fast)",labelsPath:"./models/model30chan18cls/labels.json",colorsPath:"./models/model30chan18cls/colorLUT.json",colormapPath:"./models/model30chan18cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle a range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary."},{id:5,type:"Atlas",path:"/models/model30chan18cls/model.json",modelName:"🪓 Subcortical + GWM (Low Mem, Slow)",labelsPath:"./models/model30chan18cls/labels.json",colorsPath:"./models/model30chan18cls/colorLUT.json",colormapPath:"./models/model30chan18cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle a range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary."},{id:6,type:"Atlas",path:"/models/model18cls/model.json",modelName:"🪓 Subcortical + GWM (Low Mem, Faster)",labelsPath:"./models/model18cls/labels.json",colorsPath:"./models/model18cls/colorLUT.json",colormapPath:"./models/model18cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. 
This is a robust model able to handle a range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary."},{id:7,type:"Atlas",path:"/models/model30chan18cls/model.json",modelName:"🔪🪓 Subcortical + GWM (Failsafe, Less Acc)",labelsPath:"./models/model30chan18cls/labels.json",colorsPath:"./models/model30chan18cls/colorLUT.json",colormapPath:"./models/model30chan18cls/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is not a robust model; it may work on low-quality data, including varying saturation, and even clinical scans. It may also work on infant brains, but your mileage may vary."},{id:8,type:"Atlas",path:"/models/model30chan50cls/model.json",modelName:"🔪 Aparc+Aseg 50 (High Mem, Fast)",labelsPath:"./models/model30chan50cls/labels.json",colorsPath:"./models/model30chan50cls/colorLUT.json",colormapPath:"./models/model30chan50cls/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"This is a 50-class model that segments the brain into the Aparc+Aseg FreeSurfer atlas, but one where cortical homologues are merged into a single class."},{id:9,type:"Atlas",path:"/models/model30chan50cls/model.json",modelName:"🔪 Aparc+Aseg 50 (Low Mem, Slow)",labelsPath:"./models/model30chan50cls/labels.json",colorsPath:"./models/model30chan50cls/colorLUT.json",colormapPath:"./models/model30chan50cls/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"This is a 50-class model that segments the brain into the Aparc+Aseg FreeSurfer atlas, but one where cortical homologues are merged into a single class. The model uses sequential convolution for inference to overcome browser memory limitations, which leads to longer computation time."},{id:10,type:"Brain_Extraction",path:"/models/model5_gw_ae/model.json",modelName:"⚡ Extract the Brain (FAST)",labelsPath:null,colorsPath:null,preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:18,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:null,inferenceDelay:100,description:"The fast brain-extraction model operates on the full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. 
Still more accurate than the failsafe version."},{id:11,type:"Brain_Extraction",path:"/models/model11_gw_ae/model.json",modelName:"🔪 Extract the Brain (High Acc, Slow)",labelsPath:null,colorsPath:null,preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"The high-accuracy brain-extraction model operates on the full T1 image in a single pass, but uses only 11 filters per layer. Can work on dedicated graphics cards. Still more accurate than the fast version."},{id:12,type:"Brain_Masking",path:"/models/model5_gw_ae/model.json",modelName:"⚡ Brain Mask (FAST)",labelsPath:null,colorsPath:null,colormapPath:"./models/model5_gw_ae/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:17,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:null,inferenceDelay:100,description:"This fast masking model operates on the full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than the failsafe version."},{id:13,type:"Brain_Masking",path:"/models/model11_gw_ae/model.json",modelName:"🔪 Brain Mask (High Acc, Low Mem)",labelsPath:null,colorsPath:null,preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"This masking model operates on the full T1 image in a single pass, but uses 11 filters per layer. Can work on dedicated graphics cards. Still more accurate than the fast version."},{id:14,type:"Atlas",path:"/models/model21_104class/model.json",modelName:"🔪 Aparc+Aseg 104 (High Mem, Fast)",labelsPath:"./models/model21_104class/labels.json",colorsPath:"./models/model21_104class/colorLUT.json",colormapPath:"./models/model21_104class/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"The FreeSurfer aparc+aseg atlas parcellates the brain into 104 regions. It combines the Desikan-Killiany atlas for cortical areas with segmentation of subcortical regions."},{id:15,type:"Atlas",path:"/models/model21_104class/model.json",modelName:"🔪 Aparc+Aseg 104 (Low Mem, Slow)",labelsPath:"./models/model21_104class/labels.json",colorsPath:"./models/model21_104class/colorLUT.json",colormapPath:"./models/model21_104class/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. 
For more info please check with Browser Resources .",inferenceDelay:100,description:"The FreeSurfer aparc+aseg atlas parcellates the brain into 104 regions. It combines the Desikan-Killiany atlas for cortical areas with segmentation of subcortical regions. The model uses sequential convolution for inference to overcome browser memory limitations, which leads to longer computation time."}];async function getModelNumParameters(a){let n=0;for(let s=0;se-F);const m=tensor1d(f),A=m.shape[0],v=Math.floor(A*n),y=Math.ceil(A*s)-1,T=m.slice(v,1),E=m.slice(y,1),S=(await T.array())[0],I=(await E.array())[0];return u.dispose(),m.dispose(),T.dispose(),E.dispose(),{qmin:S,qmax:I}}async function quantileNormalizeVolumeData(a,n=.05,s=.95){const{qmin:u,qmax:f}=await calculateQuantiles(a,n,s),m=scalar(u),A=scalar(f),v=a.sub(m).div(A.sub(m));return m.dispose(),A.dispose(),v}async function minMaxNormalizeVolumeData(a){const n=a.max(),s=a.min();return await a.sub(s).div(n.sub(s))}async function inferenceFullVolumeSeqCovLayer(a,n,s,u,f,m,A){window.alert("inferenceFullVolumeSeqCovLayer() is not dead code?")}async function inferenceFullVolume(a,n,s,u,f,m,A){window.alert("inferenceFullVolume() is not dead code?")}async function inferenceSubVolumes(a,n,s,u,f,m=null){window.alert("inferenceSubVolumes() is not dead code?")}async function tensor2LightBuffer(a,n){window.alert("tensor2LightBuffer() is not dead code?")}async function draw3dObjBoundingVolume(a){window.alert("draw3dObjBoundingVolume() is not dead code?")}async function argMaxLarge(a,n,s,u,f,m="float32"){window.alert("argMaxLarge() is not dead code?")}async function addZeroPaddingTo3dTensor(a,n=[1,1],s=[1,1],u=[1,1]){if(a.rank!==3)throw new Error("Tensor must be 3D");return a.pad([n,s,u])}async function removeZeroPaddingFrom3dTensor(a,n=1,s=1,u=1){if(a.rank!==3)throw new Error("Tensor must be 3D");const[f,m,A]=a.shape;return a.slice([n,s,u],[f-2*n,m-2*s,A-2*u])}async function resizeWithZeroPadding(a,n,s,u,f,m){const A=f[0],v=f[1],y=f[2],T=A+m[0]-1,E=v+m[1]-1,S=y+m[2]-1,I=s-T-1>0?s-T-1:0,e=u-E-1>0?u-E-1:0,F=n-S-1>0?n-S-1:0;return a.pad([[A,I],[v,e],[y,F]])}async function applyMriThreshold(a,n){const s=a.max(),u=s.mul(n),f=await u.data();return s.dispose(),u.dispose(),tidy(()=>a.clone().greater(f[0]))}async function binarizeVolumeDataTensor(a){return a.step(0)}async function generateBrainMask(a,n,s,u,f,m,A,v,y=!0){console.log("Generate Brain Masking ... ");let T=[];for(let F=0;F{const F="postProcessSlices3D() should be upgraded to BWLabeler";A(F,-1,F)}),console.log("Post processing done ")):console.log("Phase-1 Post processing disabled ... 
");const S=new Array(T[0].length*T.length);let I=0;for(let F=0;F{const z=a.slice([0,0,0,0,L],[-1,-1,-1,-1,V-L]),b=n.slice([0,0,0,L,E],[-1,-1,-1,V-L,1]);return conv3d(z,b,u,f,"NDHWC",m)});if(e===null)e=B;else{const z=e.add(B);e.dispose(),B.dispose(),e=z}}}const F=e.add(I);if(e.dispose(),I.dispose(),T==null)T=F;else{const M=await concat$2([T,F],4);F.dispose(),T.dispose(),T=M}}return T}function processTensorInChunks(a,n,s){const A=a.shape[4],v=Math.ceil(A/s);let y=null;for(let T=0;Ta.slice([0,0,0,0,E],[-1,-1,-1,-1,I])),F=tidy(()=>n.slice([0,0,0,E,0],[-1,-1,-1,I,-1])),M=conv3d(e,F,1,0,"NDHWC",1);e.dispose(),F.dispose();const L=squeeze(M);if(M.dispose(),y===null)y=L;else{const V=y.add(L);y.dispose(),y!==L&&L.dispose(),y=V}tidy(()=>{matMul$1(zeros$1([1,1]),zeros$1([1,1]))})}return y}class SequentialConvLayer{constructor(n,s,u,f){this.model=n,this.outChannels=n.outputLayers[0].kernel.shape[4],this.chunkSize=s,this.isChannelLast=u,this.callbackUI=f}async apply(n){const s=ENV$4.get("WEBGL_DELETE_TEXTURE_THRESHOLD");ENV$4.set("WEBGL_DELETE_TEXTURE_THRESHOLD",0);const u=this;return new Promise(f=>{const m=performance.now(),A=u.model.layers[u.model.layers.length-1],v=A.getWeights()[0],y=A.getWeights()[1],T=u.isChannelLast?n.shape.slice(1,-1):n.shape.slice(2);let E=mul(ones(T),-1e4),S=zeros$1(T),I=0;console.log(" channel loop");const e=window.setInterval(async function(){engine().startScope(),console.log("=======================");const F=await memory();console.log(`| Number of Tensors: ${F.numTensors}`),console.log(`| Number of Data Buffers: ${F.numDataBuffers}`),console.log("Channel : ",I);const M=await tidy(()=>{const V=v.slice([0,0,0,0,I],[-1,-1,-1,-1,1]),B=y.slice([I],[1]),z=processTensorInChunks(n,V,Math.min(u.chunkSize,u.outChannels)).add(B),b=greater$2(z,E),W=where(b,z,E),q=where(b,fill$2(S.shape,I),S);return dispose([E,S,V,B,z,b]),tidy(()=>matMul$1(ones([1,1]),ones([1,1]))),[q,W]});console.log("=======================");const L=await memory();if(u.callbackUI(`Iteration ${I}`,I/u.outChannels),console.log(`Number of Tensors: ${L.numTensors}`),console.log(`Number of Data Buffers: ${L.numDataBuffers}`),console.log(`Megabytes In Use: ${(L.numBytes/1048576).toFixed(3)} MB`),L.unreliable&&console.log(`Unreliable: ${L.unreliable}`),typeof S<"u"&&S.dispose(),typeof E<"u"&&E.dispose(),S=keep(M[0]),E=keep(M[1]),engine().endScope(),I===u.outChannels-1){window.clearInterval(e),dispose(E);const B=performance.now()-m;console.log(`Execution time for output layer: ${B} milliseconds`),ENV$4.set("WEBGL_DELETE_TEXTURE_THRESHOLD",s),f(S)}else{I++;const V=S.shape,B=S.dataSync(),z=S.shape,b=E.dataSync();S.dispose(),E.dispose(),S=tensor(B,V),E=tensor(b,z)}await new Promise(V=>setTimeout(V,300))},0)})}}async function generateOutputSlicesV2(a,n,s,u,f,m,A,v,y,T){if(y.isPostProcessEnable){const I=new BWLabeler,e=new Uint32Array(n),F=26,M=!0,L=!0,[V,B]=I.bwlabel(a,e,F,M,L);for(let z=0;z0&&re<=1?e=await applyMriThreshold(u,re):(console.log("No valid crop threshold value"),e=await u.greater([0]).asType("bool"))}else e=await v.greater([0]).asType("bool");console.log(" mask_3d shape : ",e.shape);const F=await whereAsync(e);e.dispose();const M=F.arraySync();let L=m,V=0,B=A,z=0,b=f,W=0;for(let re=0;reM[re][0]?L=M[re][0]:VM[re][1]?B=M[re][1]:zM[re][2]?b=M[re][2]:Where'),memory().unreliable){const ae="unreliable reasons :"+memory().reasons;y(ae,NaN,ae)}}}async function inferenceFullVolumePhase2(a,n,s,u,f,m,A,v,y,T,E,S){let I=[];console.log(" ---- Start FullVolume inference phase-II ---- 
"),A.enableQuantileNorm?(console.log("preModel Quantile normalization enabled"),n=await quantileNormalizeVolumeData(n)):(console.log("preModel Min Max normalization enabled"),n=await minMaxNormalizeVolumeData(n));let F;if(m==null){const pe=A.autoThreshold;pe>0&&pe<=1?F=await applyMriThreshold(n,pe):(console.log("No valid crop threshold value"),F=await n.greater([0]).asType("bool"))}else F=m.greater([0]).asType("bool");console.log(" mask_3d shape : ",F.shape);const M=await whereAsync(F);F.dispose();const L=M.arraySync();let V=u,B=0,z=f,b=0,W=s,q=0;for(let pe=0;peL[pe][0]?V=L[pe][0]:BL[pe][1]?z=L[pe][1]:bL[pe][2]?W=L[pe][2]:qhere')}}async function inferenceFullVolumePhase1(a,n,s,u,f,m,A,v,y,T,E,S){if(v.No_SubVolumes=1,A.preModelId){const I=await load_model(y.rootURL+inferenceModelsList[A.preModelId-1].path),e=inferenceModelsList[A.preModelId-1].enableTranspose,F=inferenceModelsList[A.preModelId-1].enableQuantileNorm;let M=null;F?(console.log("preModel Quantile normalization enabled"),M=await quantileNormalizeVolumeData(n)):(console.log("preModel Min Max normalization enabled"),M=await minMaxNormalizeVolumeData(n)),e?(M=await M.transpose(),console.log("Input transposed for pre-model")):console.log("Transpose not enabled for pre-model"),v.Brainchop_Ver="PreModel_FV";const L=await I;try{const V=performance.now(),B=L,z=B.layers[0].batchInputShape;if(console.log(" Pre-Model batch input shape : ",z),z.length!==5){const Se="The pre-model input shape must be 5D ";return E(Se,-1,Se),0}const b=isModelChnlLast(B),W=y.batchSize,q=y.numOfChan;let Y,X,H,g;if(b){if(console.log("Pre-Model Channel Last"),isNaN(z[4])||z[4]!==1){const Se="The number of channels for pre-model input shape must be 1";return E(Se,-1,Se),0}Y=z[1],X=z[2],H=z[3],g=[W,Y,X,H,q]}else{if(console.log("Pre-Model Channel First"),isNaN(z[1])||z[1]!==1){const Se="The number of channels for pre-model input shape must be 1";return E(Se,-1,Se),0}Y=z[2],X=z[3],H=z[4],g=[W,q,Y,X,H]}v.Input_Shape=JSON.stringify(g),v.Output_Shape=JSON.stringify(B.output.shape),v.Channel_Last=b,v.Model_Param=await getModelNumParameters(B),v.Model_Layers=await getModelNumLayers(B);let J=0;const re=inferenceModelsList[A.preModelId-1].inferenceDelay;let ae=1;const fe=L.layers.length,pe=[];pe[0]=M.reshape(g),dispose(M);const me=window.setInterval(async function(){try{pe[ae]=L.layers[ae].apply(pe[ae-1])}catch(Se){const ve="Your graphics card (e.g. Intel) may not be compatible with WebGL. "+Se.message;return E(ve,-1,ve),window.clearInterval(me),engine().endScope(),engine().disposeVariables(),v.Inference_t=1/0,v.Postprocess_t=1/0,v.Status="Fail",v.Error_Type=Se.message,v.Extra_Err_Info="PreModel Failed while model layer "+ae+" apply",E("",-1,"",v),0}if(L.layers[ae].dispose(),pe[ae-1].dispose(),E("Layer "+ae.toString(),(ae+1)/fe),memory().unreliable){const Se="unreliable reasons :"+memory().reasons;E(Se,NaN,Se)}if(ae===fe-1){window.clearInterval(me);const Se=b?-1:1;console.log(" find argmax "),console.log("last Tensor shape : ",pe[ae].shape);const ve=b?pe[ae].shape[4]:pe[ae].shape[1];let we;try{console.log(" Try tf.argMax for fullVolume .."),we=await argMax$2(pe[ae],Se)}catch(pt){if(Se===-1)try{const It=performance.now();console.log(" tf.argMax failed .. 
try argMaxLarge ..");const Lt=tensor2LightBuffer(pe[ae].reshape([s,u,f,ve]),"float16");we=argMaxLarge(Lt,s,u,f,ve,"float16"),console.log("argMaxLarge for fullVolume takes : ",((performance.now()-It)/1e3).toFixed(4))}catch(It){const Lt="argMax buffer couldn't be created due to limited memory resources.";return E(Lt,-1,Lt),we.dispose(),window.clearInterval(me),engine().endScope(),engine().disposeVariables(),v.Inference_t=1/0,v.Postprocess_t=1/0,v.Status="Fail",v.Error_Type=It.message,v.Extra_Err_Info="preModel prediction_argmax from argMaxLarge failed",E("",-1,"",v),0}else{const It="argMax buffer couldn't be created due to limited memory resources.";return E(It,-1,It),we.dispose(),window.clearInterval(me),engine().endScope(),engine().disposeVariables(),v.Inference_t=1/0,v.Postprocess_t=1/0,v.Status="Fail",v.Error_Type=pt.message,v.Extra_Err_Info="preModel prediction_argmax from argMaxLarge not supported yet for channel first",E("",-1,"",v),0}}console.log(" Pre-model prediction_argmax shape : ",we.shape);const Ne=((performance.now()-V)/1e3).toFixed(4);dispose(pe[ae]),console.log(" Pre-model find array max ");const De=await we.max().dataSync()[0];Jhere')}}else console.log("--- No pre-model is selected ---"),console.log("------ Run voxel cropping ------"),m?A.enableSeqConv?(console.log("------ Seq Convolution ------"),await inferenceFullVolumeSeqCovLayerPhase2(y,A,a,n,s,u,f,null,E,T,v,S)):inferenceFullVolumePhase2(a,n,s,u,f,null,A,v,y,T,E,S):inferenceSubVolumes(a,n,s,u,f,null)}async function enableProductionMode(a=!0){await enableProdMode(),env().set("DEBUG",!1),env().set("WEBGL_FORCE_F16_TEXTURES",a),env().set("WEBGL_DELETE_TEXTURE_THRESHOLD",0),await ready(),console.log("tf env() flags :",env().flags),console.log("tf env() features :",env().features),console.log("tf env total features: ",Object.keys(env().features).length),console.log(getBackend())}async function runInference(a,n,s,u,f,m){const A=[];A.startTime=Date.now(),m("Segmentation started",0),performance.now();const v=a.batchSize,y=a.numOfChan;if(isNaN(v)||v!==1){const H="The batch size for input shape must be 1";return m(H,-1,H),0}if(isNaN(y)||y!==1){const H="The number of channels for input shape must be 1";return m(H,-1,H),0}engine().startScope(),console.log("Batch size: ",v),console.log("Num of Channels: ",y);const T=await load_model(a.rootURL+n.path);await enableProductionMode(!0),A.TF_Backend=getBackend();const E=T;let S=[];if(S=E.layers[0].batchInputShape,console.log(" Model batch input shape : ",S),S.length!==5){const H="The model input shape must be 5D";return m(H,-1,H),0}let I,e,F;const M=s.dims[1],L=s.dims[2],V=s.dims[3];if(await isModelChnlLast(E)){if(console.log("Model Channel Last"),isNaN(S[4])||S[4]!==1){const H="The number of channels for input shape must be 1";return m(H,-1,H),0}I=S[1],e=S[2],F=S[3]}else{if(console.log("Model Channel First"),isNaN(S[1])||S[1]!==1){const H="The number of channels for input shape must be 1";return m(H,-1,H),0}I=S[2],e=S[3],F=S[4]}let z;I===256&&e===256&&F===256?z=!0:z=!1,A.isModelFullVol=z;let b=await getAllSlicesData1D(V,s,u);const W=await getAllSlices2D(b,L,M);b=null;let q=await getSlices3D(W);dispose(W);const Y=n.enableTranspose,X=n.enableCrop;z&&(X?await inferenceFullVolumePhase1(T,q,V,L,M,z,n,A,a,f,m,u):(console.log("Cropping Disabled"),Y?(q=q.transpose(),console.log("Input transposed")):console.log("Transpose NOT Enabled"),n.enableSeqConv?(console.log("Seq Convolution Enabled"),await inferenceFullVolumeSeqCovLayer()):(console.log("Seq Convolution Disabled"),await 
inferenceFullVolume())))}async function detectBrowser(){return navigator.userAgent.indexOf("OPR/")>-1?"Opera":navigator.userAgent.indexOf("Edg/")>-1?"Edge":navigator.userAgent.indexOf("Falkon/")>-1?"Falkon":navigator.userAgent.indexOf("Chrome/")>-1?"Chrome":navigator.userAgent.indexOf("Firefox/")>-1?"Firefox":navigator.userAgent.indexOf("Safari/")>-1?"Safari":navigator.userAgent.indexOf("MSIE/")>-1||navigator.userAgent.indexOf("rv:")>-1?"IExplorer":"Unknown"}async function detectBrowserVersion(){return navigator.userAgent.indexOf("OPR/")>-1?parseInt(navigator.userAgent.split("OPR/")[1]):navigator.userAgent.indexOf("Edg/")>-1?parseInt(navigator.userAgent.split("Edg/")[1]):navigator.userAgent.indexOf("Falkon/")>-1?parseInt(navigator.userAgent.split("Falkon/")[1]):navigator.userAgent.indexOf("Chrome/")>-1?parseInt(navigator.userAgent.split("Chrome/")[1]):navigator.userAgent.indexOf("Firefox/")>-1?parseInt(navigator.userAgent.split("Firefox/")[1]):navigator.userAgent.indexOf("Safari/")>-1?parseInt(navigator.userAgent.split("Safari/")[1]):navigator.userAgent.indexOf("MSIE/")>-1||navigator.userAgent.indexOf("rv:")>-1?parseInt(navigator.userAgent.split("MSIE/")[1]):1/0}async function detectOperatingSys(){return navigator.userAgent.indexOf("Win")>-1?"Windows":navigator.userAgent.indexOf("Mac")>-1?"MacOS":navigator.userAgent.indexOf("Linux")>-1?"Linux":navigator.userAgent.indexOf("UNIX")>-1?"UNIX":"Unknown"}async function checkWebGl2(a){return a?(console.log("WebGl2 is enabled"),!0):(console.log(typeof WebGL2RenderingContext<"u"?"WebGL2 may be disabled. Please try updating video card drivers":"WebGL2 is not supported"),!1)}async function detectGPUVendor(a){let n;if(a&&(n=a.getExtension("WEBGL_debug_renderer_info"),n)){const s=a.getParameter(n.UNMASKED_VENDOR_WEBGL);return s.indexOf("(")>-1&&s.indexOf(")")>-1?s.substring(s.indexOf("(")+1,s.indexOf(")")):s}return null}async function detectGPUVendor_v0(a){if(a){const n=a.getExtension("WEBGL_debug_renderer_info");return n?a.getParameter(n.UNMASKED_VENDOR_WEBGL):null}else return null}async function detectGPUCardType_v0(a){if(a){if(detectBrowser()==="Firefox")return a.getParameter(a.RENDERER);const n=a.getExtension("WEBGL_debug_renderer_info");return n?a.getParameter(n.UNMASKED_RENDERER_WEBGL):null}else return null}async function detectGPUCardType(a){let n;if(a){if(detectBrowser()==="Firefox")return a.getParameter(a.RENDERER);if(n=a.getExtension("WEBGL_debug_renderer_info"),n){let s=a.getParameter(n.UNMASKED_RENDERER_WEBGL);return s.indexOf("(")>-1&&s.indexOf(")")>-1&&s.indexOf("(R)")===-1&&(s=s.substring(s.indexOf("(")+1,s.indexOf(")")),s.split(",").length===3)?s.split(",")[1].trim():s}}return null}async function getCPUNumCores(){return navigator.hardwareConcurrency}async function isChrome(){return/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor)}async function localSystemDetails(a,n=null){const s=new Date;if(a.isModelFullVol?a.Brainchop_Ver="FullVolume":a.Brainchop_Ver="SubVolumes",a.Total_t=(Date.now()-a.startTime)/1e3,delete a.startTime,a.Date=parseInt(s.getMonth()+1)+"/"+s.getDate()+"/"+s.getFullYear(),a.Browser=await detectBrowser(),a.Browser_Ver=await detectBrowserVersion(),a.OS=await detectOperatingSys(),a.WebGL2=await checkWebGl2(n),a.GPU_Vendor=await detectGPUVendor(n),a.GPU_Card=await detectGPUCardType(n),a.GPU_Vendor_Full=await detectGPUVendor_v0(n),a.GPU_Card_Full=await detectGPUCardType_v0(n),a.CPU_Cores=await getCPUNumCores(),a.Which_Brainchop="latest",await 
isChrome()&&(a.Heap_Size_MB=window.performance.memory.totalJSHeapSize/(1024*1024).toFixed(2),a.Used_Heap_MB=window.performance.memory.usedJSHeapSize/(1024*1024).toFixed(2),a.Heap_Limit_MB=window.performance.memory.jsHeapSizeLimit/(1024*1024).toFixed(2)),n){console.log("MAX_TEXTURE_SIZE :",n.getParameter(n.MAX_TEXTURE_SIZE)),console.log("MAX_RENDERBUFFER_SIZE :",n.getParameter(n.MAX_RENDERBUFFER_SIZE));const u=n.getExtension("WEBGL_debug_renderer_info");console.log("VENDOR WEBGL:",n.getParameter(u.UNMASKED_VENDOR_WEBGL)),a.Texture_Size=n.getParameter(n.MAX_TEXTURE_SIZE)}else a.Texture_Size=null;return a}function WorkerWrapper(a){return new Worker(""+new URL("brainchop-webworker-DDjdSg4F.js",import.meta.url).href,{name:a==null?void 0:a.name})}async function main(){smoothCheck.onchange=function(){S.setInterpolation(!smoothCheck.checked)},aboutBtn.onclick=function(){window.alert("Drag and drop NIfTI images. Use pulldown menu to choose brainchop model")},diagnosticsBtn.onclick=function(){if(T.length<1){window.alert("No diagnostic string generated: run a model to create diagnostics");return}navigator.clipboard.writeText(T),window.alert(`Diagnostics copied to clipboard
+ */const kernelConfigs=[_fusedMatMulConfig,absConfig,acosConfig,acoshConfig,addConfig,addNConfig,allConfig,anyConfig,argMaxConfig,argMinConfig,asinConfig,asinhConfig,atanConfig,atan2Config,atanhConfig,avgPoolConfig,avgPool3DConfig,avgPool3DGradConfig,avgPoolGradConfig,batchMatMulConfig,batchNormConfig,batchToSpaceNDConfig,bincountConfig,bitwiseAndConfig,broadcastArgsConfig,castConfig,ceilConfig,clipByValueConfig,complexConfig,complexAbsConfig,concatConfig,conv2DConfig,conv2DBackpropFilterConfig,conv2DBackpropInputConfig,conv3DConfig,conv3DBackpropFilterV2Config,conv3DBackpropInputConfig,cosConfig,coshConfig,cropAndResizeConfig,cumprodConfig,cumsumConfig,denseBincountConfig,depthToSpaceConfig,depthwiseConv2dNativeConfig,depthwiseConv2dNativeBackpropFilterConfig,depthwiseConv2dNativeBackpropInputConfig,diagConfig,dilation2DConfig,einsumConfig,eluConfig,eluGradConfig,equalConfig,erfConfig,expConfig,expandDimsConfig,expm1Config,fftConfig,fillConfig,flipLeftRightConfig,floorConfig,floorDivConfig,fromPixelsConfig,fusedConv2DConfig,fusedDepthwiseConv2DConfig,gatherNdConfig,gatherV2Config,greaterConfig,greaterEqualConfig,identityConfig,ifftConfig,imagConfig,isFiniteConfig,isInfConfig,isNaNConfig,leakyReluConfig,lessConfig,lessEqualConfig,linSpaceConfig,logConfig,log1pConfig,logicalAndConfig,logicalNotConfig,logicalOrConfig,LRNConfig,LRNGradConfig,maxConfig,maximumConfig,maxPoolConfig,maxPool3DConfig,maxPool3DGradConfig,maxPoolGradConfig,maxPoolWithArgmaxConfig,meanConfig,minConfig,minimumConfig,mirrorPadConfig,modConfig,multinomialConfig,multiplyConfig,negConfig,nonMaxSuppressionV3Config,nonMaxSuppressionV4Config,nonMaxSuppressionV5Config,notEqualConfig,oneHotConfig,onesLikeConfig,packConfig,padV2Config,powConfig,preluConfig,prodConfig,raggedGatherConfig,raggedRangeConfig,raggedTensorToTensorConfig,rangeConfig,realConfig,realDivConfig,reciprocalConfig,reluConfig,relu6Config,reshapeConfig,resizeBilinearConfig,resizeBilinearGradConfig,resizeNearestNeighborConfig,resizeNearestNeighborGradConfig,reverseConfig,rotateWithOffsetConfig,roundConfig,rsqrtConfig,scatterNdConfig,searchSortedConfig,selectConfig,seluConfig,sigmoidConfig,signConfig,sinConfig,sinhConfig,sliceConfig,softmaxConfig,softplusConfig,spaceToBatchNDConfig,sparseFillEmptyRowsConfig,sparseReshapeConfig,sparseSegmentMeanConfig,sparseSegmentSumConfig,sparseToDenseConfig,splitVConfig,sqrtConfig,squareConfig,squaredDifferenceConfig,staticRegexReplaceConfig,stepConfig,stridedSliceConfig,stringNGramsConfig,stringSplitConfig,stringToHashBucketFastConfig,subConfig,sumConfig,tanConfig,tanhConfig,tensorScatterUpdateConfig,tileConfig,topKConfig,transformConfig,transposeConfig,uniqueConfig,unpackConfig,unsortedSegmentSumConfig,zerosLikeConfig];for(const a of kernelConfigs)registerKernel(a);class BWLabeler{idx(n,s,u,f){return u*f[0]*f[1]+s*f[0]+n}check_previous_slice(n,s,u,f,m,A,v,y,T,E){let S=0;if(!m)return 0;const I=n[this.idx(u,f,m,A)];if(v>=6){const e=this.idx(u,f,m-1,A);I===n[e]&&(T[S++]=s[e])}if(v>=18){if(u){const e=this.idx(u-1,f,m-1,A);I===n[e]&&(T[S++]=s[e])}if(f){const e=this.idx(u,f-1,m-1,A);I===n[e]&&(T[S++]=s[e])}if(u=6){if(F){const V=this.idx(F-1,e,I,s);L===n[V]&&(S[M++]=E[V])}if(e){const V=this.idx(F,e-1,I,s);L===n[V]&&(S[M++]=E[V])}}if(u>=18){if(e&&F){const V=this.idx(F-1,e-1,I,s);L===n[V]&&(S[M++]=E[V])}if(e&&F=y){y+=v;const V=new Uint32Array(y);V.set(T),T=V}T[A-1]=A,A++}}}for(let I=0;I.",inferenceDelay:100,description:"Gray and white matter segmentation model. 
Operates on the full T1 image in a single pass but needs a dedicated graphics card to operate. Provides the best accuracy with hard cropping for better speed"},{id:3,type:"Segmentation",path:"/models/model20chan3cls/model.json",modelName:"🔪 Tissue GWM (High Acc, Low Mem)",labelsPath:"./models/model20chan3cls/labels.json",colorsPath:"./models/model20chan3cls/colorLUT.json",colormapPath:"./models/model20chan3cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Gray and white matter segmentation model. Operates on the full T1 image in a single pass but needs a dedicated graphics card to operate. Provides high accuracy and fits in low available memory, but is slower"},{id:4,type:"Atlas",path:"/models/model30chan18cls/model.json",modelName:"🪓 Subcortical + GWM (High Mem, Fast)",labelsPath:"./models/model30chan18cls/labels.json",colorsPath:"./models/model30chan18cls/colorLUT.json",colormapPath:"./models/model30chan18cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle a range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary."},{id:5,type:"Atlas",path:"/models/model30chan18cls/model.json",modelName:"🪓 Subcortical + GWM (Low Mem, Slow)",labelsPath:"./models/model30chan18cls/labels.json",colorsPath:"./models/model30chan18cls/colorLUT.json",colormapPath:"./models/model30chan18cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle a range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary."},{id:6,type:"Atlas",path:"/models/model18cls/model.json",modelName:"🪓 Subcortical + GWM (Low Mem, Faster)",labelsPath:"./models/model18cls/labels.json",colorsPath:"./models/model18cls/colorLUT.json",colormapPath:"./models/model18cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. 
This is a robust model able to handle a range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary."},{id:7,type:"Atlas",path:"/models/model30chan18cls/model.json",modelName:"🔪🪓 Subcortical + GWM (Failsafe, Less Acc)",labelsPath:"./models/model30chan18cls/labels.json",colorsPath:"./models/model30chan18cls/colorLUT.json",colormapPath:"./models/model30chan18cls/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is not a robust model; it may work on low-quality data, including varying saturation, and even clinical scans. It may also work on infant brains, but your mileage may vary."},{id:8,type:"Atlas",path:"/models/model30chan50cls/model.json",modelName:"🔪 Aparc+Aseg 50 (High Mem, Fast)",labelsPath:"./models/model30chan50cls/labels.json",colorsPath:"./models/model30chan50cls/colorLUT.json",colormapPath:"./models/model30chan50cls/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"This is a 50-class model that segments the brain into the Aparc+Aseg FreeSurfer atlas, but one where cortical homologues are merged into a single class."},{id:9,type:"Atlas",path:"/models/model30chan50cls/model.json",modelName:"🔪 Aparc+Aseg 50 (Low Mem, Slow)",labelsPath:"./models/model30chan50cls/labels.json",colorsPath:"./models/model30chan50cls/colorLUT.json",colormapPath:"./models/model30chan50cls/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"This is a 50-class model that segments the brain into the Aparc+Aseg FreeSurfer atlas, but one where cortical homologues are merged into a single class. The model uses sequential convolution for inference to overcome browser memory limitations, which leads to longer computation time."},{id:10,type:"Brain_Extraction",path:"/models/model5_gw_ae/model.json",modelName:"⚡ Extract the Brain (FAST)",labelsPath:null,colorsPath:null,preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:18,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:null,inferenceDelay:100,description:"The fast brain-extraction model operates on the full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. 
Still more accurate than the failsafe version."},{id:11,type:"Brain_Extraction",path:"/models/model11_gw_ae/model.json",modelName:"🔪 Extract the Brain (High Acc, Slow)",labelsPath:null,colorsPath:null,preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"The high-accuracy brain-extraction model operates on the full T1 image in a single pass, but uses only 11 filters per layer. Can work on dedicated graphics cards. Still more accurate than the fast version."},{id:12,type:"Brain_Masking",path:"/models/model5_gw_ae/model.json",modelName:"⚡ Brain Mask (FAST)",labelsPath:null,colorsPath:null,colormapPath:"./models/model5_gw_ae/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:17,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:null,inferenceDelay:100,description:"This fast masking model operates on the full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than the failsafe version."},{id:13,type:"Brain_Masking",path:"/models/model11_gw_ae/model.json",modelName:"🔪 Brain Mask (High Acc, Low Mem)",labelsPath:null,colorsPath:null,preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"This masking model operates on the full T1 image in a single pass, but uses 11 filters per layer. Can work on dedicated graphics cards. Still more accurate than the fast version."},{id:14,type:"Atlas",path:"/models/model21_104class/model.json",modelName:"🔪 Aparc+Aseg 104 (High Mem, Fast)",labelsPath:"./models/model21_104class/labels.json",colorsPath:"./models/model21_104class/colorLUT.json",colormapPath:"./models/model21_104class/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"The FreeSurfer aparc+aseg atlas parcellates the brain into 104 regions. It combines the Desikan-Killiany atlas for cortical areas with segmentation of subcortical regions."},{id:15,type:"Atlas",path:"/models/model21_104class/model.json",modelName:"🔪 Aparc+Aseg 104 (Low Mem, Slow)",labelsPath:"./models/model21_104class/labels.json",colorsPath:"./models/model21_104class/colorLUT.json",colormapPath:"./models/model21_104class/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. 
For more info please check with Browser Resources .",inferenceDelay:100,description:"The FreeSurfer aparc+aseg atlas parcellates the brain into 104 regions. It combines the Desikan-Killiany atlas for cortical areas with segmentation of subcortical regions. The model uses sequential convolution for inference to overcome browser memory limitations, which leads to longer computation time."}];async function getModelNumParameters(a){let n=0;for(let s=0;se-F);const m=tensor1d(f),A=m.shape[0],v=Math.floor(A*n),y=Math.ceil(A*s)-1,T=m.slice(v,1),E=m.slice(y,1),S=(await T.array())[0],I=(await E.array())[0];return u.dispose(),m.dispose(),T.dispose(),E.dispose(),{qmin:S,qmax:I}}async function quantileNormalizeVolumeData(a,n=.05,s=.95){const{qmin:u,qmax:f}=await calculateQuantiles(a,n,s),m=scalar(u),A=scalar(f),v=a.sub(m).div(A.sub(m));return m.dispose(),A.dispose(),v}async function minMaxNormalizeVolumeData(a){const n=a.max(),s=a.min();return await a.sub(s).div(n.sub(s))}async function inferenceFullVolumeSeqCovLayer(a,n,s,u,f,m,A){window.alert("inferenceFullVolumeSeqCovLayer() is not dead code?")}async function inferenceFullVolume(a,n,s,u,f,m,A){window.alert("inferenceFullVolume() is not dead code?")}async function inferenceSubVolumes(a,n,s,u,f,m=null){window.alert("inferenceSubVolumes() is not dead code?")}async function tensor2LightBuffer(a,n){window.alert("tensor2LightBuffer() is not dead code?")}async function draw3dObjBoundingVolume(a){window.alert("draw3dObjBoundingVolume() is not dead code?")}async function argMaxLarge(a,n,s,u,f,m="float32"){window.alert("argMaxLarge() is not dead code?")}async function addZeroPaddingTo3dTensor(a,n=[1,1],s=[1,1],u=[1,1]){if(a.rank!==3)throw new Error("Tensor must be 3D");return a.pad([n,s,u])}async function removeZeroPaddingFrom3dTensor(a,n=1,s=1,u=1){if(a.rank!==3)throw new Error("Tensor must be 3D");const[f,m,A]=a.shape;return a.slice([n,s,u],[f-2*n,m-2*s,A-2*u])}async function resizeWithZeroPadding(a,n,s,u,f,m){const A=f[0],v=f[1],y=f[2],T=A+m[0]-1,E=v+m[1]-1,S=y+m[2]-1,I=s-T-1>0?s-T-1:0,e=u-E-1>0?u-E-1:0,F=n-S-1>0?n-S-1:0;return a.pad([[A,I],[v,e],[y,F]])}async function applyMriThreshold(a,n){const s=a.max(),u=s.mul(n),f=await u.data();return s.dispose(),u.dispose(),tidy(()=>a.clone().greater(f[0]))}async function binarizeVolumeDataTensor(a){return a.step(0)}async function generateBrainMask(a,n,s,u,f,m,A,v,y=!0){console.log("Generate Brain Masking ... ");let T=[];for(let F=0;F{const F="postProcessSlices3D() should be upgraded to BWLabeler";A(F,-1,F)}),console.log("Post processing done ")):console.log("Phase-1 Post processing disabled ... 
");const S=new Array(T[0].length*T.length);let I=0;for(let F=0;F{const z=a.slice([0,0,0,0,L],[-1,-1,-1,-1,V-L]),b=n.slice([0,0,0,L,E],[-1,-1,-1,V-L,1]);return conv3d(z,b,u,f,"NDHWC",m)});if(e===null)e=B;else{const z=e.add(B);e.dispose(),B.dispose(),e=z}}}const F=e.add(I);if(e.dispose(),I.dispose(),T==null)T=F;else{const M=await concat$2([T,F],4);F.dispose(),T.dispose(),T=M}}return T}function processTensorInChunks(a,n,s){const A=a.shape[4],v=Math.ceil(A/s);let y=null;for(let T=0;Ta.slice([0,0,0,0,E],[-1,-1,-1,-1,I])),F=tidy(()=>n.slice([0,0,0,E,0],[-1,-1,-1,I,-1])),M=conv3d(e,F,1,0,"NDHWC",1);e.dispose(),F.dispose();const L=squeeze(M);if(M.dispose(),y===null)y=L;else{const V=y.add(L);y.dispose(),y!==L&&L.dispose(),y=V}tidy(()=>{matMul$1(zeros$1([1,1]),zeros$1([1,1]))})}return y}class SequentialConvLayer{constructor(n,s,u,f){this.model=n,this.outChannels=n.outputLayers[0].kernel.shape[4],this.chunkSize=s,this.isChannelLast=u,this.callbackUI=f}async apply(n){const s=ENV$4.get("WEBGL_DELETE_TEXTURE_THRESHOLD");ENV$4.set("WEBGL_DELETE_TEXTURE_THRESHOLD",0);const u=this;return new Promise(f=>{const m=performance.now(),A=u.model.layers[u.model.layers.length-1],v=A.getWeights()[0],y=A.getWeights()[1],T=u.isChannelLast?n.shape.slice(1,-1):n.shape.slice(2);let E=mul(ones(T),-1e4),S=zeros$1(T),I=0;console.log(" channel loop");const e=window.setInterval(async function(){engine().startScope(),console.log("=======================");const F=await memory();console.log(`| Number of Tensors: ${F.numTensors}`),console.log(`| Number of Data Buffers: ${F.numDataBuffers}`),console.log("Channel : ",I);const M=await tidy(()=>{const V=v.slice([0,0,0,0,I],[-1,-1,-1,-1,1]),B=y.slice([I],[1]),z=processTensorInChunks(n,V,Math.min(u.chunkSize,u.outChannels)).add(B),b=greater$2(z,E),W=where(b,z,E),q=where(b,fill$2(S.shape,I),S);return dispose([E,S,V,B,z,b]),tidy(()=>matMul$1(ones([1,1]),ones([1,1]))),[q,W]});console.log("=======================");const L=await memory();if(u.callbackUI(`Iteration ${I}`,I/u.outChannels),console.log(`Number of Tensors: ${L.numTensors}`),console.log(`Number of Data Buffers: ${L.numDataBuffers}`),console.log(`Megabytes In Use: ${(L.numBytes/1048576).toFixed(3)} MB`),L.unreliable&&console.log(`Unreliable: ${L.unreliable}`),typeof S<"u"&&S.dispose(),typeof E<"u"&&E.dispose(),S=keep(M[0]),E=keep(M[1]),engine().endScope(),I===u.outChannels-1){window.clearInterval(e),dispose(E);const B=performance.now()-m;console.log(`Execution time for output layer: ${B} milliseconds`),ENV$4.set("WEBGL_DELETE_TEXTURE_THRESHOLD",s),f(S)}else{I++;const V=S.shape,B=S.dataSync(),z=S.shape,b=E.dataSync();S.dispose(),E.dispose(),S=tensor(B,V),E=tensor(b,z)}await new Promise(V=>setTimeout(V,300))},0)})}}async function generateOutputSlicesV2(a,n,s,u,f,m,A,v,y,T){if(y.isPostProcessEnable){const I=new BWLabeler,e=new Uint32Array(n),F=26,M=!0,L=!0,[V,B]=I.bwlabel(a,e,F,M,L);for(let z=0;z0&&re<=1?e=await applyMriThreshold(u,re):(console.log("No valid crop threshold value"),e=await u.greater([0]).asType("bool"))}else e=await v.greater([0]).asType("bool");console.log(" mask_3d shape : ",e.shape);const F=await whereAsync(e);e.dispose();const M=F.arraySync();let L=m,V=0,B=A,z=0,b=f,W=0;for(let re=0;reM[re][0]?L=M[re][0]:VM[re][1]?B=M[re][1]:zM[re][2]?b=M[re][2]:Where'),memory().unreliable){const ae="unreliable reasons :"+memory().reasons;y(ae,NaN,ae)}}}async function inferenceFullVolumePhase2(a,n,s,u,f,m,A,v,y,T,E,S){let I=[];console.log(" ---- Start FullVolume inference phase-II ---- 
"),A.enableQuantileNorm?(console.log("preModel Quantile normalization enabled"),n=await quantileNormalizeVolumeData(n)):(console.log("preModel Min Max normalization enabled"),n=await minMaxNormalizeVolumeData(n));let F;if(m==null){const pe=A.autoThreshold;pe>0&&pe<=1?F=await applyMriThreshold(n,pe):(console.log("No valid crop threshold value"),F=await n.greater([0]).asType("bool"))}else F=m.greater([0]).asType("bool");console.log(" mask_3d shape : ",F.shape);const M=await whereAsync(F);F.dispose();const L=M.arraySync();let V=u,B=0,z=f,b=0,W=s,q=0;for(let pe=0;peL[pe][0]?V=L[pe][0]:BL[pe][1]?z=L[pe][1]:bL[pe][2]?W=L[pe][2]:qhere')}}async function inferenceFullVolumePhase1(a,n,s,u,f,m,A,v,y,T,E,S){if(v.No_SubVolumes=1,A.preModelId){const I=await load_model(y.rootURL+inferenceModelsList[A.preModelId-1].path),e=inferenceModelsList[A.preModelId-1].enableTranspose,F=inferenceModelsList[A.preModelId-1].enableQuantileNorm;let M=null;F?(console.log("preModel Quantile normalization enabled"),M=await quantileNormalizeVolumeData(n)):(console.log("preModel Min Max normalization enabled"),M=await minMaxNormalizeVolumeData(n)),e?(M=await M.transpose(),console.log("Input transposed for pre-model")):console.log("Transpose not enabled for pre-model"),v.Brainchop_Ver="PreModel_FV";const L=await I;try{const V=performance.now(),B=L,z=B.layers[0].batchInputShape;if(console.log(" Pre-Model batch input shape : ",z),z.length!==5){const Se="The pre-model input shape must be 5D ";return E(Se,-1,Se),0}const b=isModelChnlLast(B),W=y.batchSize,q=y.numOfChan;let Y,X,H,g;if(b){if(console.log("Pre-Model Channel Last"),isNaN(z[4])||z[4]!==1){const Se="The number of channels for pre-model input shape must be 1";return E(Se,-1,Se),0}Y=z[1],X=z[2],H=z[3],g=[W,Y,X,H,q]}else{if(console.log("Pre-Model Channel First"),isNaN(z[1])||z[1]!==1){const Se="The number of channels for pre-model input shape must be 1";return E(Se,-1,Se),0}Y=z[2],X=z[3],H=z[4],g=[W,q,Y,X,H]}v.Input_Shape=JSON.stringify(g),v.Output_Shape=JSON.stringify(B.output.shape),v.Channel_Last=b,v.Model_Param=await getModelNumParameters(B),v.Model_Layers=await getModelNumLayers(B);let J=0;const re=inferenceModelsList[A.preModelId-1].inferenceDelay;let ae=1;const fe=L.layers.length,pe=[];pe[0]=M.reshape(g),dispose(M);const me=window.setInterval(async function(){try{pe[ae]=L.layers[ae].apply(pe[ae-1])}catch(Se){const ve="Your graphics card (e.g. Intel) may not be compatible with WebGL. "+Se.message;return E(ve,-1,ve),window.clearInterval(me),engine().endScope(),engine().disposeVariables(),v.Inference_t=1/0,v.Postprocess_t=1/0,v.Status="Fail",v.Error_Type=Se.message,v.Extra_Err_Info="PreModel Failed while model layer "+ae+" apply",E("",-1,"",v),0}if(L.layers[ae].dispose(),pe[ae-1].dispose(),E("Layer "+ae.toString(),(ae+1)/fe),memory().unreliable){const Se="unreliable reasons :"+memory().reasons;E(Se,NaN,Se)}if(ae===fe-1){window.clearInterval(me);const Se=b?-1:1;console.log(" find argmax "),console.log("last Tensor shape : ",pe[ae].shape);const ve=b?pe[ae].shape[4]:pe[ae].shape[1];let we;try{console.log(" Try tf.argMax for fullVolume .."),we=await argMax$2(pe[ae],Se)}catch(pt){if(Se===-1)try{const It=performance.now();console.log(" tf.argMax failed .. 
try argMaxLarge ..");const Lt=tensor2LightBuffer(pe[ae].reshape([s,u,f,ve]),"float16");we=argMaxLarge(Lt,s,u,f,ve,"float16"),console.log("argMaxLarge for fullVolume takes : ",((performance.now()-It)/1e3).toFixed(4))}catch(It){const Lt="argMax buffer couldn't be created due to limited memory resources.";return E(Lt,-1,Lt),we.dispose(),window.clearInterval(me),engine().endScope(),engine().disposeVariables(),v.Inference_t=1/0,v.Postprocess_t=1/0,v.Status="Fail",v.Error_Type=It.message,v.Extra_Err_Info="preModel prediction_argmax from argMaxLarge failed",E("",-1,"",v),0}else{const It="argMax buffer couldn't be created due to limited memory resources.";return E(It,-1,It),we.dispose(),window.clearInterval(me),engine().endScope(),engine().disposeVariables(),v.Inference_t=1/0,v.Postprocess_t=1/0,v.Status="Fail",v.Error_Type=pt.message,v.Extra_Err_Info="preModel prediction_argmax from argMaxLarge not supported yet for channel first",E("",-1,"",v),0}}console.log(" Pre-model prediction_argmax shape : ",we.shape);const Ne=((performance.now()-V)/1e3).toFixed(4);dispose(pe[ae]),console.log(" Pre-model find array max ");const De=await we.max().dataSync()[0];Jhere')}}else console.log("--- No pre-model is selected ---"),console.log("------ Run voxel cropping ------"),m?A.enableSeqConv?(console.log("------ Seq Convolution ------"),await inferenceFullVolumeSeqCovLayerPhase2(y,A,a,n,s,u,f,null,E,T,v,S)):inferenceFullVolumePhase2(a,n,s,u,f,null,A,v,y,T,E,S):inferenceSubVolumes(a,n,s,u,f,null)}async function enableProductionMode(a=!0){await enableProdMode(),env().set("DEBUG",!1),env().set("WEBGL_FORCE_F16_TEXTURES",a),env().set("WEBGL_DELETE_TEXTURE_THRESHOLD",0),await ready(),console.log("tf env() flags :",env().flags),console.log("tf env() features :",env().features),console.log("tf env total features: ",Object.keys(env().features).length),console.log(getBackend())}async function runInference(a,n,s,u,f,m){const A=[];A.startTime=Date.now(),m("Segmentation started",0),performance.now();const v=a.batchSize,y=a.numOfChan;if(isNaN(v)||v!==1){const H="The batch size for input shape must be 1";return m(H,-1,H),0}if(isNaN(y)||y!==1){const H="The number of channels for input shape must be 1";return m(H,-1,H),0}engine().startScope(),console.log("Batch size: ",v),console.log("Num of Channels: ",y);const T=await load_model(a.rootURL+n.path);await enableProductionMode(!0),A.TF_Backend=getBackend();const E=T;let S=[];if(S=E.layers[0].batchInputShape,console.log(" Model batch input shape : ",S),S.length!==5){const H="The model input shape must be 5D";return m(H,-1,H),0}let I,e,F;const M=s.dims[1],L=s.dims[2],V=s.dims[3];if(await isModelChnlLast(E)){if(console.log("Model Channel Last"),isNaN(S[4])||S[4]!==1){const H="The number of channels for input shape must be 1";return m(H,-1,H),0}I=S[1],e=S[2],F=S[3]}else{if(console.log("Model Channel First"),isNaN(S[1])||S[1]!==1){const H="The number of channels for input shape must be 1";return m(H,-1,H),0}I=S[2],e=S[3],F=S[4]}let z;I===256&&e===256&&F===256?z=!0:z=!1,A.isModelFullVol=z;let b=await getAllSlicesData1D(V,s,u);const W=await getAllSlices2D(b,L,M);b=null;let q=await getSlices3D(W);dispose(W);const Y=n.enableTranspose,X=n.enableCrop;z&&(X?await inferenceFullVolumePhase1(T,q,V,L,M,z,n,A,a,f,m,u):(console.log("Cropping Disabled"),Y?(q=q.transpose(),console.log("Input transposed")):console.log("Transpose NOT Enabled"),n.enableSeqConv?(console.log("Seq Convolution Enabled"),await inferenceFullVolumeSeqCovLayer()):(console.log("Seq Convolution Disabled"),await 
inferenceFullVolume())))}async function detectBrowser(){return navigator.userAgent.indexOf("OPR/")>-1?"Opera":navigator.userAgent.indexOf("Edg/")>-1?"Edge":navigator.userAgent.indexOf("Falkon/")>-1?"Falkon":navigator.userAgent.indexOf("Chrome/")>-1?"Chrome":navigator.userAgent.indexOf("Firefox/")>-1?"Firefox":navigator.userAgent.indexOf("Safari/")>-1?"Safari":navigator.userAgent.indexOf("MSIE/")>-1||navigator.userAgent.indexOf("rv:")>-1?"IExplorer":"Unknown"}async function detectBrowserVersion(){return navigator.userAgent.indexOf("OPR/")>-1?parseInt(navigator.userAgent.split("OPR/")[1]):navigator.userAgent.indexOf("Edg/")>-1?parseInt(navigator.userAgent.split("Edg/")[1]):navigator.userAgent.indexOf("Falkon/")>-1?parseInt(navigator.userAgent.split("Falkon/")[1]):navigator.userAgent.indexOf("Chrome/")>-1?parseInt(navigator.userAgent.split("Chrome/")[1]):navigator.userAgent.indexOf("Firefox/")>-1?parseInt(navigator.userAgent.split("Firefox/")[1]):navigator.userAgent.indexOf("Safari/")>-1?parseInt(navigator.userAgent.split("Safari/")[1]):navigator.userAgent.indexOf("MSIE/")>-1||navigator.userAgent.indexOf("rv:")>-1?parseInt(navigator.userAgent.split("MSIE/")[1]):1/0}async function detectOperatingSys(){return navigator.userAgent.indexOf("Win")>-1?"Windows":navigator.userAgent.indexOf("Mac")>-1?"MacOS":navigator.userAgent.indexOf("Linux")>-1?"Linux":navigator.userAgent.indexOf("UNIX")>-1?"UNIX":"Unknown"}async function checkWebGl2(a){return a?(console.log("WebGl2 is enabled"),!0):(console.log(typeof WebGL2RenderingContext<"u"?"WebGL2 may be disabled. Please try updating video card drivers":"WebGL2 is not supported"),!1)}async function detectGPUVendor(a){let n;if(a&&(n=a.getExtension("WEBGL_debug_renderer_info"),n)){const s=a.getParameter(n.UNMASKED_VENDOR_WEBGL);return s.indexOf("(")>-1&&s.indexOf(")")>-1?s.substring(s.indexOf("(")+1,s.indexOf(")")):s}return null}async function detectGPUVendor_v0(a){if(a){const n=a.getExtension("WEBGL_debug_renderer_info");return n?a.getParameter(n.UNMASKED_VENDOR_WEBGL):null}else return null}async function detectGPUCardType_v0(a){if(a){if(detectBrowser()==="Firefox")return a.getParameter(a.RENDERER);const n=a.getExtension("WEBGL_debug_renderer_info");return n?a.getParameter(n.UNMASKED_RENDERER_WEBGL):null}else return null}async function detectGPUCardType(a){let n;if(a){if(detectBrowser()==="Firefox")return a.getParameter(a.RENDERER);if(n=a.getExtension("WEBGL_debug_renderer_info"),n){let s=a.getParameter(n.UNMASKED_RENDERER_WEBGL);return s.indexOf("(")>-1&&s.indexOf(")")>-1&&s.indexOf("(R)")===-1&&(s=s.substring(s.indexOf("(")+1,s.indexOf(")")),s.split(",").length===3)?s.split(",")[1].trim():s}}return null}async function getCPUNumCores(){return navigator.hardwareConcurrency}async function isChrome(){return/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor)}async function localSystemDetails(a,n=null){const s=new Date;if(a.isModelFullVol?a.Brainchop_Ver="FullVolume":a.Brainchop_Ver="SubVolumes",a.Total_t=(Date.now()-a.startTime)/1e3,delete a.startTime,a.Date=parseInt(s.getMonth()+1)+"/"+s.getDate()+"/"+s.getFullYear(),a.Browser=await detectBrowser(),a.Browser_Ver=await detectBrowserVersion(),a.OS=await detectOperatingSys(),a.WebGL2=await checkWebGl2(n),a.GPU_Vendor=await detectGPUVendor(n),a.GPU_Card=await detectGPUCardType(n),a.GPU_Vendor_Full=await detectGPUVendor_v0(n),a.GPU_Card_Full=await detectGPUCardType_v0(n),a.CPU_Cores=await getCPUNumCores(),a.Which_Brainchop="latest",await 
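// --- The helpers above identify the browser by userAgent substring; the test order
// --- matters ("OPR/" and "Edg/" before "Chrome/", and "Chrome/" before "Safari/",
// --- because Chromium-based UAs also contain the Safari token). GPU details come
// --- from the WEBGL_debug_renderer_info extension; a minimal sketch, assuming `gl`
// --- is a WebGL2RenderingContext:
function gpuInfo(gl) {
  const ext = gl.getExtension('WEBGL_debug_renderer_info')
  if (!ext) return null // masked by some browsers; the bundle falls back to gl.RENDERER on Firefox
  return {
    vendor: gl.getParameter(ext.UNMASKED_VENDOR_WEBGL), // e.g. "Google Inc. (NVIDIA)"
    renderer: gl.getParameter(ext.UNMASKED_RENDERER_WEBGL)
  }
}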
isChrome()&&(a.Heap_Size_MB=window.performance.memory.totalJSHeapSize/(1024*1024).toFixed(2),a.Used_Heap_MB=window.performance.memory.usedJSHeapSize/(1024*1024).toFixed(2),a.Heap_Limit_MB=window.performance.memory.jsHeapSizeLimit/(1024*1024).toFixed(2)),n){console.log("MAX_TEXTURE_SIZE :",n.getParameter(n.MAX_TEXTURE_SIZE)),console.log("MAX_RENDERBUFFER_SIZE :",n.getParameter(n.MAX_RENDERBUFFER_SIZE));const u=n.getExtension("WEBGL_debug_renderer_info");console.log("VENDOR WEBGL:",n.getParameter(u.UNMASKED_VENDOR_WEBGL)),a.Texture_Size=n.getParameter(n.MAX_TEXTURE_SIZE)}else a.Texture_Size=null;return a}function WorkerWrapper(a){return new Worker(""+new URL("brainchop-webworker-DDjdSg4F.js",import.meta.url).href,{name:a==null?void 0:a.name})}async function main(){dragMode.onchange=async function(){S.opts.dragMode=this.selectedIndex},drawDrop.onchange=async function(){if(S.volumes.length<2){window.alert("No segmentation open (use the Segmentation pull down)"),drawDrop.selectedIndex=-1;return}if(!S.drawBitmap){window.alert("No drawing (hint: use the Draw pull down to select a pen)"),drawDrop.selectedIndex=-1;return}const I=parseInt(this.value);if(I===0){S.drawUndo(),drawDrop.selectedIndex=-1;return}let e=S.volumes[1].img,F=await S.saveImage({filename:"",isSaveDrawing:!0});const M=352,L=F.length;if(I===1)for(let V=0;V0&&(e[V]=1);if(I===2)for(let V=0;V0&&(e[V]=0);S.closeDrawing(),S.updateGLVolume(),S.setDrawingEnabled(!1),penDrop.selectedIndex=-1,drawDrop.selectedIndex=-1},penDrop.onchange=async function(){const I=parseInt(this.value);S.setDrawingEnabled(I>=0),I>=0&&S.setPenValue(I&7,I>7)},aboutBtn.onclick=function(){window.alert("Drag and drop NIfTI images. Use pulldown menu to choose brainchop model")},diagnosticsBtn.onclick=function(){if(T.length<1){window.alert("No diagnostic string generated: run a model to create diagnostics");return}navigator.clipboard.writeText(T),window.alert(`Diagnostics copied to clipboard
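// --- One caveat in the heap fields of localSystemDetails above:
// --- `bytes / (1024*1024).toFixed(2)` binds .toFixed to the constant, so the
// --- division runs against the string "1048576.00" (coerced back to a number) and
// --- the quotient itself is never rounded. The presumably intended conversion:
const bytesToMB = (bytes) => +(bytes / (1024 * 1024)).toFixed(2)
// e.g. stats.Heap_Size_MB = bytesToMB(performance.memory.totalJSHeapSize)
// Note performance.memory is a non-standard Chrome-only API, hence the isChrome() guard.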
`+T)},opacitySlider0.oninput=function(){S.setOpacity(0,opacitySlider0.value/255),S.updateGLVolume()},opacitySlider1.oninput=function(){S.setOpacity(1,opacitySlider1.value/255)};async function a(){const I=S.volumes[0];let e=I.dims[1]===256&&I.dims[2]===256&&I.dims[3]===256;if((I.permRAS[0]!==-1||I.permRAS[1]!==3||I.permRAS[2]!==-2)&&(e=!1),e)return;const F=await S.conform(I,!1);await S.removeVolume(S.volumes[0]),await S.addVolume(F)}async function n(){for(;S.volumes.length>1;)await S.removeVolume(S.volumes[1])}modelSelect.onchange=async function(){this.selectedIndex<0&&(modelSelect.selectedIndex=11),await n(),await a();const I=inferenceModelsList[this.selectedIndex],e=brainChopOpts;if(e.rootURL=location.href,!!(window.location.hostname==="localhost"||window.location.hostname==="[::1]"||window.location.hostname.match(/^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/))&&(e.rootURL=location.protocol+"//"+location.host),workerCheck.checked){if(typeof E<"u"){console.log("Unable to start new segmentation: previous call has not completed");return}E=await new WorkerWrapper({type:"module"});const M={datatypeCode:S.volumes[0].hdr.datatypeCode,dims:S.volumes[0].hdr.dims},L={opts:e,modelEntry:I,niftiHeader:M,niftiImage:S.volumes[0].img};E.postMessage(L),E.onmessage=function(V){const B=V.data.cmd;B==="ui"&&(V.data.modalMessage!==""&&(E.terminate(),E=void 0),A(V.data.message,V.data.progressFrac,V.data.modalMessage,V.data.statData)),B==="img"&&(E.terminate(),E=void 0,f(V.data.img,V.data.opts,V.data.modelEntry))}}else runInference(e,I,S.volumes[0].hdr,S.volumes[0].img,f,A)},saveBtn.onclick=function(){S.volumes[1].saveToDisk("Custom.nii")},workerCheck.onchange=function(){modelSelect.onchange()},clipCheck.onchange=function(){clipCheck.checked?S.setClipPlane([0,0,90]):S.setClipPlane([2,0,90])};function s(){opacitySlider0.oninput()}async function u(I){return await(await fetch(I)).json()}async function f(I,e,F){n();const M=await S.volumes[0].clone();if(M.zeroImage(),M.hdr.scl_inter=0,M.hdr.scl_slope=1,M.img=new Uint8Array(I),F.colormapPath){const L=await u(F.colormapPath);M.setColormapLabel(L),M.hdr.intent_code=1002}else{let L=e.atlasSelectedColorTable.toLowerCase();S.colormaps().includes(L)||(L="actc"),M.colormap=L}M.opacity=opacitySlider1.value/255,await S.addVolume(M)}async function m(I){(typeof I=="string"||I instanceof String)&&(I=function(F){const M=JSON.parse(F),L=[];for(const V in M)L[V]=M[V];return L}(I)),I=await localSystemDetails(I,S.gl),T=`:: Diagnostics can help resolve issues https://github.com/neuroneural/brainchop/issues ::
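// --- modelSelect.onchange above either calls runInference inline or hands the job
// --- to a module web worker. A minimal sketch of that round trip; the payload shape
// --- and the cmd === 'ui' / cmd === 'img' message protocol follow the bundle, while
// --- reportProgress/showSegmentation and the surrounding variables are illustrative:
const worker = new Worker(new URL('brainchop-webworker-DDjdSg4F.js', import.meta.url), { type: 'module' })
worker.postMessage({ opts, modelEntry, niftiHeader, niftiImage }) // structured-cloned into the worker
worker.onmessage = (ev) => {
  if (ev.data.cmd === 'ui') reportProgress(ev.data.message, ev.data.progressFrac) // progress / diagnostics
  if (ev.data.cmd === 'img') { worker.terminate(); showSegmentation(ev.data.img, ev.data.opts, ev.data.modelEntry) }
}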
`;for(const e in I)T+=e+": "+I[e]+`
-`}function A(I="",e=-1,F="",M=[]){I!==""&&(console.log(I),document.getElementById("location").innerHTML=I),isNaN(e)?(memstatus.style.color="red",memstatus.innerHTML="Memory Issue"):e>=0&&(modelProgress.value=e*modelProgress.max),F!==""&&window.alert(F),Object.keys(M).length>0&&m(M)}function v(I){document.getElementById("location").innerHTML=" "+I.string}const y={backColor:[.4,.4,.4,1],show3Dcrosshair:!0,onLocationChange:v};let T="",E;const S=new Niivue(y);S.attachToCanvas(gl1),S.opts.dragMode=S.dragModes.pan,S.opts.multiplanarForceRender=!0,S.opts.yoke3Dto2DZoom=!0,S.opts.crosshairGap=11,smoothCheck.onchange(),await S.loadVolumes([{url:"./t1_crop.nii.gz"}]);for(let I=0;I=0&&(modelProgress.value=e*modelProgress.max),F!==""&&window.alert(F),Object.keys(M).length>0&&m(M)}function v(I){document.getElementById("location").innerHTML=" "+I.string}const y={backColor:[.4,.4,.4,1],show3Dcrosshair:!0,onLocationChange:v};let T="",E;const S=new Niivue(y);S.attachToCanvas(gl1),S.opts.dragMode=S.dragModes.pan,S.opts.multiplanarForceRender=!0,S.opts.yoke3Dto2DZoom=!0,S.opts.crosshairGap=11,S.setInterpolation(!0),await S.loadVolumes([{url:"./t1_crop.nii.gz"}]);for(let I=0;I
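// --- The -/+ pair above carries the functional change in this hunk: at startup the
// --- old bundle fired smoothCheck.onchange(), while the new one calls
// --- S.setInterpolation(!0) directly; in Niivue, setInterpolation(true) selects
// --- nearest-neighbor sampling, which keeps segmentation label edges crisp. A hedged
// --- sketch of the checkbox wiring this bypasses, assuming `nv` is the Niivue instance:
smoothCheck.onchange = function () {
  nv.setInterpolation(!smoothCheck.checked) // checked => linear, unchecked => nearest neighbor
}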
     <title>Niivue brain chop</title>
-    <script type="module" src="./assets/index-CkRQFQ50.js"></script>
+    <script type="module" src="./assets/index-D4okLGuZ.js"></script>