diff --git a/assets/index-Bs1I3iM_.js b/assets/index-mU6FTJQb.js
similarity index 99%
rename from assets/index-Bs1I3iM_.js
rename to assets/index-mU6FTJQb.js
index a4f12e2..4a02c1b 100644
--- a/assets/index-Bs1I3iM_.js
+++ b/assets/index-mU6FTJQb.js
@@ -20842,4 +20842,4 @@
 [unchanged context: the tail of a packed-division WebGL shader string (`return a / b;`, DIV_PACKED) and an Apache-style license banner]
-[removed minified bundle line, part 1: `kernelConfigs`, an array of roughly 160 TensorFlow.js WebGL kernel configs (LRNConfig through zerosLikeConfig) registered in a loop via `registerKernel`; the `BWLabeler` class, a 3D connected-component labeler supporting 6-, 18- and 26-voxel connectivity, with an `idx()` linear-indexing helper, a `check_previous_slice()` that carries labels across z-slices, and a relabeling pass that appears to bail out with a "-Ooh no!!" log after 100 passes; and the opening of `inferenceModelsList` (models 1 through 6). Comparison operators and everything between them were eaten by HTML extraction of this diff, so the code in this span is not reconstructable verbatim.]
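The voxel-addressing arithmetic in `BWLabeler` survives the extraction intact, so it can be restated readably. A de-minified sketch (only `idx` is the bundle's name; the rest are renamed for clarity):

```js
// BWLabeler stores the volume as a flat typed array and maps a 3D
// coordinate to a linear offset in x-fastest order, dims = [nx, ny, nz].
// This is the bundle's idx(n, s, c, f) with readable names.
function idx(x, y, z, dims) {
  return z * dims[0] * dims[1] + y * dims[0] + x
}
// Example: in a 256x256x256 volume, voxel (1, 2, 3) sits at
// 3*256*256 + 2*256 + 1 = 197121.
```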
-[…part 2: the remainder of `inferenceModelsList`, 15 model configurations in all: Tissue GWM segmentation (ids 1 to 3), Subcortical + GWM 17-region atlases (ids 4 to 7, including a failsafe variant), Aparc+Aseg 50-class (ids 8 and 9) and 104-class (ids 14 and 15) FreeSurfer atlases, brain extraction (ids 10 and 11) and brain masking (ids 12 and 13). Low-memory variants set `enableSeqConv: true`, several models chain behind the masking pre-model (`preModelId: 1`), and GPU-heavy entries carry a "may need dedicated graphics card" warning. The list is followed by `checkZero` (zero-pads clock digits) and the user-agent probes `detectBrowser`, `detectBrowserVersion` and `detectOperatingSys`, plus the start of `checkWebGl2`.]
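Every entry in `inferenceModelsList` has the same shape. The field names below are verbatim from the bundle, with values taken from model id 3 as the example (truncated strings are left truncated):

```js
// One inferenceModelsList entry (model id 3, "Tissue GWM (High Acc, Low Mem)").
const exampleModel = {
  id: 3,
  type: 'Segmentation',          // or 'Atlas', 'Brain_Extraction', 'Brain_Masking'
  path: './models/model20chan3cls/model.json',
  modelName: '🔪 Tissue GWM (High Acc, Low Mem)',
  labelsPath: './models/model20chan3cls/labels.json',
  colorsPath: './models/model20chan3cls/colorLUT.json',
  preModelId: null,              // id of a masking pre-model to run first, or null
  preModelPostProcess: false,
  isBatchOverlapEnable: false,
  numOverlapBatches: 0,
  enableTranspose: true,         // transpose input before inference
  enableCrop: true,              // bounding-box crop around tissue
  cropPadding: 0,
  autoThreshold: 0.2,            // fraction of max intensity; 0 disables
  enableQuantileNorm: true,      // else min-max normalization
  filterOutWithPreMask: false,
  enableSeqConv: true,           // channel-at-a-time final conv (low memory)
  textureSize: 0,
  warning: 'This model may need dedicated graphics card. …',
  inferenceDelay: 100,           // ms between layer applications
  description: 'Gray and white matter segmentation model. …'
}
```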
-[…part 3: GPU probes `detectGPUVendor` and `detectGPUCardType` (plus `_v0` variants) via the WEBGL_debug_renderer_info extension, `getCPUNumCores`, `isChrome`, and a `submitTiming2GoogleSheet` stub that only logs "Telemetry not yet supported"; model introspection (`getModelNumParameters`, `getModelNumLayers`); tensor utilities `calculateQuantiles` (sort the voxel values, pick the entries at the requested quantile ranks), `quantileNormalizeVolumeData`, `minMaxNormalizeVolumeData` and `findArrayMax`; a family of stubs that alert "… is not dead code?" (`inferenceFullVolume`, `inferenceSubVolumes`, `tensor2LightBuffer`, `draw3dObjBoundingVolume`, `argMaxLarge`, …); and 3D zero-padding, padding-removal and resize helpers.]
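The quantile normalization behind `enableQuantileNorm` is simple enough to restate. A minimal de-minified sketch, assuming TensorFlow.js and omitting the bundle's tensor-disposal bookkeeping (the 0.05/0.95 defaults match the bundle):

```js
import * as tf from '@tensorflow/tfjs'

// Rescale a volume so its 5th-95th percentile range maps onto [0, 1];
// this mirrors calculateQuantiles + quantileNormalizeVolumeData.
async function quantileNormalize(volume, qlo = 0.05, qhi = 0.95) {
  const data = Array.from(await volume.data()).sort((a, b) => a - b)
  const qmin = data[Math.floor(data.length * qlo)]
  const qmax = data[Math.ceil(data.length * qhi) - 1]
  return tf.tidy(() => volume.sub(qmin).div(qmax - qmin))
}
```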
-[…part 4: `applyMriThreshold` (keep voxels brighter than max · threshold), `binarizeVolumeDataTensor`, `generateBrainMask`, which in this removed version accumulates the flattened mask into a plain `[]` (the `+` side changes exactly this, see below); chunked 3D convolution helpers (`processTensorInChunks` and a concat-based sequential variant); the `SequentialConvLayer` class, which evaluates the network's final convolution one output channel at a time and keeps only a running per-voxel max and argmax; and `generateOutputSlicesV2`, which post-processes the label volume with `BWLabeler` at 26-connectivity.]
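That channel-at-a-time evaluation is the memory trick behind every "Low Mem" model above: the full [D, H, W, numClasses] logits tensor never materializes. A minimal sketch of the idea, assuming TensorFlow.js and channel-last layout; identifiers are mine, and the shipped class additionally chunks over input channels and paces iterations on a timer so the UI can repaint:

```js
import * as tf from '@tensorflow/tfjs'

// Evaluate the final conv layer one output channel at a time, keeping
// only a running max logit and its channel index per voxel.
// input: [1, D, H, W, inC], kernel: [kd, kh, kw, inC, outC], bias: [outC]
async function seqArgmaxConv(input, kernel, bias, outChannels) {
  const shape = input.shape.slice(1, -1)          // [D, H, W]
  let best = tf.fill(shape, -1e4)                 // running max (bundle also seeds -1e4)
  let label = tf.zeros(shape)                     // running argmax
  for (let c = 0; c < outChannels; c++) {
    const [nextLabel, nextBest] = tf.tidy(() => {
      const w = kernel.slice([0, 0, 0, 0, c], [-1, -1, -1, -1, 1])
      const b = bias.slice([c], [1])
      const logit = tf.conv3d(input, w, 1, 'same').add(b).squeeze()
      const better = logit.greater(best)
      return [tf.where(better, tf.fill(shape, c), label),
              tf.where(better, logit, best)]
    })
    tf.dispose([best, label])
    label = nextLabel
    best = nextBest
    await tf.nextFrame()                          // yield to the browser
  }
  best.dispose()
  return label                                    // per-voxel class index
}
```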
valid crop threshold value"),e=await c.greater([0]).asType("bool"))}else e=await v.greater([0]).asType("bool");console.log(" mask_3d shape : ",e.shape);const N=await whereAsync(e);e.dispose();const M=N.arraySync();let O=g,z=0,B=A,V=0,b=f,W=0;for(let ee=0;eeM[ee][0]?O=M[ee][0]:zM[ee][1]?B=M[ee][1]:VM[ee][2]?b=M[ee][2]:W{Tt=await ft.apply(ye[Se])});if(console.log("profileInfo : ",Lt),dispose(ye[Se]),console.log(" Output tensor",Tt),console.log(" Output tensor shape : ",Tt.shape),Tt.shape.length!==3){const be="Output tensor shape should be 3 dims but it is "+Tt.shape.length;y(be,-1,be)}const Ot=((performance.now()-ee)/1e3).toFixed(4);console.log(" find array max ");const Ft=await findArrayMax(Array.from(Tt.dataSync()));fehere'),memory().unreliable){const oe="unreliable reasons :"+memory().reasons;y(oe,NaN,oe)}}}async function inferenceFullVolumePhase2(u,n,s,c,f,g,A,v,y,T,I,S){let k=[];console.log(" ---- Start FullVolume inference phase-II ---- "),A.enableQuantileNorm?(console.log("preModel Quantile normalization enabled"),n=await quantileNormalizeVolumeData(n)):(console.log("preModel Min Max normalization enabled"),n=await minMaxNormalizeVolumeData(n));let N;if(g==null){const me=A.autoThreshold;me>0&&me<=1?N=await applyMriThreshold(n,me):(console.log("No valid crop threshold value"),N=await n.greater([0]).asType("bool"))}else N=g.greater([0]).asType("bool");console.log(" mask_3d shape : ",N.shape);const M=await whereAsync(N);N.dispose();const O=M.arraySync();let z=c,B=0,V=f,b=0,W=s,q=0;for(let me=0;meO[me][0]?z=O[me][0]:BO[me][1]?V=O[me][1]:bO[me][2]?W=O[me][2]:qhere')}}async function inferenceFullVolumePhase1(u,n,s,c,f,g,A,v,y,T,I,S){if(v.No_SubVolumes=1,A.preModelId){const k=await load_model(inferenceModelsList[A.preModelId-1].path),e=inferenceModelsList[A.preModelId-1].enableTranspose,N=inferenceModelsList[A.preModelId-1].enableQuantileNorm;let M=null;N?(console.log("preModel Quantile normalization enabled"),M=await quantileNormalizeVolumeData(n)):(console.log("preModel Min Max normalization enabled"),M=await minMaxNormalizeVolumeData(n)),e?(M=M.transpose(),console.log("Input transposed for pre-model")):console.log("Transpose not enabled for pre-model"),v.Brainchop_Ver="PreModel_FV";const O=await k;try{const z=performance.now(),B=O,V=B.layers[0].batchInputShape;if(console.log(" Pre-Model batch input shape : ",V),V.length!==5){const Se="The pre-model input shape must be 5D ";return I(Se,-1,Se),0}const b=isModelChnlLast(B),W=y.batchSize,q=y.numOfChan;let Y,X,Q,m;if(b){if(console.log("Pre-Model Channel Last"),isNaN(V[4])||V[4]!==1){const Se="The number of channels for pre-model input shape must be 1";return I(Se,-1,Se),0}Y=V[1],X=V[2],Q=V[3],m=[W,Y,X,Q,q]}else{if(console.log("Pre-Model Channel First"),isNaN(V[1])||V[1]!==1){const Se="The number of channels for pre-model input shape must be 1";return I(Se,-1,Se),0}Y=V[2],X=V[3],Q=V[4],m=[W,q,Y,X,Q]}v.Input_Shape=JSON.stringify(m),v.Output_Shape=JSON.stringify(B.output.shape),v.Channel_Last=b,v.Model_Param=await getModelNumParameters(B),v.Model_Layers=await getModelNumLayers(B);let Z=0;const ee=inferenceModelsList[A.preModelId-1].inferenceDelay;let oe=1;const fe=O.layers.length,me=[];me[0]=M.reshape(m),dispose(M);const pe=window.setInterval(async function(){try{me[oe]=O.layers[oe].apply(me[oe-1])}catch(Se){return I(Se.message,-1,Se.message),window.clearInterval(pe),engine().endScope(),engine().disposeVariables(),v.Inference_t=1/0,v.Postprocess_t=1/0,v.Status="Fail",v.Error_Type=Se.message,v.Extra_Err_Info="PreModel Failed while model layer 
"+oe+" apply",y.telemetryFlag&&await submitTiming2GoogleSheet(v,I),0}if(O.layers[oe].dispose(),me[oe-1].dispose(),I("Layer "+oe.toString(),(oe+1)/fe),memory().unreliable){const Se="unreliable reasons :"+memory().reasons;I(Se,NaN,Se)}if(oe===fe-1){window.clearInterval(pe);const Se=b?-1:1;console.log(" find argmax "),console.log("last Tensor shape : ",me[oe].shape);const xe=b?me[oe].shape[4]:me[oe].shape[1];let we;try{console.log(" Try tf.argMax for fullVolume .."),we=await argMax$2(me[oe],Se)}catch(ft){if(Se===-1)try{const Tt=performance.now();console.log(" tf.argMax failed .. try argMaxLarge ..");const Lt=tensor2LightBuffer(me[oe].reshape([s,c,f,xe]),"float16");we=argMaxLarge(Lt,s,c,f,xe,"float16"),console.log("argMaxLarge for fullVolume takes : ",((performance.now()-Tt)/1e3).toFixed(4))}catch(Tt){const Lt="argMax buffer couldn't be created due to limited memory resources.";return I(Lt,-1,Lt),we.dispose(),window.clearInterval(pe),engine().endScope(),engine().disposeVariables(),v.Inference_t=1/0,v.Postprocess_t=1/0,v.Status="Fail",v.Error_Type=Tt.message,v.Extra_Err_Info="preModel prediction_argmax from argMaxLarge failed",y.telemetryFlag&&await submitTiming2GoogleSheet(v,I),0}else{const Tt="argMax buffer couldn't be created due to limited memory resources.";return I(Tt,-1,Tt),we.dispose(),window.clearInterval(pe),engine().endScope(),engine().disposeVariables(),v.Inference_t=1/0,v.Postprocess_t=1/0,v.Status="Fail",v.Error_Type=ft.message,v.Extra_Err_Info="preModel prediction_argmax from argMaxLarge not support yet channel first",y.telemetryFlag&&await submitTiming2GoogleSheet(v,I),0}}console.log(" Pre-model prediction_argmax shape : ",we.shape);const Fe=((performance.now()-z)/1e3).toFixed(4);dispose(me[oe]),console.log(" Pre-model find array max ");const De=await findArrayMax(Array.from(we.dataSync()));Zhere')}}else console.log("--- No pre-model is selected ---"),console.log("------ Run voxel cropping ------"),g?A.enableSeqConv?(console.log("------ Seq Convoluton ------"),await inferenceFullVolumeSeqCovLayerPhase2(y,A,u,n,s,c,f,null,I,T,v,S)):inferenceFullVolumePhase2(u,n,s,c,f,null,A,v,y,T,I,S):inferenceSubVolumes(u,n,s,c,f,null)}async function enableProductionMode(u=!0){await enableProdMode(),env().set("DEBUG",!1),env().set("WEBGL_FORCE_F16_TEXTURES",u),env().set("WEBGL_DELETE_TEXTURE_THRESHOLD",0),await ready(),console.log("tf env() flags :",env().flags),console.log("tf env() features :",env().features),console.log("tf env total features: ",Object.keys(env().features).length),console.log(getBackend())}async function runInference(u,n,s,c,f,g){g("Segmentation started",0);const A=performance.now(),v=u.batchSize,y=u.numOfChan;if(isNaN(v)||v!==1){const m="The batch Size for input shape must be 1";return g(m,-1,m),0}if(isNaN(y)||y!==1){const m="The number of channels for input shape must be 1";return g(m,-1,m),0}engine().startScope(),console.log("Batch size: ",v),console.log("Num of Channels: ",y);const T=await load_model(n.path);await enableProductionMode(!0);const I=T;let S=[];if(S=I.layers[0].batchInputShape,console.log(" Model batch input shape : ",S),S.length!==5){const m="The model input shape must be 5D";return g(m,-1,m),0}let k,e,N;const M=s.dims[1],O=s.dims[2],z=s.dims[3],B=await isModelChnlLast(I);if(B){if(console.log("Model Channel Last"),isNaN(S[4])||S[4]!==1){const m="The number of channels for input shape must be 1";return g(m,-1,m),0}k=S[1],e=S[2],N=S[3]}else{if(console.log("Model Channel First"),isNaN(S[1])||S[1]!==1){const m="The number of channels for input shape must be 
1";return g(m,-1,m),0}k=S[2],e=S[3],N=S[4]}let V;k===256&&e===256&&N===256?V=!0:V=!1;let b=await getAllSlicesData1D(z,s,c);const W=await getAllSlices2D(b,O,M);b=null;let q=await getSlices3D(W);dispose(W);const Y=[];if(u.telemetryFlag){const m=((performance.now()-A)/1e3).toFixed(4),Z=new Date;V?Y.Brainchop_Ver="FullVolume":Y.Brainchop_Ver="SubVolumes",Y.Date=parseInt(Z.getMonth()+1)+"/"+Z.getDate()+"/"+Z.getFullYear(),Y.Time=await checkZero(Z.getHours())+":"+checkZero(Z.getMinutes())+":"+checkZero(Z.getSeconds()),Y.Input_Shape=JSON.stringify(S),Y.Output_Shape=JSON.stringify(I.output.shape),Y.Channel_Last=B,Y.Model_Param=await getModelNumParameters(I),Y.Model_Layers=await getModelNumLayers(I),Y.Preprocess_t=m,Y.Model=n.modelName,Y.Browser=await detectBrowser(),Y.Browser_Ver=await detectBrowserVersion(),Y.OS=await detectOperatingSys(),Y.WebGL2=await checkWebGl2(g),Y.GPU_Vendor=await detectGPUVendor(),Y.GPU_Card=await detectGPUCardType(),Y.GPU_Vendor_Full=await detectGPUVendor_v0(),Y.GPU_Card_Full=await detectGPUCardType_v0(),Y.CPU_Cores=await getCPUNumCores(),Y.TF_Backend=getBackend(),Y.Which_Brainchop="latest",Y.Seq_Conv=n.enableSeqConv,Y.Actual_Labels=1/0,Y.Expect_Labels=1/0,Y.NumLabels_Match=null,Y.Inference_t=1/0,Y.Merge_t=1/0,Y.Postprocess_t=1/0,Y.Status=null,Y.Error_Type=null,Y.Extra_Err_Info=null,Y.Extra_Info=null,isChrome()&&(Y.Heap_Size_MB=window.performance.memory.totalJSHeapSize/(1024*1024).toFixed(2),Y.Used_Heap_MB=window.performance.memory.usedJSHeapSize/(1024*1024).toFixed(2),Y.Heap_Limit_MB=window.performance.memory.jsHeapSizeLimit/(1024*1024).toFixed(2));const ee=checkWebGl2()?document.createElement("canvas").getContext("webgl2"):null;console.log("MAX_TEXTURE_SIZE :",ee.getParameter(ee.MAX_TEXTURE_SIZE)),console.log("MAX_RENDERBUFFER_SIZE :",ee.getParameter(ee.MAX_RENDERBUFFER_SIZE));const oe=ee.getExtension("WEBGL_debug_renderer_info");console.log("VENDOR WEBGL:",ee.getParameter(oe.UNMASKED_VENDOR_WEBGL)),ee?Y.Texture_Size=ee.getParameter(ee.MAX_TEXTURE_SIZE):Y.Texture_Size=null}const X=n.enableTranspose,Q=n.enableCrop;V&&(Q?await inferenceFullVolumePhase1(T,q,z,O,M,V,n,Y,u,f,g,c):(console.log("Cropping Disabled"),X?(q=q.transpose(),console.log("Input transposed")):console.log("Transpose NOT Enabled"),n.enableSeqConv?(console.log("Seq Convoluton Enabled"),await inferenceFullVolumeSeqCovLayer()):(console.log("Seq Convoluton Disabled"),await inferenceFullVolume())))}async function main(){let u={backColor:[.4,.4,.4,1],show3Dcrosshair:!0,onLocationChange:g},n=new Niivue(u);n.attachToCanvas(gl1),n.opts.dragMode=n.dragModes.pan,n.opts.multiplanarForceRender=!0,n.opts.yoke3Dto2DZoom=!0,await n.loadVolumes([{url:"./t1_crop.nii.gz"}]),aboutBtn.onclick=function(){window.alert("BrainChop models https://github.com/neuroneural/brainchop")},opacitySlider.oninput=function(){n.setOpacity(1,opacitySlider.value/255)};async function s(){let v=n.volumes[0],y=v.dims[1]===256&&v.dims[2]===256&&v.dims[3]===256;if((v.permRAS[0]!==-1||v.permRAS[1]!==3||v.permRAS[2]!==-2)&&(y=!1),y)return;let T=await n.conform(v,!1);n.removeVolume(n.volumes[0]),n.addVolume(T)}modelSelect.onchange=async function(){await s();let v=inferenceModelsList[this.selectedIndex];runInference(brainChopOpts,v,n.volumes[0].hdr,n.volumes[0].img,c,f)},saveBtn.onclick=function(){n.volumes[1].saveToDisk("Custom.nii")};async function c(v,y,T){for(;n.volumes.length>1;)n.removeVolume(n.volumes[1]);let I=await n.volumes[0].clone();I.zeroImage(),I.hdr.scl_inter=0,I.hdr.scl_slope=1,I.img=new Uint8Array(v);let 
-[…part 6, end of the removed line: the rest of `main()`: an overlay callback that clones `volumes[0]`, writes the label bytes, and picks a colormap (falling back to "actc", or "random" for Atlas models, when the requested table is unknown to Niivue); a progress/memory callback that paints "Memory Issue" in red on the NaN path; `saveBtn` export via `saveToDisk("Custom.nii")`; and the loop that populates the model menu.]
+[added minified bundle line: byte-for-byte the same application code as the removed line, except that `generateBrainMask` now preallocates its flattened output, `new Array(y[0].length*y.length)`, and fills it through a running offset instead of growing a plain `[]` by push; the minifier renamed nearby loop variables as a side effect. See the sketch after the diff. The added line's tail (the rest of the model list, helpers, drivers and `main()`) repeats the removed line verbatim.]
v=inferenceModelsList[this.selectedIndex];runInference(brainChopOpts,v,n.volumes[0].hdr,n.volumes[0].img,c,f)},saveBtn.onclick=function(){n.volumes[1].saveToDisk("Custom.nii")};async function c(v,y,T){for(;n.volumes.length>1;)n.removeVolume(n.volumes[1]);let I=await n.volumes[0].clone();I.zeroImage(),I.hdr.scl_inter=0,I.hdr.scl_slope=1,I.img=new Uint8Array(v);let S=y.atlasSelectedColorTable.toLowerCase();n.colormaps().includes(S)||(S="actc",T.type==="Atlas"&&(S="random")),I.colormap=S,I.opacity=opacitySlider.value/255,n.addVolume(I)}function f(v="",y=-1,T=""){console.log(v),document.getElementById("location").innerHTML=v,isNaN(y)?(memstatus.style.color="red",memstatus.innerHTML="Memory Issue"):y>=0&&(modelProgress.value=y*modelProgress.max),T!==""&&window.alert(T)}function g(v){document.getElementById("location").innerHTML="  "+v.string}for(let v=0;v
[editor's note: the remainder of this hunk was lost to tag stripping. What followed was apparently the loop populating the model dropdown from inferenceModelsList, the close of main(), and the diff's index.html hunk, of which only the page title "Niivue brain chop" and the -/+ markers of its changed script-tag lines survive]
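// ---------------------------------------------------------------------------
// Editor's note (appendix): hedged, de-minified sketch of the overlay
// callback defined above (the bundle's `c`). It clones the background
// volume's header and geometry, zeroes the voxels, writes the predicted label
// bytes, then picks a colormap: the model's own LUT when Niivue knows it,
// otherwise "actc", or "random" for atlas-type models. showSegmentation,
// labelBytes, opts and modelEntry are assumed names; the Niivue calls and
// opacitySlider come straight from the code above.
async function showSegmentation(nv, labelBytes, opts, modelEntry) {
  while (nv.volumes.length > 1) nv.removeVolume(nv.volumes[1]) // drop old overlays
  const overlay = await nv.volumes[0].clone() // same dims/affine as the T1
  overlay.zeroImage()
  overlay.hdr.scl_inter = 0 // labels are raw integers: disable NIfTI scaling
  overlay.hdr.scl_slope = 1
  overlay.img = new Uint8Array(labelBytes)
  let cmap = opts.atlasSelectedColorTable.toLowerCase()
  if (!nv.colormaps().includes(cmap)) {
    cmap = modelEntry.type === 'Atlas' ? 'random' : 'actc'
  }
  overlay.colormap = cmap
  overlay.opacity = opacitySlider.value / 255
  nv.addVolume(overlay)
}
// ---------------------------------------------------------------------------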