diff --git a/assets/index-Aa9ag2VR.js b/assets/index-Xi1zeeR_.js
similarity index 99%
rename from assets/index-Aa9ag2VR.js
rename to assets/index-Xi1zeeR_.js
index 5b2627b..68f48fb 100644
--- a/assets/index-Aa9ag2VR.js
+++ b/assets/index-Xi1zeeR_.js
@@ -21956,7 +21956,7 @@ return a / b;`,DIV_PACKED=`
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * =============================================================================
- */const kernelConfigs=[_fusedMatMulConfig,absConfig,acosConfig,acoshConfig,addConfig,addNConfig,allConfig,anyConfig,argMaxConfig,argMinConfig,asinConfig,asinhConfig,atanConfig,atan2Config,atanhConfig,avgPoolConfig,avgPool3DConfig,avgPool3DGradConfig,avgPoolGradConfig,batchMatMulConfig,batchNormConfig,batchToSpaceNDConfig,bincountConfig,bitwiseAndConfig,broadcastArgsConfig,castConfig,ceilConfig,clipByValueConfig,complexConfig,complexAbsConfig,concatConfig,conv2DConfig,conv2DBackpropFilterConfig,conv2DBackpropInputConfig,conv3DConfig,conv3DBackpropFilterV2Config,conv3DBackpropInputConfig,cosConfig,coshConfig,cropAndResizeConfig,cumprodConfig,cumsumConfig,denseBincountConfig,depthToSpaceConfig,depthwiseConv2dNativeConfig,depthwiseConv2dNativeBackpropFilterConfig,depthwiseConv2dNativeBackpropInputConfig,diagConfig,dilation2DConfig,einsumConfig,eluConfig,eluGradConfig,equalConfig,erfConfig,expConfig,expandDimsConfig,expm1Config,fftConfig,fillConfig,flipLeftRightConfig,floorConfig,floorDivConfig,fromPixelsConfig,fusedConv2DConfig,fusedDepthwiseConv2DConfig,gatherNdConfig,gatherV2Config,greaterConfig,greaterEqualConfig,identityConfig,ifftConfig,imagConfig,isFiniteConfig,isInfConfig,isNaNConfig,leakyReluConfig,lessConfig,lessEqualConfig,linSpaceConfig,logConfig,log1pConfig,logicalAndConfig,logicalNotConfig,logicalOrConfig,LRNConfig,LRNGradConfig,maxConfig,maximumConfig,maxPoolConfig,maxPool3DConfig,maxPool3DGradConfig,maxPoolGradConfig,maxPoolWithArgmaxConfig,meanConfig,minConfig,minimumConfig,mirrorPadConfig,modConfig,multinomialConfig,multiplyConfig,negConfig,nonMaxSuppressionV3Config,nonMaxSuppressionV4Config,nonMaxSuppressionV5Config,notEqualConfig,oneHotConfig,onesLikeConfig,packConfig,padV2Config,powConfig,preluConfig,prodConfig,raggedGatherConfig,raggedRangeConfig,raggedTensorToTensorConfig,rangeConfig,realConfig,realDivConfig,reciprocalConfig,reluConfig,relu6Config,reshapeConfig,resizeBilinearConfig,resizeBilinearGradConfig,resizeNearestNeighborConfig,resizeNearestNeighborGradConfig,reverseConfig,rotateWithOffsetConfig,roundConfig,rsqrtConfig,scatterNdConfig,searchSortedConfig,selectConfig,seluConfig,sigmoidConfig,signConfig,sinConfig,sinhConfig,sliceConfig,softmaxConfig,softplusConfig,spaceToBatchNDConfig,sparseFillEmptyRowsConfig,sparseReshapeConfig,sparseSegmentMeanConfig,sparseSegmentSumConfig,sparseToDenseConfig,splitVConfig,sqrtConfig,squareConfig,squaredDifferenceConfig,staticRegexReplaceConfig,stepConfig,stridedSliceConfig,stringNGramsConfig,stringSplitConfig,stringToHashBucketFastConfig,subConfig,sumConfig,tanConfig,tanhConfig,tensorScatterUpdateConfig,tileConfig,topKConfig,transformConfig,transposeConfig,uniqueConfig,unpackConfig,unsortedSegmentSumConfig,zerosLikeConfig];for(const a of kernelConfigs)registerKernel(a);class BWLabeler{idx(n,s,u,f){return u*f[0]*f[1]+s*f[0]+n}check_previous_slice(n,s,u,f,m,A,v,y,T,I){let S=0;if(!m)return 0;const E=n[this.idx(u,f,m,A)];if(v>=6){const e=this.idx(u,f,m-1,A);E===n[e]&&(T[S++]=s[e])}if(v>=18){if(u){const 
e=this.idx(u-1,f,m-1,A);E===n[e]&&(T[S++]=s[e])}if(f){const e=this.idx(u,f-1,m-1,A);E===n[e]&&(T[S++]=s[e])}if(u=6){if(F){const V=this.idx(F-1,e,E,s);L===n[V]&&(S[M++]=I[V])}if(e){const V=this.idx(F,e-1,E,s);L===n[V]&&(S[M++]=I[V])}}if(u>=18){if(e&&F){const V=this.idx(F-1,e-1,E,s);L===n[V]&&(S[M++]=I[V])}if(e&&F=y){y+=v;const V=new Uint32Array(y);V.set(T),T=V}T[A-1]=A,A++}}}for(let E=0;E.",inferenceDelay:100,description:"Gray and white matter segmentation model. Operates on full T1 image in a single pass but needs a dedicated graphics card to operate. Provides the best accuracy with hard cropping for better speed"},{id:3,type:"Segmentation",path:"/models/model20chan3cls/model.json",modelName:"🔪 Tissue GWM (High Acc, Low Mem)",labelsPath:"./models/model20chan3cls/labels.json",colorsPath:"./models/model20chan3cls/colorLUT.json",colormapPath:"./models/model20chan3cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Gray and white matter segmentation model. Operates on full T1 image in a single pass but needs a dedicated graphics card to operate. Provides high accuracy and fit low memory available but slower"},{id:4,type:"Atlas",path:"/models/model30chan18cls/model.json",modelName:"🪓 Subcortical + GWM (High Mem, Fast)",labelsPath:"./models/model30chan18cls/labels.json",colorsPath:"./models/model30chan18cls/colorLUT.json",colormapPath:"./models/model30chan18cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary."},{id:5,type:"Atlas",path:"/models/model30chan18cls/model.json",modelName:"🪓 Subcortical + GWM (Low Mem, Slow)",labelsPath:"./models/model30chan18cls/labels.json",colorsPath:"./models/model30chan18cls/colorLUT.json",colormapPath:"./models/model30chan18cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle range of data quality, including varying saturation, and even clinical scans. 
It may work on infant brains, but your mileage may vary."},{id:6,type:"Atlas",path:"/models/model18cls/model.json",modelName:"🪓 Subcortical + GWM (Low Mem, Faster)",labelsPath:"./models/model18cls/labels.json",colorsPath:"./models/model18cls/colorLUT.json",colormapPath:"./models/model18cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary."},{id:7,type:"Atlas",path:"/models/model30chan18cls/model.json",modelName:"🔪🪓 Subcortical + GWM (Failsafe, Less Acc)",labelsPath:"./models/model30chan18cls/labels.json",colorsPath:"./models/model30chan18cls/colorLUT.json",colormapPath:"./models/model30chan18cls/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is not a robust model, it may work on low data quality, including varying saturation, and even clinical scans. It may work also on infant brains, but your mileage may vary."},{id:8,type:"Atlas",path:"/models/model30chan50cls/model.json",modelName:"🔪 Aparc+Aseg 50 (High Mem, Fast)",labelsPath:"./models/model30chan50cls/labels.json",colorsPath:"./models/model30chan50cls/colorLUT.json",colormapPath:"./models/model30chan50cls/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"This is a 50-class model, that segments the brain into the Aparc+Aseg Freesurfer Atlas but one where cortical homologues are merged into a single class."},{id:9,type:"Atlas",path:"/models/model30chan50cls/model.json",modelName:"🔪 Aparc+Aseg 50 (Low Mem, Slow)",labelsPath:"./models/model30chan50cls/labels.json",colorsPath:"./models/model30chan50cls/colorLUT.json",colormapPath:"./models/model30chan50cls/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"This is a 50-class model, that segments the brain into the Aparc+Aseg Freesurfer Atlas but one where cortical homologues are merged into a single class. 
The model use sequential convolution for inference to overcome browser memory limitations but leads to longer computation time."},{id:10,type:"Brain_Extraction",path:"/models/model5_gw_ae/model.json",modelName:"⚡ Extract the Brain (FAST)",labelsPath:null,colorsPath:null,preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:18,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:null,inferenceDelay:100,description:"Extract the brain fast model operates on full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than the failsafe version."},{id:11,type:"Brain_Extraction",path:"/models/model11_gw_ae/model.json",modelName:"🔪 Extract the Brain (High Acc, Slow)",labelsPath:null,colorsPath:null,preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Extract the brain high accuracy model operates on full T1 image in a single pass, but uses only 11 filters per layer. Can work on dedicated graphics cards. Still more accurate than the fast version."},{id:12,type:"Brain_Masking",path:"/models/model5_gw_ae/model.json",modelName:"⚡ Brain Mask (FAST)",labelsPath:null,colorsPath:null,colormapPath:"./models/model5_gw_ae/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:17,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:null,inferenceDelay:100,description:"This fast masking model operates on full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than failsafe version."},{id:13,type:"Brain_Masking",path:"/models/model11_gw_ae/model.json",modelName:"🔪 Brain Mask (High Acc, Low Mem)",labelsPath:null,colorsPath:null,preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"This masking model operates on full T1 image in a single pass, but uses 11 filters per layer. Can work on dedicated graphics cards. Still more accurate than fast version."},{id:14,type:"Atlas",path:"/models/model21_104class/model.json",modelName:"🔪 Aparc+Aseg 104 (High Mem, Fast)",labelsPath:"./models/model21_104class/labels.json",colorsPath:"./models/model21_104class/colorLUT.json",colormapPath:"./models/model21_104class/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. 
For more info please check with Browser Resources .",inferenceDelay:100,description:"FreeSurfer aparc+aseg atlas 104 parcellate brain areas into 104 regions. It contains a combination of the Desikan-Killiany atlas for cortical area and also segmentation of subcortical regions."},{id:15,type:"Atlas",path:"/models/model21_104class/model.json",modelName:"🔪 Aparc+Aseg 104 (Low Mem, Slow)",labelsPath:"./models/model21_104class/labels.json",colorsPath:"./models/model21_104class/colorLUT.json",colormapPath:"./models/model21_104class/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"FreeSurfer aparc+aseg atlas 104 parcellate brain areas into 104 regions. It contains a combination of the Desikan-Killiany atlas for cortical area and also segmentation of subcortical regions. The model use sequential convolution for inference to overcome browser memory limitations but leads to longer computation time. "}];async function getModelNumParameters(a){let n=0;for(let s=0;se-F);const m=tensor1d(f),A=m.shape[0],v=Math.floor(A*n),y=Math.ceil(A*s)-1,T=m.slice(v,1),I=m.slice(y,1),S=(await T.array())[0],E=(await I.array())[0];return u.dispose(),m.dispose(),T.dispose(),I.dispose(),{qmin:S,qmax:E}}async function quantileNormalizeVolumeData(a,n=.05,s=.95){const{qmin:u,qmax:f}=await calculateQuantiles(a,n,s),m=scalar(u),A=scalar(f),v=a.sub(m).div(A.sub(m));return m.dispose(),A.dispose(),v}async function minMaxNormalizeVolumeData(a){const n=a.max(),s=a.min();return await a.sub(s).div(n.sub(s))}async function inferenceFullVolumeSeqCovLayer(a,n,s,u,f,m,A){window.alert("inferenceFullVolumeSeqCovLayer() is not dead code?")}async function inferenceFullVolume(a,n,s,u,f,m,A){window.alert("inferenceFullVolume() is not dead code?")}async function inferenceSubVolumes(a,n,s,u,f,m=null){window.alert("inferenceSubVolumes() is not dead code?")}async function tensor2LightBuffer(a,n){window.alert("tensor2LightBuffer() is not dead code?")}async function draw3dObjBoundingVolume(a){window.alert("draw3dObjBoundingVolume() is not dead code?")}async function argMaxLarge(a,n,s,u,f,m="float32"){window.alert("argMaxLarge() is not dead code?")}async function addZeroPaddingTo3dTensor(a,n=[1,1],s=[1,1],u=[1,1]){if(a.rank!==3)throw new Error("Tensor must be 3D");return a.pad([n,s,u])}async function removeZeroPaddingFrom3dTensor(a,n=1,s=1,u=1){if(a.rank!==3)throw new Error("Tensor must be 3D");let f,m,A;return[f,m,A]=a.shape,a.slice([n,s,u],[f-2*n,m-2*s,A-2*u])}async function resizeWithZeroPadding(a,n,s,u,f,m){const A=f[0],v=f[1],y=f[2],T=A+m[0]-1,I=v+m[1]-1,S=y+m[2]-1,E=s-T-1>0?s-T-1:0,e=u-I-1>0?u-I-1:0,F=n-S-1>0?n-S-1:0;return a.pad([[A,E],[v,e],[y,F]])}async function applyMriThreshold(a,n){const s=a.max(),u=s.mul(n),f=await u.data();return s.dispose(),u.dispose(),tidy(()=>a.clone().greater(f[0]))}async function binarizeVolumeDataTensor(a){return a.step(0)}async function generateBrainMask(a,n,s,u,f,m,A,v,y=!0){console.log("Generate Brain Masking ... ");let T=[];for(let F=0;F{const F="postProcessSlices3D() should be upgraded to BWLabeler";A(F,-1,F)}),console.log("Post processing done ")):console.log("Phase-1 Post processing disabled ... 
");const S=new Array(T[0].length*T.length);let E=0;for(let F=0;F{const z=a.slice([0,0,0,0,L],[-1,-1,-1,-1,V-L]),b=n.slice([0,0,0,L,I],[-1,-1,-1,V-L,1]);return conv3d(z,b,u,f,"NDHWC",m)});if(e===null)e=B;else{const z=e.add(B);e.dispose(),B.dispose(),e=z}}}const F=e.add(E);if(e.dispose(),E.dispose(),T==null)T=F;else{const M=await concat$2([T,F],4);F.dispose(),T.dispose(),T=M}}return T}function processTensorInChunks(a,n,s){const A=a.shape[4],v=Math.ceil(A/s);let y=null;for(let T=0;Ta.slice([0,0,0,0,I],[-1,-1,-1,-1,E])),F=tidy(()=>n.slice([0,0,0,I,0],[-1,-1,-1,E,-1])),M=conv3d(e,F,1,0,"NDHWC",1);e.dispose(),F.dispose();const L=squeeze(M);if(M.dispose(),y===null)y=L;else{const V=y.add(L);y.dispose(),y!==L&&L.dispose(),y=V}tidy(()=>{matMul$1(zeros$1([1,1]),zeros$1([1,1]))})}return y}class SequentialConvLayer{constructor(n,s,u,f){this.model=n,this.outChannels=n.outputLayers[0].kernel.shape[4],this.chunkSize=s,this.isChannelLast=u,this.callbackUI=f}async apply(n){const s=ENV$4.get("WEBGL_DELETE_TEXTURE_THRESHOLD");ENV$4.set("WEBGL_DELETE_TEXTURE_THRESHOLD",0);const u=this;return new Promise(f=>{const m=performance.now(),A=u.model.layers[u.model.layers.length-1],v=A.getWeights()[0],y=A.getWeights()[1],T=u.isChannelLast?n.shape.slice(1,-1):n.shape.slice(2);let I=mul(ones(T),-1e4),S=zeros$1(T),E=0;console.log(" channel loop");const e=window.setInterval(async function(){engine().startScope(),console.log("=======================");const F=await memory();console.log(`| Number of Tensors: ${F.numTensors}`),console.log(`| Number of Data Buffers: ${F.numDataBuffers}`),console.log("Channel : ",E);const M=await tidy(()=>{const V=v.slice([0,0,0,0,E],[-1,-1,-1,-1,1]),B=y.slice([E],[1]),z=processTensorInChunks(n,V,Math.min(u.chunkSize,u.outChannels)).add(B),b=greater$2(z,I),W=where(b,z,I),q=where(b,fill$2(S.shape,E),S);return dispose([I,S,V,B,z,b]),tidy(()=>matMul$1(ones([1,1]),ones([1,1]))),[q,W]});console.log("=======================");const L=await memory();if(u.callbackUI(`Iteration ${E}`,E/u.outChannels),console.log(`Number of Tensors: ${L.numTensors}`),console.log(`Number of Data Buffers: ${L.numDataBuffers}`),console.log(`Megabytes In Use: ${(L.numBytes/1048576).toFixed(3)} MB`),L.unreliable&&console.log(`Unreliable: ${L.unreliable}`),typeof S<"u"&&S.dispose(),typeof I<"u"&&I.dispose(),S=keep(M[0]),I=keep(M[1]),engine().endScope(),E===u.outChannels-1){window.clearInterval(e),dispose(I);const B=performance.now()-m;console.log(`Execution time for output layer: ${B} milliseconds`),ENV$4.set("WEBGL_DELETE_TEXTURE_THRESHOLD",s),f(S)}else{E++;const V=S.shape,B=S.dataSync(),z=S.shape,b=I.dataSync();S.dispose(),I.dispose(),S=tensor(B,V),I=tensor(b,z)}await new Promise(V=>setTimeout(V,300))},0)})}}async function generateOutputSlicesV2(a,n,s,u,f,m,A,v,y,T){if(y.isPostProcessEnable){const E=new BWLabeler,e=new Uint32Array(n),F=26,M=!0,L=!0,[V,B]=E.bwlabel(a,e,F,M,L);for(let z=0;z0&&re<=1?e=await applyMriThreshold(u,re):(console.log("No valid crop threshold value"),e=await u.greater([0]).asType("bool"))}else e=await v.greater([0]).asType("bool");console.log(" mask_3d shape : ",e.shape);const F=await whereAsync(e);e.dispose();const M=F.arraySync();let L=m,V=0,B=A,z=0,b=f,W=0;for(let re=0;reM[re][0]?L=M[re][0]:VM[re][1]?B=M[re][1]:zM[re][2]?b=M[re][2]:Where'),memory().unreliable){const ae="unreliable reasons :"+memory().reasons;y(ae,NaN,ae)}}}async function inferenceFullVolumePhase2(a,n,s,u,f,m,A,v,y,T,I,S){let E=[];console.log(" ---- Start FullVolume inference phase-II ---- 
"),A.enableQuantileNorm?(console.log("preModel Quantile normalization enabled"),n=await quantileNormalizeVolumeData(n)):(console.log("preModel Min Max normalization enabled"),n=await minMaxNormalizeVolumeData(n));let F;if(m==null){const pe=A.autoThreshold;pe>0&&pe<=1?F=await applyMriThreshold(n,pe):(console.log("No valid crop threshold value"),F=await n.greater([0]).asType("bool"))}else F=m.greater([0]).asType("bool");console.log(" mask_3d shape : ",F.shape);const M=await whereAsync(F);F.dispose();const L=M.arraySync();let V=u,B=0,z=f,b=0,W=s,q=0;for(let pe=0;peL[pe][0]?V=L[pe][0]:BL[pe][1]?z=L[pe][1]:bL[pe][2]?W=L[pe][2]:qhere')}}async function inferenceFullVolumePhase1(a,n,s,u,f,m,A,v,y,T,I,S){if(v.No_SubVolumes=1,A.preModelId){const E=await load_model(y.rootURL+inferenceModelsList[A.preModelId-1].path),e=inferenceModelsList[A.preModelId-1].enableTranspose,F=inferenceModelsList[A.preModelId-1].enableQuantileNorm;let M=null;F?(console.log("preModel Quantile normalization enabled"),M=await quantileNormalizeVolumeData(n)):(console.log("preModel Min Max normalization enabled"),M=await minMaxNormalizeVolumeData(n)),e?(M=await M.transpose(),console.log("Input transposed for pre-model")):console.log("Transpose not enabled for pre-model"),v.Brainchop_Ver="PreModel_FV";const L=await E;try{const V=performance.now(),B=L,z=B.layers[0].batchInputShape;if(console.log(" Pre-Model batch input shape : ",z),z.length!==5){const Se="The pre-model input shape must be 5D ";return I(Se,-1,Se),0}const b=isModelChnlLast(B),W=y.batchSize,q=y.numOfChan;let Y,X,H,g;if(b){if(console.log("Pre-Model Channel Last"),isNaN(z[4])||z[4]!==1){const Se="The number of channels for pre-model input shape must be 1";return I(Se,-1,Se),0}Y=z[1],X=z[2],H=z[3],g=[W,Y,X,H,q]}else{if(console.log("Pre-Model Channel First"),isNaN(z[1])||z[1]!==1){const Se="The number of channels for pre-model input shape must be 1";return I(Se,-1,Se),0}Y=z[2],X=z[3],H=z[4],g=[W,q,Y,X,H]}v.Input_Shape=JSON.stringify(g),v.Output_Shape=JSON.stringify(B.output.shape),v.Channel_Last=b,v.Model_Param=await getModelNumParameters(B),v.Model_Layers=await getModelNumLayers(B);let J=0;const re=inferenceModelsList[A.preModelId-1].inferenceDelay;let ae=1;const fe=L.layers.length,pe=[];pe[0]=M.reshape(g),dispose(M);const me=window.setInterval(async function(){try{pe[ae]=L.layers[ae].apply(pe[ae-1])}catch(Se){const ve="Your graphics card (e.g. Intel) may not be compatible with WebGL. "+Se.message;return I(ve,-1,ve),window.clearInterval(me),engine().endScope(),engine().disposeVariables(),v.Inference_t=1/0,v.Postprocess_t=1/0,v.Status="Fail",v.Error_Type=Se.message,v.Extra_Err_Info="PreModel Failed while model layer "+ae+" apply",I("",-1,"",v),0}if(L.layers[ae].dispose(),pe[ae-1].dispose(),I("Layer "+ae.toString(),(ae+1)/fe),memory().unreliable){const Se="unreliable reasons :"+memory().reasons;I(Se,NaN,Se)}if(ae===fe-1){window.clearInterval(me);const Se=b?-1:1;console.log(" find argmax "),console.log("last Tensor shape : ",pe[ae].shape);const ve=b?pe[ae].shape[4]:pe[ae].shape[1];let we;try{console.log(" Try tf.argMax for fullVolume .."),we=await argMax$2(pe[ae],Se)}catch(pt){if(Se===-1)try{const It=performance.now();console.log(" tf.argMax failed .. 
try argMaxLarge ..");const Lt=tensor2LightBuffer(pe[ae].reshape([s,u,f,ve]),"float16");we=argMaxLarge(Lt,s,u,f,ve,"float16"),console.log("argMaxLarge for fullVolume takes : ",((performance.now()-It)/1e3).toFixed(4))}catch(It){const Lt="argMax buffer couldn't be created due to limited memory resources.";return I(Lt,-1,Lt),we.dispose(),window.clearInterval(me),engine().endScope(),engine().disposeVariables(),v.Inference_t=1/0,v.Postprocess_t=1/0,v.Status="Fail",v.Error_Type=It.message,v.Extra_Err_Info="preModel prediction_argmax from argMaxLarge failed",I("",-1,"",v),0}else{const It="argMax buffer couldn't be created due to limited memory resources.";return I(It,-1,It),we.dispose(),window.clearInterval(me),engine().endScope(),engine().disposeVariables(),v.Inference_t=1/0,v.Postprocess_t=1/0,v.Status="Fail",v.Error_Type=pt.message,v.Extra_Err_Info="preModel prediction_argmax from argMaxLarge not support yet channel first",I("",-1,"",v),0}}console.log(" Pre-model prediction_argmax shape : ",we.shape);const Ne=((performance.now()-V)/1e3).toFixed(4);dispose(pe[ae]),console.log(" Pre-model find array max ");const De=await we.max().dataSync()[0];Jhere')}}else console.log("--- No pre-model is selected ---"),console.log("------ Run voxel cropping ------"),m?A.enableSeqConv?(console.log("------ Seq Convoluton ------"),await inferenceFullVolumeSeqCovLayerPhase2(y,A,a,n,s,u,f,null,I,T,v,S)):inferenceFullVolumePhase2(a,n,s,u,f,null,A,v,y,T,I,S):inferenceSubVolumes(a,n,s,u,f,null)}async function enableProductionMode(a=!0){await enableProdMode(),env().set("DEBUG",!1),env().set("WEBGL_FORCE_F16_TEXTURES",a),env().set("WEBGL_DELETE_TEXTURE_THRESHOLD",0),await ready(),console.log("tf env() flags :",env().flags),console.log("tf env() features :",env().features),console.log("tf env total features: ",Object.keys(env().features).length),console.log(getBackend())}async function runInference(a,n,s,u,f,m){const A=[];A.startTime=Date.now(),m("Segmentation started",0),performance.now();const v=a.batchSize,y=a.numOfChan;if(isNaN(v)||v!==1){const H="The batch Size for input shape must be 1";return m(H,-1,H),0}if(isNaN(y)||y!==1){const H="The number of channels for input shape must be 1";return m(H,-1,H),0}engine().startScope(),console.log("Batch size: ",v),console.log("Num of Channels: ",y);const T=await load_model(a.rootURL+n.path);await enableProductionMode(!0),A.TF_Backend=getBackend();const I=T;let S=[];if(S=I.layers[0].batchInputShape,console.log(" Model batch input shape : ",S),S.length!==5){const H="The model input shape must be 5D";return m(H,-1,H),0}let E,e,F;const M=s.dims[1],L=s.dims[2],V=s.dims[3];if(await isModelChnlLast(I)){if(console.log("Model Channel Last"),isNaN(S[4])||S[4]!==1){const H="The number of channels for input shape must be 1";return m(H,-1,H),0}E=S[1],e=S[2],F=S[3]}else{if(console.log("Model Channel First"),isNaN(S[1])||S[1]!==1){const H="The number of channels for input shape must be 1";return m(H,-1,H),0}E=S[2],e=S[3],F=S[4]}let z;E===256&&e===256&&F===256?z=!0:z=!1,A.isModelFullVol=z;let b=await getAllSlicesData1D(V,s,u);const W=await getAllSlices2D(b,L,M);b=null;let q=await getSlices3D(W);dispose(W);const Y=n.enableTranspose,X=n.enableCrop;z&&(X?await inferenceFullVolumePhase1(T,q,V,L,M,z,n,A,a,f,m,u):(console.log("Cropping Disabled"),Y?(q=q.transpose(),console.log("Input transposed")):console.log("Transpose NOT Enabled"),n.enableSeqConv?(console.log("Seq Convoluton Enabled"),await inferenceFullVolumeSeqCovLayer()):(console.log("Seq Convoluton Disabled"),await 
inferenceFullVolume())))}async function detectBrowser(){return navigator.userAgent.indexOf("OPR/")>-1?"Opera":navigator.userAgent.indexOf("Edg/")>-1?"Edge":navigator.userAgent.indexOf("Falkon/")>-1?"Falkon":navigator.userAgent.indexOf("Chrome/")>-1?"Chrome":navigator.userAgent.indexOf("Firefox/")>-1?"Firefox":navigator.userAgent.indexOf("Safari/")>-1?"Safari":navigator.userAgent.indexOf("MSIE/")>-1||navigator.userAgent.indexOf("rv:")>-1?"IExplorer":"Unknown"}async function detectBrowserVersion(){return navigator.userAgent.indexOf("OPR/")>-1?parseInt(navigator.userAgent.split("OPR/")[1]):navigator.userAgent.indexOf("Edg/")>-1?parseInt(navigator.userAgent.split("Edg/")[1]):navigator.userAgent.indexOf("Falkon/")>-1?parseInt(navigator.userAgent.split("Falkon/")[1]):navigator.userAgent.indexOf("Chrome/")>-1?parseInt(navigator.userAgent.split("Chrome/")[1]):navigator.userAgent.indexOf("Firefox/")>-1?parseInt(navigator.userAgent.split("Firefox/")[1]):navigator.userAgent.indexOf("Safari/")>-1?parseInt(navigator.userAgent.split("Safari/")[1]):navigator.userAgent.indexOf("MSIE/")>-1||navigator.userAgent.indexOf("rv:")>-1?parseInt(navigator.userAgent.split("MSIE/")[1]):1/0}async function detectOperatingSys(){return navigator.userAgent.indexOf("Win")>-1?"Windows":navigator.userAgent.indexOf("Mac")>-1?"MacOS":navigator.userAgent.indexOf("Linux")>-1?"Linux":navigator.userAgent.indexOf("UNIX")>-1?"UNIX":"Unknown"}async function checkWebGl2(a){return a?(console.log("WebGl2 is enabled"),!0):(typeof WebGL2RenderingContext<"u"||console.log("WebGL2 is not supported"),!1)}async function detectGPUVendor(a){let n;if(a&&(n=a.getExtension("WEBGL_debug_renderer_info"),n)){const s=a.getParameter(n.UNMASKED_VENDOR_WEBGL);return s.indexOf("(")>-1&&s.indexOf(")")>-1?s.substring(s.indexOf("(")+1,s.indexOf(")")):s}return null}async function detectGPUVendor_v0(a){if(a){const n=a.getExtension("WEBGL_debug_renderer_info");return n?a.getParameter(n.UNMASKED_VENDOR_WEBGL):null}else return null}async function detectGPUCardType_v0(a){if(a){if(detectBrowser()==="Firefox")return a.getParameter(a.RENDERER);const n=a.getExtension("WEBGL_debug_renderer_info");return n?a.getParameter(n.UNMASKED_RENDERER_WEBGL):null}else return null}async function detectGPUCardType(a){let n;if(a){if(detectBrowser()==="Firefox")return a.getParameter(a.RENDERER);if(n=a.getExtension("WEBGL_debug_renderer_info"),n){let s=a.getParameter(n.UNMASKED_RENDERER_WEBGL);return s.indexOf("(")>-1&&s.indexOf(")")>-1&&s.indexOf("(R)")===-1&&(s=s.substring(s.indexOf("(")+1,s.indexOf(")")),s.split(",").length===3)?s.split(",")[1].trim():s}}return null}async function getCPUNumCores(){return navigator.hardwareConcurrency}async function isChrome(){return/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor)}async function localSystemDetails(a,n=null){const s=new Date;if(a.isModelFullVol?a.Brainchop_Ver="FullVolume":a.Brainchop_Ver="SubVolumes",a.Total_t=(Date.now()-a.startTime)/1e3,delete a.startTime,a.Date=parseInt(s.getMonth()+1)+"/"+s.getDate()+"/"+s.getFullYear(),a.Browser=await detectBrowser(),a.Browser_Ver=await detectBrowserVersion(),a.OS=await detectOperatingSys(),a.WebGL2=await checkWebGl2(n),a.GPU_Vendor=await detectGPUVendor(n),a.GPU_Card=await detectGPUCardType(n),a.GPU_Vendor_Full=await detectGPUVendor_v0(n),a.GPU_Card_Full=await detectGPUCardType_v0(n),a.CPU_Cores=await getCPUNumCores(),a.Which_Brainchop="latest",await 
isChrome()&&(a.Heap_Size_MB=window.performance.memory.totalJSHeapSize/(1024*1024).toFixed(2),a.Used_Heap_MB=window.performance.memory.usedJSHeapSize/(1024*1024).toFixed(2),a.Heap_Limit_MB=window.performance.memory.jsHeapSizeLimit/(1024*1024).toFixed(2)),n){console.log("MAX_TEXTURE_SIZE :",n.getParameter(n.MAX_TEXTURE_SIZE)),console.log("MAX_RENDERBUFFER_SIZE :",n.getParameter(n.MAX_RENDERBUFFER_SIZE));const u=n.getExtension("WEBGL_debug_renderer_info");console.log("VENDOR WEBGL:",n.getParameter(u.UNMASKED_VENDOR_WEBGL)),a.Texture_Size=n.getParameter(n.MAX_TEXTURE_SIZE)}else a.Texture_Size=null;return a}function WorkerWrapper(a){return new Worker(""+new URL("brainchop-webworker-CBK43U_A.js",import.meta.url).href,{name:a==null?void 0:a.name})}async function main(){smoothCheck.onchange=function(){I.setInterpolation(!smoothCheck.checked)},aboutBtn.onclick=function(){window.alert("BrainChop models https://github.com/neuroneural/brainchop")},diagnosticsBtn.onclick=function(){if(y.length<1){window.alert("No diagnostic string generated: run a model to create diagnostics");return}navigator.clipboard.writeText(y),window.alert(`Diagnostics copied to clipboard
-`+y)},opacitySlider0.oninput=function(){I.setOpacity(0,opacitySlider0.value/255),I.updateGLVolume()},opacitySlider1.oninput=function(){I.setOpacity(1,opacitySlider1.value/255)};async function a(){let E=I.volumes[0],e=E.dims[1]===256&&E.dims[2]===256&&E.dims[3]===256;if((E.permRAS[0]!==-1||E.permRAS[1]!==3||E.permRAS[2]!==-2)&&(e=!1),e)return;let F=await I.conform(E,!1);I.removeVolume(I.volumes[0]),I.addVolume(F)}async function n(){for(;I.volumes.length>1;)await I.removeVolume(I.volumes[1])}modelSelect.onchange=async function(){this.selectedIndex<0&&(modelSelect.selectedIndex=11),await n(),await a();let E=inferenceModelsList[this.selectedIndex],e=brainChopOpts;if(e.rootURL=location.href,!!(window.location.hostname==="localhost"||window.location.hostname==="[::1]"||window.location.hostname.match(/^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/))&&(e.rootURL=location.protocol+"//"+location.host),workerCheck.checked){if(typeof T<"u"){console.log("Unable to start new segmentation: previous call has not completed");return}T=await new WorkerWrapper({type:"module"});let M={datatypeCode:I.volumes[0].hdr.datatypeCode,dims:I.volumes[0].hdr.dims},L={opts:e,modelEntry:E,niftiHeader:M,niftiImage:I.volumes[0].img};T.postMessage(L),T.onmessage=function(V){let B=V.data.cmd;B==="ui"&&(V.data.modalMessage!==""&&(T.terminate(),T=void 0),m(V.data.message,V.data.progressFrac,V.data.modalMessage,V.data.statData)),B==="img"&&(T.terminate(),T=void 0,u(V.data.img,V.data.opts,V.data.modelEntry))}}else runInference(e,E,I.volumes[0].hdr,I.volumes[0].img,u,m)},saveBtn.onclick=function(){I.volumes[1].saveToDisk("Custom.nii")},sceneBtn.onclick=function(){I.saveDocument("niivue.brainchop.nvd")},workerCheck.onchange=function(){modelSelect.onchange()},clipCheck.onchange=function(){clipCheck.checked?I.setClipPlane([0,0,90]):I.setClipPlane([2,0,90])};async function s(E){return await(await fetch(E)).json()}async function u(E,e,F){n();let M=await I.volumes[0].clone();if(M.zeroImage(),M.hdr.scl_inter=0,M.hdr.scl_slope=1,M.img=new Uint8Array(E),F.colormapPath){let L=await s(F.colormapPath);M.setColormapLabel(L),M.hdr.intent_code=1002}else{let L=e.atlasSelectedColorTable.toLowerCase();I.colormaps().includes(L)||(L="actc"),M.colormap=L}M.opacity=opacitySlider1.value/255,await I.addVolume(M)}async function f(E){(typeof E=="string"||E instanceof String)&&(E=function(M){const 
L=JSON.parse(M),V=[];for(const B in L)V[B]=L[B];return V}(E)),E=await localSystemDetails(E,I.gl),y=`:: Diagnostics can help resolve issues https://github.com/neuroneural/brainchop/issues ::
-`;for(var e in E)y+=e+": "+E[e]+`
-`}function m(E="",e=-1,F="",M=[]){E!==""&&(console.log(E),document.getElementById("location").innerHTML=E),isNaN(e)?(memstatus.style.color="red",memstatus.innerHTML="Memory Issue"):e>=0&&(modelProgress.value=e*modelProgress.max),F!==""&&window.alert(F),Object.keys(M).length>0&&f(M)}function A(E){document.getElementById("location").innerHTML="  "+E.string}let v={backColor:[.4,.4,.4,1],show3Dcrosshair:!0,onLocationChange:A};var y="",T;let I=new Niivue(v);I.attachToCanvas(gl1),I.opts.dragMode=I.dragModes.pan,I.opts.multiplanarForceRender=!0,I.opts.yoke3Dto2DZoom=!0,I.opts.crosshairGap=11,smoothCheck.onchange(),await I.loadVolumes([{url:"./t1_crop.nii.gz"}]);for(let E=0;E=6){const e=this.idx(u,f,m-1,A);E===n[e]&&(T[S++]=s[e])}if(v>=18){if(u){const e=this.idx(u-1,f,m-1,A);E===n[e]&&(T[S++]=s[e])}if(f){const e=this.idx(u,f-1,m-1,A);E===n[e]&&(T[S++]=s[e])}if(u=6){if(F){const V=this.idx(F-1,e,E,s);L===n[V]&&(S[M++]=I[V])}if(e){const V=this.idx(F,e-1,E,s);L===n[V]&&(S[M++]=I[V])}}if(u>=18){if(e&&F){const V=this.idx(F-1,e-1,E,s);L===n[V]&&(S[M++]=I[V])}if(e&&F=y){y+=v;const V=new Uint32Array(y);V.set(T),T=V}T[A-1]=A,A++}}}for(let E=0;E.",inferenceDelay:100,description:"Gray and white matter segmentation model. Operates on full T1 image in a single pass but needs a dedicated graphics card to operate. Provides the best accuracy with hard cropping for better speed"},{id:3,type:"Segmentation",path:"/models/model20chan3cls/model.json",modelName:"🔪 Tissue GWM (High Acc, Low Mem)",labelsPath:"./models/model20chan3cls/labels.json",colorsPath:"./models/model20chan3cls/colorLUT.json",colormapPath:"./models/model20chan3cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Gray and white matter segmentation model. Operates on full T1 image in a single pass but needs a dedicated graphics card to operate. Provides high accuracy and fit low memory available but slower"},{id:4,type:"Atlas",path:"/models/model30chan18cls/model.json",modelName:"🪓 Subcortical + GWM (High Mem, Fast)",labelsPath:"./models/model30chan18cls/labels.json",colorsPath:"./models/model30chan18cls/colorLUT.json",colormapPath:"./models/model30chan18cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle range of data quality, including varying saturation, and even clinical scans. 
It may work on infant brains, but your mileage may vary."},{id:5,type:"Atlas",path:"/models/model30chan18cls/model.json",modelName:"🪓 Subcortical + GWM (Low Mem, Slow)",labelsPath:"./models/model30chan18cls/labels.json",colorsPath:"./models/model30chan18cls/colorLUT.json",colormapPath:"./models/model30chan18cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary."},{id:6,type:"Atlas",path:"/models/model18cls/model.json",modelName:"🪓 Subcortical + GWM (Low Mem, Faster)",labelsPath:"./models/model18cls/labels.json",colorsPath:"./models/model18cls/colorLUT.json",colormapPath:"./models/model18cls/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:.2,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is a robust model able to handle range of data quality, including varying saturation, and even clinical scans. It may work on infant brains, but your mileage may vary."},{id:7,type:"Atlas",path:"/models/model30chan18cls/model.json",modelName:"🔪🪓 Subcortical + GWM (Failsafe, Less Acc)",labelsPath:"./models/model30chan18cls/labels.json",colorsPath:"./models/model30chan18cls/colorLUT.json",colormapPath:"./models/model30chan18cls/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Parcellation of the brain into 17 regions: gray and white matter plus subcortical areas. This is not a robust model, it may work on low data quality, including varying saturation, and even clinical scans. It may work also on infant brains, but your mileage may vary."},{id:8,type:"Atlas",path:"/models/model30chan50cls/model.json",modelName:"🔪 Aparc+Aseg 50 (High Mem, Fast)",labelsPath:"./models/model30chan50cls/labels.json",colorsPath:"./models/model30chan50cls/colorLUT.json",colormapPath:"./models/model30chan50cls/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. 
For more info please check with Browser Resources .",inferenceDelay:100,description:"This is a 50-class model, that segments the brain into the Aparc+Aseg Freesurfer Atlas but one where cortical homologues are merged into a single class."},{id:9,type:"Atlas",path:"/models/model30chan50cls/model.json",modelName:"🔪 Aparc+Aseg 50 (Low Mem, Slow)",labelsPath:"./models/model30chan50cls/labels.json",colorsPath:"./models/model30chan50cls/colorLUT.json",colormapPath:"./models/model30chan50cls/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"This is a 50-class model, that segments the brain into the Aparc+Aseg Freesurfer Atlas but one where cortical homologues are merged into a single class. The model use sequential convolution for inference to overcome browser memory limitations but leads to longer computation time."},{id:10,type:"Brain_Extraction",path:"/models/model5_gw_ae/model.json",modelName:"⚡ Extract the Brain (FAST)",labelsPath:null,colorsPath:null,preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:18,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:null,inferenceDelay:100,description:"Extract the brain fast model operates on full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. Still more accurate than the failsafe version."},{id:11,type:"Brain_Extraction",path:"/models/model11_gw_ae/model.json",modelName:"🔪 Extract the Brain (High Acc, Slow)",labelsPath:null,colorsPath:null,preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"Extract the brain high accuracy model operates on full T1 image in a single pass, but uses only 11 filters per layer. Can work on dedicated graphics cards. Still more accurate than the fast version."},{id:12,type:"Brain_Masking",path:"/models/model5_gw_ae/model.json",modelName:"⚡ Brain Mask (FAST)",labelsPath:null,colorsPath:null,colormapPath:"./models/model5_gw_ae/colormap.json",preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:17,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:null,inferenceDelay:100,description:"This fast masking model operates on full T1 image in a single pass, but uses only 5 filters per layer. Can work on integrated graphics cards but is barely large enough to provide good accuracy. 
Still more accurate than failsafe version."},{id:13,type:"Brain_Masking",path:"/models/model11_gw_ae/model.json",modelName:"🔪 Brain Mask (High Acc, Low Mem)",labelsPath:null,colorsPath:null,preModelId:null,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:0,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!0,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"This masking model operates on full T1 image in a single pass, but uses 11 filters per layer. Can work on dedicated graphics cards. Still more accurate than fast version."},{id:14,type:"Atlas",path:"/models/model21_104class/model.json",modelName:"🔪 Aparc+Aseg 104 (High Mem, Fast)",labelsPath:"./models/model21_104class/labels.json",colorsPath:"./models/model21_104class/colorLUT.json",colormapPath:"./models/model21_104class/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!1,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"FreeSurfer aparc+aseg atlas 104 parcellate brain areas into 104 regions. It contains a combination of the Desikan-Killiany atlas for cortical area and also segmentation of subcortical regions."},{id:15,type:"Atlas",path:"/models/model21_104class/model.json",modelName:"🔪 Aparc+Aseg 104 (Low Mem, Slow)",labelsPath:"./models/model21_104class/labels.json",colorsPath:"./models/model21_104class/colorLUT.json",colormapPath:"./models/model21_104class/colormap.json",preModelId:1,preModelPostProcess:!1,isBatchOverlapEnable:!1,numOverlapBatches:200,enableTranspose:!0,enableCrop:!0,cropPadding:0,autoThreshold:0,enableQuantileNorm:!1,filterOutWithPreMask:!1,enableSeqConv:!0,textureSize:0,warning:"This model may need dedicated graphics card. For more info please check with Browser Resources .",inferenceDelay:100,description:"FreeSurfer aparc+aseg atlas 104 parcellate brain areas into 104 regions. It contains a combination of the Desikan-Killiany atlas for cortical area and also segmentation of subcortical regions. The model use sequential convolution for inference to overcome browser memory limitations but leads to longer computation time. 
"}];async function getModelNumParameters(a){let n=0;for(let s=0;se-F);const m=tensor1d(f),A=m.shape[0],v=Math.floor(A*n),y=Math.ceil(A*s)-1,T=m.slice(v,1),I=m.slice(y,1),S=(await T.array())[0],E=(await I.array())[0];return u.dispose(),m.dispose(),T.dispose(),I.dispose(),{qmin:S,qmax:E}}async function quantileNormalizeVolumeData(a,n=.05,s=.95){const{qmin:u,qmax:f}=await calculateQuantiles(a,n,s),m=scalar(u),A=scalar(f),v=a.sub(m).div(A.sub(m));return m.dispose(),A.dispose(),v}async function minMaxNormalizeVolumeData(a){const n=a.max(),s=a.min();return await a.sub(s).div(n.sub(s))}async function inferenceFullVolumeSeqCovLayer(a,n,s,u,f,m,A){window.alert("inferenceFullVolumeSeqCovLayer() is not dead code?")}async function inferenceFullVolume(a,n,s,u,f,m,A){window.alert("inferenceFullVolume() is not dead code?")}async function inferenceSubVolumes(a,n,s,u,f,m=null){window.alert("inferenceSubVolumes() is not dead code?")}async function tensor2LightBuffer(a,n){window.alert("tensor2LightBuffer() is not dead code?")}async function draw3dObjBoundingVolume(a){window.alert("draw3dObjBoundingVolume() is not dead code?")}async function argMaxLarge(a,n,s,u,f,m="float32"){window.alert("argMaxLarge() is not dead code?")}async function addZeroPaddingTo3dTensor(a,n=[1,1],s=[1,1],u=[1,1]){if(a.rank!==3)throw new Error("Tensor must be 3D");return a.pad([n,s,u])}async function removeZeroPaddingFrom3dTensor(a,n=1,s=1,u=1){if(a.rank!==3)throw new Error("Tensor must be 3D");let f,m,A;return[f,m,A]=a.shape,a.slice([n,s,u],[f-2*n,m-2*s,A-2*u])}async function resizeWithZeroPadding(a,n,s,u,f,m){const A=f[0],v=f[1],y=f[2],T=A+m[0]-1,I=v+m[1]-1,S=y+m[2]-1,E=s-T-1>0?s-T-1:0,e=u-I-1>0?u-I-1:0,F=n-S-1>0?n-S-1:0;return a.pad([[A,E],[v,e],[y,F]])}async function applyMriThreshold(a,n){const s=a.max(),u=s.mul(n),f=await u.data();return s.dispose(),u.dispose(),tidy(()=>a.clone().greater(f[0]))}async function binarizeVolumeDataTensor(a){return a.step(0)}async function generateBrainMask(a,n,s,u,f,m,A,v,y=!0){console.log("Generate Brain Masking ... ");let T=[];for(let F=0;F{const F="postProcessSlices3D() should be upgraded to BWLabeler";A(F,-1,F)}),console.log("Post processing done ")):console.log("Phase-1 Post processing disabled ... 
");const S=new Array(T[0].length*T.length);let E=0;for(let F=0;F{const z=a.slice([0,0,0,0,L],[-1,-1,-1,-1,V-L]),b=n.slice([0,0,0,L,I],[-1,-1,-1,V-L,1]);return conv3d(z,b,u,f,"NDHWC",m)});if(e===null)e=B;else{const z=e.add(B);e.dispose(),B.dispose(),e=z}}}const F=e.add(E);if(e.dispose(),E.dispose(),T==null)T=F;else{const M=await concat$2([T,F],4);F.dispose(),T.dispose(),T=M}}return T}function processTensorInChunks(a,n,s){const A=a.shape[4],v=Math.ceil(A/s);let y=null;for(let T=0;Ta.slice([0,0,0,0,I],[-1,-1,-1,-1,E])),F=tidy(()=>n.slice([0,0,0,I,0],[-1,-1,-1,E,-1])),M=conv3d(e,F,1,0,"NDHWC",1);e.dispose(),F.dispose();const L=squeeze(M);if(M.dispose(),y===null)y=L;else{const V=y.add(L);y.dispose(),y!==L&&L.dispose(),y=V}tidy(()=>{matMul$1(zeros$1([1,1]),zeros$1([1,1]))})}return y}class SequentialConvLayer{constructor(n,s,u,f){this.model=n,this.outChannels=n.outputLayers[0].kernel.shape[4],this.chunkSize=s,this.isChannelLast=u,this.callbackUI=f}async apply(n){const s=ENV$4.get("WEBGL_DELETE_TEXTURE_THRESHOLD");ENV$4.set("WEBGL_DELETE_TEXTURE_THRESHOLD",0);const u=this;return new Promise(f=>{const m=performance.now(),A=u.model.layers[u.model.layers.length-1],v=A.getWeights()[0],y=A.getWeights()[1],T=u.isChannelLast?n.shape.slice(1,-1):n.shape.slice(2);let I=mul(ones(T),-1e4),S=zeros$1(T),E=0;console.log(" channel loop");const e=window.setInterval(async function(){engine().startScope(),console.log("=======================");const F=await memory();console.log(`| Number of Tensors: ${F.numTensors}`),console.log(`| Number of Data Buffers: ${F.numDataBuffers}`),console.log("Channel : ",E);const M=await tidy(()=>{const V=v.slice([0,0,0,0,E],[-1,-1,-1,-1,1]),B=y.slice([E],[1]),z=processTensorInChunks(n,V,Math.min(u.chunkSize,u.outChannels)).add(B),b=greater$2(z,I),W=where(b,z,I),q=where(b,fill$2(S.shape,E),S);return dispose([I,S,V,B,z,b]),tidy(()=>matMul$1(ones([1,1]),ones([1,1]))),[q,W]});console.log("=======================");const L=await memory();if(u.callbackUI(`Iteration ${E}`,E/u.outChannels),console.log(`Number of Tensors: ${L.numTensors}`),console.log(`Number of Data Buffers: ${L.numDataBuffers}`),console.log(`Megabytes In Use: ${(L.numBytes/1048576).toFixed(3)} MB`),L.unreliable&&console.log(`Unreliable: ${L.unreliable}`),typeof S<"u"&&S.dispose(),typeof I<"u"&&I.dispose(),S=keep(M[0]),I=keep(M[1]),engine().endScope(),E===u.outChannels-1){window.clearInterval(e),dispose(I);const B=performance.now()-m;console.log(`Execution time for output layer: ${B} milliseconds`),ENV$4.set("WEBGL_DELETE_TEXTURE_THRESHOLD",s),f(S)}else{E++;const V=S.shape,B=S.dataSync(),z=S.shape,b=I.dataSync();S.dispose(),I.dispose(),S=tensor(B,V),I=tensor(b,z)}await new Promise(V=>setTimeout(V,300))},0)})}}async function generateOutputSlicesV2(a,n,s,u,f,m,A,v,y,T){if(y.isPostProcessEnable){const E=new BWLabeler,e=new Uint32Array(n),F=26,M=!0,L=!0,[V,B]=E.bwlabel(a,e,F,M,L);for(let z=0;z0&&re<=1?e=await applyMriThreshold(u,re):(console.log("No valid crop threshold value"),e=await u.greater([0]).asType("bool"))}else e=await v.greater([0]).asType("bool");console.log(" mask_3d shape : ",e.shape);const F=await whereAsync(e);e.dispose();const M=F.arraySync();let L=m,V=0,B=A,z=0,b=f,W=0;for(let re=0;reM[re][0]?L=M[re][0]:VM[re][1]?B=M[re][1]:zM[re][2]?b=M[re][2]:Where'),memory().unreliable){const ae="unreliable reasons :"+memory().reasons;y(ae,NaN,ae)}}}async function inferenceFullVolumePhase2(a,n,s,u,f,m,A,v,y,T,I,S){let E=[];console.log(" ---- Start FullVolume inference phase-II ---- 
"),A.enableQuantileNorm?(console.log("preModel Quantile normalization enabled"),n=await quantileNormalizeVolumeData(n)):(console.log("preModel Min Max normalization enabled"),n=await minMaxNormalizeVolumeData(n));let F;if(m==null){const pe=A.autoThreshold;pe>0&&pe<=1?F=await applyMriThreshold(n,pe):(console.log("No valid crop threshold value"),F=await n.greater([0]).asType("bool"))}else F=m.greater([0]).asType("bool");console.log(" mask_3d shape : ",F.shape);const M=await whereAsync(F);F.dispose();const L=M.arraySync();let V=u,B=0,z=f,b=0,W=s,q=0;for(let pe=0;peL[pe][0]?V=L[pe][0]:BL[pe][1]?z=L[pe][1]:bL[pe][2]?W=L[pe][2]:qhere')}}async function inferenceFullVolumePhase1(a,n,s,u,f,m,A,v,y,T,I,S){if(v.No_SubVolumes=1,A.preModelId){const E=await load_model(y.rootURL+inferenceModelsList[A.preModelId-1].path),e=inferenceModelsList[A.preModelId-1].enableTranspose,F=inferenceModelsList[A.preModelId-1].enableQuantileNorm;let M=null;F?(console.log("preModel Quantile normalization enabled"),M=await quantileNormalizeVolumeData(n)):(console.log("preModel Min Max normalization enabled"),M=await minMaxNormalizeVolumeData(n)),e?(M=await M.transpose(),console.log("Input transposed for pre-model")):console.log("Transpose not enabled for pre-model"),v.Brainchop_Ver="PreModel_FV";const L=await E;try{const V=performance.now(),B=L,z=B.layers[0].batchInputShape;if(console.log(" Pre-Model batch input shape : ",z),z.length!==5){const Se="The pre-model input shape must be 5D ";return I(Se,-1,Se),0}const b=isModelChnlLast(B),W=y.batchSize,q=y.numOfChan;let Y,X,H,g;if(b){if(console.log("Pre-Model Channel Last"),isNaN(z[4])||z[4]!==1){const Se="The number of channels for pre-model input shape must be 1";return I(Se,-1,Se),0}Y=z[1],X=z[2],H=z[3],g=[W,Y,X,H,q]}else{if(console.log("Pre-Model Channel First"),isNaN(z[1])||z[1]!==1){const Se="The number of channels for pre-model input shape must be 1";return I(Se,-1,Se),0}Y=z[2],X=z[3],H=z[4],g=[W,q,Y,X,H]}v.Input_Shape=JSON.stringify(g),v.Output_Shape=JSON.stringify(B.output.shape),v.Channel_Last=b,v.Model_Param=await getModelNumParameters(B),v.Model_Layers=await getModelNumLayers(B);let J=0;const re=inferenceModelsList[A.preModelId-1].inferenceDelay;let ae=1;const fe=L.layers.length,pe=[];pe[0]=M.reshape(g),dispose(M);const me=window.setInterval(async function(){try{pe[ae]=L.layers[ae].apply(pe[ae-1])}catch(Se){const ve="Your graphics card (e.g. Intel) may not be compatible with WebGL. "+Se.message;return I(ve,-1,ve),window.clearInterval(me),engine().endScope(),engine().disposeVariables(),v.Inference_t=1/0,v.Postprocess_t=1/0,v.Status="Fail",v.Error_Type=Se.message,v.Extra_Err_Info="PreModel Failed while model layer "+ae+" apply",I("",-1,"",v),0}if(L.layers[ae].dispose(),pe[ae-1].dispose(),I("Layer "+ae.toString(),(ae+1)/fe),memory().unreliable){const Se="unreliable reasons :"+memory().reasons;I(Se,NaN,Se)}if(ae===fe-1){window.clearInterval(me);const Se=b?-1:1;console.log(" find argmax "),console.log("last Tensor shape : ",pe[ae].shape);const ve=b?pe[ae].shape[4]:pe[ae].shape[1];let we;try{console.log(" Try tf.argMax for fullVolume .."),we=await argMax$2(pe[ae],Se)}catch(pt){if(Se===-1)try{const It=performance.now();console.log(" tf.argMax failed .. 
try argMaxLarge ..");const Lt=tensor2LightBuffer(pe[ae].reshape([s,u,f,ve]),"float16");we=argMaxLarge(Lt,s,u,f,ve,"float16"),console.log("argMaxLarge for fullVolume takes : ",((performance.now()-It)/1e3).toFixed(4))}catch(It){const Lt="argMax buffer couldn't be created due to limited memory resources.";return I(Lt,-1,Lt),we.dispose(),window.clearInterval(me),engine().endScope(),engine().disposeVariables(),v.Inference_t=1/0,v.Postprocess_t=1/0,v.Status="Fail",v.Error_Type=It.message,v.Extra_Err_Info="preModel prediction_argmax from argMaxLarge failed",I("",-1,"",v),0}else{const It="argMax buffer couldn't be created due to limited memory resources.";return I(It,-1,It),we.dispose(),window.clearInterval(me),engine().endScope(),engine().disposeVariables(),v.Inference_t=1/0,v.Postprocess_t=1/0,v.Status="Fail",v.Error_Type=pt.message,v.Extra_Err_Info="preModel prediction_argmax from argMaxLarge not support yet channel first",I("",-1,"",v),0}}console.log(" Pre-model prediction_argmax shape : ",we.shape);const Ne=((performance.now()-V)/1e3).toFixed(4);dispose(pe[ae]),console.log(" Pre-model find array max ");const De=await we.max().dataSync()[0];Jhere')}}else console.log("--- No pre-model is selected ---"),console.log("------ Run voxel cropping ------"),m?A.enableSeqConv?(console.log("------ Seq Convoluton ------"),await inferenceFullVolumeSeqCovLayerPhase2(y,A,a,n,s,u,f,null,I,T,v,S)):inferenceFullVolumePhase2(a,n,s,u,f,null,A,v,y,T,I,S):inferenceSubVolumes(a,n,s,u,f,null)}async function enableProductionMode(a=!0){await enableProdMode(),env().set("DEBUG",!1),env().set("WEBGL_FORCE_F16_TEXTURES",a),env().set("WEBGL_DELETE_TEXTURE_THRESHOLD",0),await ready(),console.log("tf env() flags :",env().flags),console.log("tf env() features :",env().features),console.log("tf env total features: ",Object.keys(env().features).length),console.log(getBackend())}async function runInference(a,n,s,u,f,m){const A=[];A.startTime=Date.now(),m("Segmentation started",0),performance.now();const v=a.batchSize,y=a.numOfChan;if(isNaN(v)||v!==1){const H="The batch Size for input shape must be 1";return m(H,-1,H),0}if(isNaN(y)||y!==1){const H="The number of channels for input shape must be 1";return m(H,-1,H),0}engine().startScope(),console.log("Batch size: ",v),console.log("Num of Channels: ",y);const T=await load_model(a.rootURL+n.path);await enableProductionMode(!0),A.TF_Backend=getBackend();const I=T;let S=[];if(S=I.layers[0].batchInputShape,console.log(" Model batch input shape : ",S),S.length!==5){const H="The model input shape must be 5D";return m(H,-1,H),0}let E,e,F;const M=s.dims[1],L=s.dims[2],V=s.dims[3];if(await isModelChnlLast(I)){if(console.log("Model Channel Last"),isNaN(S[4])||S[4]!==1){const H="The number of channels for input shape must be 1";return m(H,-1,H),0}E=S[1],e=S[2],F=S[3]}else{if(console.log("Model Channel First"),isNaN(S[1])||S[1]!==1){const H="The number of channels for input shape must be 1";return m(H,-1,H),0}E=S[2],e=S[3],F=S[4]}let z;E===256&&e===256&&F===256?z=!0:z=!1,A.isModelFullVol=z;let b=await getAllSlicesData1D(V,s,u);const W=await getAllSlices2D(b,L,M);b=null;let q=await getSlices3D(W);dispose(W);const Y=n.enableTranspose,X=n.enableCrop;z&&(X?await inferenceFullVolumePhase1(T,q,V,L,M,z,n,A,a,f,m,u):(console.log("Cropping Disabled"),Y?(q=q.transpose(),console.log("Input transposed")):console.log("Transpose NOT Enabled"),n.enableSeqConv?(console.log("Seq Convoluton Enabled"),await inferenceFullVolumeSeqCovLayer()):(console.log("Seq Convoluton Disabled"),await 
inferenceFullVolume())))}async function detectBrowser(){return navigator.userAgent.indexOf("OPR/")>-1?"Opera":navigator.userAgent.indexOf("Edg/")>-1?"Edge":navigator.userAgent.indexOf("Falkon/")>-1?"Falkon":navigator.userAgent.indexOf("Chrome/")>-1?"Chrome":navigator.userAgent.indexOf("Firefox/")>-1?"Firefox":navigator.userAgent.indexOf("Safari/")>-1?"Safari":navigator.userAgent.indexOf("MSIE/")>-1||navigator.userAgent.indexOf("rv:")>-1?"IExplorer":"Unknown"}async function detectBrowserVersion(){return navigator.userAgent.indexOf("OPR/")>-1?parseInt(navigator.userAgent.split("OPR/")[1]):navigator.userAgent.indexOf("Edg/")>-1?parseInt(navigator.userAgent.split("Edg/")[1]):navigator.userAgent.indexOf("Falkon/")>-1?parseInt(navigator.userAgent.split("Falkon/")[1]):navigator.userAgent.indexOf("Chrome/")>-1?parseInt(navigator.userAgent.split("Chrome/")[1]):navigator.userAgent.indexOf("Firefox/")>-1?parseInt(navigator.userAgent.split("Firefox/")[1]):navigator.userAgent.indexOf("Safari/")>-1?parseInt(navigator.userAgent.split("Safari/")[1]):navigator.userAgent.indexOf("MSIE/")>-1||navigator.userAgent.indexOf("rv:")>-1?parseInt(navigator.userAgent.split("MSIE/")[1]):1/0}async function detectOperatingSys(){return navigator.userAgent.indexOf("Win")>-1?"Windows":navigator.userAgent.indexOf("Mac")>-1?"MacOS":navigator.userAgent.indexOf("Linux")>-1?"Linux":navigator.userAgent.indexOf("UNIX")>-1?"UNIX":"Unknown"}async function checkWebGl2(a){return a?(console.log("WebGl2 is enabled"),!0):(typeof WebGL2RenderingContext<"u"||console.log("WebGL2 is not supported"),!1)}async function detectGPUVendor(a){let n;if(a&&(n=a.getExtension("WEBGL_debug_renderer_info"),n)){const s=a.getParameter(n.UNMASKED_VENDOR_WEBGL);return s.indexOf("(")>-1&&s.indexOf(")")>-1?s.substring(s.indexOf("(")+1,s.indexOf(")")):s}return null}async function detectGPUVendor_v0(a){if(a){const n=a.getExtension("WEBGL_debug_renderer_info");return n?a.getParameter(n.UNMASKED_VENDOR_WEBGL):null}else return null}async function detectGPUCardType_v0(a){if(a){if(detectBrowser()==="Firefox")return a.getParameter(a.RENDERER);const n=a.getExtension("WEBGL_debug_renderer_info");return n?a.getParameter(n.UNMASKED_RENDERER_WEBGL):null}else return null}async function detectGPUCardType(a){let n;if(a){if(detectBrowser()==="Firefox")return a.getParameter(a.RENDERER);if(n=a.getExtension("WEBGL_debug_renderer_info"),n){let s=a.getParameter(n.UNMASKED_RENDERER_WEBGL);return s.indexOf("(")>-1&&s.indexOf(")")>-1&&s.indexOf("(R)")===-1&&(s=s.substring(s.indexOf("(")+1,s.indexOf(")")),s.split(",").length===3)?s.split(",")[1].trim():s}}return null}async function getCPUNumCores(){return navigator.hardwareConcurrency}async function isChrome(){return/Chrome/.test(navigator.userAgent)&&/Google Inc/.test(navigator.vendor)}async function localSystemDetails(a,n=null){const s=new Date;if(a.isModelFullVol?a.Brainchop_Ver="FullVolume":a.Brainchop_Ver="SubVolumes",a.Total_t=(Date.now()-a.startTime)/1e3,delete a.startTime,a.Date=parseInt(s.getMonth()+1)+"/"+s.getDate()+"/"+s.getFullYear(),a.Browser=await detectBrowser(),a.Browser_Ver=await detectBrowserVersion(),a.OS=await detectOperatingSys(),a.WebGL2=await checkWebGl2(n),a.GPU_Vendor=await detectGPUVendor(n),a.GPU_Card=await detectGPUCardType(n),a.GPU_Vendor_Full=await detectGPUVendor_v0(n),a.GPU_Card_Full=await detectGPUCardType_v0(n),a.CPU_Cores=await getCPUNumCores(),a.Which_Brainchop="latest",await 
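/* The detect* helpers above all share one pattern: probe navigator.userAgent for a vendor token, ordered so that browsers embedding other engines' tokens are tested first (Opera and Edge user agents also contain "Chrome/", and Chrome's contains "Safari/"). A condensed sketch using the same token order; sketchDetectBrowser and sketchDetectVersion are illustrative names, and the legacy MSIE/rv: branch is omitted.

const UA_TOKENS = [
  ['OPR/', 'Opera'], ['Edg/', 'Edge'], ['Falkon/', 'Falkon'],
  ['Chrome/', 'Chrome'], ['Firefox/', 'Firefox'], ['Safari/', 'Safari']
]
function sketchDetectBrowser(ua = navigator.userAgent) {
  for (const [token, name] of UA_TOKENS) if (ua.indexOf(token) > -1) return name
  return 'Unknown'
}
function sketchDetectVersion(ua = navigator.userAgent) {
  // The major version follows the same token, e.g. "Chrome/124.0.0.0".
  for (const [token] of UA_TOKENS) if (ua.indexOf(token) > -1) return parseInt(ua.split(token)[1])
  return Infinity
}
*/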
isChrome()&&(a.Heap_Size_MB=(window.performance.memory.totalJSHeapSize/(1024*1024)).toFixed(2),a.Used_Heap_MB=(window.performance.memory.usedJSHeapSize/(1024*1024)).toFixed(2),a.Heap_Limit_MB=(window.performance.memory.jsHeapSizeLimit/(1024*1024)).toFixed(2)),n){console.log("MAX_TEXTURE_SIZE :",n.getParameter(n.MAX_TEXTURE_SIZE)),console.log("MAX_RENDERBUFFER_SIZE :",n.getParameter(n.MAX_RENDERBUFFER_SIZE));const u=n.getExtension("WEBGL_debug_renderer_info");console.log("VENDOR WEBGL:",n.getParameter(u.UNMASKED_VENDOR_WEBGL)),a.Texture_Size=n.getParameter(n.MAX_TEXTURE_SIZE)}else a.Texture_Size=null;return a}function WorkerWrapper(a){return new Worker(""+new URL("brainchop-webworker-CBK43U_A.js",import.meta.url).href,{name:a==null?void 0:a.name})}async function main(){smoothCheck.onchange=function(){S.setInterpolation(!smoothCheck.checked)},aboutBtn.onclick=function(){window.alert("BrainChop models https://github.com/neuroneural/brainchop")},diagnosticsBtn.onclick=function(){if(T.length<1){window.alert("No diagnostic string generated: run a model to create diagnostics");return}navigator.clipboard.writeText(T),window.alert(`Diagnostics copied to clipboard +`+T)},opacitySlider0.oninput=function(){S.setOpacity(0,opacitySlider0.value/255),S.updateGLVolume()},opacitySlider1.oninput=function(){S.setOpacity(1,opacitySlider1.value/255)};async function a(){let e=S.volumes[0],F=e.dims[1]===256&&e.dims[2]===256&&e.dims[3]===256;if((e.permRAS[0]!==-1||e.permRAS[1]!==3||e.permRAS[2]!==-2)&&(F=!1),F)return;let M=await S.conform(e,!1);S.removeVolume(S.volumes[0]),S.addVolume(M)}async function n(){for(;S.volumes.length>1;)await S.removeVolume(S.volumes[1])}modelSelect.onchange=async function(){this.selectedIndex<0&&(modelSelect.selectedIndex=11),await n(),await a();let e=inferenceModelsList[this.selectedIndex],F=brainChopOpts;if(F.rootURL=location.href,!!(window.location.hostname==="localhost"||window.location.hostname==="[::1]"||window.location.hostname.match(/^127(?:\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}$/))&&(F.rootURL=location.protocol+"//"+location.host),workerCheck.checked){if(typeof I<"u"){console.log("Unable to start new segmentation: previous call has not completed");return}I=await new WorkerWrapper({type:"module"});let L={datatypeCode:S.volumes[0].hdr.datatypeCode,dims:S.volumes[0].hdr.dims},V={opts:F,modelEntry:e,niftiHeader:L,niftiImage:S.volumes[0].img};I.postMessage(V),I.onmessage=function(B){let z=B.data.cmd;z==="ui"&&(B.data.modalMessage!==""&&(I.terminate(),I=void 0),A(B.data.message,B.data.progressFrac,B.data.modalMessage,B.data.statData)),z==="img"&&(I.terminate(),I=void 0,f(B.data.img,B.data.opts,B.data.modelEntry))}}else runInference(F,e,S.volumes[0].hdr,S.volumes[0].img,f,A)},saveBtn.onclick=function(){S.volumes[1].saveToDisk("Custom.nii")},sceneBtn.onclick=function(){S.saveDocument("niivue.brainchop.nvd")},workerCheck.onchange=function(){modelSelect.onchange()},clipCheck.onchange=function(){clipCheck.checked?S.setClipPlane([0,0,90]):S.setClipPlane([2,0,90])};function s(){opacitySlider0.oninput()}async function u(e){return await(await fetch(e)).json()}async function f(e,F,M){n();let L=await S.volumes[0].clone();if(L.zeroImage(),L.hdr.scl_inter=0,L.hdr.scl_slope=1,L.img=new Uint8Array(e),M.colormapPath){let V=await u(M.colormapPath);L.setColormapLabel(V),L.hdr.intent_code=1002}else{let V=F.atlasSelectedColorTable.toLowerCase();S.colormaps().includes(V)||(V="actc"),L.colormap=V}L.opacity=opacitySlider1.value/255,await S.addVolume(L)}async function m(e){(typeof e=="string"||e 
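/* The worker path above posts one segmentation job to a module worker and reacts to two message kinds: "ui" progress updates, where a non-empty modalMessage doubles as a fatal-error signal that tears the worker down, and a final "img" payload carrying the segmentation. A minimal sketch of that round-trip protocol, assuming opts, modelEntry, niftiHeader and niftiImage are in scope; reportProgress and showSegmentation stand in for the A and f callbacks above, while the worker file name and the message field names are the bundle's own.

const worker = new Worker(new URL('brainchop-webworker-CBK43U_A.js', import.meta.url), { name: 'brainchop' }) // options illustrative
worker.postMessage({ opts, modelEntry, niftiHeader, niftiImage })
worker.onmessage = (ev) => {
  if (ev.data.cmd === 'ui') {
    // Progress update; a non-empty modalMessage means a fatal error.
    if (ev.data.modalMessage !== '') worker.terminate()
    reportProgress(ev.data.message, ev.data.progressFrac, ev.data.modalMessage, ev.data.statData)
  } else if (ev.data.cmd === 'img') {
    worker.terminate()
    showSegmentation(ev.data.img, ev.data.opts, ev.data.modelEntry)
  }
}
*/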
instanceof String)&&(e=function(L){const V=JSON.parse(L),B=[];for(const z in V)B[z]=V[z];return B}(e)),e=await localSystemDetails(e,S.gl),T=`:: Diagnostics can help resolve issues https://github.com/neuroneural/brainchop/issues :: +`;for(var F in e)T+=F+": "+e[F]+` +`}function A(e="",F=-1,M="",L=[]){e!==""&&(console.log(e),document.getElementById("location").innerHTML=e),isNaN(F)?(memstatus.style.color="red",memstatus.innerHTML="Memory Issue"):F>=0&&(modelProgress.value=F*modelProgress.max),M!==""&&window.alert(M),Object.keys(L).length>0&&m(L)}function v(e){document.getElementById("location").innerHTML="  "+e.string}let y={backColor:[.4,.4,.4,1],show3Dcrosshair:!0,onLocationChange:v};var T="",I;let S=new Niivue(y);S.attachToCanvas(gl1),S.opts.dragMode=S.dragModes.pan,S.opts.multiplanarForceRender=!0,S.opts.yoke3Dto2DZoom=!0,S.opts.crosshairGap=11,smoothCheck.onchange(),await S.loadVolumes([{url:"./t1_crop.nii.gz"}]);for(let e=0;e Niivue brain chop - +
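/* Before a model runs, the loaded volume is conformed to the 256^3 RAS grid the full-volume models expect: the resample is skipped only when the dims are already 256^3 and the permRAS axis map matches (-1, 3, -2), exactly the check function a() above performs against the Niivue instance. A readable sketch; ensureConformed is an illustrative name and the second conform() argument simply mirrors the bundle's own call.

async function ensureConformed(nv) {
  const vol = nv.volumes[0]
  const is256 = vol.dims[1] === 256 && vol.dims[2] === 256 && vol.dims[3] === 256
  const isRAS = vol.permRAS[0] === -1 && vol.permRAS[1] === 3 && vol.permRAS[2] === -2
  if (is256 && isRAS) return // already on the expected grid
  const conformed = await nv.conform(vol, false)
  nv.removeVolume(nv.volumes[0])
  nv.addVolume(conformed)
}
*/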