DCE-dro #39

Open
wants to merge 12 commits into base: main
4 changes: 4 additions & 0 deletions .gitignore
@@ -11,3 +11,7 @@ docs/source/generated
docs/source/sg_execution_times.rst
docs-mk/site
docs-mk/docs/generated
src/osipi/DRO/data
src/osipi/DRO/__pycache__/
src/osipi/DRO/ROI_saved/
output
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -3,7 +3,7 @@ repos:
rev: v0.4.8 # Use the latest version
hooks:
- id: ruff
args: [--fix] # Optional: to enable lint fixes
args: ["--fix"]
- id: ruff-format
- repo: https://github.com/pycqa/flake8
rev: 7.0.0
@@ -12,7 +12,7 @@ repos:
args:
- --exclude=venv,.git,__pycache__,.eggs,.mypy_cache,.pytest_cache
- --max-line-length=100
- --ignore=E203
- --ignore=E203, W503
- --per-file-ignores=__init__.py:F401
- repo: https://github.com/pre-commit/pre-commit-hooks
rev: v4.6.0
16 changes: 15 additions & 1 deletion poetry.lock


1 change: 1 addition & 0 deletions pyproject.toml
@@ -114,6 +114,7 @@ numpy = "^1.21.2"
scipy = "^1.7.3"
matplotlib = "^3.4.3"
requests = "^2.32.3"
pydicom = "^2.4.4"

[tool.poetry.group.dev.dependencies]
flake8 = "^7.0.0"
18 changes: 18 additions & 0 deletions src/osipi/DRO/__init__.py
@@ -0,0 +1,18 @@
from ._model import (
    extended_tofts_model,
    tofts_model,
    extended_tofts_model_1vox,
    forward_extended_tofts,
)
from ._dicom_processing import (
    read_dicom_slices_as_4d_signal,
    signal_enhancement_extract,
)
from ._roi_selection import ic_from_roi, roi
from ._utils import animate_mri, save_dicoms
from ._filters_and_noise import median_filter
from ._conc_2_dro_signal import (
    Conc2Sig_Linear,
    calcR1_R2,
    STDmap,
    createR10_withref,
    addnoise,
)
46 changes: 46 additions & 0 deletions src/osipi/DRO/_conc_2_dro_signal.py
@@ -0,0 +1,46 @@
"""
@author: eveshalom
"""

import numpy as np


def createR10_withref(S0ref, S0, Tr, fa, T1ref, datashape):
    R10_ref = 1 / T1ref
    ref_frac = (1 - np.cos(fa) * np.exp(-Tr * R10_ref)) / (1 - np.exp(-Tr * R10_ref))
    R10 = (-1 / Tr) * np.log((S0 - (ref_frac * S0ref)) / ((S0 * np.cos(fa)) - (ref_frac * S0ref)))
    R10 = np.tile(R10[:, :, :, np.newaxis], (1, 1, 1, datashape[-1]))
    return R10


def calcR1_R2(R10, R20st, r1, r2st, Ctiss):
    R1 = R10 + r1 * Ctiss
    R2st = R20st + r2st * Ctiss
    return R1, R2st


def Conc2Sig_Linear(Tr, Te, fa, R1, R2st, S, scale, scalevisit1):
    dro_S = ((1 - np.exp(-Tr * R1)) / (1 - (np.cos(fa) * np.exp(-Tr * R1)))) * (
        np.sin(fa) * np.exp(-Te * R2st)
    )

    if scale == 1:
        ScaleConst = np.percentile(S, 98) / np.percentile(dro_S, 98)
    elif scale == 2:
        ScaleConst = scalevisit1

    dro_S = dro_S * ScaleConst
    return dro_S, ScaleConst


def STDmap(S, t0):
    stdev = np.std(S[:, :, :, 0:t0], axis=3)
    return stdev


def addnoise(a, std, Sexact, Nt):
    from numpy.random import normal as gaussian

    std = np.tile(std[:, :, :, np.newaxis], (1, 1, 1, Nt))
    Snoise = abs(Sexact + (a * std * gaussian(0, 1, Sexact.shape)))
    return Snoise
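
A minimal usage sketch (not part of the diff) of how these helpers chain together, assuming Ctiss is a 4D concentration map, S the measured 4D signal, S0ref and S0 baseline signal maps, R20st a baseline R2* map, and Tr, Te (s), fa (rad), T1ref (s) user-supplied sequence parameters; the relaxivity values are illustrative only:

R10 = createR10_withref(S0ref, S0, Tr, fa, T1ref, S.shape)
R1, R2st = calcR1_R2(R10, R20st, r1=4.5, r2st=40.0, Ctiss=Ctiss)
dro_S, scale_const = Conc2Sig_Linear(Tr, Te, fa, R1, R2st, S, scale=1, scalevisit1=None)
stdev = STDmap(S, t0=5)  # noise level estimated from the pre-contrast frames
S_noisy = addnoise(1.0, stdev, dro_S, Nt=S.shape[-1])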
55 changes: 55 additions & 0 deletions src/osipi/DRO/_dicom_processing.py
@@ -0,0 +1,55 @@
import os

import numpy as np
import pydicom

from osipi.DRO._filters_and_noise import median_filter


def read_dicom_slices_as_4d_signal(folder_path):
    """
    Read a DICOM series from a folder path.
    Returns the signal data as a 4D numpy array (x, y, z, t).
    """
    slices = {}
    for root, _, files in os.walk(folder_path):
        for file in files:
            if file.endswith(".dcm"):
                dicom_file = os.path.join(root, file)
                slice = pydicom.dcmread(dicom_file)
                if slice.SliceLocation not in slices:
                    slices[slice.SliceLocation] = []
                slices[slice.SliceLocation].append((slice.AcquisitionTime, slice))

    # Sort each list of slices by the first element (AcquisitionTime)
    for slice_location in slices:
        slices[slice_location].sort(key=lambda x: x[0])

    spatial_shape = slices[slice_location][0][1].pixel_array.shape

    data_shape = (spatial_shape[0], spatial_shape[1], len(slices), len(slices[slice_location]))

    signal = np.zeros(data_shape)

    for z, slice_location in enumerate(sorted(slices.keys())):  # Sort by slice location
        slices[z] = slices.pop(slice_location)
        for t, (_, slice) in enumerate(
            sorted(slices[z], key=lambda x: x[0])
        ):  # Sort by acquisition time
            signal[:, :, z, t] = slice.pixel_array
            slices[z][t] = slice

    return signal, slices, slices[0][0]


def signal_enhancement_extract(S, datashape, baselinepoints):
    # Average the pre-contrast frames to get the baseline signal
    S0 = np.average(S[:, :, :, 0:baselinepoints], axis=3)
    E = np.zeros_like(S)

    # Calculate signal enhancement
    for i in range(0, datashape[-1]):
        E[:, :, :, i] = S[:, :, :, i] - S0
        E[:, :, :, i] = median_filter(E[:, :, :, i])  # Median filter size (3, 3)

    return E, S0, S
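
A minimal usage sketch, assuming the DCE series is stored as one .dcm file per slice per time point under a hypothetical folder path, with five pre-contrast frames:

signal, slices, first_slice = read_dicom_slices_as_4d_signal("data/dce_series")
E, S0, S = signal_enhancement_extract(signal, signal.shape, baselinepoints=5)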
28 changes: 28 additions & 0 deletions src/osipi/DRO/_filters_and_noise.py
@@ -0,0 +1,28 @@
import numpy as np
from scipy import ndimage


def median_filter(param_map):
    """
    Apply a 3x3 median filter to each 2D slice of a parameter map.
    """
    for i in range(param_map.shape[-1]):
        param_map[:, :, i] = ndimage.median_filter(param_map[:, :, i], size=(3, 3))
    return param_map


def add_gaussian_noise(signal, mean=0, std=1):
    """
    Add Gaussian noise to the 4D MR data.
    """
    return signal + np.random.normal(mean, std, signal.shape)


def add_rician_noise(signal, mean=0, std=1):
    """
    Add Rician noise to the 4D MR data.
    """
    # Rician noise: take the magnitude after adding independent Gaussian noise
    # to the real (signal) and imaginary channels.
    noise_real = np.random.normal(mean, std, signal.shape)
    noise_imag = np.random.normal(mean, std, signal.shape)
    noisy_signal = np.sqrt((signal + noise_real) ** 2 + noise_imag**2)
    return noisy_signal
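
A short sketch of how the noise helpers might be applied, assuming signal is a 4D magnitude image; the noise level here is an arbitrary illustrative choice:

noisy_gauss = add_gaussian_noise(signal, mean=0, std=0.05 * signal.max())
noisy_rician = add_rician_noise(signal, std=0.05 * signal.max())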
103 changes: 103 additions & 0 deletions src/osipi/DRO/_model.py
@@ -0,0 +1,103 @@
import multiprocessing as mp

import numpy as np
from scipy.optimize import curve_fit

import osipi


def fit_single_voxel_extended_tofts(ct, ca, time):
    def fit_func_ET(t, kt, ve, vp):
        return osipi.extended_tofts(t, ca, kt, ve, vp)

    ini = [0, 0, 0]
    popt, pcov = curve_fit(fit_func_ET, time, ct, p0=ini)
    return popt


def fit_single_voxel_tofts(ct, ca, time):
    def fit_func_T(t, kt, ve):
        return osipi.tofts(t, ca, kt, ve)

    ini = [0, 0]
    popt, pcov = curve_fit(fit_func_T, time, ct, p0=ini)
    return popt


def process_voxel(j, i, k, c_tiss, ca, t, type="ET"):
    ct = c_tiss[j, i, k, :]
    if type == "ET":
        popt = fit_single_voxel_extended_tofts(ct, ca, t)
    elif type == "T":
        popt = fit_single_voxel_tofts(ct, ca, t)
    return j, i, k, popt


def extended_tofts_model(ca, c_tiss, t):
    ktrans = np.zeros(c_tiss.shape[:-1])
    ve = np.zeros(c_tiss.shape[:-1])
    vp = np.zeros(c_tiss.shape[:-1])

    tasks = [
        (j, i, k, c_tiss, ca, t)
        for k in range(c_tiss.shape[2])
        for j in range(c_tiss.shape[0])
        for i in range(c_tiss.shape[1])
    ]

    with mp.Pool(processes=mp.cpu_count()) as pool:
        results = pool.starmap(process_voxel, tasks)

    for j, i, k, popt in results:
        ktrans[j, i, k], ve[j, i, k], vp[j, i, k] = popt

    return ktrans, ve, vp


def extended_tofts_model_1vox(ca, c_tiss, t):
    """
    Extended Tofts model fit for a single voxel
    ca -- arterial input function
    c_tiss -- 1D array of tissue concentration data (time)
    t -- time points of the samples
    """

    ct = c_tiss[:]
    popt = fit_single_voxel_extended_tofts(ct, ca, t)

    return popt


def forward_extended_tofts(K1, Ve, Vp, Ca, time):
    x, y, z = K1.shape
    t = Ca.shape[0]
    c_tiss = np.zeros((x, y, z, t))

    for k in range(0, z):
        for j in range(0, y):
            for i in range(0, x):
                c_tiss[i, j, k, :] = osipi.extended_tofts(
                    time, Ca, K1[i, j, k], Ve[i, j, k], Vp[i, j, k]
                )

    return c_tiss


def tofts_model(ca, c_tiss, t):
    ktrans = np.zeros(c_tiss.shape[:-1])
    ve = np.zeros(c_tiss.shape[:-1])

    tasks = [
        (j, i, k, c_tiss, ca, t, "T")
        for k in range(c_tiss.shape[2])
        for j in range(c_tiss.shape[0])
        for i in range(c_tiss.shape[1])
    ]

    with mp.Pool(processes=mp.cpu_count()) as pool:
        results = pool.starmap(process_voxel, tasks)

    for j, i, k, popt in results:
        ktrans[j, i, k], ve[j, i, k] = popt

    return ktrans, ve
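
A minimal fitting sketch, assuming c_tiss is an (x, y, z, t) concentration array, ca the AIF sampled at the same time points, t the time array in the units expected by osipi.extended_tofts, and a purely illustrative voxel index for the single-voxel fit:

ktrans, ve, vp = extended_tofts_model(ca, c_tiss, t)  # voxel-wise fit, parallelised over all voxels
kt1, ve1, vp1 = extended_tofts_model_1vox(ca, c_tiss[64, 64, 2, :], t)  # single-voxel fit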
60 changes: 60 additions & 0 deletions src/osipi/DRO/_roi_selection.py
@@ -0,0 +1,60 @@
import numpy as np
from matplotlib import path
from matplotlib import pyplot as plt
from matplotlib.widgets import LassoSelector


def roi(signal, slice_num, data_shape, save=False):
    """
    Select a region of interest (ROI) in a slice of a 4D signal.
    """

    fig = plt.figure()
    ax1 = fig.add_subplot(121)
    ax1.set_title("Slice:")
    ax1.imshow(signal[:, :, slice_num], cmap="gray", interpolation="nearest")

    # Empty array to be filled with lasso selector
    array = np.zeros_like(signal[:, :, slice_num])
    ax2 = fig.add_subplot(122)
    ax2.set_title("numpy array:")
    mask = ax2.imshow(array, vmax=1, interpolation="nearest")

    # Pixel coordinates
    pix = np.arange(signal.shape[1])
    xv, yv = np.meshgrid(pix, pix)
    pix = np.vstack((xv.flatten(), yv.flatten())).T

    def updateArray(array, indices):
        lin = np.arange(array.size)
        newArray = array.flatten()
        newArray[lin[indices]] = 1
        return newArray.reshape(array.shape)

    def onselect(verts):
        array = mask._A._data
        p = path.Path(verts)
        ind = p.contains_points(pix, radius=1)
        array = updateArray(array, ind)
        mask.set_data(array)
        fig.canvas.draw_idle()

    lasso = LassoSelector(ax1, onselect, button=1)
    plt.show()
    mask2d = mask._A._data.copy()
    roivox = np.sum(mask2d)
    timemask = np.tile(mask2d[:, :, np.newaxis], (1, 1, signal.shape[-1]))
    mask4d = np.zeros(data_shape)
    mask4d[:, :, slice_num, :] = timemask
    if save:
        np.save("ROI_saved/aif_mask.npy", mask4d)
        np.save("ROI_saved/roi_voxels.npy", roivox)
    return mask4d, roivox, lasso


def ic_from_roi(E, mask, roi_vox, numaxis):
    # Average signal enhancement over the ROI voxels
    E_roi = np.sum(mask * E, axis=tuple(range(0, numaxis))) / roi_vox

    return E_roi
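
A short sketch of the intended AIF workflow, assuming E and S0 come from signal_enhancement_extract and the artery is visible on a hypothetical slice 4; the lasso selection happens interactively before roi returns:

aif_mask, roi_vox, _ = roi(S0, slice_num=4, data_shape=E.shape, save=False)
E_aif = ic_from_roi(E, aif_mask, roi_vox, numaxis=3)  # mean enhancement curve inside the ROI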