From b8a926663194656e6052ee2bedb92c60a95cda0f Mon Sep 17 00:00:00 2001 From: koustavghosh05 Date: Thu, 20 Nov 2025 16:13:16 +0530 Subject: [PATCH] added smoothening changes --- ...ead_world_example_for_PET-checkpoint.ipynb | 404 ++++++++++++++++++ rt_utils/nifti2rt.py | 22 - setup.py | 8 +- .../rt_utils/NIFTI_conversion.py | 0 {rt_utils => src/rt_utils}/__init__.py | 0 {rt_utils => src/rt_utils}/ds_helper.py | 0 {rt_utils => src/rt_utils}/image_helper.py | 43 +- nifti2rt.py => src/rt_utils/nifti2rt.py | 0 {rt_utils => src/rt_utils}/rtstruct.py | 96 ++--- .../rt_utils}/rtstruct_builder.py | 0 {rt_utils => src/rt_utils}/rtstruct_merger.py | 0 src/rt_utils/smoothing.py | 171 ++++++++ {rt_utils => src/rt_utils}/utils.py | 9 +- 13 files changed, 653 insertions(+), 100 deletions(-) create mode 100644 .ipynb_checkpoints/RTUTILS_read_world_example_for_PET-checkpoint.ipynb delete mode 100644 rt_utils/nifti2rt.py rename NIFTI_conversion.py => src/rt_utils/NIFTI_conversion.py (100%) rename {rt_utils => src/rt_utils}/__init__.py (100%) rename {rt_utils => src/rt_utils}/ds_helper.py (100%) rename {rt_utils => src/rt_utils}/image_helper.py (89%) rename nifti2rt.py => src/rt_utils/nifti2rt.py (100%) rename {rt_utils => src/rt_utils}/rtstruct.py (57%) rename {rt_utils => src/rt_utils}/rtstruct_builder.py (100%) rename {rt_utils => src/rt_utils}/rtstruct_merger.py (100%) create mode 100644 src/rt_utils/smoothing.py rename {rt_utils => src/rt_utils}/utils.py (96%) diff --git a/.ipynb_checkpoints/RTUTILS_read_world_example_for_PET-checkpoint.ipynb b/.ipynb_checkpoints/RTUTILS_read_world_example_for_PET-checkpoint.ipynb new file mode 100644 index 0000000..7007755 --- /dev/null +++ b/.ipynb_checkpoints/RTUTILS_read_world_example_for_PET-checkpoint.ipynb @@ -0,0 +1,404 @@ +{ + "nbformat": 4, + "nbformat_minor": 0, + "metadata": { + "colab": { + "provenance": [] + }, + "kernelspec": { + "name": "python3", + "display_name": "Python 3" + }, + "language_info": { + "name": "python" + } + }, + "cells": [ + { + "cell_type": "markdown", + "source": [ + "**RT-utils example:**" + ], + "metadata": { + "id": "63LVdz4Al2qo" + } + }, + { + "cell_type": "code", + "source": [ + "pip install rt_utils" + ], + "metadata": { + "id": "zakgJlGwkUkz" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "pip install SimpleITK" + ], + "metadata": { + "id": "KqUfelKIkXTQ" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "!pip install pydicom rt-utils SimpleITK pandas numpy" + ], + "metadata": { + "id": "HNtDfFjZkbks" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "markdown", + "source": [ + "Folder Structure" + ], + "metadata": { + "id": "w6pAaRmtluj2" + } + }, + { + "cell_type": "code", + "source": [ + "# ./data/\n", + "# patient001/\n", + "# IMAGES/ (PET in this example)\n", + "# ...DICOM slices...\n", + "# RTSTRUCT/\n", + "# rt_struct_file.dcm\n", + "\n", + "# patient002/\n", + "# IMAGES/\n", + "# ...DICOM slices...\n", + "# RTSTRUCT/\n", + "# rt_struct_file.dcm\n", + "\n", + "# patient003/\n", + "# IMAGES/\n", + "# ...DICOM slices...\n", + "# RTSTRUCT/\n", + "# rt_struct_file.dcm\n" + ], + "metadata": { + "id": "r_rXpLrhlmlz" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "id": "VesEruhCjna2" + }, + "outputs": [], + "source": [ + "import os\n", + "import pydicom\n", + "import numpy as np\n", + "import SimpleITK as sitk\n", + "from rt_utils 
import RTStructBuilder\n", + "import csv\n", + "import json\n", + "import dateutil\n", + "\n", + "# Define your DICOM root directory here:\n", + "IMAGE_FOLDER_PATH = \"./data\"\n", + "\n", + "ATTRIBUTE_FILE_NAME = \"attributes.csv\" #if you need to apply the code on a folder of different patients\n", + "HEADERS_FILE_NAME = \"headers.json\"\n", + "SAVE_JSON = False\n", + "\n", + "def winapi_path(dos_path, encoding=None):\n", + " # Simplified for non-Windows usage:\n", + " return os.path.abspath(dos_path)\n", + "\n", + "def bqml_to_suv(dcm_file: pydicom.FileDataset) -> float:\n", + " '''\n", + " Calculates the SUV conversion factor from Bq/mL to g/mL using DICOM header info.\n", + " This simplified version returns only the SUV factor.\n", + " '''\n", + " nuclide_dose = dcm_file[0x054, 0x0016][0][0x0018, 0x1074].value # Injected dose (Bq)\n", + " weight = dcm_file[0x0010, 0x1030].value # Patient weight (kg)\n", + " half_life = float(dcm_file[0x054, 0x0016][0][0x0018, 0x1075].value) # Half life (s)\n", + "\n", + " series_time = str(dcm_file[0x0008, 0x0031].value) # Series time (HHMMSS)\n", + " series_date = str(dcm_file[0x0008, 0x0021].value) # Series date (YYYYMMDD)\n", + " series_dt = dateutil.parser.parse(series_date + ' ' + series_time)\n", + "\n", + " nuclide_time = str(dcm_file[0x054, 0x0016][0][0x0018, 0x1072].value) # Injection time\n", + " nuclide_dt = dateutil.parser.parse(series_date + ' ' + nuclide_time)\n", + "\n", + " delta_time = (series_dt - nuclide_dt).total_seconds()\n", + " decay_correction = 2 ** (-1 * delta_time/half_life)\n", + " suv_factor = (weight * 1000) / (decay_correction * nuclide_dose)\n", + " return suv_factor\n", + "\n", + "def getDicomHeaders(file):\n", + " dicomHeaders = file.to_json_dict()\n", + " # remove pixel data from headers\n", + " dicomHeaders.pop('7FE00010', None)\n", + " return dicomHeaders\n", + "\n", + "def get_patient_nifti_dir(dicom_dir):\n", + " # This function finds the patient's directory and creates a NIFTI folder inside it.\n", + " # Assuming structure: .../data/patientX/DICOM/...\n", + " # We want: .../data/patientX/NIFTI/\n", + " patient_dir = dicom_dir\n", + " # Move up until we exit DICOM directories\n", + " # Typically, dicom_dir might look like: /.../data/patientX/DICOM/studyY\n", + " # One dirname: /.../data/patientX/DICOM\n", + " # Another dirname: /.../data/patientX\n", + " patient_dir = os.path.dirname(os.path.dirname(dicom_dir)) # This should now point to patientX directory\n", + " nifti_dir = os.path.join(patient_dir, \"NIFTI\")\n", + " if not os.path.exists(nifti_dir):\n", + " os.makedirs(nifti_dir)\n", + " return nifti_dir\n", + "\n", + "def dicomToNifti(file, seriesDir):\n", + " patientID, modality, studyDate = getattr(file, 'PatientID', None), getattr(file, 'Modality', None), getattr(file, 'StudyDate', None)\n", + " reader = sitk.ImageSeriesReader()\n", + " seriesNames = reader.GetGDCMSeriesFileNames(seriesDir)\n", + " reader.SetFileNames(seriesNames)\n", + " image = reader.Execute()\n", + "\n", + " # Convert PET to SUV if needed\n", + " if modality == 'PT':\n", + " pet = pydicom.dcmread(seriesNames[0]) # read one image\n", + " suv_factor = bqml_to_suv(pet)\n", + " image = sitk.Multiply(image, suv_factor)\n", + "\n", + " nifti_dir = get_patient_nifti_dir(seriesDir)\n", + " output_filename = os.path.join(nifti_dir, f'{patientID}_{modality}_{studyDate}.nii.gz')\n", + " sitk.WriteImage(image, output_filename, imageIO='NiftiImageIO')\n", + "\n", + "def sortParallelLists(list1, list2):\n", + " if len(list1) > 0 and len(list2) > 
0:\n", + " tuples = zip(*sorted(zip(list1, list2)))\n", + " list1, list2 = [list(tuple) for tuple in tuples]\n", + " return list1, list2\n", + "\n", + "def buildMaskArray(file, seriesPath, labelPath) -> np.ndarray:\n", + " rtstruct = RTStructBuilder.create_from(dicom_series_path=seriesPath, rt_struct_path=labelPath)\n", + " rois = rtstruct.get_roi_names()\n", + " masks = [rtstruct.get_roi_mask_by_name(roi).astype(int) for roi in rois]\n", + "\n", + " final_mask = sum(masks)\n", + " final_mask = np.where(final_mask>=1, 1, 0)\n", + " # Reorient mask\n", + " final_mask = np.moveaxis(final_mask, [0, 1, 2], [1, 2, 0])\n", + " return final_mask\n", + "\n", + "def buildMasks(file, seriesPath, labelPath):\n", + " final_mask = buildMaskArray(file, seriesPath, labelPath)\n", + " reader = sitk.ImageSeriesReader()\n", + " dicom_names = reader.GetGDCMSeriesFileNames(seriesPath)\n", + " reader.SetFileNames(dicom_names)\n", + " ref_img = reader.Execute()\n", + "\n", + " mask_img = sitk.GetImageFromArray(final_mask)\n", + " mask_img.CopyInformation(ref_img)\n", + "\n", + " nifti_dir = get_patient_nifti_dir(seriesPath)\n", + " patientID, modality, studyDate = getattr(file, 'PatientID', None), getattr(file, 'Modality', None), getattr(file, 'StudyDate', None)\n", + " output_filename = os.path.join(nifti_dir, f'{patientID}_{modality}_{studyDate}_mask.nii.gz')\n", + " sitk.WriteImage(mask_img, output_filename, imageIO=\"NiftiImageIO\")\n", + "\n", + "def convertFiles():\n", + " dicomFilePaths = []\n", + " dicomFileDirs = []\n", + " dicomFileTraits = []\n", + " dicomFileHeaders = []\n", + " dicomFileHeaderKeys = []\n", + " labelInstanceUIDs = []\n", + " seriesInstanceUIDs = []\n", + " labelPaths = []\n", + " seriesPaths = []\n", + "\n", + " # Rename directories with overly long names\n", + " for _ in range(3):\n", + " for root, dirs, files in os.walk(IMAGE_FOLDER_PATH):\n", + " for dir in dirs:\n", + " if len(dir) > 20:\n", + " i = 5\n", + " while os.path.exists(os.path.join(root, dir[:i])):\n", + " i += 1\n", + " newDir = dir[:i]\n", + " os.rename(os.path.join(root, dir), os.path.join(root, newDir))\n", + "\n", + " # Collect DICOM file paths\n", + " for root, dirs, files in os.walk(IMAGE_FOLDER_PATH):\n", + " for file in files:\n", + " if file.endswith('.dcm'):\n", + " filePath = winapi_path(os.path.join(root, file))\n", + " fileDirname = os.path.dirname(filePath)\n", + " if len(dicomFilePaths) > 0 and fileDirname == dicomFileDirs[-1]:\n", + " dicomFilePaths[-1].append(filePath)\n", + " else:\n", + " dicomFilePaths.append([filePath])\n", + " dicomFileDirs.append(fileDirname)\n", + "\n", + " # Analyze DICOM files\n", + " for i in range(len(dicomFilePaths)):\n", + " if i % 10 == 0 or i == len(dicomFilePaths)-1:\n", + " print(f'Processing {round((i + 1) / len(dicomFilePaths) * 100, 2)}% of files')\n", + " file = pydicom.dcmread(dicomFilePaths[i][0], force=True)\n", + " headers = getDicomHeaders(file)\n", + " traits = {\n", + " \"Patient ID\": getattr(file, 'PatientID', None),\n", + " \"Patient's Sex\": getattr(file, 'PatientSex', None),\n", + " \"Patient's Age\": getattr(file, 'PatientAge', None),\n", + " \"Patient's Birth Date\": getattr(file, 'PatientBirthDate', None),\n", + " \"Patient's Weight\": getattr(file, 'PatientWeight', None),\n", + " \"Institution Name\": getattr(file, 'InstitutionName', None),\n", + " \"Referring Physician's Name\": getattr(file, 'ReferringPhysicianName', None),\n", + " \"Operator's Name\": getattr(file, 'OperatorsName', None),\n", + " \"Study Date\": getattr(file, 'StudyDate', 
None),\n", + " \"Study Time\": getattr(file, 'StudyTime', None),\n", + " \"Modality\": getattr(file, 'Modality', None),\n", + " \"Series Description\": getattr(file, 'SeriesDescription', None),\n", + " \"Dimensions\": np.array(getattr(file, 'pixel_array', np.array([]))).shape,\n", + " }\n", + " for key in headers.keys():\n", + " if key not in dicomFileHeaderKeys:\n", + " dicomFileHeaderKeys.append(key)\n", + " dicomFileTraits.append(traits)\n", + " dicomFileHeaders.append(headers)\n", + "\n", + " fileModality = getattr(file, 'Modality', None)\n", + "\n", + " # If it's an RTSTRUCT, track the referenced SeriesInstanceUID\n", + " if fileModality == 'RTSTRUCT':\n", + " seriesInstanceUID = headers['30060010']['Value'][0]['30060012']['Value'][0]['30060014']['Value'][0]['0020000E']['Value'][0]\n", + " labelInstanceUIDs.append(seriesInstanceUID)\n", + " labelPaths.append(dicomFilePaths[i][0])\n", + "\n", + " # Identify which series correspond to RTSTRUCT\n", + " for i in range(len(dicomFileDirs)):\n", + " if i % 10 == 0 or i == len(dicomFileDirs)-1:\n", + " print(f'Scanning series directories {round((i+1)/len(dicomFileDirs)*100, 2)}%')\n", + " file = pydicom.dcmread(dicomFilePaths[i][0], force=True)\n", + " fileModality = getattr(file, 'Modality', None)\n", + " seriesInstanceUID = getDicomHeaders(file)['0020000E']['Value'][0]\n", + " if fileModality != 'RTSTRUCT':\n", + " if seriesInstanceUID in labelInstanceUIDs:\n", + " seriesPaths.append(dicomFileDirs[i])\n", + " seriesInstanceUIDs.append(seriesInstanceUID)\n", + "\n", + " labelInstanceUIDs, labelPaths = sortParallelLists(labelInstanceUIDs, labelPaths)\n", + " seriesInstanceUIDs, seriesPaths = sortParallelLists(seriesInstanceUIDs, seriesPaths)\n", + "\n", + " # Save attributes\n", + " if len(dicomFilePaths) > 0:\n", + " data_dir = os.path.join(IMAGE_FOLDER_PATH, \"data\")\n", + " if not os.path.exists(data_dir):\n", + " os.makedirs(data_dir)\n", + " with open(os.path.join(IMAGE_FOLDER_PATH, ATTRIBUTE_FILE_NAME), 'w', encoding='UTF8', newline='') as f:\n", + " writer = csv.DictWriter(f, fieldnames=dicomFileTraits[0].keys())\n", + " writer.writeheader()\n", + " writer.writerows(dicomFileTraits)\n", + "\n", + " if SAVE_JSON:\n", + " with open(os.path.join(IMAGE_FOLDER_PATH, HEADERS_FILE_NAME), 'w') as f:\n", + " json.dump(dicomFileHeaders, f)\n", + "\n", + " # Convert PET series to NIFTI\n", + " for i in range(len(dicomFileDirs)):\n", + " if i % 10 == 0 or i == len(dicomFileDirs)-1:\n", + " print(f'Converting PET series to NIFTI {round((i+1)/len(dicomFileDirs)*100, 2)}%')\n", + " if len(dicomFilePaths[i]) > 1:\n", + " file = pydicom.dcmread(dicomFilePaths[i][0], force=True)\n", + " fileModality = getattr(file, 'Modality', None)\n", + " if fileModality == 'PT':\n", + " dicomToNifti(file, dicomFileDirs[i])\n", + "\n", + " # Convert RTSTRUCT to NIFTI masks\n", + " for i in range(min([len(labelPaths), len(seriesPaths)])):\n", + " if i % 10 == 0 or i == len(dicomFileDirs)-1:\n", + " print(f'Converting RTSTRUCT to NIFTI masks {round((i+1)/min([len(labelPaths), len(seriesPaths)])*100, 2)}%')\n", + " file_label = pydicom.dcmread(labelPaths[i], force=True)\n", + " if len(labelInstanceUIDs) != len(seriesInstanceUIDs):\n", + " # Need to match label's UID to a series UID\n", + " j = 0\n", + " if len(labelInstanceUIDs) < len(seriesInstanceUIDs):\n", + " while (i + j) < len(seriesInstanceUIDs) and labelInstanceUIDs[i] != seriesInstanceUIDs[i+j]:\n", + " j += 1\n", + " try:\n", + " buildMasks(file_label, seriesPaths[i+j], labelPaths[i])\n", + " except:\n", + " 
print('Failed to build mask for label: ', labelPaths[i])\n", + " else:\n", + " while (i + j) < len(labelInstanceUIDs) and seriesInstanceUIDs[i] != labelInstanceUIDs[i+j]:\n", + " j += 1\n", + " try:\n", + " buildMasks(pydicom.dcmread(labelPaths[i+j], force=True), seriesPaths[i], labelPaths[i+j])\n", + " except:\n", + " print('Failed to build mask for label: ', labelPaths[i+j])\n", + " else:\n", + " try:\n", + " buildMasks(file_label, seriesPaths[i], labelPaths[i])\n", + " except:\n", + " print('Failed to build mask for label: ', labelPaths[i])\n", + "\n", + " print('Done! Created NIFTI files in the NIFTI folder inside each patient directory.')\n", + "\n", + "if __name__ == '__main__':\n", + " convertFiles()" + ] + }, + { + "cell_type": "markdown", + "source": [ + "**dcmrtstruct2nii example:**" + ], + "metadata": { + "id": "v2J8RH9Ol_5G" + } + }, + { + "cell_type": "code", + "source": [ + "pip install dcmrtstruct2nii" + ], + "metadata": { + "id": "p-jA8cV-mFMX" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "pip install \"pydicom<2.0\"" + ], + "metadata": { + "id": "REW8TLCYmHFL" + }, + "execution_count": null, + "outputs": [] + }, + { + "cell_type": "code", + "source": [ + "from dcmrtstruct2nii import dcmrtstruct2nii, list_rt_structs\n", + "\n", + "#print(list_rt_structs('/path/to/dicom/rtstruct/file.dcm'))\n", + "\n", + "dcmrtstruct2nii('./RT/rt_struct_file.dcm', './PET', './result')" + ], + "metadata": { + "id": "6AFs61wXmLf-" + }, + "execution_count": null, + "outputs": [] + } + ] +} \ No newline at end of file diff --git a/rt_utils/nifti2rt.py b/rt_utils/nifti2rt.py deleted file mode 100644 index 17396ec..0000000 --- a/rt_utils/nifti2rt.py +++ /dev/null @@ -1,22 +0,0 @@ -import SimpleITK as sitk -from SimpleITK import GetArrayFromImage, sitkNearestNeighbor, Image -import numpy as np -from torch import nn -from rt_utils import RTStructBuilder - -def debug_output(seg: sitk.Image, seg_path: str, uid:str, dicom_path: str): - mask_from_sitkImage_zyx = np.transpose(sitk.GetArrayFromImage(seg), (2, 1, 0)) - mask_from_sitkImage_xzy = np.transpose(mask_from_sitkImage_zyx, axes=(2, 0, 1)) - mask_from_sitkImage_xyz = np.transpose(mask_from_sitkImage_xzy, (2, 1, 0)) - mask_from_sitkImage_int64 = mask_from_sitkImage_xyz - mask_from_sitkImage_bool = mask_from_sitkImage_int64.astype(bool) - # Create new RT Struct. Requires the DICOM series path for the RT Struct. - rtstruct = RTStructBuilder.create_new(dicom_series_path = dicom_path) - # Add the 3D mask as an ROI setting the color, description, and name - rtstruct.add_roi( - mask=mask_from_sitkImage_bool, - color=[255, 0, 255], - name="your ROI!" 
- ) - - rtstruct.save(os.path.join(OUTPUT_DIR, uid+'_tmtv-rt-struct')) diff --git a/setup.py b/setup.py index 5d525ed..e8a8bc1 100644 --- a/setup.py +++ b/setup.py @@ -15,8 +15,12 @@ long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/qurit/rtutils", - package_dir={'':"rt_utils"}, - packages=setuptools.find_packages("rt_utils", exclude="tests"), + + # package_dir={'':"rt_utils"}, + # packages=setuptools.find_packages("rt_utils", exclude="tests"), + package_dir={"": "src"}, + packages=setuptools.find_packages(where="src"), + keywords=["RTStruct", "Dicom", "Pydicom"], classifiers=[ "Operating System :: OS Independent", diff --git a/NIFTI_conversion.py b/src/rt_utils/NIFTI_conversion.py similarity index 100% rename from NIFTI_conversion.py rename to src/rt_utils/NIFTI_conversion.py diff --git a/rt_utils/__init__.py b/src/rt_utils/__init__.py similarity index 100% rename from rt_utils/__init__.py rename to src/rt_utils/__init__.py diff --git a/rt_utils/ds_helper.py b/src/rt_utils/ds_helper.py similarity index 100% rename from rt_utils/ds_helper.py rename to src/rt_utils/ds_helper.py diff --git a/rt_utils/image_helper.py b/src/rt_utils/image_helper.py similarity index 89% rename from rt_utils/image_helper.py rename to src/rt_utils/image_helper.py index 5770b42..fd636dc 100644 --- a/rt_utils/image_helper.py +++ b/src/rt_utils/image_helper.py @@ -1,7 +1,8 @@ import os -from typing import List +from typing import List, Union from enum import IntEnum +import SimpleITK import cv2 as cv import numpy as np from pydicom import dcmread @@ -60,7 +61,11 @@ def get_contours_coords(roi_data: ROIData, series_data): mask_slice = create_pin_hole_mask(mask_slice, roi_data.approximate_contours) # Get contours from mask - contours, _ = find_mask_contours(mask_slice, roi_data.approximate_contours) + contours, _ = find_mask_contours(mask_slice, + roi_data.approximate_contours, + scaling_factor=roi_data.scaling_factor) + if not contours: + continue validate_contours(contours) # Format for DICOM @@ -82,7 +87,8 @@ def get_contours_coords(roi_data: ROIData, series_data): return series_contours -def find_mask_contours(mask: np.ndarray, approximate_contours: bool): + +def find_mask_contours(mask: np.ndarray, approximate_contours: bool, scaling_factor: int): approximation_method = ( cv.CHAIN_APPROX_SIMPLE if approximate_contours else cv.CHAIN_APPROX_NONE ) @@ -93,8 +99,12 @@ def find_mask_contours(mask: np.ndarray, approximate_contours: bool): contours = list( contours ) # Open-CV updated contours to be a tuple so we convert it back into a list here + + # Coordinates are rescaled to image grid by dividing with scaling factor for i, contour in enumerate(contours): - contours[i] = [[pos[0][0], pos[0][1]] for pos in contour] + contours[i] = [[(contour[i][0][0] / scaling_factor), (contour[i][0][1] / scaling_factor)] for i in + range(0, len(contour))] + hierarchy = hierarchy[0] # Format extra array out of data return contours, hierarchy @@ -126,7 +136,7 @@ def create_pin_hole_mask(mask: np.ndarray, approximate_contours: bool): def draw_line_upwards_from_point( - mask: np.ndarray, start, fill_value: int + mask: np.ndarray, start, fill_value: int ) -> np.ndarray: line_width = 2 end = (start[0], start[1] - 1) @@ -161,10 +171,8 @@ def get_pixel_to_patient_transformation_matrix(series_data): row_direction, column_direction, slice_direction = get_slice_directions(first_slice) mat = np.identity(4, dtype=np.float32) - #The following might appear counter-intuitive, i.e. 
multiplying the row direction with the column spacing and vice-versa - #But is the correct way to create the transformation matrix, see https://nipy.org/nibabel/dicom/dicom_orientation.html - mat[:3, 0] = row_direction * column_spacing - mat[:3, 1] = column_direction * row_spacing + mat[:3, 0] = row_direction * row_spacing + mat[:3, 1] = column_direction * column_spacing mat[:3, 2] = slice_direction * slice_spacing mat[:3, 3] = offset @@ -185,11 +193,9 @@ def get_patient_to_pixel_transformation_matrix(series_data): # inv(M) = [ inv(rotation&scaling) -inv(rotation&scaling) * translation ] # [ 0 1 ] - #The following might appear counter-intuitive, i.e. dividing the row direction with the column spacing and vice-versa - #But is the correct way to create the inverse transformation matrix, see https://nipy.org/nibabel/dicom/dicom_orientation.html linear = np.identity(3, dtype=np.float32) - linear[0, :3] = row_direction / column_spacing - linear[1, :3] = column_direction / row_spacing + linear[0, :3] = row_direction / row_spacing + linear[1, :3] = column_direction / column_spacing linear[2, :3] = slice_direction / slice_spacing mat = np.identity(4, dtype=np.float32) @@ -200,7 +206,7 @@ def get_patient_to_pixel_transformation_matrix(series_data): def apply_transformation_to_3d_points( - points: np.ndarray, transformation_matrix: np.ndarray + points: np.ndarray, transformation_matrix: np.ndarray ): """ * Augment each point with a '1' as the fourth coordinate to allow translation @@ -223,7 +229,7 @@ def get_slice_directions(series_slice: Dataset): slice_direction = np.cross(row_direction, column_direction) if not np.allclose( - np.dot(row_direction, column_direction), 0.0, atol=1e-3 + np.dot(row_direction, column_direction), 0.0, atol=1e-3 ) or not np.allclose(np.linalg.norm(slice_direction), 1.0, atol=1e-3): raise Exception("Invalid Image Orientation (Patient) attribute") @@ -267,7 +273,7 @@ def get_slice_contour_data(series_slice: Dataset, contour_sequence: Sequence): def get_slice_mask_from_slice_contour_data( - series_slice: Dataset, slice_contour_data, transformation_matrix: np.ndarray + series_slice: Dataset, slice_contour_data, transformation_matrix: np.ndarray ): # Go through all contours in a slice, create polygons in correct space and with a correct format # and append to polygons array (appropriate for fillPoly) @@ -279,9 +285,10 @@ def get_slice_mask_from_slice_contour_data( polygon = np.array(polygon).squeeze() polygons.append(polygon) slice_mask = create_empty_slice_mask(series_slice).astype(np.uint8) - cv.fillPoly(img=slice_mask, pts = polygons, color = 1) + cv.fillPoly(img=slice_mask, pts=polygons, color=1) return slice_mask + def create_empty_series_mask(series_data): ref_dicom_image = series_data[0] mask_dims = ( @@ -307,4 +314,4 @@ class Hierarchy(IntEnum): next_node = 0 previous_node = 1 first_child = 2 - parent_node = 3 + parent_node = 3 \ No newline at end of file diff --git a/nifti2rt.py b/src/rt_utils/nifti2rt.py similarity index 100% rename from nifti2rt.py rename to src/rt_utils/nifti2rt.py diff --git a/rt_utils/rtstruct.py b/src/rt_utils/rtstruct.py similarity index 57% rename from rt_utils/rtstruct.py rename to src/rt_utils/rtstruct.py index c52d816..2bc0dc7 100644 --- a/rt_utils/rtstruct.py +++ b/src/rt_utils/rtstruct.py @@ -1,9 +1,11 @@ -from typing import List, Union +from typing import List, Union, Dict + import numpy as np from pydicom.dataset import FileDataset -from rt_utils.utils import ROIData -from . 
import ds_helper, image_helper
+from rt_utils.utils import ROIData
+from . import ds_helper, image_helper, smoothing
+from typing import Tuple
 
 class RTStruct:
     """
@@ -15,12 +17,13 @@ def __init__(self, series_data, ds: FileDataset, ROIGenerationAlgorithm=0):
         self.ds = ds
         self.frame_of_reference_uid = ds.ReferencedFrameOfReferenceSequence[
             -1
-        ].FrameOfReferenceUID  # Use last structured set ROI
+        ].FrameOfReferenceUID  # Use last structured set ROI
 
     def set_series_description(self, description: str):
         """
         Set the series description for the RTStruct dataset
         """
+
         self.ds.SeriesDescription = description
 
     def add_roi(
@@ -32,51 +35,30 @@ def add_roi(
         use_pin_hole: bool = False,
         approximate_contours: bool = True,
         roi_generation_algorithm: Union[str, int] = 0,
+        apply_smoothing: Union[str, None] = None,  # "2d", "3d", or another label understood by a custom smoothing function
+        smoothing_function = smoothing.pipeline,  # Can be any callable with the signature
+        #
+        # smoothing_function(mask=mask, apply_smoothing=apply_smoothing,
+        #                    smoothing_parameters=smoothing_parameters) -> np.ndarray
+        # The returned np.ndarray may be any integer multiple of the DICOM image's shape in x and y.
+        # Note that the z direction must not be scaled. For instance, if CT_image.shape == (512, 512, 150),
+        # the smoothed array can be (1024, 1024, 150) or (5120, 5120, 150), though your RAM will suffer with the latter.
+        smoothing_parameters: Union[Dict, None] = None,
     ):
         """
-        Add a Region of Interest (ROI) to the RTStruct given a 3D binary mask for each slice.
-
-        Optionally input a color or name for the ROI.
-        If `use_pin_hole` is set to True, attempts to handle ROIs with holes by creating a single continuous contour.
-        If `approximate_contours` is set to False, no approximation is done during contour generation,
-        potentially resulting in a large amount of contour data.
-
-        This method updates the internal DICOM structure (RTStruct) by adding:
-        - ROIContourSequence
-        - StructureSetROISequence
-        - RTROIObservationsSequence
-
-        Parameters
-        ----------
-        mask : np.ndarray
-            3D boolean array indicating the ROI. Its shape must match
-            the underlying DICOM series in the third dimension.
-        color : str or list of int, optional
-            Color representation for the ROI (e.g., "red" or [255, 0, 0]). Defaults to None.
-        name : str, optional
-            Name/label for the ROI. Defaults to None.
-        description : str, optional
-            Longer description of the ROI. Defaults to an empty string.
-        use_pin_hole : bool, optional
-            If True, attempts to create a single continuous contour for ROIs with holes. Defaults to False.
-        approximate_contours : bool, optional
-            If False, skips approximation during contour generation, leading to larger contour data. Defaults to True.
-        roi_generation_algorithm : str or int, optional
-            Identifier for the algorithm used to generate the ROI. Defaults to 0.
-
-        Raises
-        ------
-        ROIException
-            - If the mask is not a 3D boolean array.
-            - If the mask's shape does not match the loaded DICOM series dimensions.
-            - If the mask is empty (no voxels set to True).
-
-        Returns
-        -------
-        None
-            Modifies the internal RTStruct.
+        Add an ROI to the RTStruct given a 3D binary mask with the ROI marked on each slice.
+        Optionally input a color or name for the ROI.
+        If use_pin_hole is set to True, a pinhole will be cut through ROIs with holes in them so that each is represented by a single contour.
+        If approximate_contours is set to False, no approximation will be done when generating contour data, leading to a much larger amount of contour data.
         """
-        # TODO: test if name already exists
+        if apply_smoothing:
+            mask = smoothing_function(mask=mask, apply_smoothing=apply_smoothing,
+                                      smoothing_parameters=smoothing_parameters)
+
+        ## If an upscaled mask is returned, the contour coordinates must be scaled back accordingly
+        rows = self.series_data[0][0x00280010].value
+        scaling_factor = int(mask.shape[0] / rows)
+
+        # TODO test if name already exists
         self.validate_mask(mask)
         roi_number = len(self.ds.StructureSetROISequence) + 1
         roi_data = ROIData(
@@ -89,6 +71,7 @@ def add_roi(
             use_pin_hole,
             approximate_contours,
             roi_generation_algorithm,
+            scaling_factor
         )
 
         self.ds.ROIContourSequence.append(
@@ -104,7 +87,7 @@ def add_roi(
     def validate_mask(self, mask: np.ndarray) -> bool:
         if mask.dtype != bool:
             raise RTStruct.ROIException(
-                f"Mask data type must be boolean, but got {mask.dtype}. Please ensure the mask is a 3D boolean array."
+                f"Mask data type must be boolean. Got {mask.dtype}"
             )
 
         if mask.ndim != 3:
@@ -112,7 +95,7 @@ def validate_mask(self, mask: np.ndarray) -> bool:
 
         if len(self.series_data) != np.shape(mask)[2]:
             raise RTStruct.ROIException(
-                "Mask must have the same number of layers (in the 3rd dimension) as the input series. "
+                "Mask must have the same number of layers (in the 3rd dimension) as the input series. "
                 + f"Expected {len(self.series_data)}, got {np.shape(mask)[2]}"
             )
 
@@ -151,17 +134,19 @@ def get_roi_mask_by_name(self, name) -> np.ndarray:
 
     def save(self, file_path: str):
         """
-        Saves the RTStruct with the specified name / location.
-        Automatically adds '.dcm' as a suffix if necessary.
+        Saves the RTStruct with the specified name / location
+        Automatically adds '.dcm' as a suffix
         """
+
+        # Add .dcm if needed
         file_path = file_path if file_path.endswith(".dcm") else file_path + ".dcm"
 
         try:
-            # Using 'with' to handle file opening and closing automatically
-            with open(file_path, "w") as file:
-                print("Writing file to", file_path)
-                self.ds.save_as(file_path)
+            file = open(file_path, "w")
+            # Opening worked, we should have a valid file_path
+            print("Writing file to", file_path)
+            self.ds.save_as(file_path)
+            file.close()
         except OSError:
             raise Exception(f"Cannot write to file path '{file_path}'")
 
@@ -169,4 +154,5 @@ class ROIException(Exception):
         """
         Exception class for invalid ROI masks
         """
-    pass
+
+    pass
\ No newline at end of file
diff --git a/rt_utils/rtstruct_builder.py b/src/rt_utils/rtstruct_builder.py
similarity index 100%
rename from rt_utils/rtstruct_builder.py
rename to src/rt_utils/rtstruct_builder.py
diff --git a/rt_utils/rtstruct_merger.py b/src/rt_utils/rtstruct_merger.py
similarity index 100%
rename from rt_utils/rtstruct_merger.py
rename to src/rt_utils/rtstruct_merger.py
diff --git a/src/rt_utils/smoothing.py b/src/rt_utils/smoothing.py
new file mode 100644
index 0000000..35ddf48
--- /dev/null
+++ b/src/rt_utils/smoothing.py
@@ -0,0 +1,171 @@
+import SimpleITK as sitk
+import numpy as np
+from scipy import ndimage, signal
+from typing import List, Union, Tuple, Dict
+import logging
+
+# A set of parameters that is known to work well
+default_smoothing_parameters_2d = {
+    "scaling_iterations": 2,
+    "filter_iterations": 3,
+    "crop_margins": [20, 20, 1],
+    "np_kron": {"scaling_factor": 3},
+    "ndimage_gaussian_filter": {"sigma": 2,
+                                "radius": 3},
+    "threshold": {"threshold": 0.5},
+}
+
+def kron_upscale(mask: np.ndarray, params):
+    """
+    This function upscales masks like so:
+
+    1|2       1|1|2|2
+    3|4  -->  1|1|2|2
+              3|3|4|4
+              3|3|4|4
+
+    Scaling is applied only in the x and y directions.
+    """
+
+    scaling_array = (params["scaling_factor"], params["scaling_factor"], 1)
+
+    return np.kron(mask, np.ones(scaling_array))
+
+def gaussian_blur(mask: np.ndarray, params):
+    return ndimage.gaussian_filter(mask, **params)
+
+def binary_threshold(mask: np.ndarray, params):
+    return mask > params["threshold"]
+
+def get_new_margin(column, margin, column_length):
+    """
+    This function takes a column of (x, y, or z) coordinates and adds a margin.
+    If the margin exceeds the mask size, it is clamped to the most extreme valid values.
+    """
+    new_min = column.min() - margin
+    if new_min < 0:
+        new_min = 0
+
+    new_max = column.max() + margin
+    if new_max > column_length:
+        new_max = column_length
+
+    return new_min, new_max
+
+def crop_mask(mask: np.ndarray, crop_margins: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
+    """
+    This function crops masks to the non-zero pixels, padded by crop_margins.
+    Returns (cropped mask, bounding box).
+    """
+    x, y, z = np.nonzero(mask)
+
+    x_min, x_max = get_new_margin(x, crop_margins[0], mask.shape[0])
+    y_min, y_max = get_new_margin(y, crop_margins[1], mask.shape[1])
+    z_min, z_max = get_new_margin(z, crop_margins[2], mask.shape[2])
+
+    bbox = np.array([x_min, x_max, y_min, y_max, z_min, z_max])
+
+    return mask[bbox[0]: bbox[1],
+                bbox[2]: bbox[3],
+                bbox[4]: bbox[5]], bbox
+
+def restore_mask_dimensions(cropped_mask: np.ndarray, new_shape, bbox):
+    """
+    This function restores the mask dimensions to the given shape.
+    """
+    new_mask = np.zeros(new_shape)
+
+    new_mask[bbox[0]: bbox[1], bbox[2]: bbox[3], bbox[4]: bbox[5]] = cropped_mask
+    return new_mask.astype(bool)
+
+def iteration_2d(mask: np.ndarray, np_kron, ndimage_gaussian_filter, threshold, filter_iterations):
+    """
+    This is the actual set of filters, applied iteratively slice by slice along the z direction.
+    """
+    cropped_mask = kron_upscale(mask=mask, params=np_kron)
+
+    for filter_iteration in range(filter_iterations):
+        for z_idx in range(cropped_mask.shape[2]):
+            slice = cropped_mask[:, :, z_idx]
+            slice = gaussian_blur(mask=slice, params=ndimage_gaussian_filter)
+            slice = binary_threshold(mask=slice, params=threshold)
+
+            cropped_mask[:, :, z_idx] = slice
+
+    return cropped_mask
+
+def iteration_3d(mask: np.ndarray, np_kron, ndimage_gaussian_filter, threshold, filter_iterations):
+    """
+    This is the same set of filters, applied iteratively to the whole 3D volume.
+    """
+    for filter_iteration in range(filter_iterations):
+        cropped_mask = kron_upscale(mask=mask, params=np_kron)
+        cropped_mask = gaussian_blur(mask=cropped_mask, params=ndimage_gaussian_filter)
+        cropped_mask = binary_threshold(mask=cropped_mask, params=threshold)
+
+    return cropped_mask
+
+def pipeline(mask: np.ndarray,
+             apply_smoothing: str,
+             smoothing_parameters: Union[Dict, None]):
+    """
+    This is the entrypoint for smoothing a mask.
+    """
+    if not smoothing_parameters:
+        smoothing_parameters = default_smoothing_parameters_2d
+
+    scaling_iterations = smoothing_parameters["scaling_iterations"]
+    filter_iterations = smoothing_parameters["filter_iterations"]
+
+    crop_margins = np.array(smoothing_parameters["crop_margins"])
+    np_kron = smoothing_parameters["np_kron"]
+    ndimage_gaussian_filter = smoothing_parameters["ndimage_gaussian_filter"]
+    threshold = smoothing_parameters["threshold"]
+
+    logging.info(f"Original mask shape {mask.shape}")
+    logging.info("Cropping mask to non-zero")
+    cropped_mask, bbox = crop_mask(mask, crop_margins=crop_margins)
+    final_shape, final_bbox = get_final_mask_shape_and_bbox(mask=mask,
+                                                            scaling_factor=np_kron["scaling_factor"],
+                                                            scaling_iterations=scaling_iterations,
+                                                            bbox=bbox)
+    logging.info(f"Final scaling with factor of {np_kron['scaling_factor']} for {scaling_iterations} scaling_iterations")
+    for i in range(scaling_iterations):
+        logging.info(f"Iteration {i+1} out of {scaling_iterations}")
+        logging.info("Applying filters")
+        if apply_smoothing == "2d":
+            cropped_mask = iteration_2d(cropped_mask,
+                                        np_kron=np_kron,
+                                        ndimage_gaussian_filter=ndimage_gaussian_filter,
+                                        threshold=threshold,
+                                        filter_iterations=filter_iterations)
+        elif apply_smoothing == "3d":
+            cropped_mask = iteration_3d(cropped_mask,
+                                        np_kron=np_kron,
+                                        ndimage_gaussian_filter=ndimage_gaussian_filter,
+                                        threshold=threshold,
+                                        filter_iterations=filter_iterations)
+        else:
+            raise Exception("Wrong dimension parameter. Use '2d' or '3d'.")
+
+    # Restore dimensions
+    logging.info("Restoring original mask shape")
+    mask = restore_mask_dimensions(cropped_mask, final_shape, final_bbox)
+    return mask
+
+def get_final_mask_shape_and_bbox(mask, bbox, scaling_factor, scaling_iterations):
+    """
+    This function scales the image shape and the bounding box that should be used for the final mask.
+    """
+
+    final_scaling_factor = pow(scaling_factor, scaling_iterations)
+
+    final_shape = np.array(mask.shape)
+    final_shape[:2] *= final_scaling_factor
+
+    bbox[:4] *= final_scaling_factor  # Scale bounding box to the final shape
+    bbox[:4] -= round(final_scaling_factor * 0.5)  # Shift the box to compensate for the offset introduced by the upscaling
+    logging.info(f"Final shape: {final_shape}")
+    logging.info(f"Final bbox: {bbox}")
+    return final_shape, bbox
+
diff --git a/rt_utils/utils.py b/src/rt_utils/utils.py
similarity index 96%
rename from rt_utils/utils.py
rename to src/rt_utils/utils.py
index f04089e..30a982b 100644
--- a/rt_utils/utils.py
+++ b/src/rt_utils/utils.py
@@ -1,4 +1,4 @@
-from typing import List, Union
+from typing import List, Union, Tuple
 from random import randrange
 from pydicom.uid import PYDICOM_IMPLEMENTATION_UID
 from dataclasses import dataclass
@@ -41,7 +41,6 @@ class SOPClassUID:
 @dataclass
 class ROIData:
     """Data class to easily pass ROI data to helper methods."""
-
     mask: str
     color: Union[str, List[int]]
     number: int
@@ -51,6 +50,10 @@ class ROIData:
     use_pin_hole: bool = False
    approximate_contours: bool = True
     roi_generation_algorithm: Union[str, int] = 0
+    scaling_factor: int = 1
+    smooth_radius: Union[int, None] = None
+    smooth_scale: Union[int, None] = None
+
 
     def __post_init__(self):
         self.validate_color()
@@ -124,4 +127,4 @@ def validate_roi_generation_algoirthm(self):
                 "or a str (not recomended) for self.roi_generation_algorithm. Got {}.".format(
                     type(self.roi_generation_algorithm)
                 )
-            )
+            )
\ No newline at end of file
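
Usage sketch (not part of the patch): the example below shows how the new apply_smoothing / smoothing_parameters arguments added to RTStruct.add_roi could be called. The DICOM series path, mask contents, and ROI name are placeholders, the series is assumed to have 150 slices, and the parameter dict mirrors default_smoothing_parameters_2d from src/rt_utils/smoothing.py.

import numpy as np
from rt_utils import RTStructBuilder

# Build a new RTStruct against an existing DICOM series (placeholder path).
rtstruct = RTStructBuilder.create_new(dicom_series_path="./data/patient001/IMAGES")

# 3D boolean mask, one slice per DICOM image in the series (assumed 150 here).
mask = np.zeros((512, 512, 150), dtype=bool)
mask[200:300, 200:300, 60:90] = True

# Mirrors default_smoothing_parameters_2d in src/rt_utils/smoothing.py.
smoothing_parameters = {
    "scaling_iterations": 2,
    "filter_iterations": 3,
    "crop_margins": [20, 20, 1],
    "np_kron": {"scaling_factor": 3},
    "ndimage_gaussian_filter": {"sigma": 2, "radius": 3},
    "threshold": {"threshold": 0.5},
}

rtstruct.add_roi(
    mask=mask,
    color=[255, 0, 255],
    name="GTV smoothed",                        # placeholder ROI name
    apply_smoothing="2d",                       # "2d" filters slice by slice, "3d" filters the whole volume
    smoothing_parameters=smoothing_parameters,  # pass None to fall back to the 2D defaults
)

rtstruct.save("./rt-struct-smoothed")           # ".dcm" is appended automatically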
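
Calling the smoothing pipeline directly illustrates the shapes involved: with the default parameters the in-plane size grows by scaling_factor ** scaling_iterations (3 ** 2 = 9), and image_helper.find_mask_contours later divides the contour coordinates by that same factor so they land back on the original pixel grid. A minimal sketch on a synthetic mask:

import numpy as np
from rt_utils import smoothing

mask = np.zeros((512, 512, 10), dtype=bool)
mask[240:270, 240:270, 4:7] = True

smoothed = smoothing.pipeline(mask=mask, apply_smoothing="2d", smoothing_parameters=None)

print(mask.shape)      # (512, 512, 10)
print(smoothed.shape)  # (4608, 4608, 10): 512 * 3**2 in x and y, z unchanged
scaling_factor = smoothed.shape[0] // mask.shape[0]
print(scaling_factor)  # 9, the same value add_roi derives from the Rows tag (0028,0010)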
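
The smoothing_function hook only assumes the signature documented in add_roi, smoothing_function(mask=..., apply_smoothing=..., smoothing_parameters=...) -> np.ndarray, with x and y sizes an integer multiple of the input and z unchanged. A hypothetical drop-in replacement using a slice-wise binary closing (the closing itself is illustrative and not part of this patch):

import numpy as np
from scipy import ndimage

def closing_smoothing(mask: np.ndarray, apply_smoothing: str, smoothing_parameters=None) -> np.ndarray:
    """Illustrative alternative to rt_utils.smoothing.pipeline.

    Keeps the original in-plane resolution (scaling factor of 1) and applies a
    binary closing slice by slice. apply_smoothing is accepted to satisfy the
    expected signature, but only "2d" is handled here.
    """
    if apply_smoothing != "2d":
        raise ValueError("This sketch only implements 2d smoothing")

    params = smoothing_parameters or {"closing_iterations": 2}
    structure = ndimage.generate_binary_structure(2, 1)  # 3x3 cross-shaped kernel
    smoothed = np.zeros_like(mask, dtype=bool)
    for z in range(mask.shape[2]):
        smoothed[:, :, z] = ndimage.binary_closing(
            mask[:, :, z], structure=structure, iterations=params["closing_iterations"]
        )
    return smoothed

# rtstruct.add_roi(mask=mask, name="GTV", apply_smoothing="2d",
#                  smoothing_function=closing_smoothing)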
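
For reference, the SUV conversion used by the notebook's bqml_to_suv reduces to a short calculation: decay_correction = 2 ** (-delta_time / half_life) and suv_factor = weight_in_grams / (decay_correction * injected_dose_in_Bq). A worked numeric sketch with assumed values (70 kg patient, 350 MBq injected F-18 dose, 60 min between injection and series time):

# Assumed example values; real values come from the PET DICOM header.
weight_kg = 70.0
injected_dose_bq = 350e6          # (0018,1074) Radionuclide Total Dose
half_life_s = 6586.2              # (0018,1075) Radionuclide Half Life for F-18
delta_time_s = 3600.0             # series time minus injection time

decay_correction = 2 ** (-delta_time_s / half_life_s)                    # ~0.685
suv_factor = (weight_kg * 1000) / (decay_correction * injected_dose_bq)  # ~2.9e-4
# Multiplying a Bq/mL PET voxel by suv_factor yields SUV in g/mL,
# which is what dicomToNifti does via sitk.Multiply(image, suv_factor).
print(suv_factor)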