Training on dicom images throws key errors: "0008|0005", "0008|1050" or similar #2948

@ChristianEschen

Description

Hi all

I have been facing some problems training on DICOM images. Specifically, training throws KeyErrors on the tags "0008|0005" and "0008|1050", which are conditional and optional DICOM tags, respectively.

The files can be found here:
http://www.rubomedical.com/dicom_files/dicom_viewer_0002.zip
http://www.rubomedical.com/dicom_files/dicom_viewer_0003.zip
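Presumably the two files do not expose the same set of DICOM tags, which is what later trips up batching. A quick way to check (a sketch; the paths below are assumptions, point them at wherever the zips above are extracted) is to compare the tags pydicom reports for each file:

```python
# Sketch: list the metadata tags that appear in only one of the two files.
import pydicom

path1 = "dicom_viewer_0002/0002.DCM"  # assumed extraction paths
path2 = "dicom_viewer_0003/0003.DCM"

def tag_keys(path):
    # Format tags as "gggg|eeee", matching the keys in the error message.
    ds = pydicom.dcmread(path, stop_before_pixels=True)
    return {f"{el.tag.group:04x}|{el.tag.element:04x}" for el in ds}

keys1, keys2 = tag_keys(path1), tag_keys(path2)
print("only in 0002:", sorted(keys1 - keys2))
print("only in 0003:", sorted(keys2 - keys1))
```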

My environment is:
Ubuntu 20.04 LTS
Monai: 0.7.dev2137 (I have also tested with master)

The code to reproduce the error is here (uncommenting the pydicom section below solves the issue, since the metadata is then updated to contain the missing keys):

```python
import logging
import os
import sys

import numpy as np
import torch
from torch.utils.data import DataLoader
import monai
print('monai vers', monai.__version__)
import pydicom
from monai.transforms import Activations, AddChanneld, AsDiscrete, Compose, LoadImaged, RandRotate90d, Resized, ScaleIntensityd, EnsureTyped, EnsureType, ToTensord, EnsureChannelFirstD, RandSpatialCropd, Spacingd, SqueezeDimd


def main():
    path1 = "/home/sauroman/mia/data/angio/2/0002.DCM"
    path2 = "/home/sauroman/mia/data/angio/3/0003.DCM"

    ######### Uncommenting this section works around the error (it adds the missing 0008,0005 tag to both files) #########
    #ds1 = pydicom.read_file(path1)
    #ds2 = pydicom.read_file(path2)

    #ds1.add_new((0x008, 0x005), 'CS', 'ISO_IR 100')
    #ds2.add_new((0x008, 0x005), 'CS', 'ISO_IR 100')
    #path1 = 'img1.dcm'
    #path2 = 'img2.dcm'
    #ds1.save_as(path1)
    #ds2.save_as(path2)
    ############################################################################
    images = [
        path1,
        path2
    ]

    labels = np.array([0, 1], dtype=np.int64)
    train_files = [{"img": img, "label": label} for img, label in zip(images, labels)]

    # Define transforms for image
    train_transforms = Compose(
        [
            LoadImaged(keys=["img"]),
            EnsureChannelFirstD(keys=["img"]),
            Resized(keys=["img"], spatial_size=(48, 48, 48)),
           # SqueezeDimd(keys=["img"], dim=-1),
            ToTensord(keys=["img"])
        ]
    )
    # Define dataset, data loader
    check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    check_loader = DataLoader(check_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())
    check_data = monai.utils.misc.first(check_loader)
    print(check_data["img"].shape, check_data["label"])

    # create a training data loader
    train_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
    train_loader = DataLoader(train_ds, batch_size=2, shuffle=True, num_workers=4, pin_memory=torch.cuda.is_available())

    # Create DenseNet121, CrossEntropyLoss and Adam optimizer
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = monai.networks.nets.DenseNet121(spatial_dims=3, in_channels=1, out_channels=2).to(device)
    loss_function = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), 1e-5)

    # start a typical PyTorch training
    for epoch in range(50):
        print("-" * 10)
        print(f"epoch {epoch + 1}/{50}")
        model.train()
        epoch_loss = 0
        step = 0
        for batch_data in train_loader:
            step += 1
            inputs, labels = batch_data["img"].to(device), batch_data["label"].to(device)
            optimizer.zero_grad()
            outputs = model(inputs)
            loss = loss_function(outputs, labels)
            loss.backward()
            optimizer.step()
            epoch_loss += loss.item()
            epoch_len = len(train_ds) // train_loader.batch_size
            print(f"{step}/{epoch_len}, train_loss: {loss.item():.4f}")
        epoch_loss /= step
        print(f"epoch {epoch + 1} average loss: {epoch_loss:.4f}")


if __name__ == "__main__":
    main()
```

Running this gives:

```
Exception has occurred: KeyError
Caught KeyError in DataLoader worker process 0.
Original Traceback (most recent call last):
  File "/home/sauroman/.pyenv/versions/3.8.3/lib/python3.8/site-packages/torch/utils/data/_utils/worker.py", line 198, in _worker_loop
    data = fetcher.fetch(index)
  File "/home/sauroman/.pyenv/versions/3.8.3/lib/python3.8/site-packages/torch/utils/data/_utils/fetch.py", line 47, in fetch
    return self.collate_fn(data)
  File "/home/sauroman/.pyenv/versions/3.8.3/lib/python3.8/site-packages/torch/utils/data/_utils/collate.py", line 73, in default_collate
    return {key: default_collate([d[key] for d in batch]) for key in elem}
  File "/home/sauroman/.pyenv/versions/3.8.3/lib/python3.8/site-packages/torch/utils/data/_utils/collate.py", line 73, in <dictcomp>
    return {key: default_collate([d[key] for d in batch]) for key in elem}
  File "/home/sauroman/.pyenv/versions/3.8.3/lib/python3.8/site-packages/torch/utils/data/_utils/collate.py", line 73, in default_collate
    return {key: default_collate([d[key] for d in batch]) for key in elem}
  File "/home/sauroman/.pyenv/versions/3.8.3/lib/python3.8/site-packages/torch/utils/data/_utils/collate.py", line 73, in <dictcomp>
    return {key: default_collate([d[key] for d in batch]) for key in elem}
  File "/home/sauroman/.pyenv/versions/3.8.3/lib/python3.8/site-packages/torch/utils/data/_utils/collate.py", line 73, in <listcomp>
    return {key: default_collate([d[key] for d in batch]) for key in elem}
KeyError: '0008|0005'
```
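For what it's worth, the error seems to come from torch's default_collate rather than from the image reader itself: the batch dict is built from the keys of the first sample's meta dict, so a tag present in one image but missing from the other raises a KeyError during collation. A minimal sketch of that mechanism, independent of MONAI (the tag values are made up):

```python
# Sketch: default_collate builds the batch from the keys of the FIRST sample,
# so a key missing from the second sample's meta dict raises KeyError.
from torch.utils.data.dataloader import default_collate

meta_a = {"0008|0005": "ISO_IR 100", "0008|1050": "SOME^PHYSICIAN"}  # made-up values
meta_b = {}  # second file without these conditional/optional tags

try:
    default_collate([{"img_meta_dict": meta_a}, {"img_meta_dict": meta_b}])
except KeyError as err:
    print("KeyError:", err)  # -> '0008|0005', same key as in the traceback above
```

So besides rewriting the files with pydicom as in the commented-out section above, dropping the per-image meta dict before batching might also avoid the error, e.g. by appending monai.transforms.DeleteItemsd(keys="img_meta_dict") as the last transform (assuming the default *_meta_dict naming of LoadImaged), at the cost of losing the metadata in the batch.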
