4 changes: 4 additions & 0 deletions pyproject.toml
@@ -98,6 +98,10 @@ virchow2 = [
    "huggingface-hub>=0.27.1",
    "torch>=2.0.0",
]
attentionui = [
    "napari>=0.6.0",
    "pyqt5>=5.15.11",
]
cobra = [
    "stamp[flash-attention]",
    "causal-conv1d @ git+https://github.com/KatherLab/causal-conv1d.git@52ec902314b9eda800162c73502a89f3572fc522",
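The new optional dependency group matches the install hint in the ImportError guard further down: the interactive viewer is enabled with `pip install 'stamp[attentionui]'`, with napari providing the viewer and PyQt5 its Qt backend.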
24 changes: 24 additions & 0 deletions src/stamp/__main__.py
@@ -236,6 +236,27 @@ def _run_cli(args: argparse.Namespace) -> None:
                default_slide_mpp=config.heatmaps.default_slide_mpp,
            )

case "attentionui":
Collaborator (review comment): As mentioned in the other comment, this new section could be integrated into the heatmaps one by adding one more optional field.

            from stamp.heatmaps import attention_ui_

            if config.attentionui is None:
                raise ValueError("no attention configuration supplied")

            _add_file_handle_(_logger, output_dir=config.attentionui.output_dir)
            _logger.info(
                "using the following configuration:\n"
                f"{yaml.dump(config.attentionui.model_dump(mode='json'))}"
            )
            attention_ui_(
                feature_dir=config.attentionui.feature_dir,
                wsi_dir=config.attentionui.wsi_dir,
                checkpoint_path=config.attentionui.checkpoint_path,
                output_dir=config.attentionui.output_dir,
                slide_paths=config.attentionui.slide_paths,
                device=config.attentionui.device,
                default_slide_mpp=config.attentionui.default_slide_mpp,
            )

        case _:
            raise RuntimeError(
                "unreachable: the argparser should only allow valid commands"
@@ -295,6 +316,9 @@ def main() -> None:
    )
    commands.add_parser("config", help="Print the loaded configuration")
    commands.add_parser("heatmaps", help="Generate heatmaps for a trained model")
    commands.add_parser(
        "attentionui", help="Provides an interactive UI for exploring attention maps"
    )

    args = parser.parse_args()

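With this parser entry, the viewer is exposed as the `attentionui` subcommand (presumably invoked the same way as the existing `heatmaps` command, e.g. `stamp attentionui` with an `attentionui:` section present in the loaded config); if that section is missing, the run aborts with the ValueError raised above.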
3 changes: 2 additions & 1 deletion src/stamp/config.py
@@ -1,7 +1,7 @@
from pydantic import BaseModel, ConfigDict

from stamp.encoding.config import PatientEncodingConfig, SlideEncodingConfig
from stamp.heatmaps.config import HeatmapConfig
from stamp.heatmaps.config import HeatmapConfig, AttentionUIConfig
from stamp.modeling.config import (
    AdvancedConfig,
    CrossvalConfig,
@@ -24,6 +24,7 @@ class StampConfig(BaseModel):
    statistics: StatsConfig | None = None

    heatmaps: HeatmapConfig | None = None
    attentionui: AttentionUIConfig | None = None

    slide_encoding: SlideEncodingConfig | None = None

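`AttentionUIConfig` itself is not part of this diff; a minimal sketch of what it presumably looks like, with the field names taken from the dispatch code in `__main__.py` above, and the types and defaults being assumptions:

    # Hypothetical reconstruction of the model added to stamp/heatmaps/config.py
    # (not shown in this diff); only the field names are grounded in __main__.py.
    from pathlib import Path

    from pydantic import BaseModel


    class AttentionUIConfig(BaseModel):
        output_dir: Path
        feature_dir: Path
        wsi_dir: Path
        checkpoint_path: Path
        # Slides (relative to wsi_dir) to offer in the UI; None means all slides.
        slide_paths: list[Path] | None = None
        # Assumed defaults; the sample config.yaml below omits both keys.
        device: str = "cuda"
        default_slide_mpp: float | None = None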
19 changes: 19 additions & 0 deletions src/stamp/config.yaml
@@ -215,6 +215,25 @@ heatmaps:
  #bottomk: 5


attentionui:
  output_dir: "/path/to/save/files/to"

  # Directory the extracted features are saved in.
  feature_dir: "/path/your/extracted/features/are/stored/in"

  # Directory containing the whole slide images.
  wsi_dir: "/path/containing/the/whole/slide/images"

  # Path of the model to generate the attention maps with.
  checkpoint_path: "/path/to/model.ckpt"

  # Slides to generate the attention maps for.
  # The slide paths have to be specified relative to `wsi_dir`.
  # If not specified, stamp will allow processing all slides in `wsi_dir`.
  #slide_paths:
  #- slide1.svs
  #- slide2.mrxs


slide_encoding:
  # Encoder to use for slide encoding. Possible options are "cobra",
  # "eagle", "titan", "gigapath", "chief", "prism", "madeleine".
52 changes: 52 additions & 0 deletions src/stamp/heatmaps/__init__.py
Collaborator (review comment): Attention UI logic should be in a separate script, as is done for crossval and train.

@@ -314,3 +314,55 @@ def heatmaps_(

        fig.savefig(slide_output_dir / f"overview-{h5_path.stem}.png")
        plt.close(fig)


def attention_ui_(
    *,
    feature_dir: Path,
    wsi_dir: Path,
    checkpoint_path: Path,
    output_dir: Path,
    slide_paths: Iterable[Path] | None,
    device: DeviceLikeType,
    default_slide_mpp: SlideMPP | None,
) -> None:
    try:
        from stamp.heatmaps.attention_ui import show_attention_ui
    except ImportError as e:
        raise ImportError(
            "Attention UI dependencies not installed. "
            "Please reinstall stamp using `pip install 'stamp[attentionui]'`"
        ) from e

    with torch.no_grad():
        # Collect slides to generate attention maps for
        if slide_paths is not None:
            wsis_to_process_all = (wsi_dir / slide for slide in slide_paths)
        else:
            wsis_to_process_all = (
                p for ext in supported_extensions for p in wsi_dir.glob(f"**/*{ext}")
            )

        # Check if a corresponding feature file exists
        wsis_to_process = []
        for wsi_path in wsis_to_process_all:
            h5_path = feature_dir / wsi_path.with_suffix(".h5").name

            if not h5_path.exists():
                _logger.info(
                    f"could not find matching h5 file at {h5_path}. Skipping..."
                )
                continue

            wsis_to_process.append(str(wsi_path))

        # Launch the UI
        show_attention_ui(
            feature_dir,
            wsis_to_process,
            checkpoint_path,
            output_dir,
            slide_paths,
            device,
            default_slide_mpp,
        )
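
For reference, a minimal direct-call sketch of the new entry point (the paths are placeholders; the keyword arguments mirror the signature above):

    from pathlib import Path

    from stamp.heatmaps import attention_ui_

    attention_ui_(
        feature_dir=Path("/data/features"),
        wsi_dir=Path("/data/slides"),
        checkpoint_path=Path("/data/model.ckpt"),
        output_dir=Path("/data/attention_ui"),
        slide_paths=None,        # None: every slide in wsi_dir with a matching .h5 file
        device="cpu",            # any torch device spec accepted here
        default_slide_mpp=None,  # assumed fallback for slides without MPP metadata
    )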