llm-tools/mcps/dicom_mcp/dicom_mcp/tools/discovery.py

"""DICOM file discovery tools: list_files and find_dixon_series."""
import asyncio
import json
from pathlib import Path
from typing import Any, Dict, Optional
from mcp.types import ToolAnnotations
from dicom_mcp.server import mcp
from dicom_mcp.config import MAX_FILES
from dicom_mcp.constants import ResponseFormat
from dicom_mcp.helpers.tags import _safe_get_tag, _format_markdown_table
from dicom_mcp.helpers.sequence import (
    _identify_sequence_type,
    _is_dixon_sequence,
    _get_dixon_image_types,
)
from dicom_mcp.helpers.files import _find_dicom_files


@mcp.tool(
    name="dicom_list_files",
    annotations=ToolAnnotations(
        title="List DICOM Files in Directory",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_list_files(
    directory: str,
    recursive: bool = True,
    filter_sequence_type: Optional[str] = None,
    count_only: bool = False,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """List all DICOM files in a directory with optional filtering.

    Recursively searches a directory for DICOM files and provides basic
    metadata about each file, including series information and sequence type.
    Useful for discovering available test data and organizing QA workflows.

    Set count_only=True to return just the series breakdown with file counts
    instead of listing every individual file. This is much more efficient for
    large directories when you only need an inventory overview.
    """
    try:
        dir_path = Path(directory)
        if not dir_path.exists():
            return f"Error: Directory not found: {directory}"
        if not dir_path.is_dir():
            return f"Error: Path is not a directory: {directory}"

        dicom_files, truncated = await asyncio.to_thread(
            _find_dicom_files, dir_path, recursive
        )
        if not dicom_files:
            return f"No DICOM files found in {directory}"

        # Build per-file info and optional series counts
        file_info = []
        series_counts: Dict[str, Dict[str, Any]] = {}  # keyed by series_number
        for file_path, ds in dicom_files:
            sequence_type = _identify_sequence_type(ds)
            if filter_sequence_type:
                normalized_filter = filter_sequence_type.lower().strip()
                if sequence_type.value != normalized_filter:
                    continue

            series_num = _safe_get_tag(ds, (0x0020, 0x0011))
            series_desc = _safe_get_tag(ds, (0x0008, 0x103E))
            manufacturer = _safe_get_tag(ds, (0x0008, 0x0070))
            modality = _safe_get_tag(ds, (0x0008, 0x0060))

            # Always accumulate series counts (cheap)
            if series_num not in series_counts:
                series_counts[series_num] = {
                    "series_number": series_num,
                    "series_description": series_desc,
                    "sequence_type": sequence_type.value,
                    "manufacturer": manufacturer,
                    "modality": modality,
                    "file_count": 0,
                }
            series_counts[series_num]["file_count"] += 1

            # Only build full file list when not in count_only mode
            if not count_only:
                info = {
                    "path": str(file_path.relative_to(dir_path)),
                    "series_description": series_desc,
                    "series_number": series_num,
                    "sequence_type": sequence_type.value,
                    "manufacturer": manufacturer,
                    "modality": modality,
                }
                file_info.append(info)

        total_matched = sum(sc["file_count"] for sc in series_counts.values())

        # Sort series by number
        def _sort_series_key(item: tuple) -> tuple:
            sn = item[1]["series_number"]
            if sn is None or sn == "N/A":
                return (2, "")
            try:
                return (0, int(sn))
            except (ValueError, TypeError):
                return (1, str(sn))
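        # Sort order: numeric series numbers first (ascending), then non-numeric
        # values alphabetically, with missing/"N/A" entries last.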
        sorted_series = sorted(series_counts.items(), key=_sort_series_key)

        if count_only:
            # --- count_only mode: return series breakdown only ---
            series_list = [data for _, data in sorted_series]
            if response_format == ResponseFormat.JSON:
                result = {
                    "total_files": total_matched,
                    "directory": str(dir_path),
                    "truncated": truncated,
                    "series_count": len(series_list),
                    "series": series_list,
                }
                return json.dumps(result, indent=2)
            else:
                output = [
                    f"# DICOM File Counts in {dir_path}\n",
                    f"Total files: {total_matched} across {len(series_list)} series\n",
                ]
                if truncated:
                    output.append(
                        f"**Warning**: Scan truncated at {MAX_FILES} files.\n"
                    )
                headers = ["Series", "Description", "Type", "Manufacturer", "Files"]
                rows = [
                    [
                        s["series_number"],
                        s["series_description"],
                        s["sequence_type"],
                        s["manufacturer"],
                        str(s["file_count"]),
                    ]
                    for s in series_list
                ]
                output.append(_format_markdown_table(headers, rows))
                return "\n".join(output)
        else:
            # --- full listing mode (original behaviour) ---
            if response_format == ResponseFormat.JSON:
                result = {
                    "total_files": len(file_info),
                    "directory": str(dir_path),
                    "truncated": truncated,
                    "files": file_info,
                }
                return json.dumps(result, indent=2)
            else:
                output = [
                    f"# DICOM Files in {dir_path}\n",
                    f"Total files found: {len(file_info)}\n",
                ]
                if truncated:
                    output.append(
                        f"**Warning**: Results truncated at {MAX_FILES} files. Narrow your search directory.\n"
                    )
                if file_info:
                    headers = ["Path", "Series", "Number", "Sequence", "Manufacturer"]
                    rows = [
                        [
                            f["path"],
                            f["series_description"],
                            f["series_number"],
                            f["sequence_type"],
                            f["manufacturer"],
                        ]
                        for f in file_info
                    ]
                    output.append(_format_markdown_table(headers, rows))
                return "\n".join(output)
    except Exception as e:
        return f"Error listing DICOM files: {str(e)}"


@mcp.tool(
    name="dicom_find_dixon_series",
    annotations=ToolAnnotations(
        title="Find Dixon Sequences",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_find_dixon_series(
    directory: str,
    recursive: bool = True,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Find and analyze Dixon sequences in a directory.

    Searches for Dixon (chemical shift) sequences and identifies the different
    image types (water, fat, in-phase, out-of-phase) available in each series.
    This is critical for body composition QA workflows.
    """
    try:
        dir_path = Path(directory)
        if not dir_path.exists():
            return f"Error: Directory not found: {directory}"

        dicom_files, truncated = await asyncio.to_thread(
            _find_dicom_files, dir_path, recursive
        )
        if not dicom_files:
            return f"No DICOM files found in {directory}"

        series_map: Dict[str, Dict[str, Any]] = {}
        for file_path, ds in dicom_files:
            if not _is_dixon_sequence(ds):
                continue
            series_uid = _safe_get_tag(ds, (0x0020, 0x000E))
            if series_uid not in series_map:
                series_map[series_uid] = {
                    "series_description": _safe_get_tag(ds, (0x0008, 0x103E)),
                    "series_number": _safe_get_tag(ds, (0x0020, 0x0011)),
                    "series_uid": series_uid,
                    "image_types": set(),
                    "files": [],
                    "sample_file": str(file_path),
                }
            image_types = _get_dixon_image_types(ds)
            series_map[series_uid]["image_types"].update(image_types)
            series_map[series_uid]["files"].append(str(file_path))

        if not series_map:
            return f"No Dixon sequences found in {directory}"

        series_list = []
        for series_data in series_map.values():
            series_list.append(
                {
                    "series_description": series_data["series_description"],
                    "series_number": series_data["series_number"],
                    "series_uid": series_data["series_uid"],
                    "image_types": sorted(series_data["image_types"]),
                    "file_count": len(series_data["files"]),
                    "sample_file": series_data["sample_file"],
                }
            )
        # Sort numerically where possible (so series 10 follows series 2),
        # falling back to string comparison for non-numeric values.
        def _series_sort_key(s: Dict[str, Any]) -> tuple:
            try:
                return (0, int(s["series_number"]))
            except (ValueError, TypeError):
                return (1, str(s["series_number"]))

        series_list.sort(key=_series_sort_key)

        if response_format == ResponseFormat.JSON:
            result = {
                "total_dixon_series": len(series_list),
                "directory": str(dir_path),
                "truncated": truncated,
                "series": series_list,
            }
            return json.dumps(result, indent=2)
        else:
            output = [
                f"# Dixon Sequences in {dir_path}\n",
                f"Found {len(series_list)} Dixon series\n",
            ]
            if truncated:
                output.append(
                    f"**Warning**: File scan truncated at {MAX_FILES} files.\n"
                )
            for series in series_list:
                output.append(
                    f"## Series {series['series_number']}: {series['series_description']}"
                )
                output.append(f"- **Series UID**: {series['series_uid']}")
                output.append(f"- **Image Types**: {', '.join(series['image_types'])}")
                output.append(f"- **File Count**: {series['file_count']}")
                output.append(f"- **Sample File**: {Path(series['sample_file']).name}")
                output.append("")
            return "\n".join(output)
    except Exception as e:
        return f"Error finding Dixon sequences: {str(e)}"