first commit

This commit is contained in:
Gregory Gauthier 2026-04-08 12:11:04 +01:00
commit 83ec950df7
1449 changed files with 412954 additions and 0 deletions

4
.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
**/.DS_Store
**/.idea/
**/deploy/
*.log

28
.mcp.json Normal file
View File

@ -0,0 +1,28 @@
{
"mcpServers": {
"dicom-mcp": {
"command": "zsh",
"args": ["mcps/dicom_mcp/run_dicom_mcp_server.sh"]
},
"filesystem": {
"command": "zsh",
"args": ["mcps/filesystem_mcp/launch_filesystem_mcp.sh"],
"env": {
"MCP_LOG_LEVEL": "debug",
"MCP_TRANSPORT_TYPE": "stdio"
}
},
"open-meteo-mcp": {
"command": "zsh",
"args": ["mcps/open_meteo_mcp/run_open_meteo_mcp_server.sh"]
},
"playwright-mcp": {
"command": "zsh",
"args": ["mcps/playwright_mcp/launch_playwright_mcp.sh"]
},
"selenium-mcp": {
"command": "zsh",
"args": ["mcps/selenium_mcp/launch_selenium_mcp_server.sh"]
}
}
}

70
README.md Normal file
View File

@ -0,0 +1,70 @@
# Claude Tools
## Overview
A collection of Claude skills and "Model Context Protocol" (MCP) servers that are useful to the functional quality team, or to anyone else interested in using some of Claude's more exotic capabilities.
## Requirements
* Python 3.10+
* Node.js (for filesystem and playwright MCPs)
* Poetry (for Python-based MCPs)
* Claude Desktop and/or Claude Code
## Structure
```
claude-tools/
|-- .claude/ Claude Code project settings
|-- .mcp.json MCP server registration for Claude Code
|-- config/ Config files for Claude Desktop (~/.config/Claude/)
|-- mcps/ MCP servers
| |-- dicom_mcp/ DICOM medical imaging inspection (17 read-only tools)
| |-- filesystem_mcp/ File system operations via @cyanheads/filesystem-mcp-server
| |-- open_meteo_mcp/ Global weather data via the Open-Meteo API
| |-- playwright_mcp/ Browser automation via Playwright
| |-- selenium_mcp/ Browser automation via Selenium WebDriver
|-- skills/ Skill packages
| |-- artifacts-builder/ Build multi-component HTML artifacts with React, Tailwind, shadcn/ui
| |-- document-skills/ Document creation and manipulation
| | |-- docx/ Word document creation, editing, and analysis
| | |-- pdf/ PDF extraction, creation, merging, splitting, and forms
| | |-- pptx/ PowerPoint creation, editing, and analysis
| | |-- xlsx/ Spreadsheet creation, analysis, and formula support
| |-- mcp-builder/ Guide for building MCP servers (Python FastMCP / Node SDK)
| |-- skill-creator/ Guide for creating new Claude Code skills
| |-- webapp-testing/ Test local web applications with Playwright scripts
```
## Setup
### Claude Code
The `.mcp.json` at the project root registers all MCP servers. Opening this project in Claude Code will make them available. The `.claude/settings.local.json` enables all servers and registers skill paths.
### Claude Desktop
Copy (or symlink) `config/claude_desktop_config.json` to your Claude Desktop config directory:
```bash
cp config/claude_desktop_config.json ~/Library/Application\ Support/Claude/claude_desktop_config.json
```
## Claude Docs
### Skills
- **Skills Overview**: [Agent Skills - Claude Docs](https://platform.claude.com/docs/en/agents-and-tools/agent-skills/overview)
- **Creating Skills in Claude Code**: [Agent Skills - Claude Code Docs](https://code.claude.com/docs/en/skills)
- **Using Skills with the API**: [Using Agent Skills with the API - Claude Docs](https://platform.claude.com/docs/en/build-with-claude/skills-guide)
- **Skills API Quickstart**: [Agent Skills - Claude Docs](https://platform.claude.com/docs/en/agents-and-tools/agent-skills/overview)
- **Skills on Console**: [Agent Skills - Claude Docs](https://console.anthropic.com/docs/en/agents-and-tools/agent-skills/overview)
- **GitHub - anthropics/skills**: [GitHub - anthropics/skills: Public repository for Agent Skills](https://github.com/anthropics/skills)
### MCPs
- **MCP Documentation (main)**: [Model Context Protocol (MCP) Docs](https://modelcontextprotocol.info/docs/)
- **MCP Specification**: [GitHub - modelcontextprotocol/modelcontextprotocol: Specification and documentation for the Model Context Protocol](https://github.com/modelcontextprotocol/modelcontextprotocol)
- **MCP Specification Repository**: [GitHub - modelcontextprotocol/modelcontextprotocol: Specification and documentation for the Model Context Protocol](https://github.com/modelcontextprotocol/modelcontextprotocol)
- **MCP Documentation Repository**: [GitHub - modelcontextprotocol/docs: Documentation for the Model Context Protocol (MCP)](https://github.com/modelcontextprotocol/docs)
- **MCP in Claude Docs**: https://anthropic.mintlify.app/en/docs/mcp

View File

@ -0,0 +1,27 @@
{
"mcpServers": {
"dicom-mcp": {
"command": "/Users/gregory.gauthier/Projects/pd/claude-tools/mcps/dicom_mcp/run_dicom_mcp_server.sh"
},
"filesystem": {
"command": "zsh",
"args": ["/Users/gregory.gauthier/Projects/pd/claude-tools/mcps/filesystem_mcp/launch_filesystem_mcp.sh"],
"env": {
"MCP_LOG_LEVEL": "debug",
"MCP_TRANSPORT_TYPE": "stdio"
}
},
"open-meteo-mcp": {
"command": "/Users/gregory.gauthier/Projects/pd/claude-tools/mcps/open_meteo_mcp/run_open_meteo_mcp_server.sh"
},
"playwright-mcp": {
"command": "/Users/gregory.gauthier/Projects/pd/claude-tools/mcps/playwright_mcp/launch_playwright_mcp.sh"
},
"selenium-mcp": {
"command": "/Users/gregory.gauthier/Projects/pd/claude-tools/mcps/selenium_mcp/launch_selenium_mcp_server.sh"
}
},
"preferences": {
"quickEntryDictationShortcut": "capslock"
}
}

View File

@ -0,0 +1,6 @@
---
description: Use when Edit feature fails with xAI "Raw sampling not supported" error
alwaysApply: false
---
For Edit operations (Cmd/Ctrl+I), prefer non-xAI models like gpt-4o or claude-3.5-sonnet. xAI reasoning models don't support raw sampling needed for Edit.

7
mcps/dicom_mcp/.gitignore vendored Normal file
View File

@ -0,0 +1,7 @@
.ai/
.idea/
.claude/
__pycache__/
build/
dist/
poetry.lock

7
mcps/dicom_mcp/.mcp.json Normal file
View File

@ -0,0 +1,7 @@
{
"mcpServers": {
"dicom_mcp": {
"command": "/data/Projects/mcp_servers/dicom_mcp/run_dicom_mcp_server.sh"
}
}
}

152
mcps/dicom_mcp/CLAUDE.md Normal file
View File

@ -0,0 +1,152 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Project Overview
A Model Context Protocol (MCP) server that provides DICOM medical imaging QA tools to Claude. Built with FastMCP and pydicom, it exposes 17 read-only async tools for analyzing DICOM files, with a focus on body composition analysis, Dixon sequence validation, pixel analysis, Philips private tag resolution, UID comparison, segmentation verification, and T1 mapping (MOLLI/NOLLI) analysis.
## Commands
```bash
# Install dependencies
poetry install --with dev
# Run all tests
poetry run pytest -v --tb=short
# Run a single test class
poetry run pytest test_dicom_mcp.py::TestToolRegistration -v
# Run a single test
poetry run pytest test_dicom_mcp.py::TestToolRegistration::test_all_tools_registered -v
# Run the MCP server directly
poetry run python -m dicom_mcp
# Run with PII filtering enabled
DICOM_MCP_PII_FILTER=true poetry run python -m dicom_mcp
```
## Architecture
The project is structured as a Python package (`dicom_mcp/`) with a backward-compatible shim at the root (`dicom_mcp.py`). All tests live in `test_dicom_mcp.py`.
### Package Structure
```
dicom_mcp/
__init__.py # Re-exports all public symbols for backward compat
__main__.py # Entry point: python -m dicom_mcp
server.py # mcp = FastMCP("dicom_mcp") + run()
config.py # Env-var-driven config (PII_FILTER_ENABLED, MAX_FILES)
constants.py # Enums (ResponseFormat, SequenceType), COMMON_TAGS, VALID_TAG_GROUPS
pii.py # PII tag set + redaction functions
helpers/
__init__.py # Re-exports all helper functions
tags.py # _safe_get_tag, _format_tag_value, _resolve_tag, _validate_tag_groups, _format_markdown_table
sequence.py # _identify_sequence_type, _is_dixon_sequence, _get_dixon_image_types
files.py # _is_dicom_file, _find_dicom_files
philips.py # _resolve_philips_private_tag, _list_philips_private_creators
pixels.py # _get_pixel_array, _extract_roi, _compute_stats, _apply_windowing
filters.py # _parse_filter, _apply_filter
tree.py # _build_tree_text, _build_tree_json, _format_tree_value
tools/
__init__.py # Imports all tool modules to trigger @mcp.tool() registration
discovery.py # dicom_list_files, dicom_find_dixon_series
metadata.py # dicom_get_metadata, dicom_compare_headers
query.py # dicom_query, dicom_summarize_directory
validation.py # dicom_validate_sequence, dicom_analyze_series
search.py # dicom_search
philips.py # dicom_query_philips_private
pixels.py # dicom_read_pixels, dicom_compute_snr, dicom_render_image
tree.py # dicom_dump_tree
uid_comparison.py # dicom_compare_uids
segmentation.py # dicom_verify_segmentations
ti_analysis.py # dicom_analyze_ti
dicom_mcp.py # Thin shim so `python dicom_mcp.py` still works
```
### Import Chain (No Circular Dependencies)
- `config.py` and `constants.py` are leaf modules (no internal imports)
- `server.py` only imports `FastMCP` externally — owns the `mcp` instance
- `helpers/*.py` import from `constants.py`, `config.py`, pydicom/numpy (never `server.py`)
- `tools/*.py` import `mcp` from `server.py`, plus helpers and constants
- `tools/__init__.py` imports all tool modules (triggers `@mcp.tool()` registration)
- `server.run()` does a deferred import of `dicom_mcp.tools` before starting
- `__init__.py` re-exports everything so `import dicom_mcp` still works
**Tool structure:** Each tool is an async function that takes a directory/file path, performs DICOM analysis using pydicom, and returns formatted results (markdown or JSON). All tools are read-only and annotated with `ToolAnnotations(readOnlyHint=True)`. All blocking I/O is wrapped in `asyncio.to_thread()` to prevent event loop blocking.
**The 17 tools:**
- `dicom_list_files` — Recursively find DICOM files, optionally filtered by sequence type; supports count_only mode
- `dicom_get_metadata` — Extract DICOM header info from a single file using tag groups; supports Philips private tags via `philips_private_tags` parameter
- `dicom_compare_headers` — Compare headers across 2-10 files side-by-side
- `dicom_find_dixon_series` — Identify Dixon sequences and detect image types (water/fat/in-phase/out-phase)
- `dicom_validate_sequence` — Validate sequence parameters (TR, TE, flip angle) against expected values
- `dicom_analyze_series` — Comprehensive series analysis checking parameter consistency and completeness
- `dicom_summarize_directory` — High-level overview of directory contents with field-level summaries
- `dicom_query` — Query arbitrary DICOM tags across a directory with optional grouping
- `dicom_search` — Search DICOM files using filter syntax (text, numeric, presence operators)
- `dicom_query_philips_private` — Query Philips private DICOM tags by DD number and element offset; can list all Private Creator tags or resolve specific private elements
- `dicom_read_pixels` — Extract pixel statistics with optional ROI and histogram
- `dicom_compute_snr` — Compute signal-to-noise ratio from two ROIs
- `dicom_render_image` — Render DICOM to PNG with windowing and ROI overlays
- `dicom_dump_tree` — Full hierarchical dump of DICOM structure including nested sequences; configurable depth and private tag visibility
- `dicom_compare_uids` — Compare UID sets (e.g. SeriesInstanceUID) between two DICOM directories; supports any tag keyword or hex code
- `dicom_verify_segmentations` — Validate that segmentation DICOM files reference valid source images via SourceImageSequence
- `dicom_analyze_ti` — Extract and validate inversion times from MOLLI/T1 mapping sequences across vendors; handles Philips private TI tags automatically
**Key constants:**
- `MAX_FILES` — Safety limit for directory scans (default 1000, configurable via `DICOM_MCP_MAX_FILES` env var)
- `COMMON_TAGS` — Dictionary of 9 tag groups (patient_info, study_info, series_info, image_info, acquisition, manufacturer, equipment, geometry, pixel_data) mapping to DICOM tag tuples
- `VALID_TAG_GROUPS` — Sorted list of valid tag group names
## PII Filtering
Patient-identifying tags can be redacted from tool output via an environment variable:
- **Enable:** `DICOM_MCP_PII_FILTER=true` (accepts `true`, `1`, `yes`, case-insensitive)
- **Disable:** unset or any other value (default)
**Redacted tags** (patient tags only): PatientName `(0010,0010)`, PatientID `(0010,0020)`, PatientBirthDate `(0010,0030)`, PatientSex `(0010,0040)`.
**Affected tools:** `dicom_get_metadata`, `dicom_compare_headers`, `dicom_summarize_directory`, `dicom_query`. All other tools do not expose patient data and are unaffected.
Redaction is applied at the output formatting level via `redact_if_pii()` in `pii.py`. Internal logic (sequence identification, grouping) uses raw values.
## Behavioural Constraints
This MCP is a **data inspection tool**, not a clinical decision support system. When using these tools, keep responses strictly factual and descriptive:
- **Report** what is observed in the DICOM data (tag values, pixel statistics, parameters, counts)
- **Describe** technical characteristics (acquisition settings, sequence types, vendor differences)
- **Do not** suggest clinical utility, diagnostic applications, or workflow suitability
- **Do not** interpret findings in a clinical or diagnostic context
- **Do not** assess data quality relative to specific clinical use cases
- **Do not** recommend clinical actions based on the data
> Present data as-is. Qualified professionals draw the conclusions.
These constraints are enforced at protocol level via `FastMCP(instructions=...)` in `server.py`, which sends them to any MCP client at connection time. See **[docs/GUIDELINES.md](docs/GUIDELINES.md)** for the full policy and regulatory context.
## Key Patterns
- All tools use `ResponseFormat` enum (MARKDOWN/JSON) for output formatting
- `SequenceType` enum covers: DIXON, T1_MAPPING, MULTI_ECHO_GRE, SPIN_ECHO_IR, T1, T2, FLAIR, DWI, LOCALIZER, UNKNOWN
- DICOM files are validated by checking for the 128-byte preamble + "DICM" magic bytes in `_is_dicom_file()`
- Custom DICOM tags can be specified in hex format `(GGGG,EEEE)`
- Philips private tags are resolved dynamically per-file via `_resolve_philips_private_tag()` — block assignments vary across scanners
- All synchronous I/O (pydicom.dcmread, file globbing) is wrapped in `asyncio.to_thread()` to keep the event loop responsive
- `_find_dicom_files()` returns `tuple[list[tuple[Path, Dataset]], bool]` — pre-read datasets + truncation flag — to avoid double-reading files
- The server imports `FastMCP` from `mcp.server.fastmcp` (not directly from `fastmcp`)
## Documentation
Additional documentation lives in the `docs/` directory:
- **[docs/USAGE.md](docs/USAGE.md)** — Detailed tool reference, parameter guide, and QA workflow examples
- **[docs/TODO.md](docs/TODO.md)** — Planned improvements and known issues
- **[docs/CAPABILITIES.md](docs/CAPABILITIES.md)** — Plain-English summary of all 17 tool capabilities
- **[docs/GUIDELINES.md](docs/GUIDELINES.md)** — Behavioural constraints and regulatory context

389
mcps/dicom_mcp/INSTALL.md Normal file
View File

@ -0,0 +1,389 @@
# Installation Guide
This guide covers installing the DICOM MCP server for use with Claude Desktop, Claude Code, or any other MCP-compatible client.
## Table of Contents
- [Supported Clients](#supported-clients)
- [1. Installation](#1-installation)
- [1a. Standalone Package (Non-Developers)](#1a-standalone-package-non-developers)
- [1b. Developer Installation (Poetry)](#1b-developer-installation-poetry)
- [2. Client Configuration](#2-client-configuration)
- [2a. Claude Desktop](#2a-claude-desktop)
- [2b. Claude Code](#2b-claude-code)
- [3. PII Filtering](#3-pii-filtering)
- [4. Environment Variables](#4-environment-variables)
- [5. Verifying the Installation](#5-verifying-the-installation)
- [6. Troubleshooting](#6-troubleshooting)
---
## Supported Clients
This MCP server communicates via **stdio** — it runs as a local subprocess on your machine. This means it only works with Claude clients that can spawn local processes.
| Client | Supported? | Config file | Notes |
|--------|-----------|-------------|-------|
| **Claude Desktop** (native app) | Yes | `claude_desktop_config.json` | GUI-based, single config file per platform |
| **Claude Code** (terminal) | Yes | `.mcp.json` or `~/.claude.json` | CLI-based, multiple scope levels |
| **Claude Code** (VS Code / IDE) | Yes | Same as above | Same CLI, same config files — the IDE terminal makes no difference |
| **claude.ai** (web browser) | No | N/A | Cannot spawn local processes; would require an HTTP/SSE remote server wrapper |
This server implements the open [Model Context Protocol](https://modelcontextprotocol.io/) standard, so it also works with other MCP-compatible clients such as **ChatGPT**, Cursor, and Windsurf. Configuration will vary by client — refer to your client's MCP documentation. The instructions below cover Claude Desktop and Claude Code specifically.
If you're unsure which client you have: **Claude Desktop** is the native app with a chat window. **Claude Code** is the `claude` command you run in a terminal (standalone, VS Code, or any other IDE). **claude.ai** is the website at claude.ai.
---
## 1. Installation
Choose the installation method that matches your environment.
### 1a. Standalone Package (Non-Developers)
**No Python installation required.** Ideal for radiographers, QA analysts, and other team members who don't have a development environment. Everything is self-contained — Python, pydicom, numpy, and all dependencies are bundled.
#### macOS
1. Download the standalone package for your Mac:
- Apple Silicon (M1/M2/M3/M4): `dicom_mcp_standalone_arm64.tar.gz`
- Intel Mac: `dicom_mcp_standalone_x86_64.tar.gz`
2. Extract and install:
```bash
tar -xzf dicom_mcp_standalone_arm64.tar.gz
cd dicom_mcp_standalone_arm64
./install_to_claude.sh
```
3. Restart Claude Desktop.
The installer configures Claude Desktop automatically.
#### Linux
1. Download `dicom_mcp_standalone_linux_x86_64.tar.gz` (or `aarch64` for ARM).
2. Extract and install:
```bash
tar -xzf dicom_mcp_standalone_linux_x86_64.tar.gz
cd dicom_mcp_standalone_linux_x86_64
./install_to_claude.sh
```
3. Restart your MCP client.
### 1b. Developer Installation (Poetry)
For developers who want to modify the code, run tests, or extend the server.
#### Prerequisites
- Python 3.12 or higher
- [Poetry](https://python-poetry.org/docs/#installation) (Python package manager)
#### Quick Setup
```bash
# Clone the repository
git clone <repository-url>
cd dicom_mcp
# Run the automated installer (macOS, configures Claude Desktop)
./install.sh
```
#### Manual Setup
1. Install dependencies:
```bash
poetry install --with dev
```
2. Verify the installation:
```bash
poetry run pytest -v --tb=short
```
You should see all 138 tests passing.
3. Start the server (to test it manually):
```bash
poetry run python -m dicom_mcp
```
After installation, proceed to [Client Configuration](#2-client-configuration) below.
---
## 2. Client Configuration
Configuration is different for Claude Desktop and Claude Code. If you used the standalone installer (`install_to_claude.sh`), Claude Desktop is already configured — skip to [Verifying the Installation](#5-verifying-the-installation).
### 2a. Claude Desktop
Claude Desktop reads its MCP server configuration from a single JSON file. The location depends on your OS:
| OS | Config file path |
|----|-----------------|
| macOS | `~/Library/Application Support/Claude/claude_desktop_config.json` |
| Linux | `~/.config/Claude/claude_desktop_config.json` |
| Windows | `%APPDATA%\Claude\claude_desktop_config.json` |
#### Using the GUI
1. Open Claude Desktop and go to Settings (gear icon).
2. Navigate to the Developer section.
<img src="img/settings_developer_option.png" width="150px" alt="Developer settings">
3. Click **Edit Config** or **Add MCP Server**.
<img src="img/mcp_servers_panel.png" width="450px" alt="MCP servers panel">
4. Add the server configuration using one of the JSON examples below.
#### Poetry (Recommended for Developers)
```json
{
"mcpServers": {
"dicom_mcp": {
"command": "poetry",
"args": ["run", "python", "-m", "dicom_mcp"],
"cwd": "/absolute/path/to/dicom_mcp"
}
}
}
```
Replace `/absolute/path/to/dicom_mcp` with the actual directory where the project lives.
#### Virtualenv Python Directly
If Claude Desktop has trouble finding Poetry, you can point it at the virtualenv Python directly. First, get the path:
```bash
poetry env info --path
```
Then configure:
```json
{
"mcpServers": {
"dicom_mcp": {
"command": "/path/to/poetry/virtualenv/bin/python",
"args": ["-m", "dicom_mcp"],
"cwd": "/absolute/path/to/dicom_mcp"
}
}
}
```
After adding the configuration, **restart Claude Desktop** to load the server.
### 2b. Claude Code
Claude Code supports multiple configuration scopes for MCP servers, each stored in a dedicated file (separate from Claude Code's settings files). Choose the scope that matches your use case.
#### Scope Overview
| Scope | Config file | Shared via git? | Use case |
|-------|-------------|-----------------|----------|
| **Project** | `.mcp.json` (project root) | Yes | Team-shared — everyone on the project gets the server |
| **User** | `~/.claude.json` | No | Personal — available in all your projects |
| **Local** | `~/.claude.json` (project-specific section) | No | Personal — available only in the current project |
| **Managed** | System directory (see below) | No (deployed by enterprise IT) | Organisation-wide enforcement |
**Precedence** (highest to lowest): Managed > Local > Project > User. If the same MCP server name appears at multiple scopes, the higher-precedence scope wins.
#### Project Scope (Team-Shared)
Create `.mcp.json` in your project root and commit it to version control. All collaborators will get the server automatically:
```json
{
"mcpServers": {
"dicom_mcp": {
"command": "poetry",
"args": ["run", "python", "-m", "dicom_mcp"],
"cwd": "/absolute/path/to/dicom_mcp"
}
}
}
```
Replace `/absolute/path/to/dicom_mcp` with the actual directory where the DICOM MCP project lives.
**Note:** Project-scoped servers require user approval on first use. Users can reset approval with `claude mcp reset-project-choices`.
#### User Scope (Personal, All Projects)
Add to `~/.claude.json` to make the server available in every Claude Code session:
```json
{
"mcpServers": {
"dicom_mcp": {
"command": "poetry",
"args": ["run", "python", "-m", "dicom_mcp"],
"cwd": "/absolute/path/to/dicom_mcp"
}
}
}
```
#### Local Scope (Personal, One Project)
The local scope is stored in `~/.claude.json` but scoped to a specific project directory. The easiest way to add a local-scoped server is via the CLI:
```bash
claude mcp add dicom_mcp \
--scope local \
-- poetry run python -m dicom_mcp
```
#### Managed Scope (Enterprise)
For organisation-wide deployment, IT administrators can place a managed configuration in:
| OS | Path |
|----|------|
| macOS | `/Library/Application Support/ClaudeCode/managed-mcp.json` |
| Linux/WSL | `/etc/claude-code/managed-mcp.json` |
| Windows | `C:\Program Files\ClaudeCode\managed-mcp.json` |
Managed servers cannot be overridden by users and take the highest precedence.
#### Using the CLI
Claude Code provides a CLI for managing MCP servers without editing JSON files directly:
```bash
# Add a server (default: local scope)
claude mcp add dicom_mcp -- poetry run python -m dicom_mcp
# Add at a specific scope
claude mcp add dicom_mcp --scope user -- poetry run python -m dicom_mcp
claude mcp add dicom_mcp --scope project -- poetry run python -m dicom_mcp
# List all configured servers
claude mcp list
# Get details for a specific server
claude mcp get dicom_mcp
# Remove a server
claude mcp remove dicom_mcp
```
#### Virtualenv Python Fallback
If Claude Code has trouble finding Poetry, point directly at the virtualenv Python. First, get the path:
```bash
poetry env info --path
```
Then use the full Python path in your configuration (at any scope):
```json
{
"mcpServers": {
"dicom_mcp": {
"command": "/path/to/poetry/virtualenv/bin/python",
"args": ["-m", "dicom_mcp"],
"cwd": "/absolute/path/to/dicom_mcp"
}
}
}
```
After saving any configuration, restart Claude Code (or start a new session) for the changes to take effect. You can verify the server is loaded by running `/mcp` in the Claude Code interface.
---
## 3. PII Filtering
Patient-identifying tags can be redacted from all tool output by setting an environment variable. This works with any client and any configuration method.
Add an `env` block to your MCP server configuration:
```json
{
"mcpServers": {
"dicom_mcp": {
"command": "...",
"args": ["..."],
"env": {
"DICOM_MCP_PII_FILTER": "true"
}
}
}
}
```
When enabled, the following tags are replaced with `[REDACTED]` in all tool output:
- PatientName
- PatientID
- PatientBirthDate
- PatientSex
This affects `dicom_get_metadata`, `dicom_compare_headers`, `dicom_summarize_directory`, and `dicom_query`. All other tools do not expose patient data and are unaffected.
**Standalone users:** Edit the Claude Desktop config file that `install_to_claude.sh` created (see [Claude Desktop config locations](#2a-claude-desktop)) and add the `env` block to the existing `dicom_mcp` entry. Keep the `command` value the installer wrote — you're only adding the `env` section.
To disable, unset the variable or set it to any value other than `true`, `1`, or `yes`.
---
## 4. Environment Variables
| Variable | Default | Description |
|----------|---------|-------------|
| `DICOM_MCP_PII_FILTER` | `false` | Set to `true`, `1`, or `yes` to redact patient tags in tool output |
| `DICOM_MCP_MAX_FILES` | `1000` | Maximum number of DICOM files to scan per directory operation |
---
## 5. Verifying the Installation
Once configured, restart your client and verify the server is working:
1. **Claude Desktop**: Ask Claude "What MCP tools do you have available?" — you should see 17 DICOM tools listed.
2. **Claude Code**: The tools will appear in the tool list. You can also test directly:
```
List your available DICOM MCP tools
```
3. **MCP Inspector** (for debugging):
```bash
npx @modelcontextprotocol/inspector poetry run python -m dicom_mcp
```
---
## 6. Troubleshooting
### Server not appearing after configuration
- Double-check that the `cwd` path is correct and contains the `dicom_mcp/` package directory.
- Ensure Poetry is installed and on the PATH, or use the virtualenv Python directly.
- Restart the client after any configuration change.
### ModuleNotFoundError
The configured Python environment is missing dependencies. Either:
- Run `poetry install` in the project directory, or
- Switch to the virtualenv Python configuration (see above).
### Permission denied on standalone launcher
```bash
chmod +x run_dicom_mcp.sh install_to_claude.sh
```
### Poetry not found by Claude Desktop
Claude Desktop may not inherit your shell's PATH. Use the virtualenv Python directly instead of the Poetry command (see configuration examples above).

254
mcps/dicom_mcp/README.md Normal file
View File

@ -0,0 +1,254 @@
# DICOM MCP Server
A Model Context Protocol (MCP) server for analyzing DICOM medical imaging files, specifically designed for QA workflows in body composition analysis and liver imaging.
## Who is this for?
This server is designed primarily for **Claude Desktop users** — radiographers, QA analysts, and other non-technical team members who need to inspect and validate DICOM files but don't write code. Claude Desktop has no shell or filesystem access, so the MCP server is the only way to give it DICOM analysis capabilities.
**Claude Code users** (developers, engineers) generally don't need this. Claude Code can run Python and use pydicom directly, offering more flexibility than the predefined tools here. That said, the MCP can still be useful in Claude Code as a convenience layer if you want consistent, structured QA outputs without writing ad-hoc scripts each session.
## What is this for?
This MCP server provides a **plain-language API** for interacting with DICOM data. It doesn't eliminate the need for Python/pydicom knowledge, but it significantly reduces the cognitive load for many common tasks.
**What it does well:**
- Makes common QA tasks instant (Dixon detection, header comparison, protocol validation)
- Removes boilerplate — no more writing the same pydicom scripts repeatedly
- Natural language interface — "find Dixon sequences" vs `pydicom.dcmread()` loops
- Pixel-level analysis — render images, measure signal statistics, compute SNR
- Consistent output formats across all operations
- Configurable PII filtering to redact patient tags when required
**What it doesn't (and shouldn't) try to do:**
- Replace pydicom for custom/novel analyses
- Handle every edge case in DICOM
- Be a full DICOM viewer/editor
It's the perfect "80/20" tool — handles 80% of routine QA tasks with 20% of the effort. For the remaining 20% of complex cases, users still have full Python/pydicom access.
## Tools
The DICOM MCP server provides 17 tools across six categories:
### Directory & File Discovery
| Tool | Description |
|------|-------------|
| `dicom_list_files` | List DICOM files with metadata filtering and optional `count_only` mode |
| `dicom_summarize_directory` | High-level overview with unique tag values and file counts |
| `dicom_find_dixon_series` | Locate and classify Dixon (chemical shift) sequences |
| `dicom_search` | Find files matching filter criteria (text, numeric, presence operators) |
### Metadata & Validation
| Tool | Description |
|------|-------------|
| `dicom_get_metadata` | Extract header information organised by tag groups |
| `dicom_compare_headers` | Side-by-side comparison of up to 10 files |
| `dicom_validate_sequence` | Check acquisition parameters against expected values |
| `dicom_analyze_series` | Comprehensive series consistency and completeness check |
| `dicom_query` | Query arbitrary tags across all files with optional grouping |
### Philips Private Tags
| Tool | Description |
|------|-------------|
| `dicom_query_philips_private` | Query Philips private DICOM tags by DD number and element offset |
### Pixel Analysis
| Tool | Description |
|------|-------------|
| `dicom_read_pixels` | Pixel statistics (min, max, mean, std, percentiles, histogram) with optional ROI |
| `dicom_compute_snr` | Signal-to-noise ratio from signal and noise ROIs |
| `dicom_render_image` | Render DICOM to PNG with configurable windowing and ROI overlays |
### Structure & Comparison
| Tool | Description |
|------|-------------|
| `dicom_dump_tree` | Full hierarchical dump of DICOM structure including nested sequences |
| `dicom_compare_uids` | Compare UID sets between two directories to find shared, missing, or extra UIDs |
### Segmentation & T1 Mapping
| Tool | Description |
|------|-------------|
| `dicom_verify_segmentations` | Validate that segmentation files correctly reference their source images |
| `dicom_analyze_ti` | Extract and validate inversion times from MOLLI/NOLLI T1 mapping sequences across vendors |
For detailed usage examples, parameter reference, and QA workflows, see **[docs/USAGE.md](docs/USAGE.md)**. For a plain-English summary of what each tool does, see **[docs/CAPABILITIES.md](docs/CAPABILITIES.md)**. For behavioural constraints and regulatory context, see **[docs/GUIDELINES.md](docs/GUIDELINES.md)**.
## Installation
See **[INSTALL.md](INSTALL.md)** for complete installation instructions covering:
- **Standalone package** — No Python required, ideal for non-developers
- **Claude Desktop** — Poetry-based developer setup with GUI configuration
- **Claude Code** — Adding the MCP server to your Claude Code environment
- **PII filtering** — How to enable patient data redaction for team deployments
### Quick Start (Developer)
```bash
# Install dependencies
poetry install --with dev
# Run tests to verify
poetry run pytest -v --tb=short
# Start the server
poetry run python -m dicom_mcp
```
## PII Filtering
Patient-identifying tags can be redacted from all tool output by setting an environment variable:
```bash
export DICOM_MCP_PII_FILTER=true
```
When enabled, the following tags are replaced with `[REDACTED]` in tool responses:
- PatientName
- PatientID
- PatientBirthDate
- PatientSex
This affects `dicom_get_metadata`, `dicom_compare_headers`, `dicom_summarize_directory`, and `dicom_query`. All other tools do not expose patient data and are unaffected.
To disable, unset the variable or set it to any value other than `true`, `1`, or `yes`.
## Project Structure
```
dicom_mcp/ # Python package
__init__.py # Re-exports all public symbols
__main__.py # Entry point: python -m dicom_mcp
server.py # FastMCP instance and run()
config.py # Environment-driven configuration
constants.py # Enums and tag group definitions
pii.py # PII redaction functions
helpers/ # Internal helper functions
tags.py # Tag reading, formatting, resolution
sequence.py # Sequence type identification
files.py # DICOM file discovery
philips.py # Philips private tag resolution
pixels.py # Pixel array and statistics
filters.py # Search filter parsing
tree.py # DICOM tree building and formatting
tools/ # MCP tool definitions
discovery.py # dicom_list_files, dicom_find_dixon_series
metadata.py # dicom_get_metadata, dicom_compare_headers
query.py # dicom_query, dicom_summarize_directory
validation.py # dicom_validate_sequence, dicom_analyze_series
search.py # dicom_search
philips.py # dicom_query_philips_private
pixels.py # dicom_read_pixels, dicom_compute_snr, dicom_render_image
tree.py # dicom_dump_tree
uid_comparison.py # dicom_compare_uids
segmentation.py # dicom_verify_segmentations
ti_analysis.py # dicom_analyze_ti
docs/ # Documentation
USAGE.md # Detailed tool reference and QA workflows
TODO.md # Planned improvements and known issues
CAPABILITIES.md # Plain-English summary of all capabilities
GUIDELINES.md # Behavioural constraints and regulatory context
dicom_mcp.py # Backward-compatible entry point shim
test_dicom_mcp.py # Test suite (138 tests)
```
## Development
### Running Tests
```bash
poetry run pytest -v --tb=short
```
The test suite includes 138 tests covering tool registration, helper functions, PII filtering, and full pipeline integration tests using synthetic DICOM files.
### Linting
```bash
ruff check dicom_mcp/ dicom_mcp.py test_dicom_mcp.py
black --check dicom_mcp/ dicom_mcp.py test_dicom_mcp.py
```
### Building Standalone Packages
```bash
# macOS (detects architecture automatically)
./build_standalone.sh
# Linux
./build_standalone_linux.sh
```
This bundles Python 3.12, fastmcp, pydicom, numpy, and Pillow into a self-contained package (~50MB) that requires no system Python.
### MCP Inspector
To test the server interactively outside of Claude:
```bash
npx @modelcontextprotocol/inspector poetry run python -m dicom_mcp
```
### Extending the Server
New tools can be added by creating a module in `dicom_mcp/tools/` and importing it from `dicom_mcp/tools/__init__.py`. Each tool module imports the shared `mcp` instance from `dicom_mcp.server` and uses the `@mcp.tool()` decorator.
Other extension points:
- **Custom tag groups**: Edit `COMMON_TAGS` in `dicom_mcp/constants.py`
- **Sequence type detection**: Modify `_identify_sequence_type()` in `dicom_mcp/helpers/sequence.py`
- **PII tag set**: Edit `PII_TAGS` in `dicom_mcp/pii.py` (derived from `COMMON_TAGS["patient_info"]`)
## Troubleshooting
### "Module not found" errors
Install all dependencies:
```bash
poetry install --with dev
```
If the MCP server starts but fails with `ModuleNotFoundError` for numpy or Pillow, the Claude Desktop config may be pointing to a different Python than the Poetry virtualenv. Either install the missing packages into that Python or update the config to use the virtualenv Python directly (see [INSTALL.md](INSTALL.md)).
### "File not found" errors
Use absolute paths, not relative paths. Check that the path exists and is accessible.
### "Not a valid DICOM file" errors
Verify the file is actually a DICOM file. Try opening with another DICOM viewer or `pydicom.dcmread()` directly.
### Server not appearing in Claude Desktop
- Verify the configuration file path is correct
- Check that the configured Python can find all dependencies
- Restart Claude Desktop after configuration changes
- Check Claude Desktop logs for error messages
### "No pixel data" errors
Some DICOM files (presentation states, structured reports, derived objects) don't contain pixel data. Use `dicom_get_metadata` to check the file's SOP Class or ImageType before attempting pixel operations.
## Example Screenshot
<img src="img/claude_desktop_example.png" width="623" alt="Claude Desktop session example">
## License
This MCP server is provided as-is for QA and testing purposes.
## Support
For issues or questions:
1. Check the Troubleshooting section above
2. See **[docs/USAGE.md](docs/USAGE.md)** for detailed tool reference and workflow examples
3. See **[INSTALL.md](INSTALL.md)** for installation help
4. Verify your DICOM files with pydicom directly
5. Review MCP server logs in Claude Desktop

View File

@ -0,0 +1,84 @@
image: python:3.12

definitions:
  caches:
    poetry: ~/.cache/pypoetry
  steps:
    # Shared test step.  The same install-and-test script was previously
    # copy-pasted into the default, main, and pull-request pipelines; it is
    # now defined once and reused via a YAML anchor (Bitbucket Pipelines
    # supports anchors and the merge key for overriding fields like `name`).
    - step: &test-step
        name: Test
        caches:
          - pip
          - poetry
        script:
          # Install Poetry
          - curl -sSL https://install.python-poetry.org | python3 -
          - export PATH="/root/.local/bin:$PATH"
          # Verify Poetry installation
          - poetry --version
          # Install dependencies
          - poetry install --with dev
          # Run tests
          - poetry run pytest -v --tb=short
          # Optional: Run linting/type checking if you add them later
          # - poetry run ruff check .
          # - poetry run mypy .

pipelines:
  default:
    - step: *test-step
  branches:
    main:
      - step:
          <<: *test-step
          name: Test on Main
      # Optional: Generate coverage report
      # - poetry run pytest --cov=. --cov-report=term-missing
      - step:
          name: Build Standalone Package
          script:
            # Build standalone distributable for Linux x86_64
            - chmod +x build_standalone_linux.sh
            - ./build_standalone_linux.sh
          artifacts:
            - dist/*.tar.gz
  pull-requests:
    '**':
      - step:
          <<: *test-step
          name: Test PR

View File

@ -0,0 +1,160 @@
#!/bin/bash
# Build a standalone distributable package for DICOM MCP Server (macOS).
# Uses python-build-standalone to bundle Python 3.12 + dependencies so end
# users need no system Python.
#
# Output: dist/dicom_mcp_standalone_<arch>.tar.gz
set -euo pipefail

GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

echo -e "${GREEN}=== Building Standalone DICOM MCP Package ===${NC}\n"

# Detect architecture and pick the matching python-build-standalone asset.
ARCH=$(uname -m)
RELEASE="20260203"
if [ "$ARCH" = "x86_64" ]; then
    PYTHON_BUILD="cpython-3.12.12+${RELEASE}-x86_64-apple-darwin-install_only_stripped.tar.gz"
    DIST_ARCH="x86_64"
elif [ "$ARCH" = "arm64" ]; then
    PYTHON_BUILD="cpython-3.12.12+${RELEASE}-aarch64-apple-darwin-install_only_stripped.tar.gz"
    DIST_ARCH="arm64"
else
    echo -e "${RED}Unsupported architecture: $ARCH${NC}"
    exit 1
fi

TEMP_BUILD_DIR="build/temp"
DIST_DIR="build/dicom_mcp_standalone_${DIST_ARCH}"
PYTHON_URL="https://github.com/indygreg/python-build-standalone/releases/download/${RELEASE}/$PYTHON_BUILD"

# Clean previous builds
rm -rf "build" "dist"
mkdir -p "$TEMP_BUILD_DIR" "$DIST_DIR"

# Download standalone Python.  -f makes curl fail on HTTP errors (e.g. a bad
# RELEASE tag returning 404) instead of saving an error page that would then
# break the tar extraction below with a confusing message.
echo -e "${YELLOW}Downloading Python 3.12 standalone build for ${ARCH}...${NC}"
curl -fL "$PYTHON_URL" -o "$TEMP_BUILD_DIR/python.tar.gz"

# Extract Python
echo -e "${YELLOW}Extracting Python...${NC}"
tar -xzf "$TEMP_BUILD_DIR/python.tar.gz" -C "$TEMP_BUILD_DIR"
mv "$TEMP_BUILD_DIR/python" "$DIST_DIR/python"

# Set up Python environment
PYTHON_BIN="$DIST_DIR/python/bin/python3"

# Install pinned runtime dependencies into the bundled interpreter.
echo -e "${YELLOW}Installing dependencies...${NC}"
"$PYTHON_BIN" -m pip install --upgrade pip
"$PYTHON_BIN" -m pip install fastmcp==2.0.0 pydicom==3.0.1 numpy==2.4.2 Pillow==12.1.1

# Copy server code
echo -e "${YELLOW}Copying server code...${NC}"
cp -r dicom_mcp/ "$DIST_DIR/dicom_mcp/"
cp dicom_mcp.py "$DIST_DIR/"

# Create launcher script (quoted heredoc: expanded at launch time, not now).
echo -e "${YELLOW}Creating launcher script...${NC}"
cat > "$DIST_DIR/run_dicom_mcp.sh" << 'EOF'
#!/bin/bash
# DICOM MCP Server Launcher
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$SCRIPT_DIR"
exec "$SCRIPT_DIR/python/bin/python3" -m dicom_mcp
EOF
chmod +x "$DIST_DIR/run_dicom_mcp.sh"

# Create installation instructions (unquoted heredoc: ${DIST_ARCH} expands now).
cat > "$DIST_DIR/INSTALL.txt" << EOF
DICOM MCP Server - Installation Instructions

1. Move this entire folder to your preferred location, e.g.:
   /Users/yourusername/Applications/dicom_mcp_standalone_${DIST_ARCH}

2. Edit your Claude Desktop configuration file:
   ~/Library/Application Support/Claude/claude_desktop_config.json

3. Add the following to the "mcpServers" section:

   "dicom_mcp": {
     "command": "/path/to/dicom_mcp_standalone_${DIST_ARCH}/run_dicom_mcp.sh"
   }

   (Replace /path/to/ with the actual location where you placed this folder)

4. Restart Claude Desktop

5. Verify the server is working by asking Claude:
   "List available MCP tools"

   You should see 17 DICOM tools available.

No Python installation required - everything is bundled!
EOF

# Create a simple installer script for end users (quoted heredoc: all
# expansion happens when the end user runs it).
cat > "$DIST_DIR/install_to_claude.sh" << 'EOF'
#!/bin/bash
set -e

GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CONFIG_FILE="$HOME/Library/Application Support/Claude/claude_desktop_config.json"

echo -e "${GREEN}=== DICOM MCP Server - Claude Desktop Setup ===${NC}\n"

# Create config directory if needed
mkdir -p "$HOME/Library/Application Support/Claude"

# Check if config exists
if [ -f "$CONFIG_FILE" ]; then
    echo -e "${YELLOW}Existing configuration found.${NC}"
    echo -e "Please add the following to your mcpServers section:\n"
    echo '{
  "dicom_mcp": {
    "command": "'"$SCRIPT_DIR/run_dicom_mcp.sh"'"
  }
}'
    echo -e "\n${YELLOW}Configuration file location:${NC}"
    echo "$CONFIG_FILE"
else
    echo -e "${YELLOW}Creating new configuration...${NC}"
    cat > "$CONFIG_FILE" << CONFIGEOF
{
  "mcpServers": {
    "dicom_mcp": {
      "command": "$SCRIPT_DIR/run_dicom_mcp.sh"
    }
  }
}
CONFIGEOF
    echo -e "${GREEN}✓ Configuration created${NC}"
fi

echo -e "\n${GREEN}Next step: Restart Claude Desktop${NC}\n"
EOF
chmod +x "$DIST_DIR/install_to_claude.sh"

# Create tarball for distribution
echo -e "${YELLOW}Creating distribution package...${NC}"
mkdir -p dist
tar -czf "dist/dicom_mcp_standalone_${DIST_ARCH}.tar.gz" -C build "dicom_mcp_standalone_${DIST_ARCH}"

# Clean up build directory
rm -rf build

echo -e "\n${GREEN}=== Build Complete ===${NC}\n"
echo -e "Distribution package created at:"
echo -e "${YELLOW}dist/dicom_mcp_standalone_${DIST_ARCH}.tar.gz${NC}\n"
echo -e "Package size: $(du -h "dist/dicom_mcp_standalone_${DIST_ARCH}.tar.gz" | cut -f1)\n"
echo -e "To distribute:"
echo -e "1. Send the .tar.gz file to users"
echo -e "2. Users extract it: ${YELLOW}tar -xzf dicom_mcp_standalone_${DIST_ARCH}.tar.gz${NC}"
echo -e "3. Users run: ${YELLOW}cd dicom_mcp_standalone_${DIST_ARCH} && ./install_to_claude.sh${NC}\n"

View File

@ -0,0 +1,163 @@
#!/bin/bash
# Build a standalone distributable package for DICOM MCP Server (Linux).
# Uses python-build-standalone to bundle Python 3.12 + dependencies so end
# users need no system Python.
#
# Output: dist/dicom_mcp_standalone_linux_<arch>.tar.gz
set -euo pipefail

GREEN='\033[0;32m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m'

echo -e "${GREEN}=== Building Standalone DICOM MCP Package (Linux) ===${NC}\n"

# Detect architecture and pick the matching python-build-standalone asset.
ARCH=$(uname -m)
RELEASE="20260203"
if [ "$ARCH" = "x86_64" ]; then
    PYTHON_BUILD="cpython-3.12.12+${RELEASE}-x86_64-unknown-linux-gnu-install_only_stripped.tar.gz"
    DIST_ARCH="x86_64"
elif [ "$ARCH" = "aarch64" ]; then
    PYTHON_BUILD="cpython-3.12.12+${RELEASE}-aarch64-unknown-linux-gnu-install_only_stripped.tar.gz"
    DIST_ARCH="aarch64"
else
    echo -e "${RED}Unsupported architecture: $ARCH${NC}"
    exit 1
fi

TEMP_BUILD_DIR="build/temp"
DIST_DIR="build/dicom_mcp_standalone_linux_${DIST_ARCH}"
PYTHON_URL="https://github.com/indygreg/python-build-standalone/releases/download/${RELEASE}/$PYTHON_BUILD"

# Clean previous builds
rm -rf "build" "dist"
mkdir -p "$TEMP_BUILD_DIR" "$DIST_DIR"

# Download standalone Python.  -f makes curl fail on HTTP errors (e.g. a bad
# RELEASE tag returning 404) instead of saving an error page that would then
# break the tar extraction below with a confusing message.
echo -e "${YELLOW}Downloading Python 3.12 standalone build for Linux ${ARCH}...${NC}"
curl -fL "$PYTHON_URL" -o "$TEMP_BUILD_DIR/python.tar.gz"

# Extract Python
echo -e "${YELLOW}Extracting Python...${NC}"
tar -xzf "$TEMP_BUILD_DIR/python.tar.gz" -C "$TEMP_BUILD_DIR"
mv "$TEMP_BUILD_DIR/python" "$DIST_DIR/python"

# Set up Python environment
PYTHON_BIN="$DIST_DIR/python/bin/python3"

# Install pinned runtime dependencies into the bundled interpreter.
echo -e "${YELLOW}Installing dependencies...${NC}"
"$PYTHON_BIN" -m pip install --upgrade pip
"$PYTHON_BIN" -m pip install fastmcp==2.0.0 pydicom==3.0.1 numpy==2.4.2 Pillow==12.1.1

# Copy server code
echo -e "${YELLOW}Copying server code...${NC}"
cp -r dicom_mcp/ "$DIST_DIR/dicom_mcp/"
cp dicom_mcp.py "$DIST_DIR/"

# Create launcher script (quoted heredoc: expanded at launch time, not now).
echo -e "${YELLOW}Creating launcher script...${NC}"
cat > "$DIST_DIR/run_dicom_mcp.sh" << 'EOF'
#!/bin/bash
# DICOM MCP Server Launcher
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd "$SCRIPT_DIR"
exec "$SCRIPT_DIR/python/bin/python3" -m dicom_mcp
EOF
chmod +x "$DIST_DIR/run_dicom_mcp.sh"

# Create installation instructions (unquoted heredoc: ${DIST_ARCH} expands now).
cat > "$DIST_DIR/INSTALL.txt" << EOF
DICOM MCP Server - Installation Instructions (Linux)

1. Move this entire folder to your preferred location, e.g.:
   /opt/dicom_mcp_standalone_linux_${DIST_ARCH}

2. Edit your MCP client configuration file to add this server.
   For Claude Desktop on Linux (~/.config/Claude/claude_desktop_config.json):

   {
     "mcpServers": {
       "dicom_mcp": {
         "command": "/path/to/dicom_mcp_standalone_linux_${DIST_ARCH}/run_dicom_mcp.sh"
       }
     }
   }

   (Replace /path/to/ with the actual location where you placed this folder)

3. Restart your MCP client

4. Verify the server is working by asking Claude:
   "List available MCP tools"

   You should see 17 DICOM tools available.

No Python installation required - everything is bundled!
EOF

# Create a simple installer script for end users (quoted heredoc: all
# expansion happens when the end user runs it).
cat > "$DIST_DIR/install_to_claude.sh" << 'EOF'
#!/bin/bash
set -e

GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
CONFIG_FILE="$HOME/.config/Claude/claude_desktop_config.json"

echo -e "${GREEN}=== DICOM MCP Server - Claude Desktop Setup (Linux) ===${NC}\n"

# Create config directory if needed
mkdir -p "$HOME/.config/Claude"

# Check if config exists
if [ -f "$CONFIG_FILE" ]; then
    echo -e "${YELLOW}Existing configuration found.${NC}"
    echo -e "Please add the following to your mcpServers section:\n"
    echo '{
  "dicom_mcp": {
    "command": "'"$SCRIPT_DIR/run_dicom_mcp.sh"'"
  }
}'
    echo -e "\n${YELLOW}Configuration file location:${NC}"
    echo "$CONFIG_FILE"
else
    echo -e "${YELLOW}Creating new configuration...${NC}"
    cat > "$CONFIG_FILE" << CONFIGEOF
{
  "mcpServers": {
    "dicom_mcp": {
      "command": "$SCRIPT_DIR/run_dicom_mcp.sh"
    }
  }
}
CONFIGEOF
    echo -e "${GREEN}✓ Configuration created${NC}"
fi

echo -e "\n${GREEN}Next step: Restart Claude Desktop${NC}\n"
EOF
chmod +x "$DIST_DIR/install_to_claude.sh"

# Create tarball for distribution
echo -e "${YELLOW}Creating distribution package...${NC}"
mkdir -p dist
tar -czf "dist/dicom_mcp_standalone_linux_${DIST_ARCH}.tar.gz" -C build "dicom_mcp_standalone_linux_${DIST_ARCH}"

# Clean up build directory
rm -rf build

echo -e "\n${GREEN}=== Build Complete ===${NC}\n"
echo -e "Distribution package created at:"
echo -e "${YELLOW}dist/dicom_mcp_standalone_linux_${DIST_ARCH}.tar.gz${NC}\n"
echo -e "Package size: $(du -h "dist/dicom_mcp_standalone_linux_${DIST_ARCH}.tar.gz" | cut -f1)\n"
echo -e "To distribute:"
echo -e "1. Send the .tar.gz file to users"
echo -e "2. Users extract it: ${YELLOW}tar -xzf dicom_mcp_standalone_linux_${DIST_ARCH}.tar.gz${NC}"
echo -e "3. Users run: ${YELLOW}cd dicom_mcp_standalone_linux_${DIST_ARCH} && ./install_to_claude.sh${NC}\n"

View File

@ -0,0 +1,11 @@
#!/usr/bin/env python3
"""Backward-compatible entry point.

Delegates to the ``dicom_mcp`` package so that
``python dicom_mcp.py`` continues to work.
"""
from dicom_mcp.server import run

# Only start the server when executed as a script, not when imported.
if __name__ == "__main__":
    run()

View File

@ -0,0 +1,81 @@
"""DICOM MCP Server package.
Re-exports all public symbols so that ``import dicom_mcp`` continues to
provide the same top-level namespace as the original single-file module.
"""
# --- Server instance ---
from dicom_mcp.server import mcp # noqa: F401
# --- Configuration ---
from dicom_mcp.config import MAX_FILES, PII_FILTER_ENABLED # noqa: F401
# --- PII filtering ---
from dicom_mcp.pii import ( # noqa: F401
PII_TAGS,
PII_REDACTED_VALUE,
is_pii_tag,
redact_if_pii,
)
# --- Enums and constants ---
from dicom_mcp.constants import ( # noqa: F401
ResponseFormat,
SequenceType,
COMMON_TAGS,
VALID_TAG_GROUPS,
)
# --- Helper functions ---
from dicom_mcp.helpers import ( # noqa: F401
_safe_get_tag,
_format_tag_value,
_resolve_tag,
_validate_tag_groups,
_format_markdown_table,
_identify_sequence_type,
_is_dixon_sequence,
_get_dixon_image_types,
_is_dicom_file,
_find_dicom_files,
_resolve_philips_private_tag,
_list_philips_private_creators,
_get_pixel_array,
_extract_roi,
_compute_stats,
_apply_windowing,
_parse_filter,
_apply_filter,
_build_tree_text,
_build_tree_json,
_format_tree_value,
)
# --- Tool functions (importing triggers @mcp.tool() registration) ---
from dicom_mcp.tools.discovery import ( # noqa: F401
dicom_list_files,
dicom_find_dixon_series,
)
from dicom_mcp.tools.metadata import ( # noqa: F401
dicom_get_metadata,
dicom_compare_headers,
)
from dicom_mcp.tools.validation import ( # noqa: F401
dicom_validate_sequence,
dicom_analyze_series,
)
from dicom_mcp.tools.query import ( # noqa: F401
dicom_query,
dicom_summarize_directory,
)
from dicom_mcp.tools.search import dicom_search # noqa: F401
from dicom_mcp.tools.philips import dicom_query_philips_private # noqa: F401
from dicom_mcp.tools.pixels import ( # noqa: F401
dicom_read_pixels,
dicom_compute_snr,
dicom_render_image,
)
from dicom_mcp.tools.tree import dicom_dump_tree # noqa: F401
from dicom_mcp.tools.uid_comparison import dicom_compare_uids # noqa: F401
from dicom_mcp.tools.segmentation import dicom_verify_segmentations # noqa: F401
from dicom_mcp.tools.ti_analysis import dicom_analyze_ti # noqa: F401

View File

@ -0,0 +1,5 @@
"""Entry point for ``python -m dicom_mcp``."""
from dicom_mcp.server import run
run()

View File

@ -0,0 +1,13 @@
"""Environment-driven configuration for the DICOM MCP server."""
import os
# PII filtering: set DICOM_MCP_PII_FILTER=true to redact patient tags
PII_FILTER_ENABLED: bool = os.environ.get("DICOM_MCP_PII_FILTER", "").lower() in (
"true",
"1",
"yes",
)
# Safety limit for directory scans
MAX_FILES: int = int(os.environ.get("DICOM_MCP_MAX_FILES", "1000"))

View File

@ -0,0 +1,96 @@
"""Enums and constants for the DICOM MCP server."""
from enum import Enum
class ResponseFormat(str, Enum):
    """Output format for tool responses.

    Inherits from ``str`` so members compare equal to their plain
    string values ("markdown" / "json").
    """

    MARKDOWN = "markdown"  # human-readable text/tables
    JSON = "json"  # machine-readable structured output
class SequenceType(str, Enum):
    """Common MRI sequence types.

    Inherits from ``str`` so members compare equal to their lowercase
    string values.  ``UNKNOWN`` is the catch-all for sequences that
    cannot be classified.
    """

    DIXON = "dixon"  # Dixon fat/water acquisition
    T1_MAPPING = "t1_mapping"
    MULTI_ECHO_GRE = "multi_echo_gre"
    SPIN_ECHO_IR = "spin_echo_ir"
    T1 = "t1"
    T2 = "t2"
    FLAIR = "flair"
    DWI = "dwi"
    LOCALIZER = "localizer"
    UNKNOWN = "unknown"
# Tag names for common DICOM attributes
#
# Keys are tag-group names; values are lists of (group, element) tag
# tuples.  VALID_TAG_GROUPS (below) is the sorted list of the keys.
COMMON_TAGS = {
    # Patient demographics.  NOTE: the PII tag set in dicom_mcp/pii.py is
    # derived from this group, so additions here affect redaction.
    "patient_info": [
        (0x0010, 0x0010),  # PatientName
        (0x0010, 0x0020),  # PatientID
        (0x0010, 0x0030),  # PatientBirthDate
        (0x0010, 0x0040),  # PatientSex
    ],
    "study_info": [
        (0x0008, 0x0020),  # StudyDate
        (0x0008, 0x0030),  # StudyTime
        (0x0020, 0x000D),  # StudyInstanceUID
        (0x0008, 0x1030),  # StudyDescription
    ],
    "series_info": [
        (0x0008, 0x0060),  # Modality
        (0x0020, 0x000E),  # SeriesInstanceUID
        (0x0008, 0x103E),  # SeriesDescription
        (0x0020, 0x0011),  # SeriesNumber
    ],
    "image_info": [
        (0x0028, 0x0010),  # Rows
        (0x0028, 0x0011),  # Columns
        (0x0028, 0x0008),  # NumberOfFrames
        (0x0020, 0x0013),  # InstanceNumber
        (0x0008, 0x0008),  # ImageType
    ],
    # MR acquisition timing parameters (TR/TE/TI, flip angle, ...).
    "acquisition": [
        (0x0018, 0x0020),  # ScanningSequence
        (0x0018, 0x0021),  # SequenceVariant
        (0x0018, 0x0023),  # MRAcquisitionType
        (0x0018, 0x0080),  # RepetitionTime
        (0x0018, 0x0081),  # EchoTime
        (0x0018, 0x0082),  # InversionTime
        (0x0018, 0x1314),  # FlipAngle
    ],
    "manufacturer": [
        (0x0008, 0x0070),  # Manufacturer
        (0x0008, 0x1090),  # ManufacturerModelName
        (0x0018, 0x1020),  # SoftwareVersions
    ],
    # Scanner/site identification and hardware details.
    "equipment": [
        (0x0008, 0x1010),  # StationName
        (0x0018, 0x1000),  # DeviceSerialNumber
        (0x0008, 0x0080),  # InstitutionName
        (0x0008, 0x1040),  # InstitutionalDepartmentName
        (0x0018, 0x0087),  # MagneticFieldStrength
        (0x0018, 0x1250),  # ReceiveCoilName
        (0x0018, 0x1251),  # TransmitCoilName
    ],
    # Spatial geometry of the slice/volume.
    "geometry": [
        (0x0028, 0x0030),  # PixelSpacing
        (0x0018, 0x0050),  # SliceThickness
        (0x0018, 0x0088),  # SpacingBetweenSlices
        (0x0020, 0x1041),  # SliceLocation
        (0x0020, 0x0032),  # ImagePositionPatient
        (0x0020, 0x0037),  # ImageOrientationPatient
    ],
    # Stored-pixel encoding and default display window.
    "pixel_data": [
        (0x0028, 0x0100),  # BitsAllocated
        (0x0028, 0x0101),  # BitsStored
        (0x0028, 0x0102),  # HighBit
        (0x0028, 0x0103),  # PixelRepresentation
        (0x0028, 0x1050),  # WindowCenter
        (0x0028, 0x1051),  # WindowWidth
    ],
}

# Sorted group names, e.g. for validating user-supplied tag group arguments.
VALID_TAG_GROUPS = sorted(COMMON_TAGS.keys())

View File

@ -0,0 +1,61 @@
"""Re-export all helper functions for convenient importing."""
from dicom_mcp.helpers.tags import (
_safe_get_tag,
_format_tag_value,
_resolve_tag,
_validate_tag_groups,
_format_markdown_table,
)
from dicom_mcp.helpers.sequence import (
_identify_sequence_type,
_is_dixon_sequence,
_get_dixon_image_types,
)
from dicom_mcp.helpers.files import (
_is_dicom_file,
_find_dicom_files,
)
from dicom_mcp.helpers.philips import (
_resolve_philips_private_tag,
_list_philips_private_creators,
)
from dicom_mcp.helpers.pixels import (
_get_pixel_array,
_extract_roi,
_compute_stats,
_apply_windowing,
)
from dicom_mcp.helpers.filters import (
_parse_filter,
_apply_filter,
)
from dicom_mcp.helpers.tree import (
_build_tree_text,
_build_tree_json,
_format_tree_value,
)
__all__ = [
"_safe_get_tag",
"_format_tag_value",
"_resolve_tag",
"_validate_tag_groups",
"_format_markdown_table",
"_identify_sequence_type",
"_is_dixon_sequence",
"_get_dixon_image_types",
"_is_dicom_file",
"_find_dicom_files",
"_resolve_philips_private_tag",
"_list_philips_private_creators",
"_get_pixel_array",
"_extract_roi",
"_compute_stats",
"_apply_windowing",
"_parse_filter",
"_apply_filter",
"_build_tree_text",
"_build_tree_json",
"_format_tree_value",
]

View File

@ -0,0 +1,62 @@
"""DICOM file discovery and validation helpers."""
from pathlib import Path
import pydicom
from pydicom.errors import InvalidDicomError
from dicom_mcp.config import MAX_FILES
def _is_dicom_file(file_path: Path) -> bool:
"""Quick check if a file is likely DICOM by checking the preamble.
DICOM files have a 128-byte preamble followed by 'DICM' magic bytes.
Files with .dcm extension are assumed to be DICOM without preamble check.
"""
if file_path.suffix.lower() == ".dcm":
return True
try:
with open(file_path, "rb") as f:
f.seek(128)
magic = f.read(4)
return magic == b"DICM"
except (OSError, IOError):
return False
def _find_dicom_files(
    directory: Path,
    recursive: bool = True,
    max_files: int = MAX_FILES,
) -> tuple[list[tuple[Path, pydicom.Dataset]], bool]:
    """Find DICOM files under ``directory`` and pre-read their headers.

    Args:
        directory: Root directory to scan.
        recursive: Also scan subdirectories when True.
        max_files: Stop after collecting this many files (safety limit).

    Returns:
        Tuple of (list of (path, dataset) pairs, truncated flag).
        ``truncated`` is True if the ``max_files`` limit was hit.
    """
    results: list[tuple[Path, pydicom.Dataset]] = []
    truncated = False
    pattern = "**/*" if recursive else "*"
    for file_path in sorted(directory.glob(pattern)):
        if not file_path.is_file():
            continue
        if not _is_dicom_file(file_path):
            continue
        try:
            # Headers only: pixel data is not needed for discovery.
            ds = pydicom.dcmread(file_path, stop_before_pixels=True)
        except Exception:
            # Best-effort scan: skip unreadable/invalid files.  The original
            # `except (InvalidDicomError, Exception)` tuple was redundant
            # because InvalidDicomError already derives from Exception.
            continue
        results.append((file_path, ds))
        if len(results) >= max_files:
            truncated = True
            break
    return results, truncated

View File

@ -0,0 +1,120 @@
"""Filter parsing and application for dicom_search."""
from typing import Any, Dict, Optional
# Text operators (case-insensitive matching on string tag values)
_TEXT_OPERATORS = {
"is": lambda actual, expected: actual.lower() == expected.lower(),
"is not": lambda actual, expected: actual.lower() != expected.lower(),
"contains": lambda actual, expected: expected.lower() in actual.lower(),
"starts with": lambda actual, expected: actual.lower().startswith(expected.lower()),
"ends with": lambda actual, expected: actual.lower().endswith(expected.lower()),
}
# Symbolic operators (numeric with string fallback)
_SYMBOLIC_OPERATORS = {
">=": lambda a, b: a >= b,
"<=": lambda a, b: a <= b,
"!=": lambda a, b: a != b,
">": lambda a, b: a > b,
"<": lambda a, b: a < b,
"=": lambda a, b: a == b,
}
# Presence operators (no value needed)
_PRESENCE_OPERATORS = {
"exists": lambda actual: actual != "N/A" and actual.strip() != "",
"missing": lambda actual: actual == "N/A" or actual.strip() == "",
}
def _parse_filter(filter_str: str) -> Optional[Dict[str, Any]]:
"""Parse a filter string into a structured filter dict.
Supported formats:
Text: "SeriesDescription contains MOST"
Symbolic: "EchoTime > 10"
Presence: "InversionTime exists"
Returns dict with keys: tag_spec, operator, value (None for presence),
operator_type ('text', 'symbolic', 'presence').
Returns None if parsing fails.
"""
filter_str = filter_str.strip()
if not filter_str:
return None
# --- Try presence operators first (no value part) ---
for op in _PRESENCE_OPERATORS:
if filter_str.lower().endswith(f" {op}"):
tag_spec = filter_str[: -(len(op) + 1)].strip()
if tag_spec:
return {
"tag_spec": tag_spec,
"operator": op,
"value": None,
"operator_type": "presence",
}
# --- Try text operators (multi-word, so check longest first) ---
for op in sorted(_TEXT_OPERATORS.keys(), key=len, reverse=True):
# Look for " op " in the string (space-delimited)
marker = f" {op} "
idx = filter_str.lower().find(marker)
if idx >= 0:
tag_spec = filter_str[:idx].strip()
value = filter_str[idx + len(marker) :].strip()
if tag_spec and value:
return {
"tag_spec": tag_spec,
"operator": op,
"value": value,
"operator_type": "text",
}
# --- Try symbolic operators (check >= <= != before > < =) ---
for op in sorted(_SYMBOLIC_OPERATORS.keys(), key=len, reverse=True):
marker = f" {op} "
idx = filter_str.find(marker)
if idx >= 0:
tag_spec = filter_str[:idx].strip()
value = filter_str[idx + len(marker) :].strip()
if tag_spec and value:
return {
"tag_spec": tag_spec,
"operator": op,
"value": value,
"operator_type": "symbolic",
}
return None
def _apply_filter(filter_def: Dict[str, Any], actual_value: str) -> bool:
"""Apply a parsed filter to an actual DICOM tag value.
Returns True if the value passes the filter.
"""
op = filter_def["operator"]
op_type = filter_def["operator_type"]
expected = filter_def.get("value")
if op_type == "presence":
return _PRESENCE_OPERATORS[op](actual_value)
if op_type == "text":
return _TEXT_OPERATORS[op](actual_value, expected)
if op_type == "symbolic":
comparator = _SYMBOLIC_OPERATORS[op]
# Try numeric comparison first
try:
num_actual = float(actual_value)
num_expected = float(expected)
return comparator(num_actual, num_expected)
except (ValueError, TypeError):
pass
# Fall back to string comparison
return comparator(actual_value, expected)
return False

View File

@ -0,0 +1,129 @@
"""Philips private tag resolution helpers."""
from typing import Any, Dict, List, Optional
import pydicom
def _resolve_philips_private_tag(
    ds: pydicom.Dataset,
    dd_number: int,
    element_offset: int,
    private_group: int = 0x2005,
) -> tuple[Optional[tuple[int, int]], Optional[str], Optional[str]]:
    """Resolve a Philips private tag by DD number and element offset.

    Philips reserves 256-element blocks inside a private group via
    Private Creator tags: each occupied slot at (group, 0x0010)-(group,
    0x00FF) holds a string such as "Philips MR Imaging DD 001", and the
    slot number doubles as the block byte.  The concrete element address
    is then ``(block_byte << 8) | element_offset``.

    IMPORTANT: block assignments are NOT fixed across scanners or
    software versions, so the lookup must be done per file.

    Args:
        ds: A pydicom Dataset (already read).
        dd_number: The DD number to look for (e.g. 1 for "DD 001").
        element_offset: The element offset within the DD block (e.g. 0x85).
        private_group: The DICOM private group to search (default 0x2005).

    Returns:
        Tuple of (resolved_tag, creator_string, value_str); all three are
        None when no matching Private Creator is found, and value_str is
        None when the resolved tag itself is absent.
    """
    # "DD 001"-style suffix, matched case-insensitively.
    target_pattern = f"dd {dd_number:03d}"

    for block_byte in range(0x0010, 0x0100):
        creator_elem = ds.get((private_group, block_byte))
        if creator_elem is None:
            continue
        creator_str = str(creator_elem.value).strip()
        if not creator_str or target_pattern not in creator_str.lower():
            continue

        # Slot number == block byte; combine with the requested offset.
        resolved_tag = (private_group, (block_byte << 8) | element_offset)
        value_elem = ds.get(resolved_tag)
        value_str = None if value_elem is None else str(value_elem.value)
        return resolved_tag, creator_str, value_str

    return None, None, None
def _list_philips_private_creators(
    ds: pydicom.Dataset,
    private_group: int = 0x2005,
) -> List[Dict[str, Any]]:
    """List all Philips Private Creator tags in a dataset.

    Scans the Private Creator slots (group, 0x0010)-(group, 0x00FF) and
    reports every occupied one, attempting to extract the DD number from
    the creator string (e.g. "Philips MR Imaging DD 001" -> 1).

    Args:
        ds: A pydicom Dataset (already read).
        private_group: The DICOM private group to scan (default 0x2005).

    Returns:
        List of dicts with keys: slot, tag, block_byte, creator_string,
        dd_number (None when no "dd <digits>" marker is present).
    """
    creators: List[Dict[str, Any]] = []

    for slot in range(0x0010, 0x0100):
        elem = ds.get((private_group, slot))
        if elem is None:
            continue
        creator_str = str(elem.value).strip()
        if not creator_str:
            continue

        # Pull the run of digits that follows a "dd " marker, if any.
        dd_number = None
        lower = creator_str.lower()
        marker_pos = lower.find("dd ")
        if marker_pos >= 0:
            remainder = lower[marker_pos + 3 :].strip()
            end = 0
            while end < len(remainder) and remainder[end].isdigit():
                end += 1
            if end:
                dd_number = int(remainder[:end])

        creators.append(
            {
                "slot": slot,
                "tag": f"({private_group:04X},{slot:04X})",
                "block_byte": f"0x{slot:02X}",
                "creator_string": creator_str,
                "dd_number": dd_number,
            }
        )

    return creators

View File

@ -0,0 +1,81 @@
"""Pixel array extraction, ROI handling, statistics, and windowing helpers."""
from typing import Any, Dict, List
import numpy as np
import pydicom
def _get_pixel_array(ds: pydicom.Dataset) -> np.ndarray:
    """Return the dataset's pixels as float64 with rescaling applied.

    Applies RescaleSlope/RescaleIntercept (used by Philips and other
    vendors) so returned values live in the rescaled domain, e.g. signal
    intensity.  A missing, None, or zero slope is treated as 1.0 via the
    ``or`` fallback; a missing/None intercept is treated as 0.0.
    """
    raw = ds.pixel_array.astype(np.float64)
    slope = float(getattr(ds, "RescaleSlope", 1.0) or 1.0)
    intercept = float(getattr(ds, "RescaleIntercept", 0.0) or 0.0)
    if slope == 1.0 and intercept == 0.0:
        # Identity transform: skip the extra arithmetic pass.
        return raw
    return raw * slope + intercept
def _extract_roi(pixels: np.ndarray, roi: List[int]) -> np.ndarray:
"""Extract a rectangular ROI from a 2D pixel array.
Args:
pixels: 2D numpy array (rows, cols)
roi: [x, y, width, height] where x,y is the top-left corner.
Coordinates are in image space (x = column, y = row).
Returns:
2D numpy array of the ROI region.
Raises:
ValueError: If the ROI is out of bounds or invalid.
"""
if len(roi) != 4:
raise ValueError("ROI must be [x, y, width, height]")
x, y, w, h = int(roi[0]), int(roi[1]), int(roi[2]), int(roi[3])
if w <= 0 or h <= 0:
raise ValueError(f"ROI dimensions must be positive (got {w}x{h})")
rows, cols = pixels.shape
if x < 0 or y < 0 or x + w > cols or y + h > rows:
raise ValueError(
f"ROI [{x},{y},{w},{h}] exceeds image bounds ({cols}x{rows}). "
f"ROI right edge: {x + w}, bottom edge: {y + h}"
)
return pixels[y : y + h, x : x + w]
def _compute_stats(pixels: np.ndarray) -> Dict[str, Any]:
"""Compute descriptive statistics for a pixel array."""
flat = pixels.ravel()
return {
"min": float(np.min(flat)),
"max": float(np.max(flat)),
"mean": float(np.mean(flat)),
"std": float(np.std(flat)),
"median": float(np.median(flat)),
"p5": float(np.percentile(flat, 5)),
"p25": float(np.percentile(flat, 25)),
"p75": float(np.percentile(flat, 75)),
"p95": float(np.percentile(flat, 95)),
"pixel_count": int(flat.size),
}
def _apply_windowing(
pixels: np.ndarray, window_center: float, window_width: float
) -> np.ndarray:
"""Apply DICOM windowing to produce an 8-bit display image.
Uses the standard DICOM linear VOI LUT:
if pixel <= (center - width/2): output = 0
if pixel > (center + width/2): output = 255
otherwise: linear mapping to 0-255
"""
lower = window_center - window_width / 2.0
upper = window_center + window_width / 2.0
windowed = np.clip((pixels - lower) / (upper - lower) * 255.0, 0, 255)
return windowed.astype(np.uint8)

View File

@ -0,0 +1,114 @@
"""MRI sequence identification helpers."""
from typing import List
import pydicom
from dicom_mcp.constants import SequenceType
from dicom_mcp.helpers.tags import _safe_get_tag
def _identify_sequence_type(ds: pydicom.Dataset) -> SequenceType:
    """Identify the MRI sequence type from DICOM metadata.

    Uses a two-tier approach:
      1. Series description keyword matching (fast, covers named protocols)
      2. DICOM acquisition tag inspection (structural fallback)

    IMPORTANT: Keyword matching order matters. More specific terms (e.g.
    'molli') must be checked before generic terms (e.g. 't1') to avoid
    false positives.
    """
    description = _safe_get_tag(ds, (0x0008, 0x103E), "").lower()

    def mentions(*keywords: str) -> bool:
        # Substring match against the lowercased series description.
        return any(kw in description for kw in keywords)

    # --- Tier 1: keyword matches on series description ---
    # NOTE: Order is intentional - specific patterns checked before generic ones.
    # Dixon variants (check before MOST/IDEAL since some descriptions contain both)
    if mentions("dixon", "ideal", "flex"):
        return SequenceType.DIXON
    # T1 mapping protocols (MOLLI, NOLLI, ShMOLLI, SASHA, etc.)
    # Must come before the generic 't1' check to avoid misclassification.
    if mentions("molli", "nolli", "shmolli", "sasha"):
        return SequenceType.T1_MAPPING
    # Multi-echo GRE (MOST protocol for T2*/PDFF). Parentheses mirror
    # Python's and/or precedence in the original expression.
    if mentions("most") or (mentions("multi") and mentions("echo")):
        return SequenceType.MULTI_ECHO_GRE
    # Localizer / survey / scout
    if mentions("localizer", "survey", "scout"):
        return SequenceType.LOCALIZER
    # Standard named sequences (specific before generic)
    if mentions("flair"):
        return SequenceType.FLAIR
    if mentions("dwi", "diffusion"):
        return SequenceType.DWI
    # 't2' is tested before 't1' since it is the more specific term here.
    if mentions("t2"):
        return SequenceType.T2
    if mentions("t1"):
        return SequenceType.T1

    # --- Tier 2: inspect DICOM acquisition tags ---
    image_type = _safe_get_tag(ds, (0x0008, 0x0008), "").lower()
    if "dixon" in image_type or "ideal" in image_type:
        return SequenceType.DIXON

    scanning_seq = _safe_get_tag(ds, (0x0018, 0x0020), "")
    seq_variant = _safe_get_tag(ds, (0x0018, 0x0021), "")
    is_inversion = "IR" in scanning_seq
    is_spin_echo = "SE" in scanning_seq
    is_gradient = "GR" in scanning_seq
    is_mag_prep = "MP" in seq_variant  # Magnetization Prepared

    # GR + IR (+ MP) = gradient echo inversion recovery (MOLLI-like T1 mapping)
    if is_gradient and is_inversion and is_mag_prep:
        return SequenceType.T1_MAPPING
    # SE + IR = spin echo inversion recovery
    if is_spin_echo and is_inversion:
        return SequenceType.SPIN_ECHO_IR

    # Multi-echo detection via NumberOfEchoes tag
    echoes_raw = _safe_get_tag(ds, (0x0018, 0x0086), "")
    try:
        if int(echoes_raw) > 1 and is_gradient:
            return SequenceType.MULTI_ECHO_GRE
    except (ValueError, TypeError):
        pass
    return SequenceType.UNKNOWN
def _is_dixon_sequence(ds: pydicom.Dataset) -> bool:
    """Return True when the dataset classifies as a Dixon sequence."""
    return SequenceType.DIXON == _identify_sequence_type(ds)
def _get_dixon_image_types(ds: pydicom.Dataset) -> List[str]:
    """Extract Dixon image types (water, fat, in-phase, out-phase) from metadata.

    Tokenizes ImageType (0008,0008) so single-letter codes like "W"/"F" and
    short codes like "IP"/"OP" are matched exactly. The previous substring
    approach tagged every ORIGINAL image as "in-phase" because "in" is a
    substring of "original", and ``str(MultiValue).split("\\\\")`` never
    produced usable tokens for multi-valued ImageType.

    Args:
        ds: A pydicom Dataset.

    Returns:
        List of detected type names; ["unknown"] when nothing matched.
    """
    image_types: List[str] = []
    image_type_tag = ds.get((0x0008, 0x0008))
    if image_type_tag is not None:
        raw = image_type_tag.value
        # ImageType may be a pydicom MultiValue, a backslash-joined string,
        # or a single code; normalize to a flat list of lowercase tokens.
        if isinstance(raw, str):
            parts = raw.split("\\")
        elif hasattr(raw, "__iter__"):
            parts = [str(p) for p in raw]
        else:
            parts = [str(raw)]
        tokens = [p.strip().lower() for p in parts]
        joined = " ".join(tokens)
        if "w" in tokens or "water" in joined:
            image_types.append("water")
        if "f" in tokens or "fat" in joined:
            image_types.append("fat")
        # Match only explicit in/out-phase markers, never the bare "in"/"out"
        # substrings that collide with ORIGINAL/DERIVED values.
        if "ip" in tokens or "in_phase" in joined or "in phase" in joined or "inphase" in joined:
            image_types.append("in-phase")
        if (
            "op" in tokens
            or "out_phase" in joined
            or "out phase" in joined
            or "outphase" in joined
            or "opposed" in joined
        ):
            image_types.append("out-phase")
    # Fall back to the series description for water/fat naming.
    series_desc = _safe_get_tag(ds, (0x0008, 0x103E), "").lower()
    if "water" in series_desc and "water" not in image_types:
        image_types.append("water")
    if "fat" in series_desc and "fat" not in image_types:
        image_types.append("fat")
    return image_types if image_types else ["unknown"]

View File

@ -0,0 +1,97 @@
"""Tag reading, formatting, and resolution helpers."""
from typing import Any, List, Union
import pydicom
from pydicom import datadict, multival
from dicom_mcp.constants import COMMON_TAGS
def _safe_get_tag(
    ds: pydicom.Dataset, tag: Union[str, int, tuple[int, int]], default: str = "N/A"
) -> str:
    """Safely extract a DICOM tag value with fallback.

    Returns the stringified element value, or ``default`` when the tag is
    absent or any error occurs while reading/comparing it.
    """
    try:
        raw = ds.get(tag, default)
        if raw == default:
            return default
        # Dataset.get may hand back a DataElement; unwrap its .value if so.
        unwrapped = raw.value if hasattr(raw, "value") else raw
        return str(unwrapped)
    except Exception:
        return default
def _format_tag_value(tag: tuple, value: Any) -> str:
"""Format a DICOM tag value for display."""
if value is None or value == "":
return "N/A"
if isinstance(value, pydicom.sequence.Sequence):
return f"[Sequence with {len(value)} items]"
if isinstance(value, multival.MultiValue):
return "\\".join(str(v) for v in value)
return str(value)
def _resolve_tag(spec: str) -> tuple:
    """Resolve a tag keyword or hex pair to ((group, elem), display_name).

    Accepts:
        - Hex pairs: "0018,0081" or "0018, 0081"
        - Keywords: "EchoTime", "PatientName", etc.

    Returns:
        (tag_tuple, display_name) where tag_tuple is None if unresolvable.
        The tag is always a (group, element) tuple of ints: previously the
        keyword path leaked pydicom's int-valued tag, contradicting this
        docstring and the hex path.
    """
    spec = spec.strip()
    # Try hex pair first: "0018,0081" or "0018, 0081"
    if "," in spec:
        parts = spec.split(",")
        if len(parts) == 2:
            try:
                group = int(parts[0].strip(), 16)
                elem = int(parts[1].strip(), 16)
                tag_tuple = (group, elem)
                keyword = datadict.keyword_for_tag(tag_tuple)
                name = (
                    keyword if keyword else f"({parts[0].strip()},{parts[1].strip()})"
                )
                return tag_tuple, name
            except ValueError:
                pass
    # Try as keyword: "EchoTime", "PatientName", etc.
    try:
        tag_int = datadict.tag_for_keyword(spec)
        if tag_int is not None:
            # tag_for_keyword returns a single int (0xGGGGEEEE); normalize to
            # (group, elem) so both resolution paths agree.
            return (int(tag_int) >> 16, int(tag_int) & 0xFFFF), spec
    except Exception:
        pass
    return None, spec
def _validate_tag_groups(requested: list[str]) -> list[str]:
    """Validate tag group names, returning list of invalid ones."""
    invalid = []
    for name in requested:
        if name not in COMMON_TAGS:
            invalid.append(name)
    return invalid
def _format_markdown_table(headers: List[str], rows: List[List[str]]) -> str:
"""Format data as a Markdown table."""
col_widths = [len(h) for h in headers]
for row in rows:
for i, cell in enumerate(row):
col_widths[i] = max(col_widths[i], len(str(cell)))
header_row = " | ".join(h.ljust(col_widths[i]) for i, h in enumerate(headers))
separator = "-|-".join("-" * w for w in col_widths)
data_rows = []
for row in rows:
data_rows.append(
" | ".join(str(cell).ljust(col_widths[i]) for i, cell in enumerate(row))
)
return f"{header_row}\n{separator}\n" + "\n".join(data_rows)

View File

@ -0,0 +1,162 @@
"""DICOM tree-building helpers for hierarchical structure display."""
from typing import Any, Dict, List
import pydicom
from pydicom.dataelem import DataElement
from dicom_mcp.pii import redact_if_pii
def _format_tree_value(elem: DataElement, max_value_len: int = 80) -> str:
    """Format a DataElement value for tree display.

    Truncates long values, handles bytes, multivalue, and sequences.
    """
    if elem.VR == "SQ":
        count = len(elem.value) if elem.value else 0
        return f"<Sequence: {count} item{'s' if count != 1 else ''}>"
    if elem.tag == (0x7FE0, 0x0010):
        return "<Pixel Data>"
    try:
        raw = elem.value
    except Exception:
        return "<unreadable>"
    if isinstance(raw, bytes):
        # Short byte strings are shown verbatim; long ones as a size summary.
        return repr(raw) if len(raw) <= 32 else f"<{len(raw)} bytes>"
    text = str(raw)
    if len(text) > max_value_len:
        text = text[:max_value_len] + "..."
    return text
def _build_tree_text(
    dataset: pydicom.Dataset,
    max_depth: int = 10,
    show_private: bool = True,
    depth: int = 0,
    prefix: str = "",
    is_last: bool = True,
) -> List[str]:
    """Build an indented text tree of a DICOM dataset.

    Returns a list of formatted strings with tree characters.

    Args:
        dataset: Dataset (or sequence item) to render.
        max_depth: Maximum sequence-nesting depth to recurse into.
        show_private: Include private tags when True.
        depth: Current recursion depth (0 = top level, no connectors).
        prefix: Leading string inherited from the parent level.
        is_last: NOTE(review): not read anywhere in this body, though the
            recursive call still passes it — confirm before removing.

    Pixel Data (7FE0,0010) is always excluded; PII-tagged values are
    redacted via redact_if_pii before display.
    """
    lines: List[str] = []
    elements = [elem for elem in dataset if elem.tag != (0x7FE0, 0x0010)]
    if not show_private:
        elements = [elem for elem in elements if not elem.tag.is_private]
    for i, elem in enumerate(elements):
        is_last_elem = i == len(elements) - 1
        tag_str = f"({elem.tag.group:04X},{elem.tag.element:04X})"
        keyword = elem.keyword or "Unknown"
        vr = elem.VR if hasattr(elem, "VR") else "??"
        # Apply PII redaction
        tag_tuple = (elem.tag.group, elem.tag.element)
        value_str = _format_tree_value(elem)
        redacted = redact_if_pii(tag_tuple, value_str)
        if redacted != value_str:
            value_str = str(redacted)
        # Top-level entries carry no connector; nested entries pick the
        # branch glyph based on whether this is the last sibling.
        if depth == 0:
            connector = ""
            child_prefix = ""
        else:
            connector = "└─ " if is_last_elem else "├─ "
            child_prefix = prefix + (" " if is_last_elem else "")
        line = f"{prefix}{connector}{tag_str} {keyword} [{vr}]: {value_str}"
        lines.append(line)
        # Recurse into sequences if within depth limit
        if elem.VR == "SQ" and elem.value and depth < max_depth:
            seq_items = list(elem.value)
            for seq_idx, item in enumerate(seq_items):
                is_last_item = seq_idx == len(seq_items) - 1
                # Sequence items get their own "Item N" header line; the
                # child prefix only widens when this entire branch is the
                # last one, so later siblings keep their connectors aligned.
                if depth == 0:
                    item_connector = "└─ " if is_last_item and is_last_elem else "├─ "
                    item_child_prefix = (
                        " " if is_last_item and is_last_elem else ""
                    )
                else:
                    item_connector = (
                        child_prefix + "└─ " if is_last_item else child_prefix + "├─ "
                    )
                    item_child_prefix = child_prefix + (
                        " " if is_last_item else ""
                    )
                lines.append(f"{item_connector}Item {seq_idx + 1}")
                sub_lines = _build_tree_text(
                    item,
                    max_depth=max_depth,
                    show_private=show_private,
                    depth=depth + 1,
                    prefix=item_child_prefix,
                    is_last=is_last_item,
                )
                lines.extend(sub_lines)
    return lines
def _build_tree_json(
    dataset: pydicom.Dataset,
    max_depth: int = 10,
    show_private: bool = True,
    depth: int = 0,
) -> List[Dict[str, Any]]:
    """Build a JSON-serializable tree of a DICOM dataset.

    Returns a list of dicts, each with tag, vr, keyword, value, and
    optionally items (for sequences).
    """
    nodes: List[Dict[str, Any]] = []
    for elem in dataset:
        # Pixel data is always skipped; private tags are optional.
        if elem.tag == (0x7FE0, 0x0010):
            continue
        if not show_private and elem.tag.is_private:
            continue
        display_value = _format_tree_value(elem)
        masked = redact_if_pii((elem.tag.group, elem.tag.element), display_value)
        if masked != display_value:
            display_value = str(masked)
        node: Dict[str, Any] = {
            "tag": f"({elem.tag.group:04X},{elem.tag.element:04X})",
            "vr": elem.VR if hasattr(elem, "VR") else "??",
            "keyword": elem.keyword or "Unknown",
            "value": display_value,
        }
        # Recurse into sequence items while within the depth budget.
        if elem.VR == "SQ" and elem.value and depth < max_depth:
            node["items"] = [
                {
                    "index": item_no,
                    "children": _build_tree_json(
                        item,
                        max_depth=max_depth,
                        show_private=show_private,
                        depth=depth + 1,
                    ),
                }
                for item_no, item in enumerate(elem.value, start=1)
            ]
        nodes.append(node)
    return nodes

View File

@ -0,0 +1,32 @@
"""PII tag set and redaction functions.
When ``DICOM_MCP_PII_FILTER`` is enabled (via environment variable),
patient-identifying tags are replaced with ``[REDACTED]`` in tool output.
"""
from typing import Any, Tuple
from dicom_mcp.constants import COMMON_TAGS
# The set of DICOM tags considered PII (patient tags only).
PII_TAGS: frozenset[Tuple[int, int]] = frozenset(COMMON_TAGS["patient_info"])
# Replacement string substituted for PII tag values when filtering is enabled.
PII_REDACTED_VALUE = "[REDACTED]"
def is_pii_tag(tag_tuple: Tuple[int, int]) -> bool:
    """Return True if *tag_tuple* is a patient-identifying tag.

    Membership is decided solely by the module-level PII_TAGS set, which
    is built from COMMON_TAGS["patient_info"] at import time.
    """
    return tag_tuple in PII_TAGS
def redact_if_pii(tag_tuple: Tuple[int, int], value: Any) -> Any:
    """Return ``[REDACTED]`` when PII filtering is active and the tag is PII.

    The function checks the runtime config on every call so that
    ``importlib.reload(dicom_mcp.config)`` toggles behaviour in tests.
    """
    from dicom_mcp.config import PII_FILTER_ENABLED

    if not PII_FILTER_ENABLED:
        return value
    return PII_REDACTED_VALUE if is_pii_tag(tag_tuple) else value

View File

@ -0,0 +1,55 @@
"""FastMCP server instance and entry point."""
import logging
import os
import sys
from mcp.server.fastmcp import FastMCP
# ---------------------------------------------------------------------------
# Logging & Output Redirection
# ---------------------------------------------------------------------------
LOG_FILE = os.environ.get("DICOM_MCP_LOG_FILE", "dicom_mcp.log")
def setup_redirection():
    """Reduces noise by suppressing stderr and logging to a file.

    Process-wide side effects:
      - Replaces the process's stderr file descriptor with /dev/null
        via os.dup2 (irreversible for this process).
      - Configures the root logger to append to LOG_FILE.

    NOTE(review): the devnull file object is never closed; after dup2 the
    stderr descriptor is an independent duplicate, so this appears
    intentional rather than a leak.
    """
    # Suppress stderr
    devnull = open(os.devnull, "w")
    os.dup2(devnull.fileno(), sys.stderr.fileno())
    # Configure logging to write to the log file directly
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(levelname)s] %(message)s",
        filename=LOG_FILE,
        filemode="a"
    )
# Redirect stderr and set up file logging at import time, before the
# FastMCP instance is created.
setup_redirection()
logger = logging.getLogger("dicom_mcp")
# The instructions string constrains the assistant to factual, descriptive
# reporting only; it is passed to FastMCP verbatim — do not reword casually.
mcp = FastMCP(
    "dicom_mcp",
    instructions=(
        "You are a DICOM data inspection assistant. "
        "Your role is strictly factual and descriptive. "
        "Report exactly what is present in the DICOM data: tag values, "
        "pixel statistics, acquisition parameters, series counts, "
        "file structure, and vendor differences. "
        "Do NOT provide clinical interpretation, diagnostic guidance, "
        "or opinions on data suitability for specific clinical purposes. "
        "Do NOT suggest what conditions the data could help diagnose, "
        "or recommend clinical actions based on the data. "
        "Do NOT assess whether data quality is adequate for specific "
        "clinical workflows. "
        "Present findings as-is. Qualified professionals will draw "
        "their own conclusions from the data you report."
    ),
)
def run():
    """Start the MCP server.

    Importing dicom_mcp.tools executes every @mcp.tool() decorator, so all
    tools must be registered before mcp.run() takes over the process.
    """
    # Import tools to ensure they are registered before running
    import dicom_mcp.tools  # noqa: F401
    mcp.run()

View File

@ -0,0 +1,17 @@
"""Tool registration module.
Importing this package triggers all @mcp.tool() decorators, registering
every tool with the FastMCP server instance.
"""
from dicom_mcp.tools import discovery # noqa: F401
from dicom_mcp.tools import metadata # noqa: F401
from dicom_mcp.tools import validation # noqa: F401
from dicom_mcp.tools import query # noqa: F401
from dicom_mcp.tools import search # noqa: F401
from dicom_mcp.tools import philips # noqa: F401
from dicom_mcp.tools import pixels # noqa: F401
from dicom_mcp.tools import tree # noqa: F401
from dicom_mcp.tools import uid_comparison # noqa: F401
from dicom_mcp.tools import segmentation # noqa: F401
from dicom_mcp.tools import ti_analysis # noqa: F401

View File

@ -0,0 +1,291 @@
"""DICOM file discovery tools: list_files and find_dixon_series."""
import asyncio
import json
from pathlib import Path
from typing import Any, Dict, Optional
from mcp.types import ToolAnnotations
from dicom_mcp.server import mcp
from dicom_mcp.config import MAX_FILES
from dicom_mcp.constants import ResponseFormat
from dicom_mcp.helpers.tags import _safe_get_tag, _format_markdown_table
from dicom_mcp.helpers.sequence import (
_identify_sequence_type,
_is_dixon_sequence,
_get_dixon_image_types,
)
from dicom_mcp.helpers.files import _find_dicom_files
@mcp.tool(
    name="dicom_list_files",
    annotations=ToolAnnotations(
        title="List DICOM Files in Directory",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_list_files(
    directory: str,
    recursive: bool = True,
    filter_sequence_type: Optional[str] = None,
    count_only: bool = False,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """List all DICOM files in a directory with optional filtering.

    Recursively searches a directory for DICOM files and provides
    basic metadata about each file including series information and sequence type.
    Useful for discovering available test data and organizing QA workflows.

    Set count_only=True to return just the series breakdown with file counts
    instead of listing every individual file. Much more efficient for large
    directories when you only need an inventory overview.

    Errors are reported as "Error: ..." strings rather than raised, so the
    MCP client always receives a text result.
    """
    try:
        dir_path = Path(directory)
        if not dir_path.exists():
            return f"Error: Directory not found: {directory}"
        if not dir_path.is_dir():
            return f"Error: Path is not a directory: {directory}"
        # Blocking directory walk runs in a worker thread; truncated is True
        # when the MAX_FILES cap was hit during scanning.
        dicom_files, truncated = await asyncio.to_thread(
            _find_dicom_files, dir_path, recursive
        )
        if not dicom_files:
            return f"No DICOM files found in {directory}"
        # Build per-file info and optional series counts
        file_info = []
        # NOTE(review): series are keyed by SeriesNumber, so two different
        # studies reusing a number would be merged here — confirm a single
        # study per directory is the intended scope.
        series_counts: Dict[str, Dict[str, Any]] = {}  # keyed by series_number
        for file_path, ds in dicom_files:
            sequence_type = _identify_sequence_type(ds)
            # Filter comparison is case/whitespace-insensitive against the
            # SequenceType enum value.
            if filter_sequence_type:
                normalized_filter = filter_sequence_type.lower().strip()
                if sequence_type.value != normalized_filter:
                    continue
            series_num = _safe_get_tag(ds, (0x0020, 0x0011))
            series_desc = _safe_get_tag(ds, (0x0008, 0x103E))
            manufacturer = _safe_get_tag(ds, (0x0008, 0x0070))
            modality = _safe_get_tag(ds, (0x0008, 0x0060))
            # Always accumulate series counts (cheap)
            if series_num not in series_counts:
                series_counts[series_num] = {
                    "series_number": series_num,
                    "series_description": series_desc,
                    "sequence_type": sequence_type.value,
                    "manufacturer": manufacturer,
                    "modality": modality,
                    "file_count": 0,
                }
            series_counts[series_num]["file_count"] += 1
            # Only build full file list when not in count_only mode
            if not count_only:
                info = {
                    "path": str(file_path.relative_to(dir_path)),
                    "series_description": series_desc,
                    "series_number": series_num,
                    "sequence_type": sequence_type.value,
                    "manufacturer": manufacturer,
                    "modality": modality,
                }
                file_info.append(info)
        total_matched = sum(sc["file_count"] for sc in series_counts.values())

        # Sort series by number
        # Three-tier key: numeric series numbers first (numeric order), then
        # non-numeric strings, then missing/"N/A" values last. The second
        # tuple element is only compared within a tier, so the int/str mix
        # is safe.
        def _sort_series_key(item: tuple) -> tuple:
            sn = item[1]["series_number"]
            if sn is None or sn == "N/A":
                return (2, "")
            try:
                return (0, int(sn))
            except (ValueError, TypeError):
                return (1, str(sn))

        sorted_series = sorted(series_counts.items(), key=_sort_series_key)
        if count_only:
            # --- count_only mode: return series breakdown only ---
            series_list = [data for _, data in sorted_series]
            if response_format == ResponseFormat.JSON:
                result = {
                    "total_files": total_matched,
                    "directory": str(dir_path),
                    "truncated": truncated,
                    "series_count": len(series_list),
                    "series": series_list,
                }
                return json.dumps(result, indent=2)
            else:
                output = [
                    f"# DICOM File Counts in {dir_path}\n",
                    f"Total files: {total_matched} across {len(series_list)} series\n",
                ]
                if truncated:
                    output.append(
                        f"**Warning**: Scan truncated at {MAX_FILES} files.\n"
                    )
                headers = ["Series", "Description", "Type", "Manufacturer", "Files"]
                rows = [
                    [
                        s["series_number"],
                        s["series_description"],
                        s["sequence_type"],
                        s["manufacturer"],
                        str(s["file_count"]),
                    ]
                    for s in series_list
                ]
                output.append(_format_markdown_table(headers, rows))
                return "\n".join(output)
        else:
            # --- full listing mode (original behaviour) ---
            if response_format == ResponseFormat.JSON:
                result = {
                    "total_files": len(file_info),
                    "directory": str(dir_path),
                    "truncated": truncated,
                    "files": file_info,
                }
                return json.dumps(result, indent=2)
            else:
                output = [
                    f"# DICOM Files in {dir_path}\n",
                    f"Total files found: {len(file_info)}\n",
                ]
                if truncated:
                    output.append(
                        f"**Warning**: Results truncated at {MAX_FILES} files. Narrow your search directory.\n"
                    )
                if file_info:
                    headers = ["Path", "Series", "Number", "Sequence", "Manufacturer"]
                    rows = [
                        [
                            f["path"],
                            f["series_description"],
                            f["series_number"],
                            f["sequence_type"],
                            f["manufacturer"],
                        ]
                        for f in file_info
                    ]
                    output.append(_format_markdown_table(headers, rows))
                return "\n".join(output)
    except Exception as e:
        return f"Error listing DICOM files: {str(e)}"
@mcp.tool(
    name="dicom_find_dixon_series",
    annotations=ToolAnnotations(
        title="Find Dixon Sequences",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_find_dixon_series(
    directory: str,
    recursive: bool = True,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Find and analyze Dixon sequences in a directory.

    Searches for Dixon (chemical shift) sequences and identifies the different
    image types (water, fat, in-phase, out-phase) available in each series.
    Critical for body composition QA workflows.

    Errors are reported as "Error: ..." strings rather than raised.
    """
    try:
        dir_path = Path(directory)
        if not dir_path.exists():
            return f"Error: Directory not found: {directory}"
        # File discovery is blocking I/O; run it off the event loop.
        dicom_files, truncated = await asyncio.to_thread(
            _find_dicom_files, dir_path, recursive
        )
        if not dicom_files:
            return f"No DICOM files found in {directory}"
        # Group Dixon files by SeriesInstanceUID, accumulating the image
        # types seen across the series' files.
        series_map = {}
        for file_path, ds in dicom_files:
            if not _is_dixon_sequence(ds):
                continue
            series_uid = _safe_get_tag(ds, (0x0020, 0x000E))
            if series_uid not in series_map:
                series_map[series_uid] = {
                    "series_description": _safe_get_tag(ds, (0x0008, 0x103E)),
                    "series_number": _safe_get_tag(ds, (0x0020, 0x0011)),
                    "series_uid": series_uid,
                    "image_types": set(),
                    "files": [],
                    "sample_file": str(file_path),
                }
            image_types = _get_dixon_image_types(ds)
            series_map[series_uid]["image_types"].update(image_types)
            series_map[series_uid]["files"].append(str(file_path))
        if not series_map:
            return f"No Dixon sequences found in {directory}"
        series_list = []
        for series_data in series_map.values():
            series_list.append(
                {
                    "series_description": series_data["series_description"],
                    "series_number": series_data["series_number"],
                    "series_uid": series_data["series_uid"],
                    "image_types": sorted(list(series_data["image_types"])),
                    "file_count": len(series_data["files"]),
                    "sample_file": series_data["sample_file"],
                }
            )

        # BUGFIX: SeriesNumber is a string here, so a plain sort ordered
        # "10" before "2". Sort numerically when possible (matching
        # dicom_list_files), with non-numeric values (e.g. "N/A") last.
        def _series_sort_key(entry: Dict[str, Any]) -> tuple:
            try:
                return (0, int(entry["series_number"]), "")
            except (ValueError, TypeError):
                return (1, 0, str(entry["series_number"]))

        series_list.sort(key=_series_sort_key)
        if response_format == ResponseFormat.JSON:
            result = {
                "total_dixon_series": len(series_list),
                "directory": str(dir_path),
                "truncated": truncated,
                "series": series_list,
            }
            return json.dumps(result, indent=2)
        else:
            output = [
                f"# Dixon Sequences in {dir_path}\n",
                f"Found {len(series_list)} Dixon series\n",
            ]
            if truncated:
                output.append(
                    f"**Warning**: File scan truncated at {MAX_FILES} files.\n"
                )
            for series in series_list:
                output.append(
                    f"## Series {series['series_number']}: {series['series_description']}"
                )
                output.append(f"- **Series UID**: {series['series_uid']}")
                output.append(f"- **Image Types**: {', '.join(series['image_types'])}")
                output.append(f"- **File Count**: {series['file_count']}")
                output.append(f"- **Sample File**: {Path(series['sample_file']).name}")
                output.append("")
            return "\n".join(output)
    except Exception as e:
        return f"Error finding Dixon sequences: {str(e)}"

View File

@ -0,0 +1,260 @@
"""DICOM metadata extraction and comparison tools."""
import asyncio
import json
from pathlib import Path
from typing import Dict, List, Optional
import pydicom
from pydicom import datadict
from pydicom.errors import InvalidDicomError
from mcp.types import ToolAnnotations
from dicom_mcp.server import mcp
from dicom_mcp.constants import COMMON_TAGS, VALID_TAG_GROUPS, ResponseFormat
from dicom_mcp.helpers.tags import (
_safe_get_tag,
_validate_tag_groups,
_format_markdown_table,
)
from dicom_mcp.helpers.philips import _resolve_philips_private_tag
from dicom_mcp.pii import redact_if_pii
@mcp.tool(
    name="dicom_get_metadata",
    annotations=ToolAnnotations(
        title="Get DICOM File Metadata",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_get_metadata(
    file_path: str,
    tag_groups: Optional[List[str]] = None,
    custom_tags: Optional[List[str]] = None,
    philips_private_tags: Optional[List[Dict[str, int]]] = None,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Extract metadata from a DICOM file.

    Reads DICOM headers and extracts commonly used tags organized by
    category. Supports both predefined tag groups and custom tag specification.

    Available tag groups: patient_info, study_info, series_info, image_info,
    acquisition, manufacturer, equipment, geometry, pixel_data.

    For Philips files, you can also resolve private tags by providing
    philips_private_tags a list of dicts with keys "dd_number" and
    "element_offset" (and optionally "private_group", default 0x2005).
    Example: [{"dd_number": 1, "element_offset": 133}] resolves the
    DD 001 block's element at offset 0x85.
    """
    try:
        fp = Path(file_path)
        if not fp.exists():
            return f"Error: File not found: {file_path}"
        # Header-only read: pixel data is never needed for metadata.
        ds = await asyncio.to_thread(pydicom.dcmread, fp, stop_before_pixels=True)
        if tag_groups:
            invalid = _validate_tag_groups(tag_groups)
            if invalid:
                return (
                    f"Error: Unknown tag group(s): {', '.join(invalid)}. "
                    f"Available groups: {', '.join(VALID_TAG_GROUPS)}"
                )
            tag_groups_to_use = {
                k: v for k, v in COMMON_TAGS.items() if k in tag_groups
            }
        else:
            tag_groups_to_use = COMMON_TAGS
        metadata = {}
        for group_name, tags in tag_groups_to_use.items():
            group_data = {}
            for tag in tags:
                tag_name = datadict.keyword_for_tag(tag)
                value = _safe_get_tag(ds, tag)
                group_data[tag_name] = redact_if_pii(tag, value)
            metadata[group_name] = group_data
        if custom_tags:
            custom_data = {}
            for tag_str in custom_tags:
                try:
                    group, element = tag_str.split(",")
                    tag = (int(group, 16), int(element, 16))
                    tag_name = datadict.keyword_for_tag(tag) or f"Tag_{tag_str}"
                    value = _safe_get_tag(ds, tag)
                    # BUGFIX: custom tags previously bypassed PII redaction,
                    # letting e.g. "0010,0010" leak PatientName even with the
                    # PII filter enabled. Redact them like grouped tags.
                    custom_data[tag_name] = redact_if_pii(tag, value)
                except Exception as e:
                    custom_data[f"Error_{tag_str}"] = f"Invalid tag: {str(e)}"
            metadata["custom_tags"] = custom_data
        if philips_private_tags:
            private_data = {}
            for entry in philips_private_tags:
                dd_num = entry.get("dd_number")
                offset = entry.get("element_offset")
                group = entry.get("private_group", 0x2005)
                if dd_num is None or offset is None:
                    # BUGFIX: check `offset is not None` instead of
                    # truthiness so a valid offset of 0 still produces a
                    # labelled error entry.
                    if offset is not None:
                        label = f"Error_DD{dd_num}_0x{offset:02X}"
                    else:
                        label = "Error_invalid"
                    private_data[label] = (
                        "Both dd_number and element_offset are required"
                    )
                    continue
                resolved_tag, creator_str, value_str = _resolve_philips_private_tag(
                    ds, dd_num, offset, group
                )
                label = f"DD{dd_num:03d}_0x{offset:02X}"
                if resolved_tag is not None:
                    tag_hex = f"({resolved_tag[0]:04X},{resolved_tag[1]:04X})"
                    private_data[label] = {
                        "resolved_tag": tag_hex,
                        "creator": creator_str,
                        "value": value_str if value_str is not None else "N/A",
                    }
                else:
                    private_data[label] = {
                        "resolved_tag": None,
                        "error": f"No Private Creator found for DD {dd_num:03d} in group ({group:04X})",
                    }
            metadata["philips_private"] = private_data
        if response_format == ResponseFormat.JSON:
            result = {"file_path": str(fp), "metadata": metadata}
            return json.dumps(result, indent=2)
        else:
            output = [f"# DICOM Metadata: {fp.name}\n"]
            for group_name, group_data in metadata.items():
                output.append(f"## {group_name.replace('_', ' ').title()}\n")
                if group_name == "philips_private":
                    # Private entries are dicts (resolved or error) or the
                    # plain validation-error string appended above.
                    for label, data in group_data.items():
                        if isinstance(data, dict) and "error" in data:
                            output.append(f"- **{label}**: {data['error']}")
                        elif isinstance(data, dict):
                            output.append(
                                f"- **{label}** [{data['resolved_tag']}]: "
                                f"{data['value']} *(creator: {data['creator']})*"
                            )
                        else:
                            output.append(f"- **{label}**: {data}")
                else:
                    for tag_name, value in group_data.items():
                        output.append(f"- **{tag_name}**: {value}")
                output.append("")
            return "\n".join(output)
    except InvalidDicomError:
        return f"Error: Not a valid DICOM file: {file_path}"
    except Exception as e:
        return f"Error reading DICOM metadata: {str(e)}"
@mcp.tool(
    name="dicom_compare_headers",
    annotations=ToolAnnotations(
        title="Compare DICOM Headers",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_compare_headers(
    file_paths: List[str],
    tag_groups: Optional[List[str]] = None,
    show_differences_only: bool = False,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Compare DICOM headers across multiple files.

    Compares specified DICOM tags across 2-10 files, highlighting
    differences. Useful for validating sequence consistency and
    identifying which images were selected from Dixon sequences.

    Available tag groups: patient_info, study_info, series_info, image_info,
    acquisition, manufacturer, equipment, geometry, pixel_data.
    When tag_groups is omitted, acquisition and series_info are compared.

    Errors are reported as "Error: ..." strings rather than raised.
    """
    try:
        if len(file_paths) < 2 or len(file_paths) > 10:
            return "Error: Provide between 2 and 10 file paths to compare."
        paths = [Path(p) for p in file_paths]
        for fp in paths:
            if not fp.exists():
                return f"Error: File not found: {fp}"

        # Read all headers in one worker thread (header-only, no pixels).
        def _read_all():
            datasets = []
            for fp in paths:
                ds = pydicom.dcmread(fp, stop_before_pixels=True)
                datasets.append((fp, ds))
            return datasets

        datasets = await asyncio.to_thread(_read_all)
        effective_groups = tag_groups if tag_groups else ["acquisition", "series_info"]
        invalid = _validate_tag_groups(effective_groups)
        if invalid:
            return (
                f"Error: Unknown tag group(s): {', '.join(invalid)}. "
                f"Available groups: {', '.join(VALID_TAG_GROUPS)}"
            )
        tags_to_compare = []
        for group in effective_groups:
            tags_to_compare.extend(COMMON_TAGS[group])
        comparison = {}
        for tag in tags_to_compare:
            tag_name = datadict.keyword_for_tag(tag)
            # Values are redacted BEFORE comparison, so with the PII filter
            # enabled, PII tags always compare as consistent ("[REDACTED]").
            values = [redact_if_pii(tag, _safe_get_tag(ds, tag)) for _, ds in datasets]
            consistent = len(set(values)) == 1
            if show_differences_only and consistent:
                continue
            comparison[tag_name] = {"values": values, "consistent": consistent}
        if response_format == ResponseFormat.JSON:
            result = {
                "files": [str(fp) for fp, _ in datasets],
                "comparison": comparison,
            }
            return json.dumps(result, indent=2)
        else:
            output = [
                "# DICOM Header Comparison\n",
                f"Comparing {len(datasets)} files:\n",
            ]
            for i, (fp, _) in enumerate(datasets, 1):
                output.append(f"{i}. {fp.name}")
            output.append("")
            if comparison:
                # One column per file, plus leading Tag and trailing Status.
                headers = (
                    ["Tag"] + [f"File {i+1}" for i in range(len(datasets))] + ["Status"]
                )
                rows = []
                for tag_name, data in comparison.items():
                    status = "Consistent" if data["consistent"] else "Different"
                    row = [tag_name] + data["values"] + [status]
                    rows.append(row)
                output.append(_format_markdown_table(headers, rows))
            else:
                if show_differences_only:
                    output.append("All compared tags are consistent across files.")
                else:
                    output.append("No tags to compare.")
            return "\n".join(output)
    except Exception as e:
        return f"Error comparing DICOM headers: {str(e)}"

View File

@ -0,0 +1,184 @@
"""Philips private tag query tool."""
import asyncio
import json
from pathlib import Path
from typing import Optional
import pydicom
from pydicom.errors import InvalidDicomError
from mcp.types import ToolAnnotations
from dicom_mcp.server import mcp
from dicom_mcp.constants import ResponseFormat
from dicom_mcp.helpers.tags import _safe_get_tag, _format_markdown_table
from dicom_mcp.helpers.philips import (
_resolve_philips_private_tag,
_list_philips_private_creators,
)
@mcp.tool(
    name="dicom_query_philips_private",
    annotations=ToolAnnotations(
        title="Query Philips Private Tags",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_query_philips_private(
    file_path: str,
    dd_number: Optional[int] = None,
    element_offset: Optional[int] = None,
    private_group: int = 0x2005,
    list_creators: bool = False,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Query Philips private DICOM tags using DD number and element offset.
    Philips MRI scanners store proprietary metadata in private tag blocks.
    Each block is reserved by a Private Creator tag containing a string
    like "Philips MR Imaging DD 001". Block assignments vary across
    scanners and software versions, so this tool resolves them dynamically.
    Usage modes:
    1. **List creators**: Set list_creators=True to see all Private Creator
       tags and their DD numbers. Use this to discover what's available.
    2. **Resolve a specific tag**: Provide dd_number and element_offset to
       look up a specific private element. For example, dd_number=1 with
       element_offset=0x85 finds shim calculation values.
    Common Philips DD numbers and offsets (group 2005):
    - DD 001, offset 0x85: Shim calculation values
    - DD 001, offset 0x63: Stack ID
    - DD 004, offset 0x00: MR Series data object
    Args:
        file_path: Path to a Philips DICOM file
        dd_number: The DD number to look up (e.g. 1 for "DD 001")
        element_offset: The element offset within the DD block (e.g. 0x85).
                        Can be provided as decimal (133) or will be interpreted
                        as decimal. Use hex notation in the description for clarity.
        private_group: The DICOM private group to search (default 0x2005)
        list_creators: If True, list all Private Creator tags instead of
                       resolving a specific tag
        response_format: Output format (markdown or json)
    Returns:
        A markdown or JSON string. Failures are reported as human-readable
        "Error: ..." strings rather than raised exceptions.
    """
    try:
        fp = Path(file_path)
        if not fp.exists():
            return f"Error: File not found: {file_path}"
        # Header-only read: private creators live in the dataset header,
        # so pixel data never needs to be loaded.
        ds = await asyncio.to_thread(pydicom.dcmread, fp, stop_before_pixels=True)
        manufacturer = _safe_get_tag(ds, (0x0008, 0x0070))
        # Bail out early for non-Philips data: the DD-block naming
        # convention is Philips-specific, so resolution on other vendors
        # would be meaningless. Note this also blocks list_creators mode.
        if "philips" not in manufacturer.lower():
            return (
                f"Warning: This file's manufacturer is '{manufacturer}', not Philips. "
                "Philips private tag resolution may not produce meaningful results."
            )
        if list_creators:
            # --- Mode 1: List all Private Creator tags ---
            creators = _list_philips_private_creators(ds, private_group)
            if not creators:
                return (
                    f"No Private Creator tags found in group "
                    f"({private_group:04X}) of {fp.name}"
                )
            if response_format == ResponseFormat.JSON:
                result = {
                    "file": fp.name,
                    "manufacturer": manufacturer,
                    "private_group": f"({private_group:04X})",
                    "creators": creators,
                }
                return json.dumps(result, indent=2)
            else:
                output = [
                    f"# Philips Private Creators: {fp.name}\n",
                    f"**Manufacturer**: {manufacturer}",
                    f"**Private group**: ({private_group:04X})",
                    f"**Creator slots found**: {len(creators)}\n",
                ]
                headers = ["Tag", "Block", "DD #", "Creator String"]
                # dd_number can be None for creator strings that do not
                # follow the "DD NNN" naming pattern — render those as N/A.
                rows = [
                    [
                        c["tag"],
                        c["block_byte"],
                        str(c["dd_number"]) if c["dd_number"] is not None else "N/A",
                        c["creator_string"],
                    ]
                    for c in creators
                ]
                output.append(_format_markdown_table(headers, rows))
                return "\n".join(output)
        elif dd_number is not None and element_offset is not None:
            # --- Mode 2: Resolve a specific private tag ---
            # NOTE(review): assumes the helper returns a
            # (tag_tuple_or_None, creator_string, value_string) triple —
            # confirm against dicom_mcp.helpers.philips.
            resolved_tag, creator_str, value_str = _resolve_philips_private_tag(
                ds, dd_number, element_offset, private_group
            )
            if resolved_tag is None:
                # Show available creators to help the user
                creators = _list_philips_private_creators(ds, private_group)
                available = ", ".join(
                    f"DD {c['dd_number']:03d}"
                    for c in creators
                    if c["dd_number"] is not None
                )
                return (
                    f"Error: No Private Creator found for DD {dd_number:03d} "
                    f"in group ({private_group:04X}) of {fp.name}.\n\n"
                    f"Available DD numbers: {available or 'none found'}"
                )
            tag_hex = f"({resolved_tag[0]:04X},{resolved_tag[1]:04X})"
            if response_format == ResponseFormat.JSON:
                result = {
                    "file": fp.name,
                    "manufacturer": manufacturer,
                    "query": {
                        "dd_number": dd_number,
                        "element_offset": f"0x{element_offset:02X}",
                        "private_group": f"({private_group:04X})",
                    },
                    "resolution": {
                        "creator_string": creator_str,
                        "resolved_tag": tag_hex,
                        "value": value_str,
                    },
                }
                return json.dumps(result, indent=2)
            else:
                output = [
                    f"# Philips Private Tag Lookup: {fp.name}\n",
                    f"**Manufacturer**: {manufacturer}\n",
                    "## Query",
                    f"- **DD number**: {dd_number:03d}",
                    f"- **Element offset**: 0x{element_offset:02X}",
                    f"- **Private group**: ({private_group:04X})\n",
                    "## Resolution",
                    f"- **Creator string**: {creator_str}",
                    f"- **Resolved tag**: {tag_hex}",
                    f"- **Value**: {value_str if value_str is not None else 'Tag exists but no value'}\n",
                ]
                return "\n".join(output)
        else:
            # Neither mode was selected (missing dd_number/element_offset
            # and list_creators is False) — explain how to call the tool.
            return (
                "Error: Provide either list_creators=True to discover available "
                "Private Creator tags, or both dd_number and element_offset to "
                "resolve a specific private tag."
            )
    except InvalidDicomError:
        return f"Error: Not a valid DICOM file: {file_path}"
    except Exception as e:
        return f"Error querying Philips private tags: {str(e)}"

View File

@ -0,0 +1,511 @@
"""Pixel analysis tools: read_pixels, compute_snr, render_image."""
import asyncio
import json
from pathlib import Path
from typing import Any, Dict, List, Optional
import numpy as np
import pydicom
from PIL import Image, ImageDraw, ImageFont
from pydicom.errors import InvalidDicomError
from mcp.types import ToolAnnotations
from dicom_mcp.server import mcp
from dicom_mcp.constants import ResponseFormat
from dicom_mcp.helpers.tags import _safe_get_tag, _format_markdown_table
from dicom_mcp.helpers.pixels import (
_get_pixel_array,
_extract_roi,
_compute_stats,
_apply_windowing,
)
@mcp.tool(
    name="dicom_read_pixels",
    annotations=ToolAnnotations(
        title="Read DICOM Pixel Statistics",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_read_pixels(
    file_path: str,
    roi: Optional[List[int]] = None,
    include_histogram: bool = False,
    histogram_bins: int = 50,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Extract pixel statistics from a DICOM file.
    Reads pixel data and computes descriptive statistics (min, max, mean,
    standard deviation, median, percentiles). Optionally restricts analysis
    to a rectangular ROI.
    Pixel values are rescaled using RescaleSlope and RescaleIntercept
    when present (standard for Philips, common on Siemens/GE).
    Args:
        file_path: Path to the DICOM file
        roi: Optional region of interest as [x, y, width, height] where
             x,y is the top-left corner in pixel coordinates.
             If omitted, statistics cover the entire image.
        include_histogram: If True, include a binned histogram of pixel values
        histogram_bins: Number of histogram bins (default 50, must be >= 1)
        response_format: Output format (markdown or json)
    Returns:
        A markdown or JSON string. Failures are reported as human-readable
        "Error: ..." strings rather than raised exceptions.
    """
    try:
        fp = Path(file_path)
        if not fp.exists():
            return f"Error: File not found: {file_path}"
        # Validate up front: np.histogram raises a cryptic ValueError for
        # non-positive bin counts, so reject them with a clear message
        # before doing any file I/O.
        if include_histogram and histogram_bins < 1:
            return (
                f"Error: histogram_bins must be a positive integer, "
                f"got {histogram_bins}"
            )
        ds = await asyncio.to_thread(pydicom.dcmread, fp)
        if not hasattr(ds, "pixel_array"):
            return f"Error: No pixel data in file: {file_path}"
        pixels = _get_pixel_array(ds)
        # Reject multi-frame data before describing dimensions so that
        # image_info is only ever built for genuine 2D images (previously
        # rows/cols were taken from shape[:2] before this check).
        if pixels.ndim > 2:
            return (
                f"Error: Multi-frame image with shape {pixels.shape}. "
                "This tool supports single-frame 2D images only."
            )
        rows, cols = pixels.shape[:2]
        image_info = {
            "rows": rows,
            "columns": cols,
            "bits_allocated": int(getattr(ds, "BitsAllocated", 0)),
            "bits_stored": int(getattr(ds, "BitsStored", 0)),
            # The trailing "or 1.0"/"or 0.0" guards against empty-string
            # header values, which float() would reject.
            "rescale_slope": float(getattr(ds, "RescaleSlope", 1.0) or 1.0),
            "rescale_intercept": float(getattr(ds, "RescaleIntercept", 0.0) or 0.0),
        }
        roi_desc = "Full image"
        if roi:
            try:
                pixels = _extract_roi(pixels, roi)
                roi_desc = f"ROI [{roi[0]},{roi[1]}] {roi[2]}x{roi[3]}"
            except ValueError as e:
                return f"Error: {str(e)}"
        stats = _compute_stats(pixels)
        result = {
            "file": fp.name,
            "series_description": _safe_get_tag(ds, (0x0008, 0x103E)),
            "image_info": image_info,
            "region": roi_desc,
            "statistics": stats,
        }
        if include_histogram:
            counts, bin_edges = np.histogram(pixels.ravel(), bins=histogram_bins)
            result["histogram"] = {
                "bins": histogram_bins,
                "counts": counts.tolist(),
                "bin_edges": [round(float(e), 2) for e in bin_edges.tolist()],
            }
        if response_format == ResponseFormat.JSON:
            return json.dumps(result, indent=2)
        else:
            output = [
                f"# Pixel Statistics: {fp.name}\n",
                f"**Series**: {result['series_description']}",
                f"**Image size**: {rows} x {cols}",
                f"**Region**: {roi_desc}",
                f"**Rescale**: slope={image_info['rescale_slope']}, "
                f"intercept={image_info['rescale_intercept']}\n",
                "## Statistics\n",
            ]
            headers = ["Metric", "Value"]
            stat_rows = [
                ["Min", f"{stats['min']:.2f}"],
                ["Max", f"{stats['max']:.2f}"],
                ["Mean", f"{stats['mean']:.2f}"],
                ["Std Dev", f"{stats['std']:.2f}"],
                ["Median", f"{stats['median']:.2f}"],
                ["5th %ile", f"{stats['p5']:.2f}"],
                ["25th %ile", f"{stats['p25']:.2f}"],
                ["75th %ile", f"{stats['p75']:.2f}"],
                ["95th %ile", f"{stats['p95']:.2f}"],
                ["Pixel count", str(stats["pixel_count"])],
            ]
            output.append(_format_markdown_table(headers, stat_rows))
            if include_histogram and "histogram" in result:
                output.append("\n## Histogram\n")
                hist = result["histogram"]
                # ASCII bar chart: scale the largest bin to 40 characters.
                max_count = max(hist["counts"]) if hist["counts"] else 1
                for i, count in enumerate(hist["counts"]):
                    low = hist["bin_edges"][i]
                    high = hist["bin_edges"][i + 1]
                    bar_len = int(40 * count / max_count) if max_count > 0 else 0
                    bar = "#" * bar_len
                    output.append(f"  {low:8.1f} - {high:8.1f} | {bar} ({count})")
            return "\n".join(output)
    except InvalidDicomError:
        return f"Error: Not a valid DICOM file: {file_path}"
    except Exception as e:
        return f"Error reading pixel data: {str(e)}"
@mcp.tool(
    name="dicom_compute_snr",
    annotations=ToolAnnotations(
        title="Compute SNR from ROIs",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_compute_snr(
    file_path: str,
    signal_roi: List[int],
    noise_roi: List[int],
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Compute signal-to-noise ratio from two ROIs in a DICOM image.
    Places a signal ROI (typically in tissue of interest) and a noise ROI
    (typically in background air or a uniform region) and computes:
    - SNR = mean(signal) / std(noise)
    - Individual statistics for both ROIs
    This is the single-image method SNR. For a more robust estimate,
    use two identical acquisitions and compute SNR from the difference
    image, but this gives a practical per-image metric.
    Args:
        file_path: Path to the DICOM file
        signal_roi: Signal region as [x, y, width, height]
        noise_roi: Noise/background region as [x, y, width, height]
        response_format: Output format (markdown or json)
    Tip: Use dicom_render_image first to visualise the image and identify
    appropriate ROI coordinates, then use this tool to measure SNR.
    """
    try:
        src = Path(file_path)
        if not src.exists():
            return f"Error: File not found: {file_path}"
        ds = await asyncio.to_thread(pydicom.dcmread, src)
        if not hasattr(ds, "pixel_array"):
            return f"Error: No pixel data in file: {file_path}"
        arr = _get_pixel_array(ds)
        if arr.ndim > 2:
            return (
                f"Error: Multi-frame image with shape {arr.shape}. "
                "This tool supports single-frame 2D images only."
            )
        rows, cols = arr.shape
        # Extract both regions, reporting which one failed on bad coords.
        regions = {}
        for region_name, rect in (("signal", signal_roi), ("noise", noise_roi)):
            try:
                regions[region_name] = _extract_roi(arr, rect)
            except ValueError as exc:
                return f"Error in {region_name} ROI: {str(exc)}"
        sig_stats = _compute_stats(regions["signal"])
        bg_stats = _compute_stats(regions["noise"])
        sigma = bg_stats["std"]
        snr_note = None
        if sigma == 0 or sigma < 1e-10:
            # A (near-)zero noise std would blow up the ratio — flag it.
            snr = float("inf")
            snr_note = "Noise std is zero or near-zero; check ROI placement"
        else:
            snr = sig_stats["mean"] / sigma
        result = {
            "file": src.name,
            "series_description": _safe_get_tag(ds, (0x0008, 0x103E)),
            "image_size": {"rows": rows, "columns": cols},
            "signal_roi": {
                "position": signal_roi,
                "description": f"[{signal_roi[0]},{signal_roi[1]}] {signal_roi[2]}x{signal_roi[3]}",
                "statistics": sig_stats,
            },
            "noise_roi": {
                "position": noise_roi,
                "description": f"[{noise_roi[0]},{noise_roi[1]}] {noise_roi[2]}x{noise_roi[3]}",
                "statistics": bg_stats,
            },
            "snr": round(snr, 2) if snr != float("inf") else "infinite",
        }
        if snr_note:
            result["snr_note"] = snr_note
        if response_format == ResponseFormat.JSON:
            return json.dumps(result, indent=2)

        def _metric_rows(st):
            # Both ROI tables share the same metric layout.
            return [
                ["Mean", f"{st['mean']:.2f}"],
                ["Std", f"{st['std']:.2f}"],
                ["Min", f"{st['min']:.2f}"],
                ["Max", f"{st['max']:.2f}"],
                ["Pixels", str(st["pixel_count"])],
            ]

        output = [
            f"# SNR Analysis: {src.name}\n",
            f"**Series**: {result['series_description']}",
            f"**Image size**: {rows} x {cols}\n",
            "## Signal ROI\n",
            f"**Position**: {result['signal_roi']['description']}",
            _format_markdown_table(["Metric", "Value"], _metric_rows(sig_stats)),
            "\n## Noise ROI\n",
            f"**Position**: {result['noise_roi']['description']}",
            _format_markdown_table(["Metric", "Value"], _metric_rows(bg_stats)),
            "\n## Result\n",
            f"**SNR = {result['snr']}**",
            f"(mean_signal / std_noise = "
            f"{sig_stats['mean']:.2f} / {bg_stats['std']:.2f})",
        ]
        if snr_note:
            output.append(f"\nWarning: {snr_note}")
        return "\n".join(output)
    except InvalidDicomError:
        return f"Error: Not a valid DICOM file: {file_path}"
    except Exception as e:
        return f"Error computing SNR: {str(e)}"
@mcp.tool(
    name="dicom_render_image",
    annotations=ToolAnnotations(
        title="Render DICOM Image to PNG",
        # Not read-only: this tool writes a PNG to output_path.
        readOnlyHint=False,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_render_image(
    file_path: str,
    output_path: str,
    window_center: Optional[float] = None,
    window_width: Optional[float] = None,
    auto_window: bool = False,
    overlay_rois: Optional[List[Dict[str, Any]]] = None,
    show_info: bool = True,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Render a DICOM image to PNG with configurable windowing.
    Applies a window/level transform and saves the result as an 8-bit
    greyscale PNG. Optionally overlays ROI rectangles for visual
    verification of ROI placement used in other tools.
    Windowing priority:
    1. Explicit window_center and window_width parameters
    2. auto_window=True: uses 5th-95th percentile range
    3. DICOM header WindowCenter/WindowWidth values
    4. Fallback: full pixel range (min to max)
    Args:
        file_path: Path to the DICOM file
        output_path: Path where the PNG will be saved
        window_center: Optional manual window center value
        window_width: Optional manual window width value
        auto_window: If True, auto-calculate window from 5th-95th percentile
        overlay_rois: Optional list of ROI overlays. Each dict has:
            - roi: [x, y, width, height]
            - label: Optional text label (e.g. "Signal", "Noise")
            - color: Optional colour name ("red", "green", "blue",
              "yellow", "cyan", "magenta", "white"; default "red")
        show_info: If True, burn in series description and window values
        response_format: Output format (markdown or json)
    Returns:
        A markdown or JSON string describing the rendered file. Failures
        are reported as human-readable "Error: ..." strings.
    """
    try:
        fp = Path(file_path)
        if not fp.exists():
            return f"Error: File not found: {file_path}"
        out = Path(output_path)
        # Create the destination directory eagerly so img.save cannot fail
        # on a missing parent.
        out.parent.mkdir(parents=True, exist_ok=True)
        ds = await asyncio.to_thread(pydicom.dcmread, fp)
        if not hasattr(ds, "pixel_array"):
            return f"Error: No pixel data in file: {file_path}"
        pixels = _get_pixel_array(ds)
        if pixels.ndim > 2:
            return (
                f"Error: Multi-frame image with shape {pixels.shape}. "
                "This tool supports single-frame 2D images only."
            )
        rows, cols = pixels.shape
        # --- Determine windowing ---
        # Priority chain documented above: manual > auto > header > full range.
        wc, ww = None, None
        window_source = "none"
        if window_center is not None and window_width is not None:
            wc, ww = float(window_center), float(window_width)
            window_source = "manual"
        elif auto_window:
            p5 = float(np.percentile(pixels, 5))
            p95 = float(np.percentile(pixels, 95))
            ww = p95 - p5
            wc = p5 + ww / 2.0
            window_source = "auto (p5-p95)"
        else:
            try:
                # (0028,1050)/(0028,1051) = WindowCenter / WindowWidth.
                header_wc = ds.get((0x0028, 0x1050))
                header_ww = ds.get((0x0028, 0x1051))
                if header_wc is not None and header_ww is not None:
                    hc = header_wc.value
                    hw = header_ww.value
                    # These tags are multi-valued in some files; take the
                    # first entry when a sequence is present.
                    if hasattr(hc, "__iter__") and not isinstance(hc, str):
                        hc = hc[0]
                    if hasattr(hw, "__iter__") and not isinstance(hw, str):
                        hw = hw[0]
                    wc, ww = float(hc), float(hw)
                    window_source = "DICOM header"
            except Exception:
                # Malformed header windowing is non-fatal; fall through to
                # the full-range default below.
                pass
        if wc is None or ww is None:
            pmin, pmax = float(np.min(pixels)), float(np.max(pixels))
            ww = pmax - pmin if pmax > pmin else 1.0
            wc = pmin + ww / 2.0
            window_source = "full range"
        # NOTE(review): ww can still be 0 here (auto-window on a constant
        # image, or a zero header value) — confirm _apply_windowing guards
        # against division by zero.
        display = _apply_windowing(pixels, wc, ww)
        img = Image.fromarray(display, mode="L")
        # --- Overlays ---
        colour_map = {
            "red": (255, 0, 0),
            "green": (0, 255, 0),
            "blue": (0, 100, 255),
            "yellow": (255, 255, 0),
            "cyan": (0, 255, 255),
            "magenta": (255, 0, 255),
            "white": (255, 255, 255),
        }
        if overlay_rois or show_info:
            # Coloured rectangles/text need RGB; plain renders stay "L".
            img = img.convert("RGB")
            draw = ImageDraw.Draw(img)
            # Font fallback chain: macOS Menlo, then Linux DejaVu Mono,
            # then Pillow's built-in bitmap font.
            try:
                font = ImageFont.truetype("/System/Library/Fonts/Menlo.ttc", 12)
            except (OSError, IOError):
                try:
                    font = ImageFont.truetype(
                        "/usr/share/fonts/truetype/dejavu/DejaVuSansMono.ttf", 12
                    )
                except (OSError, IOError):
                    font = ImageFont.load_default()
            if overlay_rois:
                for roi_overlay in overlay_rois:
                    roi = roi_overlay.get("roi")
                    # Silently skip malformed overlay entries.
                    if not roi or len(roi) != 4:
                        continue
                    label = roi_overlay.get("label", "")
                    colour_name = roi_overlay.get("color", "red").lower()
                    colour = colour_map.get(colour_name, (255, 0, 0))
                    x, y, w, h = int(roi[0]), int(roi[1]), int(roi[2]), int(roi[3])
                    # Draw two nested 1px rectangles for a 2px border.
                    for offset in range(2):
                        draw.rectangle(
                            [
                                x + offset,
                                y + offset,
                                x + w - 1 - offset,
                                y + h - 1 - offset,
                            ],
                            outline=colour,
                        )
                    if label:
                        # Label above the box, clamped to the top edge.
                        draw.text(
                            (x, max(0, y - 15)),
                            label,
                            fill=colour,
                            font=font,
                        )
            if show_info:
                # Burn a black info strip into the bottom-left corner.
                series_desc = _safe_get_tag(ds, (0x0008, 0x103E))
                info_text = f"{series_desc} WC:{wc:.0f} WW:{ww:.0f} ({window_source})"
                bbox = draw.textbbox((0, 0), info_text, font=font)
                text_w = bbox[2] - bbox[0]
                text_h = bbox[3] - bbox[1]
                draw.rectangle(
                    [0, rows - text_h - 6, text_w + 8, rows],
                    fill=(0, 0, 0),
                )
                draw.text(
                    (4, rows - text_h - 3),
                    info_text,
                    fill=(255, 255, 255),
                    font=font,
                )
        # PNG encoding can be slow for large images; keep it off the loop.
        await asyncio.to_thread(img.save, str(out), "PNG")
        result = {
            "file": fp.name,
            "output_path": str(out),
            "series_description": _safe_get_tag(ds, (0x0008, 0x103E)),
            "image_size": {"rows": rows, "columns": cols},
            "windowing": {
                "center": round(wc, 2),
                "width": round(ww, 2),
                "source": window_source,
            },
            "overlays": len(overlay_rois) if overlay_rois else 0,
        }
        if response_format == ResponseFormat.JSON:
            return json.dumps(result, indent=2)
        else:
            output = [
                f"# Rendered Image: {fp.name}\n",
                f"**Series**: {result['series_description']}",
                f"**Image size**: {rows} x {cols}",
                f"**Windowing**: WC={wc:.1f}, WW={ww:.1f} ({window_source})",
                f"**Output**: `{out}`",
            ]
            if overlay_rois:
                output.append(f"**ROI overlays**: {len(overlay_rois)}")
            return "\n".join(output)
    except InvalidDicomError:
        return f"Error: Not a valid DICOM file: {file_path}"
    except Exception as e:
        return f"Error rendering image: {str(e)}"

View File

@ -0,0 +1,421 @@
"""DICOM tag query and directory summary tools."""
import asyncio
import json
from pathlib import Path
from typing import Any, Dict, List, Optional
from mcp.types import ToolAnnotations
from dicom_mcp.server import mcp
from dicom_mcp.config import MAX_FILES
from dicom_mcp.constants import ResponseFormat
from dicom_mcp.helpers.tags import _safe_get_tag, _resolve_tag, _format_markdown_table
from dicom_mcp.helpers.sequence import _identify_sequence_type
from dicom_mcp.helpers.files import _find_dicom_files
from dicom_mcp.pii import redact_if_pii
@mcp.tool(
    name="dicom_query",
    annotations=ToolAnnotations(
        title="Query DICOM Tags Across Directory",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_query(
    directory: str,
    tags: List[str],
    recursive: bool = True,
    group_by: Optional[str] = None,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Query arbitrary DICOM tags across all files in a directory.
    Aggregates unique values with file counts for any combination of DICOM
    tags. Accepts tag names (keywords) or hex codes. Use this when you need
    to check consistency or explore values for tags not covered by the
    standard summary tool.
    Args:
        directory: Path to the directory to scan
        tags: List of DICOM tag keywords (e.g. "EchoTime", "StudyDate") or
              hex pairs (e.g. "0018,0081", "0008,0020")
        recursive: Whether to scan subdirectories recursively
        group_by: Optional tag keyword or hex pair to group results by
                  (e.g. "SeriesDescription" to see per-series breakdowns)
        response_format: Output format (markdown or json)
    Returns:
        A markdown or JSON string. Failures are reported as human-readable
        "Error: ..." strings rather than raised exceptions.
    """
    try:
        dir_path = Path(directory)
        if not dir_path.exists():
            return f"Error: Directory not found: {directory}"
        if not dir_path.is_dir():
            return f"Error: Path is not a directory: {directory}"
        if not tags:
            return "Error: Provide at least one tag to query."
        # Resolve every requested tag spec. Unresolvable specs become
        # warnings; the query is only aborted when NO tag resolved.
        resolved_tags = []
        errors = []
        for spec in tags:
            tag_tuple, name = _resolve_tag(spec)
            if tag_tuple is None:
                errors.append(f"Could not resolve tag: '{spec}'")
            else:
                resolved_tags.append((tag_tuple, name))
        if errors and not resolved_tags:
            return "Error: " + "; ".join(errors)
        # Resolve group_by tag if provided
        group_by_tag = None
        group_by_name = None
        if group_by:
            group_by_tag, group_by_name = _resolve_tag(group_by)
            if group_by_tag is None:
                return f"Error: Could not resolve group_by tag: '{group_by}'"
        # --- Scan files ---
        # _find_dicom_files returns (path, dataset) pairs plus a flag that
        # is True when the scan was capped at MAX_FILES.
        dicom_files, truncated = await asyncio.to_thread(
            _find_dicom_files, dir_path, recursive
        )
        if not dicom_files:
            return f"No DICOM files found in {directory}"
        # --- Aggregate values ---
        if group_by_tag:
            # Nested: group_value -> tag_name -> value -> count
            grouped: Dict[str, Dict[str, Dict[str, int]]] = {}
            for _, ds in dicom_files:
                gv = _safe_get_tag(ds, group_by_tag)
                if gv not in grouped:
                    grouped[gv] = {name: {} for _, name in resolved_tags}
                for tag_tuple, name in resolved_tags:
                    # PII-bearing values (names, IDs, ...) are redacted
                    # before aggregation so they never reach the output.
                    value = redact_if_pii(tag_tuple, _safe_get_tag(ds, tag_tuple))
                    grouped[gv][name][value] = grouped[gv][name].get(value, 0) + 1
            if response_format == ResponseFormat.JSON:
                result = {
                    "directory": str(dir_path),
                    "total_files_scanned": len(dicom_files),
                    "truncated": truncated,
                    "group_by": group_by_name,
                    "groups": {},
                }
                if errors:
                    result["warnings"] = errors
                for gv, tag_data in sorted(grouped.items()):
                    result["groups"][gv] = {
                        name: [
                            {"value": v, "count": c}
                            # key=-x[1]: most frequent value first.
                            for v, c in sorted(vals.items(), key=lambda x: -x[1])
                        ]
                        for name, vals in tag_data.items()
                    }
                return json.dumps(result, indent=2)
            else:
                output = [
                    f"# DICOM Tag Query: {dir_path}\n",
                    f"**Files scanned**: {len(dicom_files)}",
                    f"**Grouped by**: {group_by_name}\n",
                ]
                if truncated:
                    output.append(
                        f"**Warning**: Scan truncated at {MAX_FILES} files.\n"
                    )
                if errors:
                    output.append(f"**Warnings**: {'; '.join(errors)}\n")
                for gv in sorted(grouped.keys()):
                    output.append(f"## {group_by_name}: {gv}\n")
                    tag_data = grouped[gv]
                    for name, vals in tag_data.items():
                        sorted_vals = sorted(vals.items(), key=lambda x: -x[1])
                        # Single unique value renders inline; multiple values
                        # become a nested bullet list.
                        if len(sorted_vals) == 1:
                            v, c = sorted_vals[0]
                            output.append(f"- **{name}**: {v} ({c} files)")
                        else:
                            output.append(f"- **{name}** ({len(sorted_vals)} unique):")
                            for v, c in sorted_vals:
                                output.append(f"  - {v}: {c} files")
                    output.append("")
                return "\n".join(output)
        else:
            # Flat: tag_name -> value -> count
            collectors: Dict[str, Dict[str, int]] = {
                name: {} for _, name in resolved_tags
            }
            for _, ds in dicom_files:
                for tag_tuple, name in resolved_tags:
                    value = redact_if_pii(tag_tuple, _safe_get_tag(ds, tag_tuple))
                    collectors[name][value] = collectors[name].get(value, 0) + 1
            if response_format == ResponseFormat.JSON:
                result = {
                    "directory": str(dir_path),
                    "total_files_scanned": len(dicom_files),
                    "truncated": truncated,
                    "tags": {},
                }
                if errors:
                    result["warnings"] = errors
                for name, vals in collectors.items():
                    result["tags"][name] = {
                        "unique_count": len(vals),
                        "values": [
                            {"value": v, "count": c}
                            for v, c in sorted(vals.items(), key=lambda x: -x[1])
                        ],
                    }
                return json.dumps(result, indent=2)
            else:
                output = [
                    f"# DICOM Tag Query: {dir_path}\n",
                    f"**Files scanned**: {len(dicom_files)}\n",
                ]
                if truncated:
                    output.append(
                        f"**Warning**: Scan truncated at {MAX_FILES} files.\n"
                    )
                if errors:
                    output.append(f"**Warnings**: {'; '.join(errors)}\n")
                for name, vals in collectors.items():
                    sorted_vals = sorted(vals.items(), key=lambda x: -x[1])
                    output.append(f"## {name} ({len(sorted_vals)} unique)\n")
                    headers = ["Value", "File Count"]
                    rows = [[v, str(c)] for v, c in sorted_vals]
                    output.append(_format_markdown_table(headers, rows))
                    output.append("")
                return "\n".join(output)
    except Exception as e:
        return f"Error querying DICOM tags: {str(e)}"
@mcp.tool(
    name="dicom_summarize_directory",
    annotations=ToolAnnotations(
        title="Summarize DICOM Directory",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_summarize_directory(
    directory: str,
    recursive: bool = True,
    include_series_overview: bool = True,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Summarize a DICOM directory by returning unique values of key tags.
    Scans all DICOM files in a directory and returns distinct values for
    important metadata fields (manufacturer, scanner model, series descriptions,
    sequence types, institution, field strength, etc.) with file counts per
    group. Much more efficient than listing every file when you need an
    overview of what a directory contains.
    Args:
        directory: Path to the directory to scan
        recursive: Whether to scan subdirectories recursively
        include_series_overview: Whether to include detailed series-by-series table
        response_format: Output format (markdown or json)
    Returns:
        A markdown or JSON string. Failures are reported as human-readable
        "Error: ..." strings rather than raised exceptions.
    """
    try:
        dir_path = Path(directory)
        if not dir_path.exists():
            return f"Error: Directory not found: {directory}"
        if not dir_path.is_dir():
            return f"Error: Path is not a directory: {directory}"
        # _find_dicom_files returns (path, dataset) pairs plus a flag that
        # is True when the scan was capped at MAX_FILES.
        dicom_files, truncated = await asyncio.to_thread(
            _find_dicom_files, dir_path, recursive
        )
        if not dicom_files:
            return f"No DICOM files found in {directory}"
        # --- Series-level grouping (only if requested) ---
        sorted_series = []
        if include_series_overview:
            # Keyed by SeriesInstanceUID (0020,000E); the first file seen in
            # a series supplies its descriptive metadata (TR/TE/TI/FA etc.).
            series_groups: Dict[str, Dict[str, Any]] = {}
            for _, ds in dicom_files:
                series_uid = _safe_get_tag(ds, (0x0020, 0x000E))
                if series_uid not in series_groups:
                    series_groups[series_uid] = {
                        "series_number": _safe_get_tag(ds, (0x0020, 0x0011)),
                        "series_description": _safe_get_tag(ds, (0x0008, 0x103E)),
                        "sequence_type": _identify_sequence_type(ds).value,
                        "tr": _safe_get_tag(ds, (0x0018, 0x0080)),
                        "te": _safe_get_tag(ds, (0x0018, 0x0081)),
                        "ti": _safe_get_tag(ds, (0x0018, 0x0082)),
                        "fa": _safe_get_tag(ds, (0x0018, 0x1314)),
                        "file_count": 0,
                    }
                series_groups[series_uid]["file_count"] += 1
            # Sort series by series number (numeric where possible)
            def _sort_key(item: tuple) -> tuple:
                # Three-tier key: (0, int) for numeric series numbers,
                # (1, str) for non-numeric ones, (2, "") for missing.
                series_num = item[1]["series_number"]
                if series_num is None or series_num == "N/A":
                    return (2, "")  # Sort None/N/A values last
                try:
                    return (0, int(series_num))
                except (ValueError, TypeError):
                    return (1, str(series_num))
            sorted_series = sorted(series_groups.items(), key=_sort_key)
        # Tags to summarize: tag tuple -> (display_name, collector dict)
        summary_fields = {
            (0x0010, 0x0010): "Patient Name",
            (0x0010, 0x0020): "Patient ID",
            (0x0010, 0x0030): "Patient Birth Date",
            (0x0010, 0x0040): "Patient Sex",
            (0x0008, 0x0070): "Manufacturer",
            (0x0008, 0x1090): "Scanner Model",
            (0x0018, 0x0087): "Field Strength (T)",
            (0x0008, 0x0080): "Institution",
            (0x0008, 0x1010): "Station Name",
            (0x0008, 0x0060): "Modality",
            (0x0008, 0x103E): "Series Description",
            (0x0020, 0x0011): "Series Number",
        }
        # Collect unique values with counts
        collectors: Dict[str, Dict[str, int]] = {
            name: {} for name in summary_fields.values()
        }
        sequence_type_counts: Dict[str, int] = {}
        for _, ds in dicom_files:
            for tag, field_name in summary_fields.items():
                # Patient-identifying tags are redacted before aggregation
                # so they never reach the output.
                value = redact_if_pii(tag, _safe_get_tag(ds, tag))
                collectors[field_name][value] = collectors[field_name].get(value, 0) + 1
            seq_type = _identify_sequence_type(ds).value
            sequence_type_counts[seq_type] = sequence_type_counts.get(seq_type, 0) + 1
        # Build summary
        summary = {
            "directory": str(dir_path),
            "total_files_scanned": len(dicom_files),
            "truncated": truncated,
            "fields": {},
        }
        for field_name, value_counts in collectors.items():
            # key=-x[1]: most frequent value first.
            sorted_entries = sorted(value_counts.items(), key=lambda x: -x[1])
            summary["fields"][field_name] = {
                "unique_count": len(sorted_entries),
                "values": [{"value": v, "count": c} for v, c in sorted_entries],
            }
        summary["fields"]["Sequence Type"] = {
            "unique_count": len(sequence_type_counts),
            "values": [
                {"value": v, "count": c}
                for v, c in sorted(sequence_type_counts.items(), key=lambda x: -x[1])
            ],
        }
        # Add series overview to summary (if enabled)
        if include_series_overview and sorted_series:
            summary["series_overview"] = [
                {
                    "series_number": data["series_number"],
                    "series_description": data["series_description"],
                    "sequence_type": data["sequence_type"],
                    "tr": data["tr"],
                    "te": data["te"],
                    "ti": data["ti"],
                    "fa": data["fa"],
                    "file_count": data["file_count"],
                }
                for _, data in sorted_series
            ]
        if response_format == ResponseFormat.JSON:
            return json.dumps(summary, indent=2)
        else:
            output = [
                f"# DICOM Directory Summary: {dir_path}\n",
                f"**Total files scanned**: {len(dicom_files)}",
            ]
            if truncated:
                output.append(
                    f"**Warning**: Scan truncated at {MAX_FILES} files. "
                    "Results may not reflect all data in the directory.\n"
                )
            output.append("")
            # Order fields for readability
            field_order = [
                "Patient Name",
                "Patient ID",
                "Patient Birth Date",
                "Patient Sex",
                "Manufacturer",
                "Scanner Model",
                "Field Strength (T)",
                "Institution",
                "Station Name",
                "Modality",
                "Sequence Type",
                "Series Description",
                "Series Number",
            ]
            for field_name in field_order:
                field_data = summary["fields"].get(field_name)
                if not field_data:
                    continue
                output.append(
                    f"## {field_name} ({field_data['unique_count']} unique)\n"
                )
                headers = ["Value", "File Count"]
                rows = [
                    [entry["value"], str(entry["count"])]
                    for entry in field_data["values"]
                ]
                output.append(_format_markdown_table(headers, rows))
                output.append("")
            # --- Series Overview Table (if enabled) ---
            if include_series_overview and sorted_series:
                output.append("## Series Overview\n")
                series_headers = [
                    "Series",
                    "Description",
                    "Type",
                    "TR",
                    "TE",
                    "TI",
                    "FA",
                    "Files",
                ]
                series_rows = [
                    [
                        data["series_number"],
                        data["series_description"],
                        data["sequence_type"],
                        data["tr"],
                        data["te"],
                        data["ti"],
                        data["fa"],
                        str(data["file_count"]),
                    ]
                    for _, data in sorted_series
                ]
                output.append(_format_markdown_table(series_headers, series_rows))
                output.append("")
            return "\n".join(output)
    except Exception as e:
        return f"Error summarizing directory: {str(e)}"

View File

@ -0,0 +1,249 @@
"""DICOM file search tool with filter parsing."""
import asyncio
import json
from pathlib import Path
from typing import Any, Dict, List
import pydicom
from mcp.types import ToolAnnotations
from dicom_mcp.server import mcp
from dicom_mcp.config import MAX_FILES
from dicom_mcp.constants import ResponseFormat
from dicom_mcp.helpers.tags import _safe_get_tag, _resolve_tag, _format_markdown_table
from dicom_mcp.helpers.sequence import _identify_sequence_type
from dicom_mcp.helpers.files import _find_dicom_files
from dicom_mcp.helpers.filters import _parse_filter, _apply_filter
@mcp.tool(
    name="dicom_search",
    annotations=ToolAnnotations(
        title="Search DICOM Files by Criteria",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_search(
    directory: str,
    filters: List[str],
    recursive: bool = True,
    mode: str = "summary",
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Search for DICOM files matching specific criteria.

    Finds files where tag values match all given filter conditions (AND logic).
    Returns matching file paths, a count, or a summary depending on mode.

    Filter syntax (one filter per list element):

    Text operators (case-insensitive):
        "SeriesDescription contains MOST"
        "SeriesDescription is LMS MOST"
        "PatientName is not UNKNOWN"
        "SeriesDescription starts with Thigh"
        "SeriesDescription ends with Dixon"

    Numeric/symbolic operators (numeric when possible, string fallback):
        "EchoTime > 10"
        "FlipAngle <= 15"
        "RepetitionTime = 516"
        "SeriesNumber != 0"

    Presence operators:
        "InversionTime exists"
        "InversionTime missing"

    Tags can be keywords (EchoTime) or hex pairs (0018,0081).
    All filters must match for a file to be included (AND logic).

    Args:
        directory: Path to the directory to scan
        filters: List of filter strings
        recursive: Whether to scan subdirectories recursively
        mode: Result mode - "count" (just the number), "paths" (file paths),
            or "summary" (series breakdown of matches, default)
        response_format: Output format (markdown or json)
    """
    try:
        # Validate inputs up front; every error path returns a plain string
        # rather than raising, matching the other tools in this package.
        dir_path = Path(directory)
        if not dir_path.exists():
            return f"Error: Directory not found: {directory}"
        if not dir_path.is_dir():
            return f"Error: Path is not a directory: {directory}"
        if not filters:
            return "Error: Provide at least one filter."
        if mode not in ("count", "paths", "summary"):
            return f"Error: Invalid mode '{mode}'. Use 'count', 'paths', or 'summary'."
        # --- Parse and resolve filters ---
        # Each filter string is split into tag spec / operator / value, and
        # the tag spec (keyword or hex pair) is resolved to a (group, element)
        # tuple. Any parse or resolve failure aborts with a usage hint.
        parsed_filters = []
        for f_str in filters:
            parsed = _parse_filter(f_str)
            if parsed is None:
                return (
                    f"Error: Could not parse filter: '{f_str}'\n\n"
                    "Expected formats:\n"
                    " Text: TagName contains value\n"
                    " Symbolic: TagName > value\n"
                    " Presence: TagName exists"
                )
            tag_tuple, tag_name = _resolve_tag(parsed["tag_spec"])
            if tag_tuple is None:
                return f"Error: Could not resolve tag: '{parsed['tag_spec']}'"
            parsed["tag_tuple"] = tag_tuple
            parsed["tag_name"] = tag_name
            parsed_filters.append(parsed)
        # --- Scan files ---
        # Discovery runs in a worker thread so the event loop stays
        # responsive; 'truncated' flags that the scan hit the MAX_FILES cap.
        dicom_files, truncated = await asyncio.to_thread(
            _find_dicom_files, dir_path, recursive
        )
        if not dicom_files:
            return f"No DICOM files found in {directory}"
        # --- Apply filters ---
        # AND semantics: a file is kept only if every parsed filter matches;
        # the inner loop short-circuits on the first failing filter.
        matches: list[tuple[Path, pydicom.Dataset]] = []
        for file_path, ds in dicom_files:
            all_match = True
            for pf in parsed_filters:
                actual = _safe_get_tag(ds, pf["tag_tuple"])
                if not _apply_filter(pf, actual):
                    all_match = False
                    break
            if all_match:
                matches.append((file_path, ds))
        # --- Format filter description ---
        # Human-readable one-liner, e.g. "EchoTime > 10 AND SeriesNumber != 0".
        # Presence operators carry no value, hence the conditional suffix.
        filter_desc = " AND ".join(
            f"{pf['tag_name']} {pf['operator']}"
            + (f" {pf['value']}" if pf["value"] is not None else "")
            for pf in parsed_filters
        )
        # --- Output based on mode ---
        if mode == "count":
            # Just the match count plus scan metadata.
            if response_format == ResponseFormat.JSON:
                result = {
                    "directory": str(dir_path),
                    "filters": filter_desc,
                    "total_scanned": len(dicom_files),
                    "truncated": truncated,
                    "match_count": len(matches),
                }
                return json.dumps(result, indent=2)
            else:
                output = [
                    "# DICOM Search Results\n",
                    f"**Filter**: {filter_desc}",
                    f"**Scanned**: {len(dicom_files)} files",
                    f"**Matches**: {len(matches)} files",
                ]
                if truncated:
                    output.append(
                        f"\n**Warning**: Scan truncated at {MAX_FILES} files."
                    )
                return "\n".join(output)
        elif mode == "paths":
            # Paths are reported relative to the scanned directory.
            rel_paths = [str(fp.relative_to(dir_path)) for fp, _ in matches]
            if response_format == ResponseFormat.JSON:
                result = {
                    "directory": str(dir_path),
                    "filters": filter_desc,
                    "total_scanned": len(dicom_files),
                    "truncated": truncated,
                    "match_count": len(matches),
                    "paths": rel_paths,
                }
                return json.dumps(result, indent=2)
            else:
                output = [
                    "# DICOM Search Results\n",
                    f"**Filter**: {filter_desc}",
                    f"**Scanned**: {len(dicom_files)} files",
                    f"**Matches**: {len(matches)} files\n",
                ]
                if truncated:
                    output.append(
                        f"**Warning**: Scan truncated at {MAX_FILES} files.\n"
                    )
                if matches:
                    for p in rel_paths:
                        output.append(f"- {p}")
                else:
                    output.append("No files matched the filter criteria.")
                return "\n".join(output)
        else:  # summary
            # Group matches by series number; the first file seen for a
            # series supplies its description and detected sequence type.
            series_groups: Dict[str, Dict[str, Any]] = {}
            for fp, ds in matches:
                series_num = _safe_get_tag(ds, (0x0020, 0x0011))
                if series_num not in series_groups:
                    series_groups[series_num] = {
                        "series_number": series_num,
                        "series_description": _safe_get_tag(ds, (0x0008, 0x103E)),
                        "sequence_type": _identify_sequence_type(ds).value,
                        "file_count": 0,
                    }
                series_groups[series_num]["file_count"] += 1

            def _sort_key(item: tuple) -> tuple:
                # Three-tier ordering: numeric series numbers first (sorted
                # numerically), then non-numeric strings, then missing values.
                sn = item[1]["series_number"]
                if sn is None or sn == "N/A":
                    return (2, "")
                try:
                    return (0, int(sn))
                except (ValueError, TypeError):
                    return (1, str(sn))

            sorted_series = sorted(series_groups.items(), key=_sort_key)
            if response_format == ResponseFormat.JSON:
                result = {
                    "directory": str(dir_path),
                    "filters": filter_desc,
                    "total_scanned": len(dicom_files),
                    "truncated": truncated,
                    "match_count": len(matches),
                    "series_count": len(sorted_series),
                    "series": [data for _, data in sorted_series],
                }
                return json.dumps(result, indent=2)
            else:
                output = [
                    "# DICOM Search Results\n",
                    f"**Filter**: {filter_desc}",
                    f"**Scanned**: {len(dicom_files)} files",
                    f"**Matches**: {len(matches)} files across {len(sorted_series)} series\n",
                ]
                if truncated:
                    output.append(
                        f"**Warning**: Scan truncated at {MAX_FILES} files.\n"
                    )
                if sorted_series:
                    headers = ["Series", "Description", "Type", "Files"]
                    rows = [
                        [
                            s["series_number"],
                            s["series_description"],
                            s["sequence_type"],
                            str(s["file_count"]),
                        ]
                        for _, s in sorted_series
                    ]
                    output.append(_format_markdown_table(headers, rows))
                else:
                    output.append("No files matched the filter criteria.")
                return "\n".join(output)
    except Exception as e:
        # Broad catch keeps the MCP tool contract: always return a string.
        return f"Error searching DICOM files: {str(e)}"

View File

@ -0,0 +1,235 @@
"""DICOM segmentation verification tool."""
import asyncio
import json
from pathlib import Path
from typing import Any, Dict, List, Tuple
import pydicom
from mcp.types import ToolAnnotations
from dicom_mcp.server import mcp
from dicom_mcp.constants import ResponseFormat
from dicom_mcp.helpers.tags import _safe_get_tag
from dicom_mcp.helpers.files import _find_dicom_files
# Standard DICOM Segmentation Storage SOP Class UID
_SEG_SOP_CLASS_UID = "1.2.840.10008.5.1.4.1.1.66.4"
def _is_segmentation(ds: pydicom.Dataset) -> bool:
    """Decide whether ``ds`` represents a DICOM segmentation object.

    A dataset counts as a segmentation when either:
      * its SOPClassUID (0008,0016) equals the standard Segmentation
        Storage SOP Class UID, or
      * it carries a SourceImageSequence (some writers reference source
        images without using the standard SOP class).
    """
    if _safe_get_tag(ds, (0x0008, 0x0016)) == _SEG_SOP_CLASS_UID:
        return True
    return hasattr(ds, "SourceImageSequence")
def _extract_references(ds: pydicom.Dataset) -> List[Dict[str, str]]:
    """Collect referenced SOP Instance UIDs from a segmentation dataset.

    References are gathered from two places, in order:
      * SourceImageSequence items
      * ReferencedSeriesSequence -> ReferencedInstanceSequence items

    Returns a list of dicts, each holding the referenced instance UID,
    its SOP class UID, and the name of the sequence it came from.
    Items without a usable instance UID are skipped.
    """
    collected: List[Dict[str, str]] = []

    def _record(item: pydicom.Dataset, origin: str) -> None:
        # (0008,1155) ReferencedSOPInstanceUID / (0008,1150) ReferencedSOPClassUID
        uid = _safe_get_tag(item, (0x0008, 0x1155))
        if uid == "N/A":
            return
        cls = _safe_get_tag(item, (0x0008, 0x1150))
        collected.append(
            {
                "ReferencedSOPInstanceUID": str(uid),
                "ReferencedSOPClassUID": str(cls),
                "source": origin,
            }
        )

    for item in getattr(ds, "SourceImageSequence", []):
        _record(item, "SourceImageSequence")
    for series_item in getattr(ds, "ReferencedSeriesSequence", []):
        for inst_item in getattr(series_item, "ReferencedInstanceSequence", []):
            _record(inst_item, "ReferencedSeriesSequence")
    return collected
@mcp.tool(
    name="dicom_verify_segmentations",
    annotations=ToolAnnotations(
        title="Verify Segmentation Source References",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_verify_segmentations(
    directory: str,
    recursive: bool = True,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Validate that segmentation DICOM files reference valid source images.

    Scans a directory for segmentation files (identified by SOPClassUID or
    SourceImageSequence presence), extracts their referenced SOPInstanceUIDs,
    and verifies that each reference points to an existing source file in
    the same directory.

    Useful for QA of segmentation outputs ensuring every segmentation
    is linked to a valid source image with no dangling references.

    Args:
        directory: Directory containing segmentation and source DICOM files
        recursive: Search subdirectories (default True)
        response_format: Output format (markdown or json)
    """
    try:
        dir_path = Path(directory)
        # NOTE(review): unlike dicom_search, there is no is_dir() check here;
        # a file path that exists would fall through to the scan — confirm
        # _find_dicom_files handles that gracefully.
        if not dir_path.exists():
            return f"Error: Directory not found: {directory}"
        # Scan in a worker thread; 'truncated' flags a MAX_FILES cutoff.
        all_files, truncated = await asyncio.to_thread(
            _find_dicom_files, dir_path, recursive
        )
        if not all_files:
            return f"No DICOM files found in {directory}"
        # Classify files and build source UID index.
        # Every file (segmentations included) is indexed by SOPInstanceUID
        # (0008,0018) so references can be matched with a dict lookup.
        seg_files: List[Tuple[Path, pydicom.Dataset]] = []
        source_index: Dict[str, Path] = {}  # SOPInstanceUID → Path
        for fp, ds in all_files:
            sop_uid = _safe_get_tag(ds, (0x0008, 0x0018))
            if sop_uid != "N/A":
                source_index[str(sop_uid)] = fp
            if _is_segmentation(ds):
                seg_files.append((fp, ds))
        if not seg_files:
            return (
                f"No segmentation files found in {directory}. "
                f"Scanned {len(all_files)} DICOM files."
            )
        # Verify references for each segmentation: a reference "matches"
        # when its UID exists in the index built above.
        results: List[Dict[str, Any]] = []
        total_refs = 0
        matched_refs = 0
        unmatched_refs = 0
        for fp, ds in seg_files:
            seg_sop_uid = _safe_get_tag(ds, (0x0008, 0x0018))
            seg_series_desc = _safe_get_tag(ds, (0x0008, 0x103E))
            references = _extract_references(ds)
            total_refs += len(references)
            ref_results: List[Dict[str, Any]] = []
            for ref in references:
                ref_uid = ref["ReferencedSOPInstanceUID"]
                source_path = source_index.get(ref_uid)
                if source_path:
                    matched_refs += 1
                    ref_results.append(
                        {
                            "ReferencedSOPInstanceUID": ref_uid,
                            "source": ref["source"],
                            "matched": True,
                            "matched_file": source_path.name,
                        }
                    )
                else:
                    unmatched_refs += 1
                    ref_results.append(
                        {
                            "ReferencedSOPInstanceUID": ref_uid,
                            "source": ref["source"],
                            "matched": False,
                            "matched_file": None,
                        }
                    )
            results.append(
                {
                    "segmentation_file": fp.name,
                    "segmentation_sop_uid": str(seg_sop_uid),
                    "series_description": str(seg_series_desc),
                    "reference_count": len(references),
                    "references": ref_results,
                }
            )
        # Overall verdict: clean only when no reference is dangling.
        all_matched = unmatched_refs == 0
        if response_format == ResponseFormat.JSON:
            output = {
                "directory": str(dir_path),
                "total_files": len(all_files),
                "truncated": truncated,
                "segmentation_files": len(seg_files),
                "source_files": len(all_files) - len(seg_files),
                "total_references": total_refs,
                "matched_references": matched_refs,
                "unmatched_references": unmatched_refs,
                "all_matched": all_matched,
                "segmentations": results,
            }
            return json.dumps(output, indent=2)
        else:
            status = "ALL MATCHED" if all_matched else "UNMATCHED REFERENCES FOUND"
            output_lines = [
                f"# Segmentation Verification: {dir_path.name}\n",
                f"**Status**: {status}\n",
                "## Summary\n",
                f"- **Total files scanned**: {len(all_files)}",
                f"- **Segmentation files**: {len(seg_files)}",
                f"- **Source files**: {len(all_files) - len(seg_files)}",
                f"- **Total references**: {total_refs}",
                f"- **Matched**: {matched_refs}",
                f"- **Unmatched**: {unmatched_refs}",
            ]
            if truncated:
                output_lines.append(
                    "\n**Warning**: File scan was truncated. "
                    "Results may be incomplete."
                )
            # Show details for segmentations with unmatched references only;
            # fully-matched segmentations are summarized in the counts above.
            unmatched_segs = [
                r for r in results if any(not ref["matched"] for ref in r["references"])
            ]
            if unmatched_segs:
                output_lines.append("\n## Unmatched References\n")
                for seg in unmatched_segs:
                    output_lines.append(f"### {seg['segmentation_file']}")
                    output_lines.append(f"Series: {seg['series_description']}\n")
                    for ref in seg["references"]:
                        if not ref["matched"]:
                            output_lines.append(
                                f"- `{ref['ReferencedSOPInstanceUID']}` "
                                f"(from {ref['source']}) — **NOT FOUND**"
                            )
            if all_matched and seg_files:
                output_lines.append(
                    "\nAll segmentation references successfully matched "
                    "to source files."
                )
            return "\n".join(output_lines)
    except Exception as e:
        # Broad catch keeps the MCP tool contract: always return a string.
        return f"Error verifying segmentations: {str(e)}"

View File

@ -0,0 +1,310 @@
"""DICOM inversion time (TI) analysis tool for MOLLI sequences."""
import asyncio
import json
import statistics
from collections import defaultdict
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple
import pydicom
from mcp.types import ToolAnnotations
from dicom_mcp.server import mcp
from dicom_mcp.constants import ResponseFormat
from dicom_mcp.helpers.tags import _safe_get_tag, _format_markdown_table
from dicom_mcp.helpers.sequence import _identify_sequence_type
from dicom_mcp.helpers.files import _find_dicom_files
from dicom_mcp.helpers.philips import _resolve_philips_private_tag
def _extract_inversion_time(ds: pydicom.Dataset) -> Optional[float]:
    """Return the inversion time in milliseconds for ``ds``, or None.

    Lookup order (vendor-aware fallback chain):
      1. Standard InversionTime tag (0018,0082) — works for Siemens/GE.
      2. Philips private tag DD 006, offset 0x72 in group 2005 — confirmed
         at (2005,1572) on Philips Achieva dStream.
      3. Philips private tag DD 001, offset 0x20 in group 2001 — alternate
         location on some Philips software versions.

    Non-Philips files that lack a usable standard tag return None without
    probing the private-tag locations.
    """

    def _as_float(raw) -> Optional[float]:
        # Tag values may arrive as strings; unparseable ones count as absent.
        try:
            return float(raw)
        except (ValueError, TypeError):
            return None

    # 1. Standard InversionTime tag.
    standard = _safe_get_tag(ds, (0x0018, 0x0082))
    if standard != "N/A":
        parsed = _as_float(standard)
        if parsed is not None:
            return parsed

    # Private-tag fallbacks only apply to Philips data.
    vendor = _safe_get_tag(ds, (0x0008, 0x0070))
    if "philips" not in str(vendor).lower():
        return None

    # 2./3. Probe the known Philips private locations in order.
    for dd_number, offset, group in ((6, 0x72, 0x2005), (1, 0x20, 0x2001)):
        resolved, _, raw = _resolve_philips_private_tag(
            ds, dd_number=dd_number, element_offset=offset, private_group=group
        )
        if resolved is not None and raw is not None:
            parsed = _as_float(raw)
            if parsed is not None:
                return parsed
    return None
@mcp.tool(
    name="dicom_analyze_ti",
    annotations=ToolAnnotations(
        title="Analyze Inversion Times (MOLLI)",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_analyze_ti(
    directory: str,
    recursive: bool = True,
    gap_threshold: float = 2500.0,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Analyze inversion times from MOLLI/T1-mapping sequences.

    Extracts TI values across all T1 mapping files in a directory, handling
    manufacturer-specific differences (Siemens/GE use standard tags, Philips
    stores TI in private tags). Groups by series, sorts by instance number,
    and computes statistics including gap analysis.

    Automatically handles:
    - Siemens/GE: standard InversionTime tag (0018,0082)
    - Philips: private tag (2005,xx72) via DD 006 block resolution

    Args:
        directory: Directory containing MOLLI/T1 mapping DICOM files
        recursive: Search subdirectories (default True)
        gap_threshold: Flag gaps between consecutive TIs exceeding this
            value in ms (default 2500.0)
        response_format: Output format (markdown or json)
    """
    try:
        dir_path = Path(directory)
        if not dir_path.exists():
            return f"Error: Directory not found: {directory}"
        # Scan in a worker thread; 'truncated' flags a MAX_FILES cutoff.
        all_files, truncated = await asyncio.to_thread(
            _find_dicom_files, dir_path, recursive
        )
        if not all_files:
            return f"No DICOM files found in {directory}"
        # Filter to T1 mapping / MOLLI files.
        ti_files: List[Tuple[Path, pydicom.Dataset]] = []
        for fp, ds in all_files:
            seq_type = _identify_sequence_type(ds)
            if seq_type.value == "t1_mapping":
                ti_files.append((fp, ds))
                continue
            # Also catch files with MOLLI in the description that might
            # not be detected as t1_mapping by the heuristic
            desc = str(_safe_get_tag(ds, (0x0008, 0x103E))).upper()
            if "MOLLI" in desc:
                ti_files.append((fp, ds))
        if not ti_files:
            return (
                f"No T1 mapping or MOLLI sequences found in {directory}. "
                f"Scanned {len(all_files)} DICOM files."
            )
        # Group by (SeriesNumber, SeriesDescription) and extract TI for
        # each file; _extract_inversion_time handles the vendor fallbacks.
        series_data: Dict[Tuple[str, str], List[Dict[str, Any]]] = defaultdict(list)
        for fp, ds in ti_files:
            series_num = str(_safe_get_tag(ds, (0x0020, 0x0011)))
            series_desc = str(_safe_get_tag(ds, (0x0008, 0x103E)))
            instance_num = _safe_get_tag(ds, (0x0020, 0x0013))
            manufacturer = str(_safe_get_tag(ds, (0x0008, 0x0070)))
            ti_value = _extract_inversion_time(ds)
            try:
                inst_num = int(instance_num)
            except (ValueError, TypeError):
                # Unparseable instance numbers sort first (as 0).
                inst_num = 0
            series_data[(series_num, series_desc)].append(
                {
                    "file": fp.name,
                    "instance_number": inst_num,
                    "inversion_time": ti_value,
                    "manufacturer": manufacturer,
                }
            )
        # Analyze each series independently.
        series_results: List[Dict[str, Any]] = []
        for (series_num, series_desc), files in sorted(series_data.items()):
            # Sort by instance number
            files.sort(key=lambda x: x["instance_number"])
            # Partition TIs: zero values are treated separately (likely
            # derived output maps), None means no TI tag could be found.
            all_tis = [f["inversion_time"] for f in files]
            non_zero_tis = [ti for ti in all_tis if ti is not None and ti > 0]
            zero_tis = [ti for ti in all_tis if ti is not None and ti == 0]
            none_tis = [ti for ti in all_tis if ti is None]
            # Compute statistics on non-zero TIs
            stats: Dict[str, Any] = {
                "total_files": len(files),
                "ti_found": len(all_tis) - len(none_tis),
                "ti_not_found": len(none_tis),
                "zero_ti_count": len(zero_tis),
                "non_zero_ti_count": len(non_zero_tis),
            }
            if non_zero_tis:
                sorted_tis = sorted(non_zero_tis)
                stats["ti_min"] = min(non_zero_tis)
                stats["ti_max"] = max(non_zero_tis)
                stats["ti_range"] = max(non_zero_tis) - min(non_zero_tis)
                stats["ti_mean"] = statistics.mean(non_zero_tis)
                stats["ti_median"] = statistics.median(non_zero_tis)
                # Compute consecutive gaps (based on sorted order, not
                # acquisition order) and flag any exceeding gap_threshold.
                gaps = [
                    sorted_tis[i + 1] - sorted_tis[i]
                    for i in range(len(sorted_tis) - 1)
                ]
                if gaps:
                    stats["max_gap"] = max(gaps)
                    stats["gap_threshold_exceeded"] = max(gaps) > gap_threshold
                    stats["gaps"] = gaps
                else:
                    stats["max_gap"] = 0.0
                    stats["gap_threshold_exceeded"] = False
                    stats["gaps"] = []
            # Manufacturer is taken from the first file in the series.
            manufacturer = files[0]["manufacturer"] if files else "Unknown"
            ordered_tis = [
                {
                    "instance": f["instance_number"],
                    "ti": f["inversion_time"],
                    "file": f["file"],
                }
                for f in files
            ]
            series_results.append(
                {
                    "series_number": series_num,
                    "series_description": series_desc,
                    "manufacturer": manufacturer,
                    "statistics": stats,
                    "ordered_inversion_times": ordered_tis,
                }
            )
        if response_format == ResponseFormat.JSON:
            output = {
                "directory": str(dir_path),
                "total_files_scanned": len(all_files),
                "truncated": truncated,
                "t1_mapping_files": len(ti_files),
                "series_count": len(series_results),
                "gap_threshold": gap_threshold,
                "series": series_results,
            }
            return json.dumps(output, indent=2)
        else:
            output_lines = [
                f"# Inversion Time Analysis: {dir_path.name}\n",
                f"**Files scanned**: {len(all_files)}",
                f"**T1 mapping files**: {len(ti_files)}",
                f"**Series found**: {len(series_results)}",
                f"**Gap threshold**: {gap_threshold} ms",
            ]
            if truncated:
                output_lines.append(
                    "\n**Warning**: File scan was truncated. "
                    "Results may be incomplete."
                )
            for sr in series_results:
                stats = sr["statistics"]
                output_lines.append(
                    f"\n## Series {sr['series_number']}: "
                    f"{sr['series_description']}\n"
                )
                output_lines.append(f"**Manufacturer**: {sr['manufacturer']}")
                output_lines.append(f"**Total files**: {stats['total_files']}")
                if stats["ti_not_found"] > 0:
                    output_lines.append(
                        f"**TI not found**: {stats['ti_not_found']} files "
                        "(no standard or private TI tag)"
                    )
                output_lines.append(f"**Non-zero TIs**: {stats['non_zero_ti_count']}")
                output_lines.append(
                    f"**Zero TIs**: {stats['zero_ti_count']} " "(likely output maps)"
                )
                # Min/max/mean/median only exist when non-zero TIs were found.
                if stats.get("ti_min") is not None:
                    output_lines.append(
                        f"\n**TI range**: {stats['ti_min']:.1f} - "
                        f"{stats['ti_max']:.1f} ms "
                        f"(range: {stats['ti_range']:.1f} ms)"
                    )
                    output_lines.append(
                        f"**Mean**: {stats['ti_mean']:.1f} ms | "
                        f"**Median**: {stats['ti_median']:.1f} ms"
                    )
                    if stats.get("max_gap", 0) > 0:
                        gap_warning = (
                            " **WARNING: exceeds threshold**"
                            if stats.get("gap_threshold_exceeded")
                            else ""
                        )
                        output_lines.append(
                            f"**Largest gap**: {stats['max_gap']:.1f} ms"
                            f"{gap_warning}"
                        )
                # Ordered TI table (acquisition order by instance number).
                output_lines.append("\n### Inversion Times (ordered)\n")
                headers = ["Instance", "TI (ms)", "File"]
                rows = []
                for ti_entry in sr["ordered_inversion_times"]:
                    ti_display = (
                        f"{ti_entry['ti']:.1f}" if ti_entry["ti"] is not None else "N/A"
                    )
                    rows.append(
                        [
                            str(ti_entry["instance"]),
                            ti_display,
                            ti_entry["file"],
                        ]
                    )
                output_lines.append(_format_markdown_table(headers, rows))
            return "\n".join(output_lines)
    except Exception as e:
        # Broad catch keeps the MCP tool contract: always return a string.
        return f"Error analyzing inversion times: {str(e)}"

View File

@ -0,0 +1,84 @@
"""DICOM tree dump tool."""
import asyncio
import json
from pathlib import Path
import pydicom
from pydicom.errors import InvalidDicomError
from mcp.types import ToolAnnotations
from dicom_mcp.server import mcp
from dicom_mcp.constants import ResponseFormat
from dicom_mcp.helpers.tree import _build_tree_text, _build_tree_json
@mcp.tool(
    name="dicom_dump_tree",
    annotations=ToolAnnotations(
        title="Dump DICOM Tree Structure",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_dump_tree(
    file_path: str,
    max_depth: int = 10,
    show_private: bool = True,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Display the full hierarchical structure of a DICOM file.

    Recursively traverses the DICOM dataset including nested sequences,
    producing an indented tree view. Useful for deep inspection of DICOM
    structure, discovering nested sequence contents, and understanding
    how data is organized across different manufacturers.

    Args:
        file_path: Path to a DICOM file
        max_depth: Maximum recursion depth for nested sequences (default 10)
        show_private: Include private tags in the output (default True)
        response_format: Output format (markdown or json)
    """
    try:
        fp = Path(file_path)
        if not fp.exists():
            return f"Error: File not found: {file_path}"
        # Read header only (stop_before_pixels) in a worker thread so the
        # event loop is not blocked by disk I/O.
        ds = await asyncio.to_thread(pydicom.dcmread, fp, stop_before_pixels=True)
        if response_format == ResponseFormat.JSON:
            tree = _build_tree_json(
                ds,
                max_depth=max_depth,
                show_private=show_private,
            )
            result = {
                "file": fp.name,
                # Top-level element count, excluding PixelData (7FE0,0010).
                "element_count": len([e for e in ds if e.tag != (0x7FE0, 0x0010)]),
                "max_depth": max_depth,
                "show_private": show_private,
                "tree": tree,
            }
            return json.dumps(result, indent=2)
        else:
            lines = _build_tree_text(
                ds,
                max_depth=max_depth,
                show_private=show_private,
            )
            # Tree body is wrapped in a fenced code block to preserve the
            # indentation in markdown renderers.
            header = [
                f"# DICOM Tree: {fp.name}\n",
                f"**Max depth**: {max_depth}",
                f"**Show private tags**: {show_private}\n",
                "```",
            ]
            footer = ["```"]
            return "\n".join(header + lines + footer)
    except InvalidDicomError:
        return f"Error: Not a valid DICOM file: {file_path}"
    except Exception as e:
        # Broad catch keeps the MCP tool contract: always return a string.
        return f"Error dumping DICOM tree: {str(e)}"

View File

@ -0,0 +1,149 @@
"""DICOM UID comparison tool."""
import asyncio
import json
from pathlib import Path
from mcp.types import ToolAnnotations
from dicom_mcp.server import mcp
from dicom_mcp.constants import ResponseFormat
from dicom_mcp.helpers.tags import _safe_get_tag, _resolve_tag
from dicom_mcp.helpers.files import _find_dicom_files
@mcp.tool(
    name="dicom_compare_uids",
    annotations=ToolAnnotations(
        title="Compare DICOM UIDs Between Directories",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_compare_uids(
    directory1: str,
    directory2: str,
    recursive: bool = True,
    compare_tag: str = "SeriesInstanceUID",
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Compare UID sets between two DICOM directories.

    Extracts a specified UID tag from all files in both directories and
    performs set comparison to identify shared, missing, and extra UIDs.
    Useful for verifying that processed outputs contain all source series,
    or that two datasets are aligned.

    Args:
        directory1: First directory (the "reference" set)
        directory2: Second directory (the "comparison" set)
        recursive: Search subdirectories (default True)
        compare_tag: DICOM tag keyword or hex code to compare
            (default "SeriesInstanceUID"). Supports keywords
            like "StudyInstanceUID" or hex codes like "0020,000E".
        response_format: Output format (markdown or json)
    """
    try:
        dir1 = Path(directory1)
        dir2 = Path(directory2)
        if not dir1.exists():
            return f"Error: Directory not found: {directory1}"
        if not dir2.exists():
            return f"Error: Directory not found: {directory2}"
        # Resolve the tag specification (keyword or hex) to a tag tuple.
        tag_tuple, tag_name = _resolve_tag(compare_tag)
        if tag_tuple is None:
            return (
                f"Error: Could not resolve tag '{compare_tag}'. "
                "Use a keyword like 'SeriesInstanceUID' or hex code like '0020,000E'."
            )
        # Read files from both directories (worker thread per directory;
        # each scan independently reports a MAX_FILES truncation flag).
        files1, truncated1 = await asyncio.to_thread(_find_dicom_files, dir1, recursive)
        files2, truncated2 = await asyncio.to_thread(_find_dicom_files, dir2, recursive)
        if not files1:
            return f"No DICOM files found in {directory1}"
        if not files2:
            return f"No DICOM files found in {directory2}"
        # Extract UIDs from each directory; files missing the tag ("N/A")
        # are simply skipped rather than counted as a distinct value.
        uids1 = set()
        for _, ds in files1:
            val = _safe_get_tag(ds, tag_tuple)
            if val != "N/A":
                uids1.add(str(val))
        uids2 = set()
        for _, ds in files2:
            val = _safe_get_tag(ds, tag_tuple)
            if val != "N/A":
                uids2.add(str(val))
        # Set comparison; sorted for deterministic output.
        shared = sorted(uids1 & uids2)
        only_in_first = sorted(uids1 - uids2)
        only_in_second = sorted(uids2 - uids1)
        # "Match" means the two UID sets are identical.
        match = len(only_in_first) == 0 and len(only_in_second) == 0
        result = {
            "compare_tag": tag_name,
            "directory1": str(dir1),
            "directory2": str(dir2),
            "directory1_files": len(files1),
            "directory2_files": len(files2),
            "directory1_truncated": truncated1,
            "directory2_truncated": truncated2,
            "unique_in_directory1": len(uids1),
            "unique_in_directory2": len(uids2),
            "shared": len(shared),
            "only_in_directory1": len(only_in_first),
            "only_in_directory2": len(only_in_second),
            "match": match,
            "shared_values": shared,
            "only_in_directory1_values": only_in_first,
            "only_in_directory2_values": only_in_second,
        }
        if response_format == ResponseFormat.JSON:
            return json.dumps(result, indent=2)
        else:
            status = "MATCH" if match else "MISMATCH"
            output = [
                f"# UID Comparison: {tag_name}\n",
                f"**Status**: {status}\n",
                "## Summary\n",
                "| | Directory 1 | Directory 2 |",
                "|--|------------|------------|",
                f"| **Path** | {dir1.name} | {dir2.name} |",
                f"| **Files scanned** | {len(files1)} | {len(files2)} |",
                f"| **Unique {tag_name}s** | {len(uids1)} | {len(uids2)} |",
                "",
                f"- **Shared**: {len(shared)}",
                f"- **Only in directory 1**: {len(only_in_first)}",
                f"- **Only in directory 2**: {len(only_in_second)}",
            ]
            if truncated1 or truncated2:
                output.append(
                    "\n**Warning**: File scan was truncated in one or both "
                    "directories. Results may be incomplete."
                )
            # Only the asymmetric differences are listed in full; shared
            # UIDs appear only as a count in markdown output.
            if only_in_first:
                output.append(f"\n## Only in {dir1.name}\n")
                for uid in only_in_first:
                    output.append(f"- `{uid}`")
            if only_in_second:
                output.append(f"\n## Only in {dir2.name}\n")
                for uid in only_in_second:
                    output.append(f"- `{uid}`")
            return "\n".join(output)
    except Exception as e:
        # Broad catch keeps the MCP tool contract: always return a string.
        return f"Error comparing UIDs: {str(e)}"

View File

@ -0,0 +1,254 @@
"""DICOM sequence validation and series analysis tools."""
import asyncio
import json
from pathlib import Path
from typing import Any, Dict, Optional
from pydicom import datadict
from mcp.types import ToolAnnotations
from dicom_mcp.server import mcp
from dicom_mcp.config import MAX_FILES
from dicom_mcp.constants import ResponseFormat
from dicom_mcp.helpers.tags import _safe_get_tag, _format_markdown_table
from dicom_mcp.helpers.sequence import _identify_sequence_type
from dicom_mcp.helpers.files import _find_dicom_files
@mcp.tool(
    name="dicom_validate_sequence",
    annotations=ToolAnnotations(
        title="Validate Sequence Parameters",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_validate_sequence(
    file_path: str,
    expected_parameters: Optional[Dict[str, Any]] = None,
    manufacturer: Optional[str] = None,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Validate MRI sequence parameters against expected values.

    Validates acquisition parameters (TR, TE, flip angle, etc.) against
    expected values. Useful for verifying that test data matches protocol
    specifications or that sequences are consistent across manufacturers.

    Args:
        file_path: Path to the DICOM file to validate
        expected_parameters: Mapping of parameter keyword to expected value
            (e.g. {"EchoTime": "10"}); compared as strings
        manufacturer: Expected manufacturer (case-insensitive substring match)
        response_format: Output format (markdown or json)
    """
    try:
        fp = Path(file_path)
        if not fp.exists():
            return f"Error: File not found: {file_path}"
        # NOTE(review): pydicom is imported locally here, unlike the sibling
        # tool modules which import it at module level — consider hoisting.
        import pydicom

        ds = await asyncio.to_thread(pydicom.dcmread, fp, stop_before_pixels=True)
        sequence_type = _identify_sequence_type(ds)
        actual_manufacturer = _safe_get_tag(ds, (0x0008, 0x0070))
        # Result skeleton; 'validation_passed' flips to False on any mismatch.
        validation_results = {
            "file_path": str(fp),
            "sequence_type": sequence_type.value,
            "manufacturer": actual_manufacturer,
            "parameters": {},
            "validation_passed": True,
        }
        if manufacturer:
            # Case-insensitive substring match against the actual value.
            manufacturer_match = manufacturer.lower() in actual_manufacturer.lower()
            validation_results["manufacturer_match"] = manufacturer_match
            if not manufacturer_match:
                validation_results["validation_passed"] = False
        if expected_parameters:
            # Only these keywords are recognized for validation.
            param_tag_map = {
                "RepetitionTime": (0x0018, 0x0080),
                "EchoTime": (0x0018, 0x0081),
                "InversionTime": (0x0018, 0x0082),
                "FlipAngle": (0x0018, 0x1314),
                "ScanningSequence": (0x0018, 0x0020),
                "SequenceVariant": (0x0018, 0x0021),
                "MRAcquisitionType": (0x0018, 0x0023),
            }
            # NOTE(review): parameter names not in param_tag_map are silently
            # ignored — a typo in expected_parameters will not fail validation.
            for param_name, expected_value in expected_parameters.items():
                if param_name in param_tag_map:
                    tag = param_tag_map[param_name]
                    actual_value = _safe_get_tag(ds, tag)
                    # String comparison: "10.0" != "10", so expected values
                    # must match the stored representation exactly.
                    match = str(actual_value) == str(expected_value)
                    validation_results["parameters"][param_name] = {
                        "actual": actual_value,
                        "expected": str(expected_value),
                        "match": match,
                    }
                    if not match:
                        validation_results["validation_passed"] = False
        if response_format == ResponseFormat.JSON:
            return json.dumps(validation_results, indent=2)
        else:
            output = [f"# Sequence Validation: {fp.name}\n"]
            status = "PASSED" if validation_results["validation_passed"] else "FAILED"
            output.append(f"**Validation Status**: {status}\n")
            output.append("## Sequence Information")
            output.append(f"- **Sequence Type**: {sequence_type.value}")
            output.append(f"- **Manufacturer**: {actual_manufacturer}")
            if manufacturer:
                match_status = (
                    "Match"
                    if validation_results.get("manufacturer_match", True)
                    else "Mismatch"
                )
                output.append(
                    f"- **Expected Manufacturer**: {manufacturer} ({match_status})"
                )
            output.append("")
            if validation_results["parameters"]:
                output.append("## Parameter Validation\n")
                headers = ["Parameter", "Actual", "Expected", "Status"]
                rows = []
                for pname, data in validation_results["parameters"].items():
                    s = "Match" if data["match"] else "Mismatch"
                    rows.append([pname, data["actual"], data["expected"], s])
                output.append(_format_markdown_table(headers, rows))
            return "\n".join(output)
    except Exception as e:
        # Broad catch keeps the MCP tool contract: always return a string.
        return f"Error validating sequence: {str(e)}"
@mcp.tool(
    name="dicom_analyze_series",
    annotations=ToolAnnotations(
        title="Analyze DICOM Series",
        readOnlyHint=True,
        destructiveHint=False,
        idempotentHint=True,
        openWorldHint=False,
    ),
)
async def dicom_analyze_series(
    directory: str,
    response_format: ResponseFormat = ResponseFormat.MARKDOWN,
) -> str:
    """Analyze a complete DICOM series for consistency and completeness.

    Analyzes all files in a series directory, checking for consistency of
    acquisition parameters, completeness of the series, and identifying
    any anomalies. Useful for validating test datasets.

    Args:
        directory: Path to the directory containing the series files.
        response_format: ``ResponseFormat.MARKDOWN`` (default) or
            ``ResponseFormat.JSON``.

    Returns:
        A markdown report or JSON document with series information, a
        parameter-consistency check, and a completeness check; or a plain
        error string if the directory is missing, empty, or unreadable.
    """
    try:
        dir_path = Path(directory)
        if not dir_path.exists():
            return f"Error: Directory not found: {directory}"
        # File discovery does blocking disk I/O; run it off the event loop.
        dicom_files, truncated = await asyncio.to_thread(
            _find_dicom_files, dir_path, False
        )
        if not dicom_files:
            return f"No DICOM files found in {directory}"
        # Series-level metadata is taken from the first file found;
        # consistency of the rest is verified below.
        first_fp, first_ds = dicom_files[0]
        series_info = {
            "series_description": _safe_get_tag(first_ds, (0x0008, 0x103E)),
            "series_number": _safe_get_tag(first_ds, (0x0020, 0x0011)),
            "series_uid": _safe_get_tag(first_ds, (0x0020, 0x000E)),
            "modality": _safe_get_tag(first_ds, (0x0008, 0x0060)),
            "manufacturer": _safe_get_tag(first_ds, (0x0008, 0x0070)),
            "sequence_type": _identify_sequence_type(first_ds).value,
        }
        consistency_issues = []
        params_to_check = [
            (0x0018, 0x0080),  # RepetitionTime
            (0x0018, 0x0081),  # EchoTime
            (0x0020, 0x000E),  # SeriesInstanceUID
            (0x0028, 0x0010),  # Rows
            (0x0028, 0x0011),  # Columns
        ]
        for tag in params_to_check:
            tag_name = datadict.keyword_for_tag(tag)
            # Stringify before deduplicating: multi-valued DICOM elements
            # (pydicom MultiValue) are unhashable and would raise TypeError
            # inside set(); str() also matches how values are reported.
            values = set(str(_safe_get_tag(ds, tag)) for _, ds in dicom_files)
            if len(values) > 1:
                # Sort so the report is deterministic across runs
                # (set iteration order is not).
                consistency_issues.append(
                    f"{tag_name} varies across series: {', '.join(sorted(values))}"
                )
        # Completeness: collect InstanceNumber (0020,0013) from every file,
        # ignoring files where it is absent or non-numeric.
        instance_numbers = []
        for _, ds in dicom_files:
            try:
                inst_num = int(_safe_get_tag(ds, (0x0020, 0x0013)))
                instance_numbers.append(inst_num)
            except (ValueError, TypeError):
                pass
        if instance_numbers:
            instance_numbers.sort()
            # Assumes instance numbering starts at 1 and runs contiguously
            # to the maximum observed number.
            expected_instances = max(instance_numbers)
            expected_set = set(range(1, expected_instances + 1))
            found_set = set(instance_numbers)
            missing = sorted(list(expected_set - found_set))
            complete = len(missing) == 0
        else:
            expected_instances = 0
            missing = []
            complete = False
            consistency_issues.append("Could not determine instance numbers")
        result = {
            "series_info": series_info,
            "file_count": len(dicom_files),
            "truncated": truncated,
            "consistency_check": {
                "consistent_parameters": len(consistency_issues) == 0,
                "issues": consistency_issues,
            },
            "completeness": {
                "expected_instances": expected_instances,
                "found_instances": len(instance_numbers),
                "missing": missing,
                "complete": complete,
            },
        }
        if response_format == ResponseFormat.JSON:
            return json.dumps(result, indent=2)
        else:
            output = [f"# Series Analysis: {dir_path.name}\n", "## Series Information"]
            for key, value in series_info.items():
                output.append(f"- **{key.replace('_', ' ').title()}**: {value}")
            output.append(f"- **Total Files**: {len(dicom_files)}")
            if truncated:
                output.append(f"- **Warning**: Scan truncated at {MAX_FILES} files")
            output.append("")
            output.append("## Consistency Check")
            if result["consistency_check"]["consistent_parameters"]:
                output.append("All parameters are consistent across the series")
            else:
                output.append("Inconsistencies detected:")
                for issue in result["consistency_check"]["issues"]:
                    output.append(f"  - {issue}")
            output.append("")
            output.append("## Completeness Check")
            if result["completeness"]["complete"]:
                output.append(f"Series is complete ({expected_instances} instances)")
            else:
                output.append("Series may be incomplete")
                output.append(
                    f"- Expected: {result['completeness']['expected_instances']} instances"
                )
                output.append(
                    f"- Found: {result['completeness']['found_instances']} instances"
                )
                if missing:
                    output.append(f"- Missing: {', '.join(str(m) for m in missing)}")
            return "\n".join(output)
    except Exception as e:
        return f"Error analyzing series: {str(e)}"

View File

@ -0,0 +1,25 @@
# Claude DICOM-MCP Capabilities List
| # | What I can do | Module | Possible without the MCP? |
| --- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ----------------------------- |---------------------------------------------------------------------------------------------------|
| 1 | **Find DICOM files** in a folder, including subfolders, and tell you what's in them (series, sequence type, manufacturer). Can also just give a count. | `dicom_list_files` | ❌ No — I could list files with bash, but I couldn't read DICOM headers to identify what they are. |
| 2 | **Find Dixon sequences** in a folder and identify which images are water, fat, in-phase, and out-phase. | `dicom_find_dixon_series` | ❌ No — requires both DICOM parsing and domain-specific sequence identification logic. |
| 3 | **Read the metadata** (headers) from a single DICOM file — patient info, study details, scanner settings, geometry, etc. Can also resolve Philips private tags. | `dicom_get_metadata` | ❌ No — DICOM is a binary format. I can't read it with standard text tools. |
| 4 | **Compare headers side-by-side** across 2–10 DICOM files, highlighting what's different. | `dicom_compare_headers` | ❌ No — same reason; I can't parse the binary headers. |
| 5 | **Query any DICOM tag** across all files in a directory, optionally grouped by another tag (e.g. "show me all echo times, grouped by series"). | `dicom_query` | ❌ No. |
| 6 | **Summarise a whole directory** — give a high-level overview of what vendors, scanners, sequences, and series are present, with file counts. | `dicom_summarize_directory` | ❌ No. |
| 7 | **Validate scan parameters** (TR, TE, flip angle, etc.) against expected values for a given sequence. | `dicom_validate_sequence` | ❌ No — can't extract the parameters to validate. |
| 8 | **Analyse a full series** for consistency — check that all files in a series have matching parameters and that the series is complete. | `dicom_analyze_series` | ❌ No. |
| 9 | **Search for files** matching specific criteria using a filter language — text matching, numeric comparisons, or presence checks on any tag. | `dicom_search` | ❌ No. |
| 10 | **Read Philips private tags** by DD number and element offset, or list all private creator blocks in a file. | `dicom_query_philips_private` | ❌ No — these are vendor-specific proprietary tags buried in private DICOM groups. |
| 11 | **Extract pixel statistics** from an image — min, max, mean, standard deviation, percentiles, and optionally a histogram. Can focus on a specific rectangular region. | `dicom_read_pixels` | ❌ No — pixel data is encoded in the DICOM binary. |
| 12 | **Compute signal-to-noise ratio** by placing two regions of interest (signal and noise) on an image. | `dicom_compute_snr` | ❌ No. |
| 13 | **Render a DICOM image to PNG** with adjustable windowing and optional ROI overlays — this is how I showed you those three images. | `dicom_render_image` | ❌ No — I have no way to decode DICOM pixel data and produce a viewable image. |
| 14 | **Dump the full DICOM tree** — show the entire hierarchical structure of a file including nested sequences, with configurable depth. | `dicom_dump_tree` | ❌ No. |
| 15 | **Compare UID sets** between two directories — find which series/studies/instances are shared, missing, or extra. | `dicom_compare_uids` | ❌ No. |
| 16 | **Verify segmentation references** — check that segmentation DICOM files correctly point back to their source images. | `dicom_verify_segmentations` | ❌ No. |
| 17 | **Analyse inversion times** from MOLLI/NOLLI T1 mapping sequences — extract TI values across vendors (handling Philips private tags automatically), flag gaps, and show the acquisition order. | `dicom_analyze_ti` | ❌ No. |
All 17 modules live under `dicom_mcp/tools/` and are registered via the `@mcp.tool()` decorator at server startup. **In short: none of these capabilities exist without the MCP.** DICOM is a complex binary medical imaging format. Without the MCP, I can see that `.dcm` files exist, but I can't open, read, interpret, or visualise any of them. The MCP turns me from "completely DICOM-blind" into something that can meaningfully inspect, compare, and analyse medical imaging data across vendors.

View File

@ -0,0 +1,51 @@
# Behavioural Guidelines
## Purpose
This document defines the behavioural constraints for LLMs using the DICOM MCP server. These constraints exist to ensure that the MCP operates strictly as a **data inspection and QA tool**, and does not drift into clinical interpretation or diagnostic guidance.
## Regulatory Context
This MCP is intended for use with Software as a Medical Device (SaMD) workflows. When an LLM analyses DICOM data, there is a risk that its responses may cross the boundary from technical data reporting into clinical advice. Statements such as "this data would be useful for diagnosing condition X" or "these acquisition parameters are inadequate for clinical purpose Y" constitute clinical guidance, which carries serious regulatory implications.
To mitigate this risk, the MCP enforces a clear boundary: **report and describe, never advise or interpret clinically**.
## Constraints
### What the LLM should do
1. **Report** what is observed in the DICOM data — tag values, pixel statistics, series counts, parameter values, file counts, and structural relationships.
2. **Describe** technical characteristics — acquisition parameters, sequence types, vendor differences, file organisation, UID relationships, and image properties.
3. **Compare** data across files, series, vendors, or directories when asked — presenting the differences factually.
4. **Flag** technical anomalies such as missing files, inconsistent parameters, broken references, or unexpected tag values.
### What the LLM must not do
1. **Do not** suggest clinical utility or diagnostic applications for the data (e.g. "this sequence is useful for assessing liver fat").
2. **Do not** interpret findings in a clinical or diagnostic context (e.g. "the T1 values suggest abnormal tissue").
3. **Do not** assess data quality relative to specific clinical use cases (e.g. "this data is adequate for a clinical report").
4. **Do not** recommend clinical actions based on the data (e.g. "you should re-scan this patient" or "this study should be flagged for review").
5. **Do not** offer guidance on clinical workflows or protocols (e.g. "this type of series is typically used before treatment planning").
6. **Do not** make assertions about what a clinician, radiologist, or pathologist should conclude from the data.
### The principle
> Present data as-is. Qualified professionals draw the conclusions.
The MCP is a lens, not an adviser. It makes DICOM data visible and navigable. It does not tell the user what the data means in a clinical context.
## Enforcement
These constraints are enforced at three levels:
| Layer | Mechanism | Audience |
|-------|-----------|----------|
| **Protocol** | `FastMCP(instructions=...)` in `server.py` | Any MCP client (Claude, ChatGPT, Cursor, etc.) |
| **Session** | `CLAUDE.md` Behavioural Constraints section | Claude Code sessions |
| **Documentation** | This file (`docs/GUIDELINES.md`) | Developers, reviewers, auditors |
The protocol-level instructions are the primary enforcement mechanism. They are sent to any MCP-compliant client at connection time and apply to all tool interactions. The CLAUDE.md section reinforces these constraints for Claude Code users. This document serves as the authoritative human-readable reference.
## Scope
These constraints apply to the LLM's behaviour when using any of the 17 DICOM MCP tools. They do not restrict the tools themselves — the tools return raw data. The constraints govern how the LLM frames, contextualises, and presents that data to the user.

View File

@ -0,0 +1,95 @@
# DICOM MCP Server — TODO
Two remaining items. The original four-item enhancement plan (2026-02-11) has been completed — `dicom_query` and `dicom_search` were implemented, along with four additional tools (`dicom_dump_tree`, `dicom_compare_uids`, `dicom_verify_segmentations`, `dicom_analyze_ti`). The items below are follow-on improvements identified during testing.
---
## 1. Private Tag Exploration Tool (`dicom_private_tags`)
**Priority:** High — this is the trickiest to get right but the most valuable for cross-manufacturer QA.
### Problem
Manufacturers store critical acquisition parameters in private (vendor-specific) DICOM tags
rather than standard public tags. This causes real issues in multi-vendor QA workflows:
- **Philips TE=0 quirk:** The Erasmus Achieva 1.5T dataset shows `EchoTime = 0` for several
Dixon series (Body mDixon THRIVE, Thigh Dixon Volume). The actual multi-point Dixon echo
times are stored in Philips private tags, not the standard `(0018,0081)` EchoTime field.
This was confirmed via `dicom_query` grouped by SeriesDescription on the Erasmus dataset —
328 of 648 Thigh Dixon files and 100 of 157 mDixon THRIVE files report TE=0.
- **Manufacturer encoding differences:** Siemens stores echo times in public tags normally
(e.g. MOST series TEs of 2.38–19.06 ms on Avanto_fit). Philips MOST TEs (2.371–18.961 ms)
are in public tags too, but Dixon TEs are hidden in private tags. GE embeds Dixon image type
info in `ImageType` fields rather than series descriptions.
- **Nested sequences and binary blobs:** Philips private tags frequently contain nested
DICOM sequences, and some values are only interpretable if you know the specific software
version. Binary data needs special handling to avoid dumping unreadable content.
### Discussion Notes
From our initial conversation, we decided **not** to implement this tool immediately because:
1. Deciphering some private tags requires conditional logic based on the contents of certain
public tags (or other private tags). The exact rules are manufacturer-specific and need to
be rediscovered through hands-on exploration.
2. Building the wrong abstraction would be worse than no abstraction — we need to tinker with
real data first before committing to a tool design.
### Proposed Design (Single tool with three modes)
**`discover` mode** — Scan a file and list all private tag blocks with their creator strings.
Answers "what vendor modules are present?" Output: group number, creator string, tag count per block.
**`dump` mode** — Show all private tags within a specific creator block (or all private tags in a file).
For each tag: hex address, creator, VR, value. Binary values show first N bytes as hex + length.
Nested sequences show item count with optional one-level-deep recursion.
**`search` mode** — Scan across a directory looking for private tags matching a keyword in either
the creator string or the tag value. Useful for hunting down where manufacturers hide specific
parameters (e.g. "find any private tag with 'echo' in the creator or value").
### Additional Considerations
- **Creator filtering:** Filter by creator substring, e.g. `creator="Philips"` to only see Philips blocks.
- **Known tag dictionaries:** Embed a small lookup table for commonly useful private tags
(e.g. Philips `(2005,xx10)` for actual echo times). Start without this and add later.
- **Binary value display:** Show first 64 bytes as hex + total length, rather than full dumps.
### Suggested Next Steps
1. Start by exploring the Erasmus Philips data with `dicom_get_metadata` using custom hex tags
to see what private blocks exist and specifically chase down the TE=0 mystery.
2. Do the same on Siemens and GE data to understand the differences.
3. Once the patterns and conditional logic are clear, design the tool around real use cases.
---
## 2. `dicom_compare_headers` Directory Mode
**Priority:** Medium — useful for cross-series protocol checks but less urgent than private tags.
### Problem
`dicom_compare_headers` currently requires 2–10 explicit file paths. For cross-series protocol
validation (e.g. "are all MOST series using the same TR/FA across a study?"), you have to
manually pick representative files from each series first.
### Proposed Enhancement
Add a **directory mode** that automatically picks one representative file per series and compares
them. This would enable single-call cross-series protocol checks.
### Design Ideas
- New parameter: `directory` as an alternative to `file_paths`
- Auto-select one file per unique SeriesInstanceUID (first file encountered, or configurable)
- Reuse existing comparison logic
- Show series description in output to identify which series each column represents
- Optionally filter which series to include (by description pattern or sequence type)
---
*Last updated: 2026-02-25 — after adding 4 new tools (dicom_dump_tree, dicom_compare_uids,
dicom_verify_segmentations, dicom_analyze_ti) and smoke testing against Philips, Siemens, and
GE MOLLI/NOLLI data.*

View File

@ -0,0 +1,664 @@
# DICOM MCP Server — Usage Guide
Detailed usage examples, tool reference, and QA workflow patterns for the DICOM MCP server.
For installation and setup, see [INSTALL.md](../INSTALL.md). For project overview, see [README.md](../README.md).
> **Tip: Pair with a filesystem MCP server for maximum capability.** The DICOM MCP server works well on its own for metadata inspection and validation. However, if Claude also has access to a filesystem MCP server with media reading support (e.g. the `read_media_file` tool), it can *view* rendered images directly, enabling an interactive visual feedback loop — render an image, inspect it, adjust ROI placement, measure, iterate — all within a single conversation. Without a filesystem MCP, Claude can still render images and save them to disk, but you'll need to open them yourself in a viewer and describe what you see.
---
## Quick Examples
### Finding test data
```
List all Dixon sequences in my test data directory
```
The server will use `dicom_find_dixon_series` to locate all Dixon sequences and show what image types are available.
### Comparing fat/water selection
```
Compare the headers of these two files to see which Dixon images were selected:
- /data/test01/water.dcm
- /data/test01/fat.dcm
```
### Validating a protocol
```
Validate that /data/scan01/image001.dcm matches our Siemens protocol:
- TR should be 4.5
- TE should be 2.3
- Manufacturer should be Siemens
```
### Quick directory overview
```
Summarize what's in the /data/large_study directory
```
### Finding specific files
```
Find all files in /data/study where the series description contains "MOST"
and echo time is greater than 10
```
### Rendering an image
```
Render /data/liver_scan/slice_005.dcm with auto-windowing
```
### Measuring SNR
```
Compute the SNR on /data/scan.dcm with a signal ROI at [100, 200, 50, 50]
in the liver and a noise ROI at [20, 20, 40, 40] in background air
```
### Dumping full DICOM structure
```
Dump the full DICOM tree of /data/scan.dcm including nested sequences
```
### Comparing UIDs across directories
```
Compare the SeriesInstanceUIDs between /data/study_v1 and /data/study_v2
```
### Verifying segmentation references
```
Check that all segmentation files in /data/segs reference valid source images
```
### Analysing MOLLI inversion times
```
Analyze the inversion times in /data/molli_series — is it a proper 5(3)3 scheme?
```
### Inspecting Philips private tags
```
List all Philips Private Creator tags in /data/philips_scan/image001.dcm
```
```
Look up DD 001 element offset 0x85 in /data/philips_scan/image001.dcm
```
---
## PII Filtering
When PII filtering is enabled (via `DICOM_MCP_PII_FILTER=true`), the following patient tags are replaced with `[REDACTED]` in tool output:
- PatientName
- PatientID
- PatientBirthDate
- PatientSex
**Affected tools**: `dicom_get_metadata`, `dicom_compare_headers`, `dicom_summarize_directory`, `dicom_query`.
All other tools do not expose patient data and are unaffected. See [INSTALL.md](../INSTALL.md) for configuration instructions.
---
## Tool Reference
### Directory & File Discovery
#### `dicom_list_files`
List all DICOM files in a directory with metadata filtering.
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `directory` | string | *required* | Path to search |
| `recursive` | bool | `true` | Search subdirectories |
| `filter_sequence_type` | string | `null` | Filter by type: `dixon`, `t1_mapping`, `multi_echo_gre`, `spin_echo_ir`, `t1`, `t2`, `flair`, `dwi`, `localizer` |
| `count_only` | bool | `false` | Return series breakdown with file counts instead of individual file listing |
| `response_format` | string | `markdown` | `markdown` or `json` |
```json
{
"directory": "/data/test_suite",
"filter_sequence_type": "dixon",
"count_only": true
}
```
#### `dicom_summarize_directory`
Get a high-level overview of DICOM directory contents showing unique values and file counts for patient info, manufacturer, scanner model, field strength, institution, series descriptions, and sequence types.
When PII filtering is enabled, patient tags (Name, ID, DOB, Sex) are redacted.
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `directory` | string | *required* | Path to directory |
| `recursive` | bool | `true` | Search subdirectories |
| `include_series_overview` | bool | `true` | Include per-series table with acquisition parameters (TR, TE, TI, FA) |
| `response_format` | string | `markdown` | `markdown` or `json` |
```json
{
"directory": "/data/large_study",
"include_series_overview": true
}
```
#### `dicom_find_dixon_series`
Find and analyse Dixon (chemical shift) sequences. Automatically identifies Dixon series and detects image types (water, fat, in-phase, out-phase).
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `directory` | string | *required* | Path to search |
| `recursive` | bool | `true` | Search subdirectories |
| `response_format` | string | `markdown` | `markdown` or `json` |
```json
{
"directory": "/data/body_comp_study"
}
```
#### `dicom_search`
Find DICOM files matching specific filter criteria using AND logic across multiple filters.
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `directory` | string | *required* | Path to directory |
| `filters` | list[string] | *required* | Filter expressions (see syntax below) |
| `recursive` | bool | `true` | Search subdirectories |
| `mode` | string | `summary` | `count`, `paths`, or `summary` |
| `response_format` | string | `markdown` | `markdown` or `json` |
**Filter syntax:**
| Operator type | Examples |
|---------------|----------|
| Text (case-insensitive) | `"SeriesDescription contains MOST"`, `"PatientName is not UNKNOWN"`, `"SeriesDescription starts with Thigh"`, `"SeriesDescription ends with Dixon"` |
| Numeric/symbolic | `"EchoTime > 10"`, `"FlipAngle <= 15"`, `"RepetitionTime = 516"`, `"SeriesNumber != 0"` |
| Presence | `"InversionTime exists"`, `"InversionTime missing"` |
Tags can be keywords (`EchoTime`) or hex codes (`0018,0081`).
```json
{
"directory": "/data/study",
"filters": ["SeriesDescription contains MOST", "EchoTime > 10"],
"mode": "paths"
}
```
---
### Metadata & Validation
#### `dicom_get_metadata`
Extract DICOM header information organised by tag groups. When PII filtering is enabled, patient tags are redacted.
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `file_path` | string | *required* | Path to DICOM file |
| `tag_groups` | list[string] | `null` | Groups to extract: `patient_info`, `study_info`, `series_info`, `image_info`, `acquisition`, `manufacturer`, `equipment`, `geometry`, `pixel_data` |
| `custom_tags` | list[string] | `null` | Additional tags as hex codes, e.g. `["0018,0080"]` |
| `philips_private_tags` | list[dict] | `null` | Philips private tags to resolve (see below) |
| `response_format` | string | `markdown` | `markdown` or `json` |
**Philips private tags:**
For Philips DICOM files, you can resolve private tags inline without using the separate `dicom_query_philips_private` tool. Each entry is a dict with `dd_number` and `element_offset` (and optionally `private_group`, default `0x2005`).
```json
{
"file_path": "/data/philips_scan.dcm",
"tag_groups": ["acquisition", "manufacturer"],
"philips_private_tags": [
{"dd_number": 1, "element_offset": 133}
]
}
```
**Standard example:**
```json
{
"file_path": "/data/scan.dcm",
"tag_groups": ["acquisition", "manufacturer"],
"custom_tags": ["0018,0080", "0018,0081"]
}
```
#### `dicom_compare_headers`
Compare DICOM headers across multiple files side-by-side. When PII filtering is enabled, patient tags are redacted.
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `file_paths` | list[string] | *required* | 2-10 files to compare |
| `tag_groups` | list[string] | `["acquisition", "series_info"]` | Which tag groups to compare |
| `show_differences_only` | bool | `false` | Only show tags that differ |
| `response_format` | string | `markdown` | `markdown` or `json` |
```json
{
"file_paths": [
"/data/test01/water.dcm",
"/data/test01/fat.dcm",
"/data/test01/in_phase.dcm"
],
"show_differences_only": true
}
```
#### `dicom_validate_sequence`
Validate MRI sequence parameters against expected values.
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `file_path` | string | *required* | DICOM file to validate |
| `expected_parameters` | dict | `null` | Expected values. Supported keys: `RepetitionTime`, `EchoTime`, `InversionTime`, `FlipAngle`, `ScanningSequence`, `SequenceVariant`, `MRAcquisitionType` |
| `manufacturer` | string | `null` | Expected manufacturer name |
| `response_format` | string | `markdown` | `markdown` or `json` |
```json
{
"file_path": "/data/test.dcm",
"expected_parameters": {
"RepetitionTime": 4.5,
"EchoTime": 2.3,
"InversionTime": 100,
"FlipAngle": 10
},
"manufacturer": "Siemens"
}
```
#### `dicom_analyze_series`
Comprehensive analysis of a complete DICOM series — checks parameter consistency across all files, verifies completeness (no missing instances), and identifies anomalies.
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `directory` | string | *required* | Path containing series files |
| `response_format` | string | `markdown` | `markdown` or `json` |
```json
{
"directory": "/data/series_001"
}
```
#### `dicom_query`
Query arbitrary DICOM tags across all files in a directory. Aggregates unique values with file counts. When PII filtering is enabled, patient tags are redacted.
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `directory` | string | *required* | Path to directory |
| `tags` | list[string] | *required* | Tag keywords (e.g. `"EchoTime"`) or hex codes (e.g. `"0018,0081"`) |
| `recursive` | bool | `true` | Search subdirectories |
| `group_by` | string | `null` | Optional tag to group results by (e.g. `"SeriesDescription"`) |
| `response_format` | string | `markdown` | `markdown` or `json` |
```json
{
"directory": "/data/study",
"tags": ["EchoTime", "FlipAngle"],
"group_by": "SeriesDescription"
}
```
---
### Structure & Comparison
#### `dicom_dump_tree`
Full hierarchical dump of DICOM structure, including nested sequences (SQ elements) with tree-character formatting. Useful for understanding complex DICOM files, inspecting nested structures (e.g. ReferencedSeriesSequence, SourceImageSequence), and debugging.
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `file_path` | string | *required* | Path to DICOM file |
| `max_depth` | int | `10` | Maximum nesting depth to display |
| `show_private` | bool | `true` | Include private tags in output |
| `response_format` | string | `markdown` | `markdown` or `json` |
When PII filtering is enabled, patient tags are redacted in the tree output. Pixel data `(7FE0,0010)` is always skipped.
```json
{
"file_path": "/data/scan.dcm",
"max_depth": 5,
"show_private": false
}
```
#### `dicom_compare_uids`
Compare UID sets between two DICOM directories. Identifies shared UIDs, UIDs unique to each directory, and reports counts and details.
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `directory1` | string | *required* | First directory to compare |
| `directory2` | string | *required* | Second directory to compare |
| `recursive` | bool | `true` | Search subdirectories |
| `compare_tag` | string | `SeriesInstanceUID` | Tag to compare — keyword (e.g. `StudyInstanceUID`, `SOPInstanceUID`) or hex code (e.g. `0020,000E`) |
| `response_format` | string | `markdown` | `markdown` or `json` |
```json
{
"directory1": "/data/study_original",
"directory2": "/data/study_reprocessed",
"compare_tag": "SOPInstanceUID"
}
```
---
### Segmentation & T1 Mapping
#### `dicom_verify_segmentations`
Validate that segmentation files correctly reference valid source images. Detects segmentation files by SOPClassUID (`1.2.840.10008.5.1.4.1.1.66.4`) or by the presence of `SourceImageSequence`. For each segmentation, checks that every `ReferencedSOPInstanceUID` points to an existing source file in the same directory.
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `directory` | string | *required* | Directory containing segmentation and source files |
| `recursive` | bool | `true` | Search subdirectories |
| `response_format` | string | `markdown` | `markdown` or `json` |
Reports total segmentation files, total references, matched vs unmatched counts, and details for any unmatched references.
```json
{
"directory": "/data/study_with_segmentations"
}
```
#### `dicom_analyze_ti`
Extract and validate inversion times from MOLLI / T1 mapping sequences. Handles vendor-specific differences automatically:
- **Siemens / GE**: reads the standard `InversionTime` tag `(0018,0082)`
- **Philips**: falls back to private tag DD 006, offset 0x72 in group 2005 (confirmed at `(2005,xx72)`) or DD 001, offset 0x20 in group 2001
Groups by series, sorts by instance number, and computes statistics including gap analysis to detect missing or out-of-range inversion times.
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `directory` | string | *required* | Directory containing MOLLI / T1 mapping files |
| `recursive` | bool | `true` | Search subdirectories |
| `gap_threshold` | float | `2500.0` | Flag consecutive TI gaps exceeding this value (ms) |
| `response_format` | string | `markdown` | `markdown` or `json` |
**Output includes per series:**
- Ordered TI list sorted by instance number
- Count of zero-value TIs (typically output maps, not acquisitions)
- Count of non-zero TIs (actual inversions)
- TI range, mean, and median
- Largest consecutive gap, with warning if exceeding the threshold
```json
{
"directory": "/data/molli_series",
"gap_threshold": 3000.0
}
```
---
### Philips Private Tags
#### `dicom_query_philips_private`
Query Philips private DICOM tags using DD number and element offset. Philips MRI scanners store proprietary metadata in private tag blocks whose assignments vary across scanners and software versions — this tool resolves them dynamically.
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `file_path` | string | *required* | Path to a Philips DICOM file |
| `dd_number` | int | `null` | DD number to look up (e.g. `1` for "DD 001") |
| `element_offset` | int | `null` | Element offset within the DD block (e.g. `133` for 0x85) |
| `private_group` | int | `0x2005` | DICOM private group to search |
| `list_creators` | bool | `false` | List all Private Creator tags instead of resolving a specific one |
| `response_format` | string | `markdown` | `markdown` or `json` |
**Two usage modes:**
1. **List creators** — discover what private tag blocks are available:
```json
{
"file_path": "/data/philips_scan.dcm",
"list_creators": true
}
```
2. **Resolve a specific tag** — look up a known DD number and offset:
```json
{
"file_path": "/data/philips_scan.dcm",
"dd_number": 1,
"element_offset": 133
}
```
**Common Philips DD numbers and offsets (group 2005):**
| DD # | Offset | Description |
|------|--------|-------------|
| 001 | 0x85 | Shim calculation values |
| 001 | 0x63 | Stack ID |
| 004 | 0x00 | MR Series data object |
---
### Pixel Analysis
#### `dicom_read_pixels`
Extract pixel statistics from a DICOM file. Values are rescaled using RescaleSlope and RescaleIntercept when present (standard for Philips, common on Siemens/GE).
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `file_path` | string | *required* | Path to DICOM file |
| `roi` | list[int] | `null` | Region of interest as `[x, y, width, height]` (top-left corner in pixel coordinates). If omitted, statistics cover the entire image. |
| `include_histogram` | bool | `false` | Include a binned histogram of pixel values |
| `histogram_bins` | int | `50` | Number of histogram bins |
| `response_format` | string | `markdown` | `markdown` or `json` |
**Returns**: min, max, mean, standard deviation, median, 5th/25th/75th/95th percentiles, and pixel count.
```json
{
"file_path": "/data/liver_scan/slice_005.dcm",
"roi": [100, 200, 50, 50],
"include_histogram": true,
"histogram_bins": 20
}
```
#### `dicom_compute_snr`
Compute signal-to-noise ratio from two ROIs in a DICOM image using the single-image method: SNR = mean(signal) / std(noise).
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `file_path` | string | *required* | Path to DICOM file |
| `signal_roi` | list[int] | *required* | Signal region as `[x, y, width, height]` |
| `noise_roi` | list[int] | *required* | Noise/background region as `[x, y, width, height]` |
| `response_format` | string | `markdown` | `markdown` or `json` |
**Tip**: Use `dicom_render_image` with `overlay_rois` first to visualise and verify ROI placement before measuring.
**Note**: Some manufacturers (notably Philips) zero-fill background air outside the reconstruction FOV, which results in zero noise standard deviation and infinite SNR. In such cases, consider using a homogeneous tissue region (e.g. subcutaneous fat or muscle) as the noise ROI instead.
```json
{
"file_path": "/data/liver_scan/slice_005.dcm",
"signal_roi": [100, 200, 50, 50],
"noise_roi": [20, 20, 40, 40]
}
```
#### `dicom_render_image`
Render a DICOM image to PNG with configurable windowing. Optionally overlays labelled ROI rectangles for visual verification.
| Parameter | Type | Default | Description |
|-----------|------|---------|-------------|
| `file_path` | string | *required* | Path to DICOM file |
| `output_path` | string | *required* | Path where the PNG will be saved |
| `window_center` | float | `null` | Manual window center |
| `window_width` | float | `null` | Manual window width |
| `auto_window` | bool | `false` | Auto-calculate window from 5th-95th percentile |
| `overlay_rois` | list[dict] | `null` | ROI overlays (see below) |
| `show_info` | bool | `true` | Burn in series description and windowing values |
| `response_format` | string | `markdown` | `markdown` or `json` |
**Windowing priority:**
1. Explicit `window_center` + `window_width` parameters
2. `auto_window: true` → 5th-95th percentile range
3. DICOM header WindowCenter/WindowWidth values
4. Fallback → full pixel range (min to max)
**ROI overlay format:**
```json
{
"roi": [x, y, width, height],
"label": "Signal (Liver)",
"color": "green"
}
```
Available colours: `red`, `green`, `blue`, `yellow`, `cyan`, `magenta`, `white`.
**Full example:**
```json
{
"file_path": "/data/scan.dcm",
"output_path": "/data/renders/scan_auto.png",
"auto_window": true,
"overlay_rois": [
{"roi": [100, 200, 50, 50], "label": "Signal", "color": "green"},
{"roi": [20, 20, 40, 40], "label": "Noise", "color": "red"}
]
}
```
**Viewing rendered images:** If Claude also has access to a filesystem MCP server with media reading capability (e.g. `read_media_file`), it can view the rendered PNG directly and use visual feedback to refine ROI placement iteratively — no need to switch to a separate DICOM viewer.
---
## Output Formats
All tools support two output formats via the `response_format` parameter:
**Markdown** (default) — Human-readable with tables, formatting, and visual indicators. Best for conversational use in Claude Desktop.
**JSON** — Machine-readable structured data with consistent schemas. Best for programmatic processing or piping into other tools.
---
## Performance Considerations
### File scanning limits
All directory scanning tools are limited to processing **1000 files** per scan by default. This limit is configurable via the `DICOM_MCP_MAX_FILES` environment variable (see [INSTALL.md](INSTALL.md#environment-variables)). If this limit is reached, a warning indicates results were truncated (and `truncated: true` in JSON output). Narrow your search to a more specific subdirectory if needed.
### Optimisation tips
- Use `dicom_summarize_directory` instead of `dicom_list_files` when you only need an overview
- Use `dicom_list_files` with `count_only: true` for a compact series inventory
- Use `dicom_search` with `mode: "count"` to quickly check match counts before fetching details
- Use `dicom_query` instead of `dicom_get_metadata` on multiple files when you need aggregate tag values
- Set `recursive: false` to scan only the immediate directory
- For pixel tools, use `dicom_render_image` to verify ROI placement before running `dicom_compute_snr`
- Use `dicom_dump_tree` with `show_private: false` to focus on standard DICOM structure
- Use `dicom_compare_uids` to quickly detect differences between study directories without inspecting every file
---
## QA Workflow Examples
### Workflow 1: Dixon Image Selection Investigation
When you suspect incorrect fat/water selection:
1. `dicom_find_dixon_series` — locate the Dixon series
2. `dicom_list_files` with `filter_sequence_type: "dixon"` — see all Dixon files
3. `dicom_compare_headers` on suspected files — focus on ImageType tags
4. `dicom_get_metadata` — extract full headers for documentation
### Workflow 2: Multi-Manufacturer Validation
When testing across GE, Siemens, and Philips:
1. `dicom_summarize_directory` — check patient info consistency and get a quick inventory
2. `dicom_query` with `group_by: "SeriesDescription"` — compare timing parameters across series
3. `dicom_validate_sequence` with manufacturer-specific expected parameters
4. `dicom_compare_headers` — identify parameter variations
5. Document differences for test specification updates
### Workflow 3: Test Dataset Verification
Before running automated tests:
1. `dicom_analyze_series` on each test case directory
2. `dicom_find_dixon_series` to confirm Dixon sequences are present
3. `dicom_validate_sequence` to check protocol compliance
4. `dicom_search` to find files with unexpected parameter values (e.g. `"FlipAngle != 15"`)
### Workflow 4: Image Quality Assessment
For checking signal quality and image rendering:
1. `dicom_render_image` with `auto_window: true` — render the image and visually inspect
2. `dicom_render_image` with `overlay_rois` — place and verify signal/noise ROI positions
3. `dicom_read_pixels` — check pixel value distributions and histograms
4. `dicom_compute_snr` — measure SNR with verified ROI placements
5. Repeat across series or manufacturers to compare image quality
**Note on Philips data:** Background air is often zero-filled, making the traditional background-air SNR method return infinite. Use a homogeneous tissue region (subcutaneous fat, muscle) as the noise proxy instead.
### Workflow 5: MOLLI / T1 Mapping Validation
When verifying T1 mapping acquisitions across vendors:
1. `dicom_analyze_ti` — extract all inversion times, check for proper MOLLI scheme (e.g. 5(3)3 = 11 TI-weighted images + output maps)
2. Check for gap warnings — large gaps between consecutive TIs may indicate missing heartbeats or acquisition failures
3. `dicom_dump_tree` on a representative file — inspect nested sequence structure if TI extraction fails
4. For Philips data: `dicom_query_philips_private` with `dd_number: 6, element_offset: 114` (0x72) — manually verify the private TI tag if needed
### Workflow 6: Segmentation Verification
When validating segmentation files reference the correct source images:
1. `dicom_verify_segmentations` — check all segmentation-to-source references in one pass
2. Review any unmatched references — missing source files or incorrect ReferencedSOPInstanceUIDs
3. `dicom_compare_uids` with `compare_tag: "SOPInstanceUID"` — compare two directories to find missing or extra files
### Workflow 7: Philips Private Tag Investigation
When investigating Philips-specific metadata:
1. `dicom_query_philips_private` with `list_creators: true` — discover available DD blocks
2. `dicom_query_philips_private` with specific `dd_number` and `element_offset` — resolve individual tags
3. Or use `dicom_get_metadata` with the `philips_private_tags` parameter to extract private tags alongside standard metadata in a single call
### Example Screenshot From Claude Desktop
<img src="../img/claude_desktop_example.png" width="623" alt="Claude Desktop session example">

Binary file not shown.

After

Width:  |  Height:  |  Size: 155 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 65 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 26 KiB

72
mcps/dicom_mcp/install.sh Executable file
View File

@ -0,0 +1,72 @@
#!/bin/bash
# DICOM MCP Server Installation Script
# Installs Python dependencies via Poetry and registers the server with
# Claude Desktop.
# NOTE: the Claude Desktop config path used below is macOS-specific
# (~/Library/Application Support/Claude); adjust for Linux if needed.
set -e  # Exit on error

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color

echo -e "${GREEN}=== DICOM MCP Server Installation ===${NC}\n"

# Absolute path to this script's directory; used as the server's working
# directory ("cwd") in the Claude Desktop config so Poetry finds the project.
SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

# Poetry drives both dependency installation and running the server
if ! command -v poetry &> /dev/null; then
    echo -e "${RED}Error: Poetry is not installed.${NC}"
    echo "Please install Poetry first: https://python-poetry.org/docs/#installation"
    exit 1
fi

echo -e "${YELLOW}Installing dependencies...${NC}"
poetry install --with dev
echo -e "\n${GREEN}✓ Dependencies installed successfully${NC}\n"

# Configure Claude Desktop
CONFIG_FILE="$HOME/Library/Application Support/Claude/claude_desktop_config.json"
echo -e "${YELLOW}Configuring Claude Desktop...${NC}"

# Create config directory if it doesn't exist
mkdir -p "$HOME/Library/Application Support/Claude"

# Never overwrite an existing config: other MCP servers may already be
# registered in it, so print the snippet for manual merging instead.
if [ -f "$CONFIG_FILE" ]; then
    echo -e "${YELLOW}Existing configuration found.${NC}"
    echo -e "Please add the following to your mcpServers section in:"
    echo -e "${YELLOW}$CONFIG_FILE${NC}\n"
    echo '{
  "dicom_mcp": {
    "command": "poetry",
    "args": ["run", "python", "-m", "dicom_mcp"],
    "cwd": "'"$SCRIPT_DIR"'"
  }
}'
else
    echo -e "${YELLOW}Creating new configuration file...${NC}"
    cat > "$CONFIG_FILE" << EOF
{
  "mcpServers": {
    "dicom_mcp": {
      "command": "poetry",
      "args": ["run", "python", "-m", "dicom_mcp"],
      "cwd": "$SCRIPT_DIR"
    }
  }
}
EOF
    echo -e "${GREEN}✓ Configuration file created${NC}"
fi

echo -e "\n${GREEN}=== Installation Complete ===${NC}\n"
echo -e "Next steps:"
echo -e "1. Restart Claude Desktop"
# Fixed: the project README states the server exposes 17 read-only tools, not 7.
echo -e "2. The DICOM MCP server should be available with 17 tools\n"
echo -e "To test the server manually, run:"
echo -e "${YELLOW}poetry run python -m dicom_mcp${NC}\n"

View File

@ -0,0 +1,29 @@
[tool.poetry]
name = "dicom-mcp"
version = "0.1.0"
description = "DICOM MCP Server for Medical Imaging QA"
authors = ["Gregory Gauthier <gmgauthier@protonmail.com>"]
readme = "README.md"
packages = [{include = "dicom_mcp"}]

# Runtime dependencies only. Formatters/linters moved to the dev group below —
# they were previously listed here, which pulled black/ruff into every
# production install.
[tool.poetry.dependencies]
python = "^3.12,<4.0"
fastmcp = "^2.0.0"
numpy = ">=1.24"
Pillow = ">=10.0"
pydicom = "^3.0.1"

# Development tooling: formatters, linters, and the test stack.
# Installed with `poetry install --with dev`.
[tool.poetry.group.dev.dependencies]
black = "^26.1.0"
ruff = "^0.15.4"
pytest = "^8.0.0"
pytest-asyncio = "^0.23.0"

[build-system]
requires = ["poetry-core>=2.0.0"]
build-backend = "poetry.core.masonry.api"

[tool.pytest.ini_options]
filterwarnings = [
    "ignore::DeprecationWarning:pytest_asyncio.*",
]

View File

@ -0,0 +1,15 @@
#!/usr/bin/env zsh
#
# Launch the DICOM MCP server from its project checkout.
# The checkout lives at a different absolute path on Linux vs macOS.
if [[ "$(uname)" == "Linux" ]]; then
    PATH_PREFIX="/data/Projects"
elif [[ "$(uname)" == "Darwin" ]]; then
    PATH_PREFIX="/Users/gregory.gauthier/Projects"
else
    # Always fallback to the linux default
    PATH_PREFIX="/data/Projects"
fi

# Abort if the project directory is missing rather than suppressing the
# ShellCheck warning — otherwise poetry would run from the wrong cwd.
cd "${PATH_PREFIX}/mcp_servers/dicom_mcp" || {
    echo "Error: cannot cd to ${PATH_PREFIX}/mcp_servers/dicom_mcp" >&2
    exit 1
}
poetry run python ./dicom_mcp

File diff suppressed because it is too large Load Diff

26
mcps/filesystem_mcp/install.sh Executable file
View File

@ -0,0 +1,26 @@
#!/usr/bin/env bash
# Install the @cyanheads/filesystem-mcp-server package globally via npm.

echo "=== Installing @cyanheads/filesystem-mcp-server ==="

# Node.js is a hard prerequisite for the npm-based install
if ! command -v node >/dev/null 2>&1; then
    echo "Error: Node.js not found! Please install Node.js first." >&2
    exit 1
fi
echo "Node.js version: $(node --version)"

# Install globally via npm; test the command directly instead of
# inspecting $? afterwards (fragile, and breaks under set -e).
echo "Installing @cyanheads/filesystem-mcp-server globally..."
if npm install -g @cyanheads/filesystem-mcp-server; then
    echo "Installation successful!"
    echo "The package is now available at: $(npm list -g @cyanheads/filesystem-mcp-server --depth=0)"
    echo ""
    echo "You can now run the launcher: ./launch_filesystem_mcp.sh"
    echo "Or use it directly with npx or in your MCP client config."
else
    echo "Installation failed!" >&2
    exit 1
fi

View File

@ -0,0 +1,19 @@
#!/usr/bin/env zsh
# Launch the globally-installed @cyanheads/filesystem-mcp-server over stdio.
#
# All banner output goes to stderr: when run as an MCP server, stdout is the
# JSON-RPC protocol channel and must carry only protocol messages.
echo "=== Filesystem MCP Server Launcher ===" >&2

if ! command -v node >/dev/null 2>&1; then
    echo "Error: Node.js not found!" >&2
    exit 1
fi

# Resolve the global node_modules location dynamically instead of hardcoding
# a user- and node-version-specific path (previously pinned to
# ~/.nvm/versions/node/v22.19.0, which broke on any other machine).
SERVER_JS="$(npm root -g)/@cyanheads/filesystem-mcp-server/dist/index.js"
if [[ ! -f "$SERVER_JS" ]]; then
    echo "Error: filesystem-mcp-server not found at $SERVER_JS" >&2
    echo "Run ./install.sh first." >&2
    exit 1
fi

echo "Starting @cyanheads/filesystem-mcp-server (stdio transport)..." >&2
echo "Log level: debug" >&2
echo "Press Ctrl+C to stop." >&2
echo "----------------------------------------" >&2
MCP_LOG_LEVEL=debug \
MCP_TRANSPORT_TYPE=stdio \
node "$SERVER_JS" \
    2> filesystem_mcp.log

View File

@ -0,0 +1,12 @@
{
"mcpServers": {
"filesystem": {
"command": "zsh",
"args": ["./launch_filesystem_mcp.sh"],
"env": {
"MCP_LOG_LEVEL": "debug",
"MCP_TRANSPORT_TYPE": "stdio"
}
}
}
}

View File

@ -0,0 +1,60 @@
# open-meteo-mcp
An MCP server for global weather data via the [Open-Meteo API](https://open-meteo.com/). No API key required.
## Tools
### get_current_weather
Returns current conditions for a given latitude/longitude:
- Temperature and "feels like"
- Humidity, wind speed/direction/gusts
- Precipitation, cloud cover, pressure
- WMO weather code decoded to plain English
### get_forecast
Returns a multi-day forecast (1-16 days) for a given latitude/longitude:
- Daily high/low temperatures
- Precipitation totals and probability
- Wind speed, gusts, and dominant direction
- Sunrise/sunset times
- Supports celsius or fahrenheit
## Setup
Requires Python 3.10+ and [Poetry](https://python-poetry.org/).
```sh
cd /data/Projects/open_meteo_mcp
poetry install
```
## Usage
### Stdio (for Claude Code)
```sh
.venv/bin/open-meteo-mcp-stdio
```
Add to `~/.claude/settings.json`:
```json
"open_meteo": {
"type": "stdio",
"command": "/data/Projects/open_meteo_mcp/.venv/bin/open-meteo-mcp-stdio",
"args": [],
"env": {}
}
```
### Streamable HTTP
```sh
.venv/bin/open-meteo-mcp
```
Starts on `http://127.0.0.1:8000/mcp` by default.
## Data Source
All weather data comes from [Open-Meteo](https://open-meteo.com/), which aggregates national weather services worldwide. Free for non-commercial use.

View File

@ -0,0 +1,242 @@
"""
open_meteo_mcp - MCP server for global weather data via the Open-Meteo API.
Provides tools for fetching current weather conditions and multi-day forecasts
for any location worldwide. No API key required.
"""
import logging
from typing import Optional
import httpx
from mcp.server.fastmcp import FastMCP
# ---------------------------------------------------------------------------
# Logging & Output Redirection
# ---------------------------------------------------------------------------
import os
import sys
# Log destination; override with the OPEN_METEO_MCP_LOG_FILE env var.
LOG_FILE = os.environ.get("OPEN_METEO_MCP_LOG_FILE", "open_meteo_mcp.log")


def setup_redirection():
    """Silence stderr and route all logging to LOG_FILE.

    MCP stdio servers must keep stdout clean for the protocol stream, and
    stray stderr output clutters the client's logs, so stderr is pointed at
    /dev/null and diagnostics are appended to a log file instead.
    """
    # Replace the process-level stderr file descriptor with /dev/null.
    null_sink = open(os.devnull, "w")
    os.dup2(null_sink.fileno(), sys.stderr.fileno())

    # All log records are appended to the configured log file.
    logging.basicConfig(
        filename=LOG_FILE,
        filemode="a",
        level=logging.INFO,
        format="%(asctime)s [%(levelname)s] %(message)s",
    )
# Apply the redirection at import time, before the server or any tool runs.
setup_redirection()
logger = logging.getLogger("open_meteo_mcp")
# ---------------------------------------------------------------------------
# Constants
# ---------------------------------------------------------------------------
# Open-Meteo forecast endpoint; serves both current conditions and daily
# forecasts depending on the query parameters. No API key required.
OPEN_METEO_BASE = "https://api.open-meteo.com/v1/forecast"

# WMO Weather interpretation codes
# Maps the integer `weather_code` values returned by the API to
# human-readable condition descriptions.
WMO_CODES = {
    0: "Clear sky",
    1: "Mainly clear",
    2: "Partly cloudy",
    3: "Overcast",
    45: "Fog",
    48: "Depositing rime fog",
    51: "Light drizzle",
    53: "Moderate drizzle",
    55: "Dense drizzle",
    56: "Light freezing drizzle",
    57: "Dense freezing drizzle",
    61: "Slight rain",
    63: "Moderate rain",
    65: "Heavy rain",
    66: "Light freezing rain",
    67: "Heavy freezing rain",
    71: "Slight snowfall",
    73: "Moderate snowfall",
    75: "Heavy snowfall",
    77: "Snow grains",
    80: "Slight rain showers",
    81: "Moderate rain showers",
    82: "Violent rain showers",
    85: "Slight snow showers",
    86: "Heavy snow showers",
    95: "Thunderstorm",
    96: "Thunderstorm with slight hail",
    99: "Thunderstorm with heavy hail",
}
# ---------------------------------------------------------------------------
# MCP Server
# ---------------------------------------------------------------------------
# Single FastMCP server instance; tools are registered onto it via the
# @mcp.tool() decorator below.
mcp = FastMCP(
    "open_meteo_mcp",
    instructions="Global weather data via Open-Meteo API",
)
def _describe_weather_code(code: int) -> str:
return WMO_CODES.get(code, f"Unknown ({code})")
def _c_to_f(celsius: float) -> float:
return round(celsius * 9 / 5 + 32, 1)
def _temp_both(celsius: float) -> str:
return f"{celsius}°C / {_c_to_f(celsius)}°F"
def _kmh_to_mph(kmh: float) -> float:
return round(kmh * 0.621371, 1)
def _wind_both(kmh: float) -> str:
return f"{kmh}km/h / {_kmh_to_mph(kmh)}mph"
@mcp.tool()
async def get_current_weather(latitude: float, longitude: float) -> str:
    """Get current weather conditions for a location.

    Args:
        latitude: Latitude of the location (e.g. 51.752 for Oxford, UK)
        longitude: Longitude of the location (e.g. -1.258 for Oxford, UK)

    Returns:
        A plain-text report of current conditions, or an error string on
        failure (errors are also logged with a traceback).
    """
    try:
        # Fixed set of "current" variables to request; "timezone=auto" makes
        # the API resolve the local timezone from the coordinates.
        params = {
            "latitude": latitude,
            "longitude": longitude,
            "current": ",".join([
                "temperature_2m",
                "relative_humidity_2m",
                "apparent_temperature",
                "weather_code",
                "wind_speed_10m",
                "wind_direction_10m",
                "wind_gusts_10m",
                "precipitation",
                "cloud_cover",
                "pressure_msl",
            ]),
            "timezone": "auto",
        }
        async with httpx.AsyncClient() as client:
            resp = await client.get(OPEN_METEO_BASE, params=params, timeout=30)
            resp.raise_for_status()
            data = resp.json()
        # "current" carries the values, "current_units" the matching unit
        # strings keyed by the same variable names.
        current = data["current"]
        units = data["current_units"]
        tz = data.get("timezone", "Unknown")
        # Temperatures and wind speeds are shown in both metric and imperial
        # via the module-level helper formatters.
        lines = [
            f"Current Weather (timezone: {tz})",
            f" Time: {current['time']}",
            f" Condition: {_describe_weather_code(current['weather_code'])}",
            f" Temperature: {_temp_both(current['temperature_2m'])}",
            f" Feels like: {_temp_both(current['apparent_temperature'])}",
            f" Humidity: {current['relative_humidity_2m']}{units['relative_humidity_2m']}",
            f" Wind: {_wind_both(current['wind_speed_10m'])} from {current['wind_direction_10m']}{units['wind_direction_10m']}",
            f" Gusts: {_wind_both(current['wind_gusts_10m'])}",
            f" Precipitation: {current['precipitation']}{units['precipitation']}",
            f" Cloud cover: {current['cloud_cover']}{units['cloud_cover']}",
            f" Pressure: {current['pressure_msl']}{units['pressure_msl']}",
        ]
        return "\n".join(lines)
    except Exception as e:
        # Tools return error text rather than raising, so the MCP client
        # receives a readable message instead of a protocol error.
        logger.error("Error in get_current_weather(%s, %s): %s", latitude, longitude, e, exc_info=True)
        return f"Error fetching current weather: {type(e).__name__}: {e}"
@mcp.tool()
async def get_forecast(
    latitude: float,
    longitude: float,
    days: int = 7,
) -> str:
    """Get a multi-day weather forecast for a location.

    Args:
        latitude: Latitude of the location
        longitude: Longitude of the location
        days: Number of forecast days (1-16, default 7)

    Returns:
        A plain-text report with one section per forecast day, or an error
        string on failure (errors are also logged with a traceback).
    """
    try:
        # Clamp to the API-supported 1-16 day range instead of failing.
        days = max(1, min(16, days))
        params = {
            "latitude": latitude,
            "longitude": longitude,
            # Daily aggregate variables to request.
            "daily": ",".join([
                "weather_code",
                "temperature_2m_max",
                "temperature_2m_min",
                "apparent_temperature_max",
                "apparent_temperature_min",
                "precipitation_sum",
                "precipitation_probability_max",
                "wind_speed_10m_max",
                "wind_gusts_10m_max",
                "wind_direction_10m_dominant",
                "sunrise",
                "sunset",
            ]),
            "temperature_unit": "celsius",
            # Resolve the local timezone from the coordinates.
            "timezone": "auto",
            "forecast_days": days,
        }
        async with httpx.AsyncClient() as client:
            resp = await client.get(OPEN_METEO_BASE, params=params, timeout=30)
            resp.raise_for_status()
            data = resp.json()
        # "daily" holds parallel per-day arrays keyed by variable name;
        # "daily_units" holds the matching unit strings.
        daily = data["daily"]
        units = data["daily_units"]
        tz = data.get("timezone", "Unknown")
        sections = [f"Forecast for {days} day(s) (timezone: {tz})"]
        # One section per day, indexing into the parallel arrays.
        for i in range(len(daily["time"])):
            section = [
                f"\n--- {daily['time'][i]} ---",
                f" Condition: {_describe_weather_code(daily['weather_code'][i])}",
                f" High: {_temp_both(daily['temperature_2m_max'][i])} Low: {_temp_both(daily['temperature_2m_min'][i])}",
                f" Feels like: {_temp_both(daily['apparent_temperature_max'][i])} / {_temp_both(daily['apparent_temperature_min'][i])}",
                f" Precipitation: {daily['precipitation_sum'][i]}{units['precipitation_sum']} (chance: {daily['precipitation_probability_max'][i]}{units['precipitation_probability_max']})",
                f" Wind: {_wind_both(daily['wind_speed_10m_max'][i])} gusts {_wind_both(daily['wind_gusts_10m_max'][i])} from {daily['wind_direction_10m_dominant'][i]}{units['wind_direction_10m_dominant']}",
                f" Sunrise: {daily['sunrise'][i]} Sunset: {daily['sunset'][i]}",
            ]
            sections.append("\n".join(section))
        return "\n".join(sections)
    except Exception as e:
        # Tools return error text rather than raising, so the MCP client
        # receives a readable message instead of a protocol error.
        logger.error("Error in get_forecast(%s, %s, days=%s): %s", latitude, longitude, days, e, exc_info=True)
        return f"Error fetching forecast: {type(e).__name__}: {e}"
# ---------------------------------------------------------------------------
# Entrypoints
# ---------------------------------------------------------------------------
def main():
    """Run the server over the streamable-HTTP transport."""
    logger.info("Starting open_meteo_mcp on streamable-http")
    mcp.run(transport="streamable-http")


def main_stdio():
    """Run the server over stdio (for MCP clients such as Claude Code)."""
    logger.info("Starting open_meteo_mcp via stdio")
    mcp.run(transport="stdio")


# Direct execution defaults to the HTTP transport; the stdio entrypoint is
# exposed as a separate console script.
if __name__ == "__main__":
    main()

941
mcps/open_meteo_mcp/poetry.lock generated Normal file
View File

@ -0,0 +1,941 @@
# This file is automatically @generated by Poetry 2.1.4 and should not be changed by hand.
[[package]]
name = "annotated-types"
version = "0.7.0"
description = "Reusable constraint types to use with typing.Annotated"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"},
{file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"},
]
[[package]]
name = "anyio"
version = "4.13.0"
description = "High-level concurrency and networking framework on top of asyncio or Trio"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "anyio-4.13.0-py3-none-any.whl", hash = "sha256:08b310f9e24a9594186fd75b4f73f4a4152069e3853f1ed8bfbf58369f4ad708"},
{file = "anyio-4.13.0.tar.gz", hash = "sha256:334b70e641fd2221c1505b3890c69882fe4a2df910cba14d97019b90b24439dc"},
]
[package.dependencies]
exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
idna = ">=2.8"
typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""}
[package.extras]
trio = ["trio (>=0.32.0)"]
[[package]]
name = "attrs"
version = "26.1.0"
description = "Classes Without Boilerplate"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "attrs-26.1.0-py3-none-any.whl", hash = "sha256:c647aa4a12dfbad9333ca4e71fe62ddc36f4e63b2d260a37a8b83d2f043ac309"},
{file = "attrs-26.1.0.tar.gz", hash = "sha256:d03ceb89cb322a8fd706d4fb91940737b6642aa36998fe130a9bc96c985eff32"},
]
[[package]]
name = "certifi"
version = "2026.2.25"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "certifi-2026.2.25-py3-none-any.whl", hash = "sha256:027692e4402ad994f1c42e52a4997a9763c646b73e4096e4d5d6db8af1d6f0fa"},
{file = "certifi-2026.2.25.tar.gz", hash = "sha256:e887ab5cee78ea814d3472169153c2d12cd43b14bd03329a39a9c6e2e80bfba7"},
]
[[package]]
name = "cffi"
version = "2.0.0"
description = "Foreign Function Interface for Python calling C code."
optional = false
python-versions = ">=3.9"
groups = ["main"]
markers = "platform_python_implementation != \"PyPy\""
files = [
{file = "cffi-2.0.0-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:0cf2d91ecc3fcc0625c2c530fe004f82c110405f101548512cce44322fa8ac44"},
{file = "cffi-2.0.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:f73b96c41e3b2adedc34a7356e64c8eb96e03a3782b535e043a986276ce12a49"},
{file = "cffi-2.0.0-cp310-cp310-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:53f77cbe57044e88bbd5ed26ac1d0514d2acf0591dd6bb02a3ae37f76811b80c"},
{file = "cffi-2.0.0-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3e837e369566884707ddaf85fc1744b47575005c0a229de3327f8f9a20f4efeb"},
{file = "cffi-2.0.0-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:5eda85d6d1879e692d546a078b44251cdd08dd1cfb98dfb77b670c97cee49ea0"},
{file = "cffi-2.0.0-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9332088d75dc3241c702d852d4671613136d90fa6881da7d770a483fd05248b4"},
{file = "cffi-2.0.0-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fc7de24befaeae77ba923797c7c87834c73648a05a4bde34b3b7e5588973a453"},
{file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:cf364028c016c03078a23b503f02058f1814320a56ad535686f90565636a9495"},
{file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:e11e82b744887154b182fd3e7e8512418446501191994dbf9c9fc1f32cc8efd5"},
{file = "cffi-2.0.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8ea985900c5c95ce9db1745f7933eeef5d314f0565b27625d9a10ec9881e1bfb"},
{file = "cffi-2.0.0-cp310-cp310-win32.whl", hash = "sha256:1f72fb8906754ac8a2cc3f9f5aaa298070652a0ffae577e0ea9bd480dc3c931a"},
{file = "cffi-2.0.0-cp310-cp310-win_amd64.whl", hash = "sha256:b18a3ed7d5b3bd8d9ef7a8cb226502c6bf8308df1525e1cc676c3680e7176739"},
{file = "cffi-2.0.0-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:b4c854ef3adc177950a8dfc81a86f5115d2abd545751a304c5bcf2c2c7283cfe"},
{file = "cffi-2.0.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2de9a304e27f7596cd03d16f1b7c72219bd944e99cc52b84d0145aefb07cbd3c"},
{file = "cffi-2.0.0-cp311-cp311-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:baf5215e0ab74c16e2dd324e8ec067ef59e41125d3eade2b863d294fd5035c92"},
{file = "cffi-2.0.0-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:730cacb21e1bdff3ce90babf007d0a0917cc3e6492f336c2f0134101e0944f93"},
{file = "cffi-2.0.0-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:6824f87845e3396029f3820c206e459ccc91760e8fa24422f8b0c3d1731cbec5"},
{file = "cffi-2.0.0-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:9de40a7b0323d889cf8d23d1ef214f565ab154443c42737dfe52ff82cf857664"},
{file = "cffi-2.0.0-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8941aaadaf67246224cee8c3803777eed332a19d909b47e29c9842ef1e79ac26"},
{file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:a05d0c237b3349096d3981b727493e22147f934b20f6f125a3eba8f994bec4a9"},
{file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:94698a9c5f91f9d138526b48fe26a199609544591f859c870d477351dc7b2414"},
{file = "cffi-2.0.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:5fed36fccc0612a53f1d4d9a816b50a36702c28a2aa880cb8a122b3466638743"},
{file = "cffi-2.0.0-cp311-cp311-win32.whl", hash = "sha256:c649e3a33450ec82378822b3dad03cc228b8f5963c0c12fc3b1e0ab940f768a5"},
{file = "cffi-2.0.0-cp311-cp311-win_amd64.whl", hash = "sha256:66f011380d0e49ed280c789fbd08ff0d40968ee7b665575489afa95c98196ab5"},
{file = "cffi-2.0.0-cp311-cp311-win_arm64.whl", hash = "sha256:c6638687455baf640e37344fe26d37c404db8b80d037c3d29f58fe8d1c3b194d"},
{file = "cffi-2.0.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:6d02d6655b0e54f54c4ef0b94eb6be0607b70853c45ce98bd278dc7de718be5d"},
{file = "cffi-2.0.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8eca2a813c1cb7ad4fb74d368c2ffbbb4789d377ee5bb8df98373c2cc0dee76c"},
{file = "cffi-2.0.0-cp312-cp312-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:21d1152871b019407d8ac3985f6775c079416c282e431a4da6afe7aefd2bccbe"},
{file = "cffi-2.0.0-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:b21e08af67b8a103c71a250401c78d5e0893beff75e28c53c98f4de42f774062"},
{file = "cffi-2.0.0-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:1e3a615586f05fc4065a8b22b8152f0c1b00cdbc60596d187c2a74f9e3036e4e"},
{file = "cffi-2.0.0-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:81afed14892743bbe14dacb9e36d9e0e504cd204e0b165062c488942b9718037"},
{file = "cffi-2.0.0-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:3e17ed538242334bf70832644a32a7aae3d83b57567f9fd60a26257e992b79ba"},
{file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:3925dd22fa2b7699ed2617149842d2e6adde22b262fcbfada50e3d195e4b3a94"},
{file = "cffi-2.0.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2c8f814d84194c9ea681642fd164267891702542f028a15fc97d4674b6206187"},
{file = "cffi-2.0.0-cp312-cp312-win32.whl", hash = "sha256:da902562c3e9c550df360bfa53c035b2f241fed6d9aef119048073680ace4a18"},
{file = "cffi-2.0.0-cp312-cp312-win_amd64.whl", hash = "sha256:da68248800ad6320861f129cd9c1bf96ca849a2771a59e0344e88681905916f5"},
{file = "cffi-2.0.0-cp312-cp312-win_arm64.whl", hash = "sha256:4671d9dd5ec934cb9a73e7ee9676f9362aba54f7f34910956b84d727b0d73fb6"},
{file = "cffi-2.0.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:00bdf7acc5f795150faa6957054fbbca2439db2f775ce831222b66f192f03beb"},
{file = "cffi-2.0.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:45d5e886156860dc35862657e1494b9bae8dfa63bf56796f2fb56e1679fc0bca"},
{file = "cffi-2.0.0-cp313-cp313-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:07b271772c100085dd28b74fa0cd81c8fb1a3ba18b21e03d7c27f3436a10606b"},
{file = "cffi-2.0.0-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d48a880098c96020b02d5a1f7d9251308510ce8858940e6fa99ece33f610838b"},
{file = "cffi-2.0.0-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f93fd8e5c8c0a4aa1f424d6173f14a892044054871c771f8566e4008eaa359d2"},
{file = "cffi-2.0.0-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:dd4f05f54a52fb558f1ba9f528228066954fee3ebe629fc1660d874d040ae5a3"},
{file = "cffi-2.0.0-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c8d3b5532fc71b7a77c09192b4a5a200ea992702734a2e9279a37f2478236f26"},
{file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d9b29c1f0ae438d5ee9acb31cadee00a58c46cc9c0b2f9038c6b0b3470877a8c"},
{file = "cffi-2.0.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:6d50360be4546678fc1b79ffe7a66265e28667840010348dd69a314145807a1b"},
{file = "cffi-2.0.0-cp313-cp313-win32.whl", hash = "sha256:74a03b9698e198d47562765773b4a8309919089150a0bb17d829ad7b44b60d27"},
{file = "cffi-2.0.0-cp313-cp313-win_amd64.whl", hash = "sha256:19f705ada2530c1167abacb171925dd886168931e0a7b78f5bffcae5c6b5be75"},
{file = "cffi-2.0.0-cp313-cp313-win_arm64.whl", hash = "sha256:256f80b80ca3853f90c21b23ee78cd008713787b1b1e93eae9f3d6a7134abd91"},
{file = "cffi-2.0.0-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:fc33c5141b55ed366cfaad382df24fe7dcbc686de5be719b207bb248e3053dc5"},
{file = "cffi-2.0.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c654de545946e0db659b3400168c9ad31b5d29593291482c43e3564effbcee13"},
{file = "cffi-2.0.0-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:24b6f81f1983e6df8db3adc38562c83f7d4a0c36162885ec7f7b77c7dcbec97b"},
{file = "cffi-2.0.0-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:12873ca6cb9b0f0d3a0da705d6086fe911591737a59f28b7936bdfed27c0d47c"},
{file = "cffi-2.0.0-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:d9b97165e8aed9272a6bb17c01e3cc5871a594a446ebedc996e2397a1c1ea8ef"},
{file = "cffi-2.0.0-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:afb8db5439b81cf9c9d0c80404b60c3cc9c3add93e114dcae767f1477cb53775"},
{file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:737fe7d37e1a1bffe70bd5754ea763a62a066dc5913ca57e957824b72a85e205"},
{file = "cffi-2.0.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:38100abb9d1b1435bc4cc340bb4489635dc2f0da7456590877030c9b3d40b0c1"},
{file = "cffi-2.0.0-cp314-cp314-win32.whl", hash = "sha256:087067fa8953339c723661eda6b54bc98c5625757ea62e95eb4898ad5e776e9f"},
{file = "cffi-2.0.0-cp314-cp314-win_amd64.whl", hash = "sha256:203a48d1fb583fc7d78a4c6655692963b860a417c0528492a6bc21f1aaefab25"},
{file = "cffi-2.0.0-cp314-cp314-win_arm64.whl", hash = "sha256:dbd5c7a25a7cb98f5ca55d258b103a2054f859a46ae11aaf23134f9cc0d356ad"},
{file = "cffi-2.0.0-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:9a67fc9e8eb39039280526379fb3a70023d77caec1852002b4da7e8b270c4dd9"},
{file = "cffi-2.0.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:7a66c7204d8869299919db4d5069a82f1561581af12b11b3c9f48c584eb8743d"},
{file = "cffi-2.0.0-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7cc09976e8b56f8cebd752f7113ad07752461f48a58cbba644139015ac24954c"},
{file = "cffi-2.0.0-cp314-cp314t-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:92b68146a71df78564e4ef48af17551a5ddd142e5190cdf2c5624d0c3ff5b2e8"},
{file = "cffi-2.0.0-cp314-cp314t-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b1e74d11748e7e98e2f426ab176d4ed720a64412b6a15054378afdb71e0f37dc"},
{file = "cffi-2.0.0-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:28a3a209b96630bca57cce802da70c266eb08c6e97e5afd61a75611ee6c64592"},
{file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:7553fb2090d71822f02c629afe6042c299edf91ba1bf94951165613553984512"},
{file = "cffi-2.0.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:6c6c373cfc5c83a975506110d17457138c8c63016b563cc9ed6e056a82f13ce4"},
{file = "cffi-2.0.0-cp314-cp314t-win32.whl", hash = "sha256:1fc9ea04857caf665289b7a75923f2c6ed559b8298a1b8c49e59f7dd95c8481e"},
{file = "cffi-2.0.0-cp314-cp314t-win_amd64.whl", hash = "sha256:d68b6cef7827e8641e8ef16f4494edda8b36104d79773a334beaa1e3521430f6"},
{file = "cffi-2.0.0-cp314-cp314t-win_arm64.whl", hash = "sha256:0a1527a803f0a659de1af2e1fd700213caba79377e27e4693648c2923da066f9"},
{file = "cffi-2.0.0-cp39-cp39-macosx_10_13_x86_64.whl", hash = "sha256:fe562eb1a64e67dd297ccc4f5addea2501664954f2692b69a76449ec7913ecbf"},
{file = "cffi-2.0.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:de8dad4425a6ca6e4e5e297b27b5c824ecc7581910bf9aee86cb6835e6812aa7"},
{file = "cffi-2.0.0-cp39-cp39-manylinux1_i686.manylinux2014_i686.manylinux_2_17_i686.manylinux_2_5_i686.whl", hash = "sha256:4647afc2f90d1ddd33441e5b0e85b16b12ddec4fca55f0d9671fef036ecca27c"},
{file = "cffi-2.0.0-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3f4d46d8b35698056ec29bca21546e1551a205058ae1a181d871e278b0b28165"},
{file = "cffi-2.0.0-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:e6e73b9e02893c764e7e8d5bb5ce277f1a009cd5243f8228f75f842bf937c534"},
{file = "cffi-2.0.0-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:cb527a79772e5ef98fb1d700678fe031e353e765d1ca2d409c92263c6d43e09f"},
{file = "cffi-2.0.0-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:61d028e90346df14fedc3d1e5441df818d095f3b87d286825dfcbd6459b7ef63"},
{file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:0f6084a0ea23d05d20c3edcda20c3d006f9b6f3fefeac38f59262e10cef47ee2"},
{file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:1cd13c99ce269b3ed80b417dcd591415d3372bcac067009b6e0f59c7d4015e65"},
{file = "cffi-2.0.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:89472c9762729b5ae1ad974b777416bfda4ac5642423fa93bd57a09204712322"},
{file = "cffi-2.0.0-cp39-cp39-win32.whl", hash = "sha256:2081580ebb843f759b9f617314a24ed5738c51d2aee65d31e02f6f7a2b97707a"},
{file = "cffi-2.0.0-cp39-cp39-win_amd64.whl", hash = "sha256:b882b3df248017dba09d6b16defe9b5c407fe32fc7c65a9c69798e6175601be9"},
{file = "cffi-2.0.0.tar.gz", hash = "sha256:44d1b5909021139fe36001ae048dbdde8214afa20200eda0f64c068cac5d5529"},
]
[package.dependencies]
pycparser = {version = "*", markers = "implementation_name != \"PyPy\""}
[[package]]
name = "click"
version = "8.3.2"
description = "Composable command line interface toolkit"
optional = false
python-versions = ">=3.10"
groups = ["main"]
markers = "sys_platform != \"emscripten\""
files = [
{file = "click-8.3.2-py3-none-any.whl", hash = "sha256:1924d2c27c5653561cd2cae4548d1406039cb79b858b747cfea24924bbc1616d"},
{file = "click-8.3.2.tar.gz", hash = "sha256:14162b8b3b3550a7d479eafa77dfd3c38d9dc8951f6f69c78913a8f9a7540fd5"},
]
[package.dependencies]
colorama = {version = "*", markers = "platform_system == \"Windows\""}
[[package]]
name = "colorama"
version = "0.4.6"
description = "Cross-platform colored terminal text."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
groups = ["main"]
markers = "sys_platform != \"emscripten\" and platform_system == \"Windows\""
files = [
{file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
{file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
]
[[package]]
name = "cryptography"
version = "46.0.6"
description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
optional = false
python-versions = "!=3.9.0,!=3.9.1,>=3.8"
groups = ["main"]
files = [
{file = "cryptography-46.0.6-cp311-abi3-macosx_10_9_universal2.whl", hash = "sha256:64235194bad039a10bb6d2d930ab3323baaec67e2ce36215fd0952fad0930ca8"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:26031f1e5ca62fcb9d1fcb34b2b60b390d1aacaa15dc8b895a9ed00968b97b30"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9a693028b9cbe51b5a1136232ee8f2bc242e4e19d456ded3fa7c86e43c713b4a"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:67177e8a9f421aa2d3a170c3e56eca4e0128883cf52a071a7cbf53297f18b175"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:d9528b535a6c4f8ff37847144b8986a9a143585f0540fbcb1a98115b543aa463"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:22259338084d6ae497a19bae5d4c66b7ca1387d3264d1c2c0e72d9e9b6a77b97"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:760997a4b950ff00d418398ad73fbc91aa2894b5c1db7ccb45b4f68b42a63b3c"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:3dfa6567f2e9e4c5dceb8ccb5a708158a2a871052fa75c8b78cb0977063f1507"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:cdcd3edcbc5d55757e5f5f3d330dd00007ae463a7e7aa5bf132d1f22a4b62b19"},
{file = "cryptography-46.0.6-cp311-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:d4e4aadb7fc1f88687f47ca20bb7227981b03afaae69287029da08096853b738"},
{file = "cryptography-46.0.6-cp311-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:2b417edbe8877cda9022dde3a008e2deb50be9c407eef034aeeb3a8b11d9db3c"},
{file = "cryptography-46.0.6-cp311-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:380343e0653b1c9d7e1f55b52aaa2dbb2fdf2730088d48c43ca1c7c0abb7cc2f"},
{file = "cryptography-46.0.6-cp311-abi3-win32.whl", hash = "sha256:bcb87663e1f7b075e48c3be3ecb5f0b46c8fc50b50a97cf264e7f60242dca3f2"},
{file = "cryptography-46.0.6-cp311-abi3-win_amd64.whl", hash = "sha256:6739d56300662c468fddb0e5e291f9b4d084bead381667b9e654c7dd81705124"},
{file = "cryptography-46.0.6-cp314-cp314t-macosx_10_9_universal2.whl", hash = "sha256:2ef9e69886cbb137c2aef9772c2e7138dc581fad4fcbcf13cc181eb5a3ab6275"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:7f417f034f91dcec1cb6c5c35b07cdbb2ef262557f701b4ecd803ee8cefed4f4"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d24c13369e856b94892a89ddf70b332e0b70ad4a5c43cf3e9cb71d6d7ffa1f7b"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_aarch64.whl", hash = "sha256:aad75154a7ac9039936d50cf431719a2f8d4ed3d3c277ac03f3339ded1a5e707"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_ppc64le.whl", hash = "sha256:3c21d92ed15e9cfc6eb64c1f5a0326db22ca9c2566ca46d845119b45b4400361"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_28_x86_64.whl", hash = "sha256:4668298aef7cddeaf5c6ecc244c2302a2b8e40f384255505c22875eebb47888b"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_31_armv7l.whl", hash = "sha256:8ce35b77aaf02f3b59c90b2c8a05c73bac12cea5b4e8f3fbece1f5fddea5f0ca"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_aarch64.whl", hash = "sha256:c89eb37fae9216985d8734c1afd172ba4927f5a05cfd9bf0e4863c6d5465b013"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_ppc64le.whl", hash = "sha256:ed418c37d095aeddf5336898a132fba01091f0ac5844e3e8018506f014b6d2c4"},
{file = "cryptography-46.0.6-cp314-cp314t-manylinux_2_34_x86_64.whl", hash = "sha256:69cf0056d6947edc6e6760e5f17afe4bea06b56a9ac8a06de9d2bd6b532d4f3a"},
{file = "cryptography-46.0.6-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:8e7304c4f4e9490e11efe56af6713983460ee0780f16c63f219984dab3af9d2d"},
{file = "cryptography-46.0.6-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b928a3ca837c77a10e81a814a693f2295200adb3352395fad024559b7be7a736"},
{file = "cryptography-46.0.6-cp314-cp314t-win32.whl", hash = "sha256:97c8115b27e19e592a05c45d0dd89c57f81f841cc9880e353e0d3bf25b2139ed"},
{file = "cryptography-46.0.6-cp314-cp314t-win_amd64.whl", hash = "sha256:c797e2517cb7880f8297e2c0f43bb910e91381339336f75d2c1c2cbf811b70b4"},
{file = "cryptography-46.0.6-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:12cae594e9473bca1a7aceb90536060643128bb274fcea0fc459ab90f7d1ae7a"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:639301950939d844a9e1c4464d7e07f902fe9a7f6b215bb0d4f28584729935d8"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:ed3775295fb91f70b4027aeba878d79b3e55c0b3e97eaa4de71f8f23a9f2eb77"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:8927ccfbe967c7df312ade694f987e7e9e22b2425976ddbf28271d7e58845290"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_ppc64le.whl", hash = "sha256:b12c6b1e1651e42ab5de8b1e00dc3b6354fdfd778e7fa60541ddacc27cd21410"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:063b67749f338ca9c5a0b7fe438a52c25f9526b851e24e6c9310e7195aad3b4d"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_31_armv7l.whl", hash = "sha256:02fad249cb0e090b574e30b276a3da6a149e04ee2f049725b1f69e7b8351ec70"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_aarch64.whl", hash = "sha256:7e6142674f2a9291463e5e150090b95a8519b2fb6e6aaec8917dd8d094ce750d"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_ppc64le.whl", hash = "sha256:456b3215172aeefb9284550b162801d62f5f264a081049a3e94307fe20792cfa"},
{file = "cryptography-46.0.6-cp38-abi3-manylinux_2_34_x86_64.whl", hash = "sha256:341359d6c9e68834e204ceaf25936dffeafea3829ab80e9503860dcc4f4dac58"},
{file = "cryptography-46.0.6-cp38-abi3-musllinux_1_2_aarch64.whl", hash = "sha256:9a9c42a2723999a710445bc0d974e345c32adfd8d2fac6d8a251fa829ad31cfb"},
{file = "cryptography-46.0.6-cp38-abi3-musllinux_1_2_x86_64.whl", hash = "sha256:6617f67b1606dfd9fe4dbfa354a9508d4a6d37afe30306fe6c101b7ce3274b72"},
{file = "cryptography-46.0.6-cp38-abi3-win32.whl", hash = "sha256:7f6690b6c55e9c5332c0b59b9c8a3fb232ebf059094c17f9019a51e9827df91c"},
{file = "cryptography-46.0.6-cp38-abi3-win_amd64.whl", hash = "sha256:79e865c642cfc5c0b3eb12af83c35c5aeff4fa5c672dc28c43721c2c9fdd2f0f"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:2ea0f37e9a9cf0df2952893ad145fd9627d326a59daec9b0802480fa3bcd2ead"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:a3e84d5ec9ba01f8fd03802b2147ba77f0c8f2617b2aff254cedd551844209c8"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:12f0fa16cc247b13c43d56d7b35287ff1569b5b1f4c5e87e92cc4fcc00cd10c0"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_34_aarch64.whl", hash = "sha256:50575a76e2951fe7dbd1f56d181f8c5ceeeb075e9ff88e7ad997d2f42af06e7b"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-manylinux_2_34_x86_64.whl", hash = "sha256:90e5f0a7b3be5f40c3a0a0eafb32c681d8d2c181fc2a1bdabe9b3f611d9f6b1a"},
{file = "cryptography-46.0.6-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:6728c49e3b2c180ef26f8e9f0a883a2c585638db64cf265b49c9ba10652d430e"},
{file = "cryptography-46.0.6.tar.gz", hash = "sha256:27550628a518c5c6c903d84f637fbecf287f6cb9ced3804838a1295dc1fd0759"},
]
[package.dependencies]
cffi = {version = ">=2.0.0", markers = "python_full_version >= \"3.9.0\" and platform_python_implementation != \"PyPy\""}
typing-extensions = {version = ">=4.13.2", markers = "python_full_version < \"3.11.0\""}
[package.extras]
docs = ["sphinx (>=5.3.0)", "sphinx-inline-tabs", "sphinx-rtd-theme (>=3.0.0)"]
docstest = ["pyenchant (>=3)", "readme-renderer (>=30.0)", "sphinxcontrib-spelling (>=7.3.1)"]
nox = ["nox[uv] (>=2024.4.15)"]
pep8test = ["check-sdist", "click (>=8.0.1)", "mypy (>=1.14)", "ruff (>=0.11.11)"]
sdist = ["build (>=1.0.0)"]
ssh = ["bcrypt (>=3.1.5)"]
test = ["certifi (>=2024)", "cryptography-vectors (==46.0.6)", "pretend (>=0.7)", "pytest (>=7.4.0)", "pytest-benchmark (>=4.0)", "pytest-cov (>=2.10.1)", "pytest-xdist (>=3.5.0)"]
test-randomorder = ["pytest-randomly"]
[[package]]
name = "exceptiongroup"
version = "1.3.1"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
groups = ["main"]
markers = "python_version == \"3.10\""
files = [
{file = "exceptiongroup-1.3.1-py3-none-any.whl", hash = "sha256:a7a39a3bd276781e98394987d3a5701d0c4edffb633bb7a5144577f82c773598"},
{file = "exceptiongroup-1.3.1.tar.gz", hash = "sha256:8b412432c6055b0b7d14c310000ae93352ed6754f70fa8f7c34141f91c4e3219"},
]
[package.dependencies]
typing-extensions = {version = ">=4.6.0", markers = "python_version < \"3.13\""}
[package.extras]
test = ["pytest (>=6)"]
[[package]]
name = "h11"
version = "0.16.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"},
{file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"},
]
[[package]]
name = "httpcore"
version = "1.0.9"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55"},
{file = "httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8"},
]
[package.dependencies]
certifi = "*"
h11 = ">=0.16"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<1.0)"]
[[package]]
name = "httpx"
version = "0.28.1"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"},
{file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"},
]
[package.dependencies]
anyio = "*"
certifi = "*"
httpcore = "==1.*"
idna = "*"
[package.extras]
brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "httpx-sse"
version = "0.4.3"
description = "Consume Server-Sent Event (SSE) messages with HTTPX."
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "httpx_sse-0.4.3-py3-none-any.whl", hash = "sha256:0ac1c9fe3c0afad2e0ebb25a934a59f4c7823b60792691f779fad2c5568830fc"},
{file = "httpx_sse-0.4.3.tar.gz", hash = "sha256:9b1ed0127459a66014aec3c56bebd93da3c1bc8bb6618c8082039a44889a755d"},
]
[[package]]
name = "idna"
version = "3.11"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "idna-3.11-py3-none-any.whl", hash = "sha256:771a87f49d9defaf64091e6e6fe9c18d4833f140bd19464795bc32d966ca37ea"},
{file = "idna-3.11.tar.gz", hash = "sha256:795dafcc9c04ed0c1fb032c2aa73654d8e8c5023a7df64a53f39190ada629902"},
]
[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
[[package]]
name = "jsonschema"
version = "4.26.0"
description = "An implementation of JSON Schema validation for Python"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "jsonschema-4.26.0-py3-none-any.whl", hash = "sha256:d489f15263b8d200f8387e64b4c3a75f06629559fb73deb8fdfb525f2dab50ce"},
{file = "jsonschema-4.26.0.tar.gz", hash = "sha256:0c26707e2efad8aa1bfc5b7ce170f3fccc2e4918ff85989ba9ffa9facb2be326"},
]
[package.dependencies]
attrs = ">=22.2.0"
jsonschema-specifications = ">=2023.03.6"
referencing = ">=0.28.4"
rpds-py = ">=0.25.0"
[package.extras]
format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "rfc3987-syntax (>=1.1.0)", "uri-template", "webcolors (>=24.6.0)"]
[[package]]
name = "jsonschema-specifications"
version = "2025.9.1"
description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "jsonschema_specifications-2025.9.1-py3-none-any.whl", hash = "sha256:98802fee3a11ee76ecaca44429fda8a41bff98b00a0f2838151b113f210cc6fe"},
{file = "jsonschema_specifications-2025.9.1.tar.gz", hash = "sha256:b540987f239e745613c7a9176f3edb72b832a4ac465cf02712288397832b5e8d"},
]
[package.dependencies]
referencing = ">=0.31.0"
[[package]]
name = "mcp"
version = "1.27.0"
description = "Model Context Protocol SDK"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "mcp-1.27.0-py3-none-any.whl", hash = "sha256:5ce1fa81614958e267b21fb2aa34e0aea8e2c6ede60d52aba45fd47246b4d741"},
{file = "mcp-1.27.0.tar.gz", hash = "sha256:d3dc35a7eec0d458c1da4976a48f982097ddaab87e278c5511d5a4a56e852b83"},
]
[package.dependencies]
anyio = ">=4.5"
httpx = ">=0.27.1"
httpx-sse = ">=0.4"
jsonschema = ">=4.20.0"
pydantic = ">=2.11.0,<3.0.0"
pydantic-settings = ">=2.5.2"
pyjwt = {version = ">=2.10.1", extras = ["crypto"]}
python-multipart = ">=0.0.9"
pywin32 = {version = ">=310", markers = "sys_platform == \"win32\""}
sse-starlette = ">=1.6.1"
starlette = ">=0.27"
typing-extensions = ">=4.9.0"
typing-inspection = ">=0.4.1"
uvicorn = {version = ">=0.31.1", markers = "sys_platform != \"emscripten\""}
[package.extras]
cli = ["python-dotenv (>=1.0.0)", "typer (>=0.16.0)"]
rich = ["rich (>=13.9.4)"]
ws = ["websockets (>=15.0.1)"]
[[package]]
name = "pycparser"
version = "3.0"
description = "C parser in Python"
optional = false
python-versions = ">=3.10"
groups = ["main"]
markers = "platform_python_implementation != \"PyPy\" and implementation_name != \"PyPy\""
files = [
{file = "pycparser-3.0-py3-none-any.whl", hash = "sha256:b727414169a36b7d524c1c3e31839a521725078d7b2ff038656844266160a992"},
{file = "pycparser-3.0.tar.gz", hash = "sha256:600f49d217304a5902ac3c37e1281c9fe94e4d0489de643a9504c5cdfdfc6b29"},
]
[[package]]
name = "pydantic"
version = "2.12.5"
description = "Data validation using Python type hints"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d"},
{file = "pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49"},
]
[package.dependencies]
annotated-types = ">=0.6.0"
pydantic-core = "2.41.5"
typing-extensions = ">=4.14.1"
typing-inspection = ">=0.4.2"
[package.extras]
email = ["email-validator (>=2.0.0)"]
timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""]
[[package]]
name = "pydantic-core"
version = "2.41.5"
description = "Core functionality for Pydantic validation and serialization"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "pydantic_core-2.41.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:77b63866ca88d804225eaa4af3e664c5faf3568cea95360d21f4725ab6e07146"},
{file = "pydantic_core-2.41.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dfa8a0c812ac681395907e71e1274819dec685fec28273a28905df579ef137e2"},
{file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5921a4d3ca3aee735d9fd163808f5e8dd6c6972101e4adbda9a4667908849b97"},
{file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e25c479382d26a2a41b7ebea1043564a937db462816ea07afa8a44c0866d52f9"},
{file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f547144f2966e1e16ae626d8ce72b4cfa0caedc7fa28052001c94fb2fcaa1c52"},
{file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6f52298fbd394f9ed112d56f3d11aabd0d5bd27beb3084cc3d8ad069483b8941"},
{file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:100baa204bb412b74fe285fb0f3a385256dad1d1879f0a5cb1499ed2e83d132a"},
{file = "pydantic_core-2.41.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05a2c8852530ad2812cb7914dc61a1125dc4e06252ee98e5638a12da6cc6fb6c"},
{file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:29452c56df2ed968d18d7e21f4ab0ac55e71dc59524872f6fc57dcf4a3249ed2"},
{file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:d5160812ea7a8a2ffbe233d8da666880cad0cbaf5d4de74ae15c313213d62556"},
{file = "pydantic_core-2.41.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:df3959765b553b9440adfd3c795617c352154e497a4eaf3752555cfb5da8fc49"},
{file = "pydantic_core-2.41.5-cp310-cp310-win32.whl", hash = "sha256:1f8d33a7f4d5a7889e60dc39856d76d09333d8a6ed0f5f1190635cbec70ec4ba"},
{file = "pydantic_core-2.41.5-cp310-cp310-win_amd64.whl", hash = "sha256:62de39db01b8d593e45871af2af9e497295db8d73b085f6bfd0b18c83c70a8f9"},
{file = "pydantic_core-2.41.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a3a52f6156e73e7ccb0f8cced536adccb7042be67cb45f9562e12b319c119da6"},
{file = "pydantic_core-2.41.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:7f3bf998340c6d4b0c9a2f02d6a400e51f123b59565d74dc60d252ce888c260b"},
{file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:378bec5c66998815d224c9ca994f1e14c0c21cb95d2f52b6021cc0b2a58f2a5a"},
{file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e7b576130c69225432866fe2f4a469a85a54ade141d96fd396dffcf607b558f8"},
{file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:6cb58b9c66f7e4179a2d5e0f849c48eff5c1fca560994d6eb6543abf955a149e"},
{file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:88942d3a3dff3afc8288c21e565e476fc278902ae4d6d134f1eeda118cc830b1"},
{file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f31d95a179f8d64d90f6831d71fa93290893a33148d890ba15de25642c5d075b"},
{file = "pydantic_core-2.41.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c1df3d34aced70add6f867a8cf413e299177e0c22660cc767218373d0779487b"},
{file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:4009935984bd36bd2c774e13f9a09563ce8de4abaa7226f5108262fa3e637284"},
{file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:34a64bc3441dc1213096a20fe27e8e128bd3ff89921706e83c0b1ac971276594"},
{file = "pydantic_core-2.41.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:c9e19dd6e28fdcaa5a1de679aec4141f691023916427ef9bae8584f9c2fb3b0e"},
{file = "pydantic_core-2.41.5-cp311-cp311-win32.whl", hash = "sha256:2c010c6ded393148374c0f6f0bf89d206bf3217f201faa0635dcd56bd1520f6b"},
{file = "pydantic_core-2.41.5-cp311-cp311-win_amd64.whl", hash = "sha256:76ee27c6e9c7f16f47db7a94157112a2f3a00e958bc626e2f4ee8bec5c328fbe"},
{file = "pydantic_core-2.41.5-cp311-cp311-win_arm64.whl", hash = "sha256:4bc36bbc0b7584de96561184ad7f012478987882ebf9f9c389b23f432ea3d90f"},
{file = "pydantic_core-2.41.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:f41a7489d32336dbf2199c8c0a215390a751c5b014c2c1c5366e817202e9cdf7"},
{file = "pydantic_core-2.41.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:070259a8818988b9a84a449a2a7337c7f430a22acc0859c6b110aa7212a6d9c0"},
{file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e96cea19e34778f8d59fe40775a7a574d95816eb150850a85a7a4c8f4b94ac69"},
{file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ed2e99c456e3fadd05c991f8f437ef902e00eedf34320ba2b0842bd1c3ca3a75"},
{file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:65840751b72fbfd82c3c640cff9284545342a4f1eb1586ad0636955b261b0b05"},
{file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e536c98a7626a98feb2d3eaf75944ef6f3dbee447e1f841eae16f2f0a72d8ddc"},
{file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eceb81a8d74f9267ef4081e246ffd6d129da5d87e37a77c9bde550cb04870c1c"},
{file = "pydantic_core-2.41.5-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d38548150c39b74aeeb0ce8ee1d8e82696f4a4e16ddc6de7b1d8823f7de4b9b5"},
{file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:c23e27686783f60290e36827f9c626e63154b82b116d7fe9adba1fda36da706c"},
{file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:482c982f814460eabe1d3bb0adfdc583387bd4691ef00b90575ca0d2b6fe2294"},
{file = "pydantic_core-2.41.5-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:bfea2a5f0b4d8d43adf9d7b8bf019fb46fdd10a2e5cde477fbcb9d1fa08c68e1"},
{file = "pydantic_core-2.41.5-cp312-cp312-win32.whl", hash = "sha256:b74557b16e390ec12dca509bce9264c3bbd128f8a2c376eaa68003d7f327276d"},
{file = "pydantic_core-2.41.5-cp312-cp312-win_amd64.whl", hash = "sha256:1962293292865bca8e54702b08a4f26da73adc83dd1fcf26fbc875b35d81c815"},
{file = "pydantic_core-2.41.5-cp312-cp312-win_arm64.whl", hash = "sha256:1746d4a3d9a794cacae06a5eaaccb4b8643a131d45fbc9af23e353dc0a5ba5c3"},
{file = "pydantic_core-2.41.5-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:941103c9be18ac8daf7b7adca8228f8ed6bb7a1849020f643b3a14d15b1924d9"},
{file = "pydantic_core-2.41.5-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:112e305c3314f40c93998e567879e887a3160bb8689ef3d2c04b6cc62c33ac34"},
{file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0cbaad15cb0c90aa221d43c00e77bb33c93e8d36e0bf74760cd00e732d10a6a0"},
{file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:03ca43e12fab6023fc79d28ca6b39b05f794ad08ec2feccc59a339b02f2b3d33"},
{file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:dc799088c08fa04e43144b164feb0c13f9a0bc40503f8df3e9fde58a3c0c101e"},
{file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97aeba56665b4c3235a0e52b2c2f5ae9cd071b8a8310ad27bddb3f7fb30e9aa2"},
{file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:406bf18d345822d6c21366031003612b9c77b3e29ffdb0f612367352aab7d586"},
{file = "pydantic_core-2.41.5-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b93590ae81f7010dbe380cdeab6f515902ebcbefe0b9327cc4804d74e93ae69d"},
{file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:01a3d0ab748ee531f4ea6c3e48ad9dac84ddba4b0d82291f87248f2f9de8d740"},
{file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:6561e94ba9dacc9c61bce40e2d6bdc3bfaa0259d3ff36ace3b1e6901936d2e3e"},
{file = "pydantic_core-2.41.5-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:915c3d10f81bec3a74fbd4faebe8391013ba61e5a1a8d48c4455b923bdda7858"},
{file = "pydantic_core-2.41.5-cp313-cp313-win32.whl", hash = "sha256:650ae77860b45cfa6e2cdafc42618ceafab3a2d9a3811fcfbd3bbf8ac3c40d36"},
{file = "pydantic_core-2.41.5-cp313-cp313-win_amd64.whl", hash = "sha256:79ec52ec461e99e13791ec6508c722742ad745571f234ea6255bed38c6480f11"},
{file = "pydantic_core-2.41.5-cp313-cp313-win_arm64.whl", hash = "sha256:3f84d5c1b4ab906093bdc1ff10484838aca54ef08de4afa9de0f5f14d69639cd"},
{file = "pydantic_core-2.41.5-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:3f37a19d7ebcdd20b96485056ba9e8b304e27d9904d233d7b1015db320e51f0a"},
{file = "pydantic_core-2.41.5-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:1d1d9764366c73f996edd17abb6d9d7649a7eb690006ab6adbda117717099b14"},
{file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25e1c2af0fce638d5f1988b686f3b3ea8cd7de5f244ca147c777769e798a9cd1"},
{file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:506d766a8727beef16b7adaeb8ee6217c64fc813646b424d0804d67c16eddb66"},
{file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4819fa52133c9aa3c387b3328f25c1facc356491e6135b459f1de698ff64d869"},
{file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2b761d210c9ea91feda40d25b4efe82a1707da2ef62901466a42492c028553a2"},
{file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22f0fb8c1c583a3b6f24df2470833b40207e907b90c928cc8d3594b76f874375"},
{file = "pydantic_core-2.41.5-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2782c870e99878c634505236d81e5443092fba820f0373997ff75f90f68cd553"},
{file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_aarch64.whl", hash = "sha256:0177272f88ab8312479336e1d777f6b124537d47f2123f89cb37e0accea97f90"},
{file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_armv7l.whl", hash = "sha256:63510af5e38f8955b8ee5687740d6ebf7c2a0886d15a6d65c32814613681bc07"},
{file = "pydantic_core-2.41.5-cp314-cp314-musllinux_1_1_x86_64.whl", hash = "sha256:e56ba91f47764cc14f1daacd723e3e82d1a89d783f0f5afe9c364b8bb491ccdb"},
{file = "pydantic_core-2.41.5-cp314-cp314-win32.whl", hash = "sha256:aec5cf2fd867b4ff45b9959f8b20ea3993fc93e63c7363fe6851424c8a7e7c23"},
{file = "pydantic_core-2.41.5-cp314-cp314-win_amd64.whl", hash = "sha256:8e7c86f27c585ef37c35e56a96363ab8de4e549a95512445b85c96d3e2f7c1bf"},
{file = "pydantic_core-2.41.5-cp314-cp314-win_arm64.whl", hash = "sha256:e672ba74fbc2dc8eea59fb6d4aed6845e6905fc2a8afe93175d94a83ba2a01a0"},
{file = "pydantic_core-2.41.5-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:8566def80554c3faa0e65ac30ab0932b9e3a5cd7f8323764303d468e5c37595a"},
{file = "pydantic_core-2.41.5-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b80aa5095cd3109962a298ce14110ae16b8c1aece8b72f9dafe81cf597ad80b3"},
{file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3006c3dd9ba34b0c094c544c6006cc79e87d8612999f1a5d43b769b89181f23c"},
{file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:72f6c8b11857a856bcfa48c86f5368439f74453563f951e473514579d44aa612"},
{file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5cb1b2f9742240e4bb26b652a5aeb840aa4b417c7748b6f8387927bc6e45e40d"},
{file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3d54f38609ff308209bd43acea66061494157703364ae40c951f83ba99a1a9"},
{file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2ff4321e56e879ee8d2a879501c8e469414d948f4aba74a2d4593184eb326660"},
{file = "pydantic_core-2.41.5-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d0d2568a8c11bf8225044aa94409e21da0cb09dcdafe9ecd10250b2baad531a9"},
{file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_aarch64.whl", hash = "sha256:a39455728aabd58ceabb03c90e12f71fd30fa69615760a075b9fec596456ccc3"},
{file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_armv7l.whl", hash = "sha256:239edca560d05757817c13dc17c50766136d21f7cd0fac50295499ae24f90fdf"},
{file = "pydantic_core-2.41.5-cp314-cp314t-musllinux_1_1_x86_64.whl", hash = "sha256:2a5e06546e19f24c6a96a129142a75cee553cc018ffee48a460059b1185f4470"},
{file = "pydantic_core-2.41.5-cp314-cp314t-win32.whl", hash = "sha256:b4ececa40ac28afa90871c2cc2b9ffd2ff0bf749380fbdf57d165fd23da353aa"},
{file = "pydantic_core-2.41.5-cp314-cp314t-win_amd64.whl", hash = "sha256:80aa89cad80b32a912a65332f64a4450ed00966111b6615ca6816153d3585a8c"},
{file = "pydantic_core-2.41.5-cp314-cp314t-win_arm64.whl", hash = "sha256:35b44f37a3199f771c3eaa53051bc8a70cd7b54f333531c59e29fd4db5d15008"},
{file = "pydantic_core-2.41.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:8bfeaf8735be79f225f3fefab7f941c712aaca36f1128c9d7e2352ee1aa87bdf"},
{file = "pydantic_core-2.41.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:346285d28e4c8017da95144c7f3acd42740d637ff41946af5ce6e5e420502dd5"},
{file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a75dafbf87d6276ddc5b2bf6fae5254e3d0876b626eb24969a574fff9149ee5d"},
{file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7b93a4d08587e2b7e7882de461e82b6ed76d9026ce91ca7915e740ecc7855f60"},
{file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e8465ab91a4bd96d36dde3263f06caa6a8a6019e4113f24dc753d79a8b3a3f82"},
{file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:299e0a22e7ae2b85c1a57f104538b2656e8ab1873511fd718a1c1c6f149b77b5"},
{file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:707625ef0983fcfb461acfaf14de2067c5942c6bb0f3b4c99158bed6fedd3cf3"},
{file = "pydantic_core-2.41.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:f41eb9797986d6ebac5e8edff36d5cef9de40def462311b3eb3eeded1431e425"},
{file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:0384e2e1021894b1ff5a786dbf94771e2986ebe2869533874d7e43bc79c6f504"},
{file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:f0cd744688278965817fd0839c4a4116add48d23890d468bc436f78beb28abf5"},
{file = "pydantic_core-2.41.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:753e230374206729bf0a807954bcc6c150d3743928a73faffee51ac6557a03c3"},
{file = "pydantic_core-2.41.5-cp39-cp39-win32.whl", hash = "sha256:873e0d5b4fb9b89ef7c2d2a963ea7d02879d9da0da8d9d4933dee8ee86a8b460"},
{file = "pydantic_core-2.41.5-cp39-cp39-win_amd64.whl", hash = "sha256:e4f4a984405e91527a0d62649ee21138f8e3d0ef103be488c1dc11a80d7f184b"},
{file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_10_12_x86_64.whl", hash = "sha256:b96d5f26b05d03cc60f11a7761a5ded1741da411e7fe0909e27a5e6a0cb7b034"},
{file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-macosx_11_0_arm64.whl", hash = "sha256:634e8609e89ceecea15e2d61bc9ac3718caaaa71963717bf3c8f38bfde64242c"},
{file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:93e8740d7503eb008aa2df04d3b9735f845d43ae845e6dcd2be0b55a2da43cd2"},
{file = "pydantic_core-2.41.5-graalpy311-graalpy242_311_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f15489ba13d61f670dcc96772e733aad1a6f9c429cc27574c6cdaed82d0146ad"},
{file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_10_12_x86_64.whl", hash = "sha256:7da7087d756b19037bc2c06edc6c170eeef3c3bafcb8f532ff17d64dc427adfd"},
{file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-macosx_11_0_arm64.whl", hash = "sha256:aabf5777b5c8ca26f7824cb4a120a740c9588ed58df9b2d196ce92fba42ff8dc"},
{file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c007fe8a43d43b3969e8469004e9845944f1a80e6acd47c150856bb87f230c56"},
{file = "pydantic_core-2.41.5-graalpy312-graalpy250_312_native-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:76d0819de158cd855d1cbb8fcafdf6f5cf1eb8e470abe056d5d161106e38062b"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b5819cd790dbf0c5eb9f82c73c16b39a65dd6dd4d1439dcdea7816ec9adddab8"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:5a4e67afbc95fa5c34cf27d9089bca7fcab4e51e57278d710320a70b956d1b9a"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ece5c59f0ce7d001e017643d8d24da587ea1f74f6993467d85ae8a5ef9d4f42b"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16f80f7abe3351f8ea6858914ddc8c77e02578544a0ebc15b4c2e1a0e813b0b2"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:33cb885e759a705b426baada1fe68cbb0a2e68e34c5d0d0289a364cf01709093"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:c8d8b4eb992936023be7dee581270af5c6e0697a8559895f527f5b7105ecd36a"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:242a206cd0318f95cd21bdacff3fcc3aab23e79bba5cac3db5a841c9ef9c6963"},
{file = "pydantic_core-2.41.5-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:d3a978c4f57a597908b7e697229d996d77a6d3c94901e9edee593adada95ce1a"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b2379fa7ed44ddecb5bfe4e48577d752db9fc10be00a6b7446e9663ba143de26"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:266fb4cbf5e3cbd0b53669a6d1b039c45e3ce651fd5442eff4d07c2cc8d66808"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58133647260ea01e4d0500089a8c4f07bd7aa6ce109682b1426394988d8aaacc"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:287dad91cfb551c363dc62899a80e9e14da1f0e2b6ebde82c806612ca2a13ef1"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:03b77d184b9eb40240ae9fd676ca364ce1085f203e1b1256f8ab9984dca80a84"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:a668ce24de96165bb239160b3d854943128f4334822900534f2fe947930e5770"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f14f8f046c14563f8eb3f45f499cc658ab8d10072961e07225e507adb700e93f"},
{file = "pydantic_core-2.41.5-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:56121965f7a4dc965bff783d70b907ddf3d57f6eba29b6d2e5dabfaf07799c51"},
{file = "pydantic_core-2.41.5.tar.gz", hash = "sha256:08daa51ea16ad373ffd5e7606252cc32f07bc72b28284b6bc9c6df804816476e"},
]
[package.dependencies]
typing-extensions = ">=4.14.1"
[[package]]
name = "pydantic-settings"
version = "2.13.1"
description = "Settings management using Pydantic"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "pydantic_settings-2.13.1-py3-none-any.whl", hash = "sha256:d56fd801823dbeae7f0975e1f8c8e25c258eb75d278ea7abb5d9cebb01b56237"},
{file = "pydantic_settings-2.13.1.tar.gz", hash = "sha256:b4c11847b15237fb0171e1462bf540e294affb9b86db4d9aa5c01730bdbe4025"},
]
[package.dependencies]
pydantic = ">=2.7.0"
python-dotenv = ">=0.21.0"
typing-inspection = ">=0.4.0"
[package.extras]
aws-secrets-manager = ["boto3 (>=1.35.0)", "boto3-stubs[secretsmanager]"]
azure-key-vault = ["azure-identity (>=1.16.0)", "azure-keyvault-secrets (>=4.8.0)"]
gcp-secret-manager = ["google-cloud-secret-manager (>=2.23.1)"]
toml = ["tomli (>=2.0.1)"]
yaml = ["pyyaml (>=6.0.1)"]
[[package]]
name = "pyjwt"
version = "2.12.1"
description = "JSON Web Token implementation in Python"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "pyjwt-2.12.1-py3-none-any.whl", hash = "sha256:28ca37c070cad8ba8cd9790cd940535d40274d22f80ab87f3ac6a713e6e8454c"},
{file = "pyjwt-2.12.1.tar.gz", hash = "sha256:c74a7a2adf861c04d002db713dd85f84beb242228e671280bf709d765b03672b"},
]
[package.dependencies]
cryptography = {version = ">=3.4.0", optional = true, markers = "extra == \"crypto\""}
typing_extensions = {version = ">=4.0", markers = "python_version < \"3.11\""}
[package.extras]
crypto = ["cryptography (>=3.4.0)"]
dev = ["coverage[toml] (==7.10.7)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=8.4.2,<9.0.0)", "sphinx", "sphinx-rtd-theme", "zope.interface"]
docs = ["sphinx", "sphinx-rtd-theme", "zope.interface"]
tests = ["coverage[toml] (==7.10.7)", "pytest (>=8.4.2,<9.0.0)"]
[[package]]
name = "python-dotenv"
version = "1.2.2"
description = "Read key-value pairs from a .env file and set them as environment variables"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "python_dotenv-1.2.2-py3-none-any.whl", hash = "sha256:1d8214789a24de455a8b8bd8ae6fe3c6b69a5e3d64aa8a8e5d68e694bbcb285a"},
{file = "python_dotenv-1.2.2.tar.gz", hash = "sha256:2c371a91fbd7ba082c2c1dc1f8bf89ca22564a087c2c287cd9b662adde799cf3"},
]
[package.extras]
cli = ["click (>=5.0)"]
[[package]]
name = "python-multipart"
version = "0.0.24"
description = "A streaming multipart parser for Python"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "python_multipart-0.0.24-py3-none-any.whl", hash = "sha256:9b110a98db707df01a53c194f0af075e736a770dc5058089650d70b4a182f950"},
{file = "python_multipart-0.0.24.tar.gz", hash = "sha256:9574c97e1c026e00bc30340ef7c7d76739512ab4dfd428fec8c330fa6a5cc3c8"},
]
[[package]]
name = "pywin32"
version = "311"
description = "Python for Window Extensions"
optional = false
python-versions = "*"
groups = ["main"]
markers = "sys_platform == \"win32\""
files = [
{file = "pywin32-311-cp310-cp310-win32.whl", hash = "sha256:d03ff496d2a0cd4a5893504789d4a15399133fe82517455e78bad62efbb7f0a3"},
{file = "pywin32-311-cp310-cp310-win_amd64.whl", hash = "sha256:797c2772017851984b97180b0bebe4b620bb86328e8a884bb626156295a63b3b"},
{file = "pywin32-311-cp310-cp310-win_arm64.whl", hash = "sha256:0502d1facf1fed4839a9a51ccbcc63d952cf318f78ffc00a7e78528ac27d7a2b"},
{file = "pywin32-311-cp311-cp311-win32.whl", hash = "sha256:184eb5e436dea364dcd3d2316d577d625c0351bf237c4e9a5fabbcfa5a58b151"},
{file = "pywin32-311-cp311-cp311-win_amd64.whl", hash = "sha256:3ce80b34b22b17ccbd937a6e78e7225d80c52f5ab9940fe0506a1a16f3dab503"},
{file = "pywin32-311-cp311-cp311-win_arm64.whl", hash = "sha256:a733f1388e1a842abb67ffa8e7aad0e70ac519e09b0f6a784e65a136ec7cefd2"},
{file = "pywin32-311-cp312-cp312-win32.whl", hash = "sha256:750ec6e621af2b948540032557b10a2d43b0cee2ae9758c54154d711cc852d31"},
{file = "pywin32-311-cp312-cp312-win_amd64.whl", hash = "sha256:b8c095edad5c211ff31c05223658e71bf7116daa0ecf3ad85f3201ea3190d067"},
{file = "pywin32-311-cp312-cp312-win_arm64.whl", hash = "sha256:e286f46a9a39c4a18b319c28f59b61de793654af2f395c102b4f819e584b5852"},
{file = "pywin32-311-cp313-cp313-win32.whl", hash = "sha256:f95ba5a847cba10dd8c4d8fefa9f2a6cf283b8b88ed6178fa8a6c1ab16054d0d"},
{file = "pywin32-311-cp313-cp313-win_amd64.whl", hash = "sha256:718a38f7e5b058e76aee1c56ddd06908116d35147e133427e59a3983f703a20d"},
{file = "pywin32-311-cp313-cp313-win_arm64.whl", hash = "sha256:7b4075d959648406202d92a2310cb990fea19b535c7f4a78d3f5e10b926eeb8a"},
{file = "pywin32-311-cp314-cp314-win32.whl", hash = "sha256:b7a2c10b93f8986666d0c803ee19b5990885872a7de910fc460f9b0c2fbf92ee"},
{file = "pywin32-311-cp314-cp314-win_amd64.whl", hash = "sha256:3aca44c046bd2ed8c90de9cb8427f581c479e594e99b5c0bb19b29c10fd6cb87"},
{file = "pywin32-311-cp314-cp314-win_arm64.whl", hash = "sha256:a508e2d9025764a8270f93111a970e1d0fbfc33f4153b388bb649b7eec4f9b42"},
{file = "pywin32-311-cp38-cp38-win32.whl", hash = "sha256:6c6f2969607b5023b0d9ce2541f8d2cbb01c4f46bc87456017cf63b73f1e2d8c"},
{file = "pywin32-311-cp38-cp38-win_amd64.whl", hash = "sha256:c8015b09fb9a5e188f83b7b04de91ddca4658cee2ae6f3bc483f0b21a77ef6cd"},
{file = "pywin32-311-cp39-cp39-win32.whl", hash = "sha256:aba8f82d551a942cb20d4a83413ccbac30790b50efb89a75e4f586ac0bb8056b"},
{file = "pywin32-311-cp39-cp39-win_amd64.whl", hash = "sha256:e0c4cfb0621281fe40387df582097fd796e80430597cb9944f0ae70447bacd91"},
{file = "pywin32-311-cp39-cp39-win_arm64.whl", hash = "sha256:62ea666235135fee79bb154e695f3ff67370afefd71bd7fea7512fc70ef31e3d"},
]
[[package]]
name = "referencing"
version = "0.37.0"
description = "JSON Referencing + Python"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "referencing-0.37.0-py3-none-any.whl", hash = "sha256:381329a9f99628c9069361716891d34ad94af76e461dcb0335825aecc7692231"},
{file = "referencing-0.37.0.tar.gz", hash = "sha256:44aefc3142c5b842538163acb373e24cce6632bd54bdb01b21ad5863489f50d8"},
]
[package.dependencies]
attrs = ">=22.2.0"
rpds-py = ">=0.7.0"
typing-extensions = {version = ">=4.4.0", markers = "python_version < \"3.13\""}
[[package]]
name = "rpds-py"
version = "0.30.0"
description = "Python bindings to Rust's persistent data structures (rpds)"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "rpds_py-0.30.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:679ae98e00c0e8d68a7fda324e16b90fd5260945b45d3b824c892cec9eea3288"},
{file = "rpds_py-0.30.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4cc2206b76b4f576934f0ed374b10d7ca5f457858b157ca52064bdfc26b9fc00"},
{file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:389a2d49eded1896c3d48b0136ead37c48e221b391c052fba3f4055c367f60a6"},
{file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:32c8528634e1bf7121f3de08fa85b138f4e0dc47657866630611b03967f041d7"},
{file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f207f69853edd6f6700b86efb84999651baf3789e78a466431df1331608e5324"},
{file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:67b02ec25ba7a9e8fa74c63b6ca44cf5707f2fbfadae3ee8e7494297d56aa9df"},
{file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0e95f6819a19965ff420f65578bacb0b00f251fefe2c8b23347c37174271f3"},
{file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:a452763cc5198f2f98898eb98f7569649fe5da666c2dc6b5ddb10fde5a574221"},
{file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e0b65193a413ccc930671c55153a03ee57cecb49e6227204b04fae512eb657a7"},
{file = "rpds_py-0.30.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:858738e9c32147f78b3ac24dc0edb6610000e56dc0f700fd5f651d0a0f0eb9ff"},
{file = "rpds_py-0.30.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:da279aa314f00acbb803da1e76fa18666778e8a8f83484fba94526da5de2cba7"},
{file = "rpds_py-0.30.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7c64d38fb49b6cdeda16ab49e35fe0da2e1e9b34bc38bd78386530f218b37139"},
{file = "rpds_py-0.30.0-cp310-cp310-win32.whl", hash = "sha256:6de2a32a1665b93233cde140ff8b3467bdb9e2af2b91079f0333a0974d12d464"},
{file = "rpds_py-0.30.0-cp310-cp310-win_amd64.whl", hash = "sha256:1726859cd0de969f88dc8673bdd954185b9104e05806be64bcd87badbe313169"},
{file = "rpds_py-0.30.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a2bffea6a4ca9f01b3f8e548302470306689684e61602aa3d141e34da06cf425"},
{file = "rpds_py-0.30.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dc4f992dfe1e2bc3ebc7444f6c7051b4bc13cd8e33e43511e8ffd13bf407010d"},
{file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:422c3cb9856d80b09d30d2eb255d0754b23e090034e1deb4083f8004bd0761e4"},
{file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07ae8a593e1c3c6b82ca3292efbe73c30b61332fd612e05abee07c79359f292f"},
{file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12f90dd7557b6bd57f40abe7747e81e0c0b119bef015ea7726e69fe550e394a4"},
{file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99b47d6ad9a6da00bec6aabe5a6279ecd3c06a329d4aa4771034a21e335c3a97"},
{file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33f559f3104504506a44bb666b93a33f5d33133765b0c216a5bf2f1e1503af89"},
{file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:946fe926af6e44f3697abbc305ea168c2c31d3e3ef1058cf68f379bf0335a78d"},
{file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:495aeca4b93d465efde585977365187149e75383ad2684f81519f504f5c13038"},
{file = "rpds_py-0.30.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9a0ca5da0386dee0655b4ccdf46119df60e0f10da268d04fe7cc87886872ba7"},
{file = "rpds_py-0.30.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8d6d1cc13664ec13c1b84241204ff3b12f9bb82464b8ad6e7a5d3486975c2eed"},
{file = "rpds_py-0.30.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3896fa1be39912cf0757753826bc8bdc8ca331a28a7c4ae46b7a21280b06bb85"},
{file = "rpds_py-0.30.0-cp311-cp311-win32.whl", hash = "sha256:55f66022632205940f1827effeff17c4fa7ae1953d2b74a8581baaefb7d16f8c"},
{file = "rpds_py-0.30.0-cp311-cp311-win_amd64.whl", hash = "sha256:a51033ff701fca756439d641c0ad09a41d9242fa69121c7d8769604a0a629825"},
{file = "rpds_py-0.30.0-cp311-cp311-win_arm64.whl", hash = "sha256:47b0ef6231c58f506ef0b74d44e330405caa8428e770fec25329ed2cb971a229"},
{file = "rpds_py-0.30.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a161f20d9a43006833cd7068375a94d035714d73a172b681d8881820600abfad"},
{file = "rpds_py-0.30.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6abc8880d9d036ecaafe709079969f56e876fcf107f7a8e9920ba6d5a3878d05"},
{file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca28829ae5f5d569bb62a79512c842a03a12576375d5ece7d2cadf8abe96ec28"},
{file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1010ed9524c73b94d15919ca4d41d8780980e1765babf85f9a2f90d247153dd"},
{file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8d1736cfb49381ba528cd5baa46f82fdc65c06e843dab24dd70b63d09121b3f"},
{file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d948b135c4693daff7bc2dcfc4ec57237a29bd37e60c2fabf5aff2bbacf3e2f1"},
{file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47f236970bccb2233267d89173d3ad2703cd36a0e2a6e92d0560d333871a3d23"},
{file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:2e6ecb5a5bcacf59c3f912155044479af1d0b6681280048b338b28e364aca1f6"},
{file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a8fa71a2e078c527c3e9dc9fc5a98c9db40bcc8a92b4e8858e36d329f8684b51"},
{file = "rpds_py-0.30.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73c67f2db7bc334e518d097c6d1e6fed021bbc9b7d678d6cc433478365d1d5f5"},
{file = "rpds_py-0.30.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5ba103fb455be00f3b1c2076c9d4264bfcb037c976167a6047ed82f23153f02e"},
{file = "rpds_py-0.30.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee9c752c0364588353e627da8a7e808a66873672bcb5f52890c33fd965b394"},
{file = "rpds_py-0.30.0-cp312-cp312-win32.whl", hash = "sha256:1ab5b83dbcf55acc8b08fc62b796ef672c457b17dbd7820a11d6c52c06839bdf"},
{file = "rpds_py-0.30.0-cp312-cp312-win_amd64.whl", hash = "sha256:a090322ca841abd453d43456ac34db46e8b05fd9b3b4ac0c78bcde8b089f959b"},
{file = "rpds_py-0.30.0-cp312-cp312-win_arm64.whl", hash = "sha256:669b1805bd639dd2989b281be2cfd951c6121b65e729d9b843e9639ef1fd555e"},
{file = "rpds_py-0.30.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f83424d738204d9770830d35290ff3273fbb02b41f919870479fab14b9d303b2"},
{file = "rpds_py-0.30.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7536cd91353c5273434b4e003cbda89034d67e7710eab8761fd918ec6c69cf8"},
{file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2771c6c15973347f50fece41fc447c054b7ac2ae0502388ce3b6738cd366e3d4"},
{file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0a59119fc6e3f460315fe9d08149f8102aa322299deaa5cab5b40092345c2136"},
{file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76fec018282b4ead0364022e3c54b60bf368b9d926877957a8624b58419169b7"},
{file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:692bef75a5525db97318e8cd061542b5a79812d711ea03dbc1f6f8dbb0c5f0d2"},
{file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9027da1ce107104c50c81383cae773ef5c24d296dd11c99e2629dbd7967a20c6"},
{file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:9cf69cdda1f5968a30a359aba2f7f9aa648a9ce4b580d6826437f2b291cfc86e"},
{file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4796a717bf12b9da9d3ad002519a86063dcac8988b030e405704ef7d74d2d9d"},
{file = "rpds_py-0.30.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d4c2aa7c50ad4728a094ebd5eb46c452e9cb7edbfdb18f9e1221f597a73e1e7"},
{file = "rpds_py-0.30.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ba81a9203d07805435eb06f536d95a266c21e5b2dfbf6517748ca40c98d19e31"},
{file = "rpds_py-0.30.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:945dccface01af02675628334f7cf49c2af4c1c904748efc5cf7bbdf0b579f95"},
{file = "rpds_py-0.30.0-cp313-cp313-win32.whl", hash = "sha256:b40fb160a2db369a194cb27943582b38f79fc4887291417685f3ad693c5a1d5d"},
{file = "rpds_py-0.30.0-cp313-cp313-win_amd64.whl", hash = "sha256:806f36b1b605e2d6a72716f321f20036b9489d29c51c91f4dd29a3e3afb73b15"},
{file = "rpds_py-0.30.0-cp313-cp313-win_arm64.whl", hash = "sha256:d96c2086587c7c30d44f31f42eae4eac89b60dabbac18c7669be3700f13c3ce1"},
{file = "rpds_py-0.30.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:eb0b93f2e5c2189ee831ee43f156ed34e2a89a78a66b98cadad955972548be5a"},
{file = "rpds_py-0.30.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:922e10f31f303c7c920da8981051ff6d8c1a56207dbdf330d9047f6d30b70e5e"},
{file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdc62c8286ba9bf7f47befdcea13ea0e26bf294bda99758fd90535cbaf408000"},
{file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47f9a91efc418b54fb8190a6b4aa7813a23fb79c51f4bb84e418f5476c38b8db"},
{file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3587eb9b17f3789ad50824084fa6f81921bbf9a795826570bda82cb3ed91f2"},
{file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39c02563fc592411c2c61d26b6c5fe1e51eaa44a75aa2c8735ca88b0d9599daa"},
{file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51a1234d8febafdfd33a42d97da7a43f5dcb120c1060e352a3fbc0c6d36e2083"},
{file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:eb2c4071ab598733724c08221091e8d80e89064cd472819285a9ab0f24bcedb9"},
{file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6bdfdb946967d816e6adf9a3d8201bfad269c67efe6cefd7093ef959683c8de0"},
{file = "rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c77afbd5f5250bf27bf516c7c4a016813eb2d3e116139aed0096940c5982da94"},
{file = "rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:61046904275472a76c8c90c9ccee9013d70a6d0f73eecefd38c1ae7c39045a08"},
{file = "rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c5f36a861bc4b7da6516dbdf302c55313afa09b81931e8280361a4f6c9a2d27"},
{file = "rpds_py-0.30.0-cp313-cp313t-win32.whl", hash = "sha256:3d4a69de7a3e50ffc214ae16d79d8fbb0922972da0356dcf4d0fdca2878559c6"},
{file = "rpds_py-0.30.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f14fc5df50a716f7ece6a80b6c78bb35ea2ca47c499e422aa4463455dd96d56d"},
{file = "rpds_py-0.30.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:68f19c879420aa08f61203801423f6cd5ac5f0ac4ac82a2368a9fcd6a9a075e0"},
{file = "rpds_py-0.30.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ec7c4490c672c1a0389d319b3a9cfcd098dcdc4783991553c332a15acf7249be"},
{file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f251c812357a3fed308d684a5079ddfb9d933860fc6de89f2b7ab00da481e65f"},
{file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac98b175585ecf4c0348fd7b29c3864bda53b805c773cbf7bfdaffc8070c976f"},
{file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3e62880792319dbeb7eb866547f2e35973289e7d5696c6e295476448f5b63c87"},
{file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e7fc54e0900ab35d041b0601431b0a0eb495f0851a0639b6ef90f7741b39a18"},
{file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47e77dc9822d3ad616c3d5759ea5631a75e5809d5a28707744ef79d7a1bcfcad"},
{file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:b4dc1a6ff022ff85ecafef7979a2c6eb423430e05f1165d6688234e62ba99a07"},
{file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4559c972db3a360808309e06a74628b95eaccbf961c335c8fe0d590cf587456f"},
{file = "rpds_py-0.30.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:0ed177ed9bded28f8deb6ab40c183cd1192aa0de40c12f38be4d59cd33cb5c65"},
{file = "rpds_py-0.30.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ad1fa8db769b76ea911cb4e10f049d80bf518c104f15b3edb2371cc65375c46f"},
{file = "rpds_py-0.30.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:46e83c697b1f1c72b50e5ee5adb4353eef7406fb3f2043d64c33f20ad1c2fc53"},
{file = "rpds_py-0.30.0-cp314-cp314-win32.whl", hash = "sha256:ee454b2a007d57363c2dfd5b6ca4a5d7e2c518938f8ed3b706e37e5d470801ed"},
{file = "rpds_py-0.30.0-cp314-cp314-win_amd64.whl", hash = "sha256:95f0802447ac2d10bcc69f6dc28fe95fdf17940367b21d34e34c737870758950"},
{file = "rpds_py-0.30.0-cp314-cp314-win_arm64.whl", hash = "sha256:613aa4771c99f03346e54c3f038e4cc574ac09a3ddfb0e8878487335e96dead6"},
{file = "rpds_py-0.30.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7e6ecfcb62edfd632e56983964e6884851786443739dbfe3582947e87274f7cb"},
{file = "rpds_py-0.30.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a1d0bc22a7cdc173fedebb73ef81e07faef93692b8c1ad3733b67e31e1b6e1b8"},
{file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d08f00679177226c4cb8c5265012eea897c8ca3b93f429e546600c971bcbae7"},
{file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5965af57d5848192c13534f90f9dd16464f3c37aaf166cc1da1cae1fd5a34898"},
{file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a4e86e34e9ab6b667c27f3211ca48f73dba7cd3d90f8d5b11be56e5dbc3fb4e"},
{file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d3e6b26f2c785d65cc25ef1e5267ccbe1b069c5c21b8cc724efee290554419"},
{file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:626a7433c34566535b6e56a1b39a7b17ba961e97ce3b80ec62e6f1312c025551"},
{file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:acd7eb3f4471577b9b5a41baf02a978e8bdeb08b4b355273994f8b87032000a8"},
{file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fe5fa731a1fa8a0a56b0977413f8cacac1768dad38d16b3a296712709476fbd5"},
{file = "rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:74a3243a411126362712ee1524dfc90c650a503502f135d54d1b352bd01f2404"},
{file = "rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3e8eeb0544f2eb0d2581774be4c3410356eba189529a6b3e36bbbf9696175856"},
{file = "rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:dbd936cde57abfee19ab3213cf9c26be06d60750e60a8e4dd85d1ab12c8b1f40"},
{file = "rpds_py-0.30.0-cp314-cp314t-win32.whl", hash = "sha256:dc824125c72246d924f7f796b4f63c1e9dc810c7d9e2355864b3c3a73d59ade0"},
{file = "rpds_py-0.30.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27f4b0e92de5bfbc6f86e43959e6edd1425c33b5e69aab0984a72047f2bcf1e3"},
{file = "rpds_py-0.30.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c2262bdba0ad4fc6fb5545660673925c2d2a5d9e2e0fb603aad545427be0fc58"},
{file = "rpds_py-0.30.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:ee6af14263f25eedc3bb918a3c04245106a42dfd4f5c2285ea6f997b1fc3f89a"},
{file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3adbb8179ce342d235c31ab8ec511e66c73faa27a47e076ccc92421add53e2bb"},
{file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:250fa00e9543ac9b97ac258bd37367ff5256666122c2d0f2bc97577c60a1818c"},
{file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9854cf4f488b3d57b9aaeb105f06d78e5529d3145b1e4a41750167e8c213c6d3"},
{file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:993914b8e560023bc0a8bf742c5f303551992dcb85e247b1e5c7f4a7d145bda5"},
{file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58edca431fb9b29950807e301826586e5bbf24163677732429770a697ffe6738"},
{file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:dea5b552272a944763b34394d04577cf0f9bd013207bc32323b5a89a53cf9c2f"},
{file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ba3af48635eb83d03f6c9735dfb21785303e73d22ad03d489e88adae6eab8877"},
{file = "rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:dff13836529b921e22f15cb099751209a60009731a68519630a24d61f0b1b30a"},
{file = "rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:1b151685b23929ab7beec71080a8889d4d6d9fa9a983d213f07121205d48e2c4"},
{file = "rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ac37f9f516c51e5753f27dfdef11a88330f04de2d564be3991384b2f3535d02e"},
{file = "rpds_py-0.30.0.tar.gz", hash = "sha256:dd8ff7cf90014af0c0f787eea34794ebf6415242ee1d6fa91eaba725cc441e84"},
]
[[package]]
name = "sse-starlette"
version = "3.3.4"
description = "SSE plugin for Starlette"
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "sse_starlette-3.3.4-py3-none-any.whl", hash = "sha256:84bb06e58939a8b38d8341f1bc9792f06c2b53f48c608dd207582b664fc8f3c1"},
{file = "sse_starlette-3.3.4.tar.gz", hash = "sha256:aaf92fc067af8a5427192895ac028e947b484ac01edbc3caf00e7e7137c7bef1"},
]
[package.dependencies]
anyio = ">=4.7.0"
starlette = ">=0.49.1"
[package.extras]
daphne = ["daphne (>=4.2.0)"]
examples = ["aiosqlite (>=0.21.0)", "fastapi (>=0.115.12)", "sqlalchemy[asyncio] (>=2.0.41)", "uvicorn (>=0.34.0)"]
granian = ["granian (>=2.3.1)"]
uvicorn = ["uvicorn (>=0.34.0)"]
[[package]]
name = "starlette"
version = "1.0.0"
description = "The little ASGI library that shines."
optional = false
python-versions = ">=3.10"
groups = ["main"]
files = [
{file = "starlette-1.0.0-py3-none-any.whl", hash = "sha256:d3ec55e0bb321692d275455ddfd3df75fff145d009685eb40dc91fc66b03d38b"},
{file = "starlette-1.0.0.tar.gz", hash = "sha256:6a4beaf1f81bb472fd19ea9b918b50dc3a77a6f2e190a12954b25e6ed5eea149"},
]
[package.dependencies]
anyio = ">=3.6.2,<5"
typing-extensions = {version = ">=4.10.0", markers = "python_version < \"3.13\""}
[package.extras]
full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"]
[[package]]
name = "typing-extensions"
version = "4.15.0"
description = "Backported and Experimental Type Hints for Python 3.9+"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "typing_extensions-4.15.0-py3-none-any.whl", hash = "sha256:f0fa19c6845758ab08074a0cfa8b7aecb71c999ca73d62883bc25cc018c4e548"},
{file = "typing_extensions-4.15.0.tar.gz", hash = "sha256:0cea48d173cc12fa28ecabc3b837ea3cf6f38c6d1136f85cbaaf598984861466"},
]
[[package]]
name = "typing-inspection"
version = "0.4.2"
description = "Runtime typing introspection tools"
optional = false
python-versions = ">=3.9"
groups = ["main"]
files = [
{file = "typing_inspection-0.4.2-py3-none-any.whl", hash = "sha256:4ed1cacbdc298c220f1bd249ed5287caa16f34d44ef4e9c3d0cbad5b521545e7"},
{file = "typing_inspection-0.4.2.tar.gz", hash = "sha256:ba561c48a67c5958007083d386c3295464928b01faa735ab8547c5692e87f464"},
]
[package.dependencies]
typing-extensions = ">=4.12.0"
[[package]]
name = "uvicorn"
version = "0.44.0"
description = "The lightning-fast ASGI server."
optional = false
python-versions = ">=3.10"
groups = ["main"]
markers = "sys_platform != \"emscripten\""
files = [
{file = "uvicorn-0.44.0-py3-none-any.whl", hash = "sha256:ce937c99a2cc70279556967274414c087888e8cec9f9c94644dfca11bd3ced89"},
{file = "uvicorn-0.44.0.tar.gz", hash = "sha256:6c942071b68f07e178264b9152f1f16dfac5da85880c4ce06366a96d70d4f31e"},
]
[package.dependencies]
click = ">=7.0"
h11 = ">=0.8"
typing-extensions = {version = ">=4.0", markers = "python_version < \"3.11\""}
[package.extras]
standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.20)", "websockets (>=10.4)"]
[metadata]
lock-version = "2.1"
python-versions = "^3.10"
content-hash = "7b99892e3738f4bb5297bf8a112b5785559535e21026690e3afb1904a219a47f"

View File

@ -0,0 +1,21 @@
[tool.poetry]
name = "open-meteo-mcp"
version = "0.1.0"
description = "MCP server for global weather data via Open-Meteo API"
authors = []
license = "MIT"
readme = "README.md"
packages = [{include = "open_meteo_mcp_server.py"}]
[tool.poetry.dependencies]
python = "^3.10"
mcp = ">=1.9.0"
httpx = ">=0.27.0"
[tool.poetry.scripts]
open-meteo-mcp = "open_meteo_mcp_server:main"
open-meteo-mcp-stdio = "open_meteo_mcp_server:main_stdio"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

View File

@ -0,0 +1,14 @@
#!/usr/bin/env zsh
#
# Launch the Open-Meteo MCP server over stdio.
#
# Resolves the per-OS projects root, changes into the server's Poetry
# project, and execs the stdio entry point declared in pyproject.toml.
set -euo pipefail

# Per-host location of the Projects checkout.
if [[ "$(uname)" == "Linux" ]]; then
  PATH_PREFIX="/data/Projects"
elif [[ "$(uname)" == "Darwin" ]]; then
  PATH_PREFIX="/Users/gregory.gauthier/Projects"
else
  # Default fallback is Linux
  PATH_PREFIX="/data/Projects"
fi
readonly PATH_PREFIX

# NOTE(review): this cds into mcp_servers/open_meteo_mcp, while the repo
# README describes the server under mcps/open_meteo_mcp — confirm the
# on-disk deployment layout matches this path.
SERVER_DIR="${PATH_PREFIX}/mcp_servers/open_meteo_mcp"
cd "${SERVER_DIR}" || {
  printf 'error: cannot cd to %s\n' "${SERVER_DIR}" >&2
  exit 1
}

# exec replaces this shell so the MCP client supervises the server
# process directly (signals and exit status propagate cleanly).
exec poetry run open-meteo-mcp-stdio

View File

@ -0,0 +1,4 @@
# Playwright MCP
This is a placeholder sub-project; the Playwright MCP itself is managed through a
convenience script.

View File

@ -0,0 +1,26 @@
#!/usr/bin/env zsh
#
# Launch the Playwright MCP server (headless Chromium).
# Installs npm dependencies and the Chromium browser on first run.
set -euo pipefail

# Per-host location of the Projects checkout.
if [[ "$(uname)" == "Linux" ]]; then
  PATH_PREFIX="/data/Projects"
elif [[ "$(uname)" == "Darwin" ]]; then
  PATH_PREFIX="/Users/gregory.gauthier/Projects"
else
  # Default fallback
  PATH_PREFIX="/data/Projects"
fi
readonly PATH_PREFIX

SERVER_DIR="${PATH_PREFIX}/mcp_servers/playwright_mcp"
cd "${SERVER_DIR}" || {
  printf 'error: cannot cd to %s\n' "${SERVER_DIR}" >&2
  exit 1
}

# Install dependencies if not present
if [[ ! -d "node_modules" ]]; then
  npm install @playwright/test @playwright/mcp >&2
fi

# Ensure browsers are installed. Progress output is diverted to stderr:
# stdout must stay clean because the MCP stdio transport uses it for
# JSON-RPC messages.
npx playwright install chromium >&2

# Launch the server; keep stdout attached to the MCP client and send
# server diagnostics to a log file. exec lets the client supervise the
# server process directly.
exec npx @playwright/mcp --browser chromium --headless 2> playwright_mcp.log

1
mcps/playwright_mcp/node_modules/.bin/playwright generated vendored Symbolic link
View File

@ -0,0 +1 @@
../@playwright/test/cli.js

1
mcps/playwright_mcp/node_modules/.bin/playwright-core generated vendored Symbolic link
View File

@ -0,0 +1 @@
../playwright-core/cli.js

1
mcps/playwright_mcp/node_modules/.bin/playwright-mcp generated vendored Symbolic link
View File

@ -0,0 +1 @@
../@playwright/mcp/cli.js

114
mcps/playwright_mcp/node_modules/.package-lock.json generated vendored Normal file
View File

@ -0,0 +1,114 @@
{
"name": "playwright_mcp",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"node_modules/@playwright/mcp": {
"version": "0.0.70",
"resolved": "https://registry.npmjs.org/@playwright/mcp/-/mcp-0.0.70.tgz",
"integrity": "sha512-Kl0a6l9VL8rvT1oBou3hS5yArjwWV9UlwAkq+0skfK1YVg8XfmmNaAmwZhMeNx/ZhGiWXfCllo6rD/jvZz+WuA==",
"license": "Apache-2.0",
"dependencies": {
"playwright": "1.60.0-alpha-1774999321000",
"playwright-core": "1.60.0-alpha-1774999321000"
},
"bin": {
"playwright-mcp": "cli.js"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@playwright/test": {
"version": "1.59.1",
"resolved": "https://registry.npmjs.org/@playwright/test/-/test-1.59.1.tgz",
"integrity": "sha512-PG6q63nQg5c9rIi4/Z5lR5IVF7yU5MqmKaPOe0HSc0O2cX1fPi96sUQu5j7eo4gKCkB2AnNGoWt7y4/Xx3Kcqg==",
"license": "Apache-2.0",
"dependencies": {
"playwright": "1.59.1"
},
"bin": {
"playwright": "cli.js"
},
"engines": {
"node": ">=18"
}
},
"node_modules/@playwright/test/node_modules/playwright": {
"version": "1.59.1",
"resolved": "https://registry.npmjs.org/playwright/-/playwright-1.59.1.tgz",
"integrity": "sha512-C8oWjPR3F81yljW9o5OxcWzfh6avkVwDD2VYdwIGqTkl+OGFISgypqzfu7dOe4QNLL2aqcWBmI3PMtLIK233lw==",
"license": "Apache-2.0",
"dependencies": {
"playwright-core": "1.59.1"
},
"bin": {
"playwright": "cli.js"
},
"engines": {
"node": ">=18"
},
"optionalDependencies": {
"fsevents": "2.3.2"
}
},
"node_modules/@playwright/test/node_modules/playwright-core": {
"version": "1.59.1",
"resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.59.1.tgz",
"integrity": "sha512-HBV/RJg81z5BiiZ9yPzIiClYV/QMsDCKUyogwH9p3MCP6IYjUFu/MActgYAvK0oWyV9NlwM3GLBjADyWgydVyg==",
"license": "Apache-2.0",
"bin": {
"playwright-core": "cli.js"
},
"engines": {
"node": ">=18"
}
},
"node_modules/fsevents": {
"version": "2.3.2",
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
"integrity": "sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==",
"hasInstallScript": true,
"ideallyInert": true,
"license": "MIT",
"optional": true,
"os": [
"darwin"
],
"engines": {
"node": "^8.16.0 || ^10.6.0 || >=11.0.0"
}
},
"node_modules/playwright": {
"version": "1.60.0-alpha-1774999321000",
"resolved": "https://registry.npmjs.org/playwright/-/playwright-1.60.0-alpha-1774999321000.tgz",
"integrity": "sha512-Bd5DkzYKG+2g1jLO6NeTXmGLbBYSFffJIOsR4l4hUBkJvzvGGdLZ7jZb2tOtb0WIoWXQKdQj3Ap6WthV4DBS8w==",
"license": "Apache-2.0",
"dependencies": {
"playwright-core": "1.60.0-alpha-1774999321000"
},
"bin": {
"playwright": "cli.js"
},
"engines": {
"node": ">=18"
},
"optionalDependencies": {
"fsevents": "2.3.2"
}
},
"node_modules/playwright-core": {
"version": "1.60.0-alpha-1774999321000",
"resolved": "https://registry.npmjs.org/playwright-core/-/playwright-core-1.60.0-alpha-1774999321000.tgz",
"integrity": "sha512-ams3Zo4VXxeOg5ZTTh16GkE8g48Bmxo/9pg9gXl9SVKlVohCU7Jaog7XntY8yFuzENA6dJc1Fz7Z/NNTm9nGEw==",
"license": "Apache-2.0",
"bin": {
"playwright-core": "cli.js"
},
"engines": {
"node": ">=18"
}
}
}
}

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright (c) Microsoft Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

File diff suppressed because it is too large Load Diff

32
mcps/playwright_mcp/node_modules/@playwright/mcp/cli.js generated vendored Executable file
View File

@ -0,0 +1,32 @@
#!/usr/bin/env node
/**
 * Copyright (c) Microsoft Corporation.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
// CLI entry point for the @playwright/mcp package (vendored, generated —
// do not edit locally). Builds the commander program exposed by
// playwright-core and starts the MCP server.
const { program } = require('playwright-core/lib/utilsBundle');
const { decorateMCPCommand } = require('playwright-core/lib/tools/mcp/program');
// Special case: `install-browser` is rewritten to the main Playwright CLI's
// `install` command and dispatched there instead of the MCP program.
// (Top-level `return` is legal here because CommonJS wraps the module body
// in a function.)
if (process.argv.includes('install-browser')) {
const argv = process.argv.map(arg => arg === 'install-browser' ? 'install' : arg);
const { program: mainProgram } = require('playwright-core/lib/cli/program');
mainProgram.parse(argv);
return;
}
// Normal path: version/name the program from package.json, attach the MCP
// command options, then parse asynchronously (void discards the promise).
const packageJSON = require('./package.json');
const p = program.version('Version ' + packageJSON.version).name('Playwright MCP');
decorateMCPCommand(p, packageJSON.version)
void program.parseAsync(process.argv);

View File

@ -0,0 +1,230 @@
/**
* Copyright (c) Microsoft Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import type * as playwright from '../../..';
/**
 * Names of the opt-in tool groups the MCP server can expose.
 * Used by `Config.capabilities` to enable subsets of functionality.
 */
export type ToolCapability =
  'config' |
  'core' |
  'core-navigation' |
  'core-tabs' |
  'core-input' |
  'core-install' |
  'network' |
  'pdf' |
  'storage' |
  'testing' |
  'vision' |
  'devtools';
/**
 * Top-level configuration accepted by the Playwright MCP server,
 * either from a config file or programmatically via `createConnection`.
 */
export type Config = {
  /**
   * The browser to use.
   */
  browser?: {
    /**
     * The type of browser to use.
     */
    browserName?: 'chromium' | 'firefox' | 'webkit';
    /**
     * Keep the browser profile in memory, do not save it to disk.
     */
    isolated?: boolean;
    /**
     * Path to a user data directory for browser profile persistence.
     * Temporary directory is created by default.
     */
    userDataDir?: string;
    /**
     * Launch options passed to
     * @see https://playwright.dev/docs/api/class-browsertype#browser-type-launch-persistent-context
     *
     * This is useful for settings options like `channel`, `headless`, `executablePath`, etc.
     */
    launchOptions?: playwright.LaunchOptions;
    /**
     * Context options for the browser context.
     *
     * This is useful for settings options like `viewport`.
     */
    contextOptions?: playwright.BrowserContextOptions;
    /**
     * Chrome DevTools Protocol endpoint to connect to an existing browser instance in case of Chromium family browsers.
     */
    cdpEndpoint?: string;
    /**
     * CDP headers to send with the connect request.
     */
    cdpHeaders?: Record<string, string>;
    /**
     * Timeout in milliseconds for connecting to CDP endpoint. Defaults to 30000 (30 seconds). Pass 0 to disable timeout.
     */
    cdpTimeout?: number;
    /**
     * Remote endpoint to connect to an existing Playwright server.
     */
    remoteEndpoint?: string;
    /**
     * Paths to TypeScript files to add as initialization scripts for Playwright page.
     */
    initPage?: string[];
    /**
     * Paths to JavaScript files to add as initialization scripts.
     * The scripts will be evaluated in every page before any of the page's scripts.
     */
    initScript?: string[];
  },
  /**
   * Connect to a running browser instance (Edge/Chrome only). If specified, `browser`
   * config is ignored.
   * Requires the "Playwright MCP Bridge" browser extension to be installed.
   */
  extension?: boolean;
  /**
   * Network transport settings used when serving MCP over SSE/HTTP.
   */
  server?: {
    /**
     * The port to listen on for SSE or MCP transport.
     */
    port?: number;
    /**
     * The host to bind the server to. Default is localhost. Use 0.0.0.0 to bind to all interfaces.
     */
    host?: string;
    /**
     * The hosts this server is allowed to serve from. Defaults to the host server is bound to.
     * This is not for CORS, but rather for the DNS rebinding protection.
     */
    allowedHosts?: string[];
  },
  /**
   * List of enabled tool capabilities. Possible values:
   * - 'core': Core browser automation features.
   * - 'pdf': PDF generation and manipulation.
   * - 'vision': Coordinate-based interactions.
   * - 'devtools': Developer tools features.
   */
  capabilities?: ToolCapability[];
  /**
   * Whether to save the Playwright session into the output directory.
   */
  saveSession?: boolean;
  /**
   * Reuse the same browser context between all connected HTTP clients.
   */
  sharedBrowserContext?: boolean;
  /**
   * Secrets are used to replace matching plain text in the tool responses to prevent the LLM
   * from accidentally getting sensitive data. It is a convenience and not a security feature,
   * make sure to always examine information coming in and from the tool on the client.
   */
  secrets?: Record<string, string>;
  /**
   * The directory to save output files.
   */
  outputDir?: string;
  /**
   * Filtering of browser console messages included in tool responses.
   */
  console?: {
    /**
     * The level of console messages to return. Each level includes the messages of more severe levels. Defaults to "info".
     */
    level?: 'error' | 'warning' | 'info' | 'debug';
  },
  /**
   * Origin allow/block lists applied to requests made by the browser.
   */
  network?: {
    /**
     * List of origins to allow the browser to request. Default is to allow all. Origins matching both `allowedOrigins` and `blockedOrigins` will be blocked.
     *
     * Supported formats:
     * - Full origin: `https://example.com:8080` - matches only that origin
     * - Wildcard port: `http://localhost:*` - matches any port on localhost with http protocol
     */
    allowedOrigins?: string[];
    /**
     * List of origins to block the browser to request. Origins matching both `allowedOrigins` and `blockedOrigins` will be blocked.
     *
     * Supported formats:
     * - Full origin: `https://example.com:8080` - matches only that origin
     * - Wildcard port: `http://localhost:*` - matches any port on localhost with http protocol
     */
    blockedOrigins?: string[];
  };
  /**
   * Specify the attribute to use for test ids, defaults to "data-testid".
   */
  testIdAttribute?: string;
  /**
   * Default Playwright timeouts, all in milliseconds.
   */
  timeouts?: {
    /**
     * Configures default action timeout: https://playwright.dev/docs/api/class-page#page-set-default-timeout. Defaults to 5000ms.
     */
    action?: number;
    /**
     * Configures default navigation timeout: https://playwright.dev/docs/api/class-page#page-set-default-navigation-timeout. Defaults to 60000ms.
     */
    navigation?: number;
    /**
     * Configures default expect timeout: https://playwright.dev/docs/test-timeouts#expect-timeout. Defaults to 5000ms.
     */
    expect?: number;
  };
  /**
   * Whether to send image responses to the client. Can be "allow" or "omit".
   * When unset, behaves as "auto": images are sent only if the client can display them.
   */
  imageResponses?: 'allow' | 'omit';
  /**
   * Controls how much of the page snapshot is attached to tool responses.
   */
  snapshot?: {
    /**
     * When taking snapshots for responses, specifies the mode to use.
     */
    mode?: 'full' | 'none';
  };
  /**
   * allowUnrestrictedFileAccess acts as a guardrail to prevent the LLM from accidentally
   * wandering outside its intended workspace. It is a convenience defense to catch unintended
   * file access, not a secure boundary; a deliberate attempt to reach other directories can be
   * easily worked around, so always rely on client-level permissions for true security.
   */
  allowUnrestrictedFileAccess?: boolean;
  /**
   * Specify the language to use for code generation.
   */
  codegen?: 'typescript' | 'none';
};

View File

@ -0,0 +1,23 @@
#!/usr/bin/env node
/**
* Copyright (c) Microsoft Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import type { Server } from '@modelcontextprotocol/sdk/server/index.js';
import type { Config } from './config';
import type { BrowserContext } from 'playwright';
/**
 * Creates an MCP `Server` wired to Playwright browser tools.
 *
 * @param config - Optional server/browser configuration (see `Config`).
 * @param contextGetter - Optional factory supplying an existing
 *   `BrowserContext`; when provided, the server uses it instead of
 *   launching its own browser.
 * @returns A promise resolving to the configured MCP server instance.
 */
export declare function createConnection(config?: Config, contextGetter?: () => Promise<BrowserContext>): Promise<Server>;
export {};

19
mcps/playwright_mcp/node_modules/@playwright/mcp/index.js generated vendored Executable file
View File

@ -0,0 +1,19 @@
#!/usr/bin/env node
/**
* Copyright (c) Microsoft Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
const { createConnection } = require('playwright-core/lib/tools/exports');
module.exports = { createConnection };

View File

@ -0,0 +1,42 @@
{
"name": "@playwright/mcp",
"version": "0.0.70",
"description": "Playwright Tools for MCP",
"repository": {
"type": "git",
"url": "git+https://github.com/microsoft/playwright-mcp.git"
},
"homepage": "https://playwright.dev",
"engines": {
"node": ">=18"
},
"author": {
"name": "Microsoft Corporation"
},
"license": "Apache-2.0",
"mcpName": "io.github.microsoft/playwright-mcp",
"scripts": {
"lint": "node update-readme.js",
"test": "playwright test",
"ctest": "playwright test --project=chrome",
"ftest": "playwright test --project=firefox",
"wtest": "playwright test --project=webkit",
"dtest": "MCP_IN_DOCKER=1 playwright test --project=chromium-docker",
"build": "echo OK",
"npm-publish": "npm run lint && npm run test && npm publish"
},
"exports": {
"./package.json": "./package.json",
".": {
"types": "./index.d.ts",
"default": "./index.js"
}
},
"dependencies": {
"playwright": "1.60.0-alpha-1774999321000",
"playwright-core": "1.60.0-alpha-1774999321000"
},
"bin": {
"playwright-mcp": "cli.js"
}
}

View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Portions Copyright (c) Microsoft Corporation.
Portions Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,5 @@
Playwright
Copyright (c) Microsoft Corporation
This software contains code derived from the Puppeteer project (https://github.com/puppeteer/puppeteer),
available under the Apache 2.0 license (https://github.com/puppeteer/puppeteer/blob/master/LICENSE).

View File

@ -0,0 +1,170 @@
# 🎭 Playwright
[![npm version](https://img.shields.io/npm/v/playwright.svg)](https://www.npmjs.com/package/playwright) <!-- GEN:chromium-version-badge -->[![Chromium version](https://img.shields.io/badge/chromium-147.0.7727.15-blue.svg?logo=google-chrome)](https://www.chromium.org/Home)<!-- GEN:stop --> <!-- GEN:firefox-version-badge -->[![Firefox version](https://img.shields.io/badge/firefox-148.0.2-blue.svg?logo=firefoxbrowser)](https://www.mozilla.org/en-US/firefox/new/)<!-- GEN:stop --> <!-- GEN:webkit-version-badge -->[![WebKit version](https://img.shields.io/badge/webkit-26.4-blue.svg?logo=safari)](https://webkit.org/)<!-- GEN:stop --> [![Join Discord](https://img.shields.io/badge/join-discord-informational)](https://aka.ms/playwright/discord)
## [Documentation](https://playwright.dev) | [API reference](https://playwright.dev/docs/api/class-playwright)
Playwright is a framework for Web Testing and Automation. It allows testing [Chromium](https://www.chromium.org/Home)<sup>1</sup>, [Firefox](https://www.mozilla.org/en-US/firefox/new/) and [WebKit](https://webkit.org/) with a single API. Playwright is built to enable cross-browser web automation that is **ever-green**, **capable**, **reliable**, and **fast**.
| | Linux | macOS | Windows |
| :--- | :---: | :---: | :---: |
| Chromium<sup>1</sup> <!-- GEN:chromium-version -->147.0.7727.15<!-- GEN:stop --> | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| WebKit <!-- GEN:webkit-version -->26.4<!-- GEN:stop --> | :white_check_mark: | :white_check_mark: | :white_check_mark: |
| Firefox <!-- GEN:firefox-version -->148.0.2<!-- GEN:stop --> | :white_check_mark: | :white_check_mark: | :white_check_mark: |
Headless execution is supported for all browsers on all platforms. Check out [system requirements](https://playwright.dev/docs/intro#system-requirements) for details.
Looking for Playwright for [Python](https://playwright.dev/python/docs/intro), [.NET](https://playwright.dev/dotnet/docs/intro), or [Java](https://playwright.dev/java/docs/intro)?
<sup>1</sup> Playwright uses [Chrome for Testing](https://developer.chrome.com/blog/chrome-for-testing) by default.
## Installation
Playwright has its own test runner for end-to-end tests, we call it Playwright Test.
### Using init command
The easiest way to get started with Playwright Test is to run the init command.
```Shell
# Run from your project's root directory
npm init playwright@latest
# Or create a new project
npm init playwright@latest new-project
```
This will create a configuration file, optionally add examples, a GitHub Action workflow and a first test example.spec.ts. You can now jump directly to writing assertions section.
### Manually
Add dependency and install browsers.
```Shell
npm i -D @playwright/test
# install supported browsers
npx playwright install
```
You can optionally install only selected browsers, see [install browsers](https://playwright.dev/docs/cli#install-browsers) for more details. Or you can install no browsers at all and use existing [browser channels](https://playwright.dev/docs/browsers).
* [Getting started](https://playwright.dev/docs/intro)
* [API reference](https://playwright.dev/docs/api/class-playwright)
## Capabilities
### Resilient • No flaky tests
**Auto-wait**. Playwright waits for elements to be actionable prior to performing actions. It also has a rich set of introspection events. The combination of the two eliminates the need for artificial timeouts - a primary cause of flaky tests.
**Web-first assertions**. Playwright assertions are created specifically for the dynamic web. Checks are automatically retried until the necessary conditions are met.
**Tracing**. Configure test retry strategy, capture execution trace, videos and screenshots to eliminate flakes.
### No trade-offs • No limits
Browsers run web content belonging to different origins in different processes. Playwright is aligned with the architecture of the modern browsers and runs tests out-of-process. This makes Playwright free of the typical in-process test runner limitations.
**Multiple everything**. Test scenarios that span multiple tabs, multiple origins and multiple users. Create scenarios with different contexts for different users and run them against your server, all in one test.
**Trusted events**. Hover elements, interact with dynamic controls and produce trusted events. Playwright uses real browser input pipeline indistinguishable from the real user.
**Test frames, pierce Shadow DOM**. Playwright selectors pierce shadow DOM and allow entering frames seamlessly.
### Full isolation • Fast execution
**Browser contexts**. Playwright creates a browser context for each test. Browser context is equivalent to a brand new browser profile. This delivers full test isolation with zero overhead. Creating a new browser context only takes a handful of milliseconds.
**Log in once**. Save the authentication state of the context and reuse it in all the tests. This bypasses repetitive log-in operations in each test, yet delivers full isolation of independent tests.
### Powerful Tooling
**[Codegen](https://playwright.dev/docs/codegen)**. Generate tests by recording your actions. Save them into any language.
**[Playwright inspector](https://playwright.dev/docs/inspector)**. Inspect page, generate selectors, step through the test execution, see click points and explore execution logs.
**[Trace Viewer](https://playwright.dev/docs/trace-viewer)**. Capture all the information to investigate the test failure. Playwright trace contains test execution screencast, live DOM snapshots, action explorer, test source and many more.
Looking for Playwright for [TypeScript](https://playwright.dev/docs/intro), [JavaScript](https://playwright.dev/docs/intro), [Python](https://playwright.dev/python/docs/intro), [.NET](https://playwright.dev/dotnet/docs/intro), or [Java](https://playwright.dev/java/docs/intro)?
## Examples
To learn how to run these Playwright Test examples, check out our [getting started docs](https://playwright.dev/docs/intro).
#### Page screenshot
This code snippet navigates to Playwright homepage and saves a screenshot.
```TypeScript
import { test } from '@playwright/test';
test('Page Screenshot', async ({ page }) => {
await page.goto('https://playwright.dev/');
await page.screenshot({ path: `example.png` });
});
```
#### Mobile and geolocation
This snippet emulates Mobile Safari on a device at given geolocation, navigates to maps.google.com, performs the action and takes a screenshot.
```TypeScript
import { test, devices } from '@playwright/test';
test.use({
...devices['iPhone 13 Pro'],
locale: 'en-US',
geolocation: { longitude: 12.492507, latitude: 41.889938 },
permissions: ['geolocation'],
})
test('Mobile and geolocation', async ({ page }) => {
await page.goto('https://maps.google.com');
await page.getByText('Your location').click();
await page.waitForRequest(/.*preview\/pwa/);
await page.screenshot({ path: 'colosseum-iphone.png' });
});
```
#### Evaluate in browser context
This code snippet navigates to example.com, and executes a script in the page context.
```TypeScript
import { test } from '@playwright/test';
test('Evaluate in browser context', async ({ page }) => {
await page.goto('https://www.example.com/');
const dimensions = await page.evaluate(() => {
return {
width: document.documentElement.clientWidth,
height: document.documentElement.clientHeight,
deviceScaleFactor: window.devicePixelRatio
}
});
console.log(dimensions);
});
```
#### Intercept network requests
This code snippet sets up request routing for a page to log all network requests.
```TypeScript
import { test } from '@playwright/test';
test('Intercept network requests', async ({ page }) => {
// Log and continue all network requests
await page.route('**', route => {
console.log(route.request().url());
route.continue();
});
await page.goto('http://todomvc.com');
});
```
## Resources
* [Documentation](https://playwright.dev)
* [API reference](https://playwright.dev/docs/api/class-playwright/)
* [Contribution guide](CONTRIBUTING.md)
* [Changelog](https://github.com/microsoft/playwright/releases)

19
mcps/playwright_mcp/node_modules/@playwright/test/cli.js generated vendored Executable file
View File

@ -0,0 +1,19 @@
#!/usr/bin/env node
/**
* Copyright (c) Microsoft Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Delegate the entire command line to the Playwright test runner's
// commander program; this bin is only a thin forwarding shim.
const cliProgram = require('playwright/lib/program').program;
cliProgram.parse(process.argv);

View File

@ -0,0 +1,18 @@
/**
* Copyright (c) Microsoft Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// @playwright/test is a thin façade over the `playwright/test` entry point:
// forward all named exports plus the default export unchanged.
export * from 'playwright/test';
export { default } from 'playwright/test';

View File

@ -0,0 +1,17 @@
/**
* Copyright (c) Microsoft Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
module.exports = require('playwright/test');

View File

@ -0,0 +1,18 @@
/**
* Copyright (c) Microsoft Corporation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// ESM façade mirroring the CommonJS entry point: forward all named exports
// and the default export from `playwright/test` unchanged.
export * from 'playwright/test';
export { default } from 'playwright/test';

View File

@ -0,0 +1 @@
../playwright/cli.js

View File

@ -0,0 +1 @@
../playwright-core/cli.js

View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Portions Copyright (c) Microsoft Corporation.
Portions Copyright 2017 Google Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,5 @@
Playwright
Copyright (c) Microsoft Corporation
This software contains code derived from the Puppeteer project (https://github.com/puppeteer/puppeteer),
available under the Apache 2.0 license (https://github.com/puppeteer/puppeteer/blob/master/LICENSE).

View File

@ -0,0 +1,3 @@
# playwright-core
This package contains the no-browser flavor of [Playwright](http://github.com/microsoft/playwright).

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,5 @@
# Enables the Media Foundation feature (required for media playback in
# browsers) when running on Windows Server; ProductType 3 = server,
# 1 = workstation, 2 = domain controller.
# Get-CimInstance replaces Get-WmiObject, which is deprecated and was
# removed in PowerShell 7+ — behavior is otherwise identical here.
$osInfo = Get-CimInstance -ClassName Win32_OperatingSystem
# check if running on Windows Server
if ($osInfo.ProductType -eq 3) {
    Install-WindowsFeature Server-Media-Foundation
}

View File

@ -0,0 +1,33 @@
# Sets up a dedicated WSL distribution ("playwright") on Windows and installs
# Node.js plus the WebKit browser inside it, so WebKit can be driven from WSL.
$ErrorActionPreference = 'Stop'
# This script sets up a WSL distribution that will be used to run WebKit.
$Distribution = "playwright"
$Username = "pwuser"
# `wsl --list --quiet` prints one distribution name per line.
$distributions = (wsl --list --quiet) -split "\r?\n"
if ($distributions -contains $Distribution) {
Write-Host "WSL distribution '$Distribution' already exists. Skipping installation."
} else {
Write-Host "Installing new WSL distribution '$Distribution'..."
$VhdSize = "10GB"
wsl --install -d Ubuntu-24.04 --name $Distribution --no-launch --vhd-size $VhdSize
wsl -d $Distribution -u root adduser --gecos GECOS --disabled-password $Username
}
$pwshDirname = (Resolve-Path -Path $PSScriptRoot).Path;
# NOTE(review): assumes this script lives one directory below the
# playwright-core package root, where cli.js resolves — confirm against layout.
$playwrightCoreRoot = Resolve-Path (Join-Path $pwshDirname "..")
# Bash bootstrap run inside WSL as root: installs Node 22 once (idempotent),
# then installs WebKit system deps and the browser itself via Playwright's CLI.
# The trailing -replace converts CRLF to LF so bash can execute the script.
$initScript = @"
if [ ! -f "/home/$Username/node/bin/node" ]; then
mkdir -p /home/$Username/node
curl -fsSL https://nodejs.org/dist/v22.17.0/node-v22.17.0-linux-x64.tar.xz -o /home/$Username/node/node-v22.17.0-linux-x64.tar.xz
tar -xJf /home/$Username/node/node-v22.17.0-linux-x64.tar.xz -C /home/$Username/node --strip-components=1
sudo -u $Username echo 'export PATH=/home/$Username/node/bin:\`$PATH' >> /home/$Username/.profile
fi
/home/$Username/node/bin/node cli.js install-deps webkit
sudo -u $Username PLAYWRIGHT_SKIP_BROWSER_GC=1 /home/$Username/node/bin/node cli.js install webkit
"@ -replace "\r\n", "`n"
wsl -d $Distribution --cd $playwrightCoreRoot -u root -- bash -c "$initScript"
Write-Host "Done!"

View File

@ -0,0 +1,42 @@
#!/usr/bin/env bash
# Installs the latest Google Chrome Beta .deb on Ubuntu/Debian x86_64 hosts.
set -e
set -x

if [[ $(arch) == "aarch64" ]]; then
  echo "ERROR: not supported on Linux Arm64"
  exit 1
fi

if [ -z "$PLAYWRIGHT_HOST_PLATFORM_OVERRIDE" ]; then
  if [[ ! -f "/etc/os-release" ]]; then
    echo "ERROR: cannot install on unknown linux distribution (/etc/os-release is missing)"
    exit 1
  fi
  ID=$(bash -c 'source /etc/os-release && echo $ID')
  if [[ "${ID}" != "ubuntu" && "${ID}" != "debian" ]]; then
    echo "ERROR: cannot install on $ID distribution - only Ubuntu and Debian are supported"
    exit 1
  fi
fi

# 1. make sure to remove old beta if any.
# (grep -q is already silent; the previous ">/dev/null" was redundant.)
if dpkg --get-selections | grep -q "^google-chrome-beta[[:space:]]*install$"; then
  apt-get remove -y google-chrome-beta
fi

# 2. Update apt lists (needed to install curl and chrome dependencies)
apt-get update

# 3. Install curl to download chrome
if ! command -v curl >/dev/null; then
  apt-get install -y curl
fi

# 4. download chrome beta from dl.google.com and install it.
cd /tmp
# -f: fail on HTTP errors instead of saving an error page as the .deb;
# --retry guards against transient network failures.
curl -f --retry 3 -O https://dl.google.com/linux/direct/google-chrome-beta_current_amd64.deb
apt-get install -y ./google-chrome-beta_current_amd64.deb
rm -f ./google-chrome-beta_current_amd64.deb
cd - >/dev/null

google-chrome-beta --version

View File

@ -0,0 +1,13 @@
#!/usr/bin/env bash
# Reinstalls Google Chrome Beta on macOS from the official universal DMG.
set -e
set -x

rm -rf "/Applications/Google Chrome Beta.app"
cd /tmp
# -f: fail on HTTP errors instead of saving an error page as the .dmg,
# which would otherwise make hdiutil fail with a confusing message.
curl -f --retry 3 -o ./googlechromebeta.dmg https://dl.google.com/chrome/mac/universal/beta/googlechromebeta.dmg
hdiutil attach -nobrowse -quiet -noautofsck -noautoopen -mountpoint /Volumes/googlechromebeta.dmg ./googlechromebeta.dmg
cp -pR "/Volumes/googlechromebeta.dmg/Google Chrome Beta.app" /Applications
hdiutil detach /Volumes/googlechromebeta.dmg
rm -f /tmp/googlechromebeta.dmg
/Applications/Google\ Chrome\ Beta.app/Contents/MacOS/Google\ Chrome\ Beta --version

View File

@ -0,0 +1,24 @@
# Downloads the Google Chrome Beta enterprise MSI, installs it silently via
# msiexec, then prints the installed version (exits 1 if chrome.exe is absent).
$ErrorActionPreference = 'Stop'
$url = 'https://dl.google.com/tag/s/dl/chrome/install/beta/googlechromebetastandaloneenterprise64.msi'
Write-Host "Downloading Google Chrome Beta"
$wc = New-Object net.webclient
$msiInstaller = "$env:temp\google-chrome-beta.msi"
$wc.Downloadfile($url, $msiInstaller)
Write-Host "Installing Google Chrome Beta"
# /quiet: no UI; backtick-escaped quotes protect a temp path containing spaces.
$arguments = "/i `"$msiInstaller`" /quiet"
Start-Process msiexec.exe -ArgumentList $arguments -Wait
Remove-Item $msiInstaller
# Chrome can land under either Program Files root depending on bitness.
$suffix = "\\Google\\Chrome Beta\\Application\\chrome.exe"
if (Test-Path "${env:ProgramFiles(x86)}$suffix") {
(Get-Item "${env:ProgramFiles(x86)}$suffix").VersionInfo
} elseif (Test-Path "${env:ProgramFiles}$suffix") {
(Get-Item "${env:ProgramFiles}$suffix").VersionInfo
} else {
Write-Host "ERROR: Failed to install Google Chrome Beta."
Write-Host "ERROR: This could be due to insufficient privileges, in which case re-running as Administrator may help."
exit 1
}

View File

@ -0,0 +1,42 @@
#!/usr/bin/env bash
# Installs the latest Google Chrome Stable .deb on Ubuntu/Debian x86_64 hosts.
set -e
set -x

if [[ $(arch) == "aarch64" ]]; then
  echo "ERROR: not supported on Linux Arm64"
  exit 1
fi

if [ -z "$PLAYWRIGHT_HOST_PLATFORM_OVERRIDE" ]; then
  if [[ ! -f "/etc/os-release" ]]; then
    echo "ERROR: cannot install on unknown linux distribution (/etc/os-release is missing)"
    exit 1
  fi
  ID=$(bash -c 'source /etc/os-release && echo $ID')
  if [[ "${ID}" != "ubuntu" && "${ID}" != "debian" ]]; then
    echo "ERROR: cannot install on $ID distribution - only Ubuntu and Debian are supported"
    exit 1
  fi
fi

# 1. make sure to remove old stable if any.
# (grep -q is already silent; the previous ">/dev/null" was redundant.)
if dpkg --get-selections | grep -q "^google-chrome[[:space:]]*install$"; then
  apt-get remove -y google-chrome
fi

# 2. Update apt lists (needed to install curl and chrome dependencies)
apt-get update

# 3. Install curl to download chrome
if ! command -v curl >/dev/null; then
  apt-get install -y curl
fi

# 4. download chrome stable from dl.google.com and install it.
cd /tmp
# -f: fail on HTTP errors instead of saving an error page as the .deb;
# --retry guards against transient network failures.
curl -f --retry 3 -O https://dl.google.com/linux/direct/google-chrome-stable_current_amd64.deb
apt-get install -y ./google-chrome-stable_current_amd64.deb
rm -f ./google-chrome-stable_current_amd64.deb
cd - >/dev/null

google-chrome --version

View File

@ -0,0 +1,12 @@
#!/usr/bin/env bash
# Reinstalls Google Chrome Stable on macOS from the official universal DMG.
set -e
set -x

rm -rf "/Applications/Google Chrome.app"
cd /tmp
# -f: fail on HTTP errors instead of saving an error page as the .dmg,
# which would otherwise make hdiutil fail with a confusing message.
curl -f --retry 3 -o ./googlechrome.dmg https://dl.google.com/chrome/mac/universal/stable/GGRO/googlechrome.dmg
hdiutil attach -nobrowse -quiet -noautofsck -noautoopen -mountpoint /Volumes/googlechrome.dmg ./googlechrome.dmg
cp -pR "/Volumes/googlechrome.dmg/Google Chrome.app" /Applications
hdiutil detach /Volumes/googlechrome.dmg
rm -f /tmp/googlechrome.dmg
/Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome --version

View File

@ -0,0 +1,24 @@
# Downloads the Google Chrome enterprise MSI, installs it silently via
# msiexec, then prints the installed version (exits 1 if chrome.exe is absent).
$ErrorActionPreference = 'Stop'
$url = 'https://dl.google.com/tag/s/dl/chrome/install/googlechromestandaloneenterprise64.msi'
$wc = New-Object net.webclient
$msiInstaller = "$env:temp\google-chrome.msi"
Write-Host "Downloading Google Chrome"
$wc.Downloadfile($url, $msiInstaller)
Write-Host "Installing Google Chrome"
# /quiet: no UI; backtick-escaped quotes protect a temp path containing spaces.
$arguments = "/i `"$msiInstaller`" /quiet"
Start-Process msiexec.exe -ArgumentList $arguments -Wait
Remove-Item $msiInstaller
# Chrome can land under either Program Files root depending on bitness.
$suffix = "\\Google\\Chrome\\Application\\chrome.exe"
if (Test-Path "${env:ProgramFiles(x86)}$suffix") {
(Get-Item "${env:ProgramFiles(x86)}$suffix").VersionInfo
} elseif (Test-Path "${env:ProgramFiles}$suffix") {
(Get-Item "${env:ProgramFiles}$suffix").VersionInfo
} else {
Write-Host "ERROR: Failed to install Google Chrome."
Write-Host "ERROR: This could be due to insufficient privileges, in which case re-running as Administrator may help."
exit 1
}

View File

@ -0,0 +1,48 @@
#!/usr/bin/env bash
# Installs Microsoft Edge Beta from the official Microsoft apt repository
# on Ubuntu/Debian x86_64 hosts.
set -e
set -x

if [[ $(arch) == "aarch64" ]]; then
  echo "ERROR: not supported on Linux Arm64"
  exit 1
fi

if [ -z "$PLAYWRIGHT_HOST_PLATFORM_OVERRIDE" ]; then
  if [[ ! -f "/etc/os-release" ]]; then
    echo "ERROR: cannot install on unknown linux distribution (/etc/os-release is missing)"
    exit 1
  fi
  ID=$(bash -c 'source /etc/os-release && echo $ID')
  if [[ "${ID}" != "ubuntu" && "${ID}" != "debian" ]]; then
    echo "ERROR: cannot install on $ID distribution - only Ubuntu and Debian are supported"
    exit 1
  fi
fi

# 1. make sure to remove old beta if any.
# (grep -q is already silent; the previous ">/dev/null" was redundant.)
if dpkg --get-selections | grep -q "^microsoft-edge-beta[[:space:]]*install$"; then
  apt-get remove -y microsoft-edge-beta
fi

# 2. Install curl to download Microsoft gpg key
if ! command -v curl >/dev/null; then
  apt-get update
  apt-get install -y curl
fi

# GnuPG is not preinstalled in slim images
if ! command -v gpg >/dev/null; then
  apt-get update
  apt-get install -y gpg
fi

# 3. Add the GPG key, the apt repo, update the apt cache, and install the package
# -fsSL: fail on HTTP errors (so an error page is never fed to gpg) and
# follow redirects; previously a failed download would be dearmored silently.
curl -fsSL https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /tmp/microsoft.gpg
install -o root -g root -m 644 /tmp/microsoft.gpg /etc/apt/trusted.gpg.d/
# NOTE(review): the list file name says "dev" but the repo serves all Edge
# channels; keeping the name for compatibility with prior installs.
sh -c 'echo "deb [arch=amd64] https://packages.microsoft.com/repos/edge stable main" > /etc/apt/sources.list.d/microsoft-edge-dev.list'
rm /tmp/microsoft.gpg
apt-get update && apt-get install -y microsoft-edge-beta

microsoft-edge-beta --version

View File

@ -0,0 +1,11 @@
#!/usr/bin/env bash
# Installs (or updates) Microsoft Edge Beta on macOS from the .pkg URL in $1.
set -e
set -x

# Fail fast with a usage message instead of passing an empty URL to curl.
if [[ -z "${1:-}" ]]; then
  echo "ERROR: usage: $0 <msedge_beta.pkg url>"
  exit 1
fi

cd /tmp
# -f: fail on HTTP errors instead of saving an error page as the .pkg.
curl -f --retry 3 -o ./msedge_beta.pkg "$1"
# Note: there's no way to uninstall previously installed MSEdge.
# However, running PKG again seems to update installation.
sudo installer -pkg /tmp/msedge_beta.pkg -target /
rm -f /tmp/msedge_beta.pkg
/Applications/Microsoft\ Edge\ Beta.app/Contents/MacOS/Microsoft\ Edge\ Beta --version

View File

@ -0,0 +1,23 @@
# Downloads the Microsoft Edge Beta MSI from the URL given as the first
# argument, installs it silently via msiexec, then prints the installed
# version (exits 1 if msedge.exe is absent).
$ErrorActionPreference = 'Stop'
$url = $args[0]
Write-Host "Downloading Microsoft Edge Beta"
$wc = New-Object net.webclient
$msiInstaller = "$env:temp\microsoft-edge-beta.msi"
$wc.Downloadfile($url, $msiInstaller)
Write-Host "Installing Microsoft Edge Beta"
# /quiet: no UI; backtick-escaped quotes protect a temp path containing spaces.
$arguments = "/i `"$msiInstaller`" /quiet"
Start-Process msiexec.exe -ArgumentList $arguments -Wait
Remove-Item $msiInstaller
# Edge can land under either Program Files root depending on bitness.
$suffix = "\\Microsoft\\Edge Beta\\Application\\msedge.exe"
if (Test-Path "${env:ProgramFiles(x86)}$suffix") {
(Get-Item "${env:ProgramFiles(x86)}$suffix").VersionInfo
} elseif (Test-Path "${env:ProgramFiles}$suffix") {
(Get-Item "${env:ProgramFiles}$suffix").VersionInfo
} else {
Write-Host "ERROR: Failed to install Microsoft Edge Beta."
Write-Host "ERROR: This could be due to insufficient privileges, in which case re-running as Administrator may help."
exit 1
}

View File

@ -0,0 +1,48 @@
#!/usr/bin/env bash
# Installs Microsoft Edge Dev from the official Microsoft apt repository
# on Ubuntu/Debian x86_64 hosts.
set -e
set -x

if [[ $(arch) == "aarch64" ]]; then
  echo "ERROR: not supported on Linux Arm64"
  exit 1
fi

if [ -z "$PLAYWRIGHT_HOST_PLATFORM_OVERRIDE" ]; then
  if [[ ! -f "/etc/os-release" ]]; then
    echo "ERROR: cannot install on unknown linux distribution (/etc/os-release is missing)"
    exit 1
  fi
  ID=$(bash -c 'source /etc/os-release && echo $ID')
  if [[ "${ID}" != "ubuntu" && "${ID}" != "debian" ]]; then
    echo "ERROR: cannot install on $ID distribution - only Ubuntu and Debian are supported"
    exit 1
  fi
fi

# 1. make sure to remove old dev if any.
# (grep -q is already silent; the previous ">/dev/null" was redundant.)
if dpkg --get-selections | grep -q "^microsoft-edge-dev[[:space:]]*install$"; then
  apt-get remove -y microsoft-edge-dev
fi

# 2. Install curl to download Microsoft gpg key
if ! command -v curl >/dev/null; then
  apt-get update
  apt-get install -y curl
fi

# GnuPG is not preinstalled in slim images
if ! command -v gpg >/dev/null; then
  apt-get update
  apt-get install -y gpg
fi

# 3. Add the GPG key, the apt repo, update the apt cache, and install the package
# -fsSL: fail on HTTP errors (so an error page is never fed to gpg) and
# follow redirects; previously a failed download would be dearmored silently.
curl -fsSL https://packages.microsoft.com/keys/microsoft.asc | gpg --dearmor > /tmp/microsoft.gpg
install -o root -g root -m 644 /tmp/microsoft.gpg /etc/apt/trusted.gpg.d/
sh -c 'echo "deb [arch=amd64] https://packages.microsoft.com/repos/edge stable main" > /etc/apt/sources.list.d/microsoft-edge-dev.list'
rm /tmp/microsoft.gpg
apt-get update && apt-get install -y microsoft-edge-dev

microsoft-edge-dev --version

View File

@ -0,0 +1,11 @@
#!/usr/bin/env bash
# Installs (or updates) Microsoft Edge Dev on macOS from the .pkg URL in $1.
set -e
set -x

# Fail fast with a usage message instead of passing an empty URL to curl.
if [[ -z "${1:-}" ]]; then
  echo "ERROR: usage: $0 <msedge_dev.pkg url>"
  exit 1
fi

cd /tmp
# -f: fail on HTTP errors instead of saving an error page as the .pkg.
curl -f --retry 3 -o ./msedge_dev.pkg "$1"
# Note: there's no way to uninstall previously installed MSEdge.
# However, running PKG again seems to update installation.
sudo installer -pkg /tmp/msedge_dev.pkg -target /
rm -f /tmp/msedge_dev.pkg
/Applications/Microsoft\ Edge\ Dev.app/Contents/MacOS/Microsoft\ Edge\ Dev --version

Some files were not shown because too many files have changed in this diff Show More