# Python Integration Guide
This page provides a complete, ready-to-use Python script for integrating the LeapAI CXR API into your third-party application. The script demonstrates authentication, single inference, and bulk inference.
# Prerequisites
Install the required Python package:
```bash
pip install requests
```
# Configuration
Update the following constants in the script with your environment details:
```python
BASE_URL = "http://<server-ip>:8500"
TOKEN = "your_pre_issued_jwt_token"
```
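If you would rather not hard-code credentials, you can read them from the environment instead. A minimal sketch (the variable names `LEAPAI_BASE_URL` and `LEAPAI_TOKEN` are illustrative, not part of the product):

```python
import os

# Illustrative variable names; use whatever naming your deployment standardizes on.
BASE_URL = os.environ.get("LEAPAI_BASE_URL", "http://localhost:8500")
TOKEN = os.environ["LEAPAI_TOKEN"]  # fail fast if the token is not configured
```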
# Complete Integration Script
"""
LeapAI CXR — Python Integration Script
========================================
A complete reference implementation for interacting with
the LeapAI CXR AI inference API.
Usage:
python leapai_cxr_client.py
Requirements:
pip install requests
"""
import os
import json
import requests
from typing import Optional
# =============================================================================
# Configuration
# =============================================================================
BASE_URL = "http://localhost:8500" # LeapAI CXR server URL
TOKEN = "your_pre_issued_jwt_token" # JWT token provided post-installation
# =============================================================================
# Health Check
# =============================================================================
def health_check(base_url: str) -> bool:
"""
Check if the LeapAI CXR server is running and responsive.
Args:
base_url: Base URL of the LeapAI CXR server
Returns:
True if server is healthy, False otherwise.
"""
try:
response = requests.get(f"{base_url}/ping")
if response.status_code == 200:
data = response.json()
print(f"[✓] Server is healthy: {data}")
return True
else:
print(f"[✗] Server returned status {response.status_code}")
return False
except requests.exceptions.ConnectionError:
print(f"[✗] Server is not reachable at {base_url}")
return False
# =============================================================================
# Single Inference
# =============================================================================
def run_single_inference(
base_url: str,
token: str,
input_image_path: str,
output_folder_path: str
) -> dict:
"""
Run AI inference on a single DICOM image.
This will generate the following output files in a sub-folder
named by the Patient ID:
- <patient_id>_predictions.json
- <patient_id>_heatmap.png
- <patient_id>_source_image.png
- <patient_id>_report.pdf
Args:
base_url: Base URL of the LeapAI CXR server
token: JWT authentication token
input_image_path: Absolute path to a .dcm DICOM file on the server
output_folder_path: Absolute path to the output directory on the server
Returns:
API response as a dictionary.
"""
url = f"{base_url}/single_inference"
payload = {
"input_image_path": input_image_path,
"output_folder_path": output_folder_path
}
headers = {
"Content-Type": "application/json",
"Authorization": token # NOTE: No "Bearer" prefix
}
try:
response = requests.post(url, json=payload, headers=headers, timeout=300)
response_data = response.json()
if response.status_code == 200 and response_data.get("Result") == "Success":
print(f"[✓] Single inference completed successfully.")
else:
print(f"[✗] Single inference failed: {response_data.get('Message')}")
return response_data
except requests.exceptions.Timeout:
print("[✗] Request timed out. The image may be too large.")
return {"Result": "Failure", "Message": "Request timeout"}
except Exception as e:
print(f"[✗] Single inference error: {e}")
return {"Result": "Failure", "Message": str(e)}
# =============================================================================
# Bulk Inference
# =============================================================================
def run_bulk_inference(
base_url: str,
token: str,
input_folder_path: str,
output_folder_path: str,
timeout: int = 3600
) -> dict:
"""
Run AI inference on all DICOM images in a folder.
The engine scans the input folder recursively for .dcm files and
processes each one, saving results per patient.
Args:
base_url: Base URL of the LeapAI CXR server
token: JWT authentication token
input_folder_path: Absolute path to folder containing .dcm files
output_folder_path: Absolute path to the output directory
timeout: Request timeout in seconds (default: 3600 for large batches)
Returns:
API response as a dictionary.
"""
url = f"{base_url}/bulk_inference"
payload = {
"input_folder_path": input_folder_path,
"output_folder_path": output_folder_path
}
headers = {
"Content-Type": "application/json",
"Authorization": token # NOTE: No "Bearer" prefix
}
try:
print(f"[…] Starting bulk inference on: {input_folder_path}")
print(f" Output will be saved to: {output_folder_path}")
print(f" Timeout: {timeout}s. This may take a while...")
response = requests.post(url, json=payload, headers=headers, timeout=timeout)
response_data = response.json()
if response.status_code == 200 and response_data.get("Result") == "Success":
print(f"[✓] Bulk inference completed successfully.")
else:
print(f"[✗] Bulk inference failed: {response_data.get('Message')}")
return response_data
except requests.exceptions.Timeout:
print("[✗] Request timed out. Consider increasing the timeout for large batches.")
return {"Result": "Failure", "Message": "Request timeout"}
except Exception as e:
print(f"[✗] Bulk inference error: {e}")
return {"Result": "Failure", "Message": str(e)}
# =============================================================================
# Read Prediction Results
# =============================================================================
def read_prediction_json(json_path: str) -> Optional[dict]:
"""
Read and parse a prediction JSON file generated by the inference engine.
Args:
json_path: Absolute path to a *_predictions.json file.
Returns:
Parsed prediction dictionary or None.
"""
try:
with open(json_path, "r") as f:
data = json.load(f)
print(f"\n{'='*60}")
print(f" Prediction Results: {os.path.basename(json_path)}")
print(f"{'='*60}")
print(f" Overall Result : {data.get('result', 'N/A')}")
print(f" Patient ID : {data.get('metadata', {}).get('patient_id', 'N/A')}")
print(f" Patient Name : {data.get('metadata', {}).get('patient_name', 'N/A')}")
print(f" Model Version : {data.get('metadata', {}).get('model_version', 'N/A')}")
print(f"{'='*60}")
print(f" {'Disease':<25} {'Detected':<10} {'Confidence':<12}")
print(f" {'-'*47}")
for finding in data.get("findings", []):
name = finding.get("name", "Unknown")
presence = "YES" if finding.get("presence") else "No"
confidence = finding.get("confidence", 0.0)
marker = "⚠️ " if finding.get("presence") else " "
print(f" {marker}{name:<23} {presence:<10} {confidence:.4f}")
print(f"{'='*60}")
print(f" Heatmap : {data.get('heatmap', 'N/A')}")
print(f" Source Image : {data.get('source_image', 'N/A')}")
print(f"{'='*60}\n")
return data
except FileNotFoundError:
print(f"[✗] File not found: {json_path}")
return None
except json.JSONDecodeError:
print(f"[✗] Invalid JSON: {json_path}")
return None
# =============================================================================
# Main — Example Usage
# =============================================================================
def main():
"""
Example workflow demonstrating the full LeapAI CXR integration:
1. Health check
2. Run single inference
3. Run bulk inference (optional)
4. Parse prediction results
"""
print("\n" + "=" * 60)
print(" LeapAI CXR — Python Integration Example")
print("=" * 60 + "\n")
# ---- Step 1: Health Check ----
print("[Step 1] Checking server health...")
if not health_check(BASE_URL):
print("Server is not available. Exiting.")
return
# ---- Step 2: Single Inference ----
print("\n[Step 2] Running single inference...")
single_result = run_single_inference(
base_url=BASE_URL,
token=TOKEN,
input_image_path="/path/to/dicom/image.dcm", # <-- Update this path
output_folder_path="/path/to/output/" # <-- Update this path
)
print(f" Response: {json.dumps(single_result, indent=2)}")
# ---- Step 3: Bulk Inference (Optional) ----
print("\n[Step 3] Running bulk inference...")
bulk_result = run_bulk_inference(
base_url=BASE_URL,
token=TOKEN,
input_folder_path="/path/to/dicom/folder/", # <-- Update this path
output_folder_path="/path/to/output/bulk/", # <-- Update this path
timeout=3600
)
print(f" Response: {json.dumps(bulk_result, indent=2)}")
# ---- Step 4: Read Prediction Results ----
print("\n[Step 4] Reading prediction results...")
# After inference, read the generated prediction JSON
prediction_file = "/path/to/output/<patient_id>/<patient_id>_predictions.json"
read_prediction_json(prediction_file) # <-- Update this path
if __name__ == "__main__":
main()
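After a bulk run, each patient's results are written to their own sub-folder of the output directory. A minimal sketch for locating every prediction file and printing it with the `read_prediction_json` helper above (assuming the output folder is readable from where this script runs):

```python
import glob
import os

output_root = "/path/to/output/bulk/"  # the same folder passed as output_folder_path

# Each patient sub-folder contains a <patient_id>_predictions.json file.
for json_path in sorted(glob.glob(os.path.join(output_root, "*", "*_predictions.json"))):
    read_prediction_json(json_path)
```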
# Function Reference
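The script defines four functions:

| Function | Description |
|----------|-------------|
| `health_check(base_url)` | Calls `GET /ping` to verify the server is running and responsive. |
| `run_single_inference(base_url, token, input_image_path, output_folder_path)` | Calls `POST /single_inference` to process one DICOM file and write per-patient outputs. |
| `run_bulk_inference(base_url, token, input_folder_path, output_folder_path, timeout=3600)` | Calls `POST /bulk_inference` to process every `.dcm` file in a folder. |
| `read_prediction_json(json_path)` | Parses a `*_predictions.json` file and prints a formatted summary of the findings. |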
# Key Notes
**Authorization Header**

The `Authorization` header must contain the raw JWT token — do not prefix it with `Bearer`.
```python
# ✓ Correct
headers = {"Authorization": "eyJhbGciOiJIUzI1NiIsIn..."}

# ✗ Incorrect
headers = {"Authorization": "Bearer eyJhbGciOiJIUzI1NiIsIn..."}
```
**Timeout for Bulk Inference**

Bulk inference can take a long time depending on the number of DICOM files. The default timeout is 3600 seconds (1 hour). Adjust the `timeout` parameter as needed for your dataset size.
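For example, to give a very large archive more time (the value shown is arbitrary; size it to your dataset):

```python
# Allow up to 2 hours for this batch before the client gives up waiting.
bulk_result = run_bulk_inference(
    base_url=BASE_URL,
    token=TOKEN,
    input_folder_path="/path/to/dicom/folder/",
    output_folder_path="/path/to/output/bulk/",
    timeout=7200,
)
```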
**File Paths**

All file paths must be absolute paths on the server where LeapAI CXR is running. If your client application is on a different machine, the paths still refer to locations on the server's filesystem, not the client's.