Loading...
Loading...
Developer Resource
Copy-paste-ready Python snippets for reading ComfyUI workflow metadata from PNG files. Extract prompts, find models, list LoRAs, and more.
Parse the raw tEXt and zTXt chunks from a ComfyUI-generated PNG. This is where the workflow and prompt data live.
import struct
import zlib
def read_png_text_chunks(filepath: str) -> dict[str, str]:
    """Read all tEXt, zTXt, and iTXt chunks from a PNG file.

    Walks the PNG chunk stream and collects every textual metadata
    entry as a keyword -> value mapping. ComfyUI stores its metadata
    under the "workflow" and "prompt" keywords.

    Args:
        filepath: Path to a PNG file.

    Returns:
        Mapping of chunk keyword to decoded text. Malformed or
        truncated chunks are skipped instead of raising, so the
        reader is best-effort on damaged files.
    """
    chunks: dict[str, str] = {}
    with open(filepath, "rb") as f:
        # Skip PNG signature (8 bytes)
        f.read(8)
        while True:
            # Each chunk: [4-byte length] [4-byte type] [data] [4-byte CRC]
            raw = f.read(4)
            if len(raw) < 4:
                break
            length = struct.unpack(">I", raw)[0]
            type_bytes = f.read(4)
            if len(type_bytes) < 4:
                break  # truncated file: chunk type missing
            chunk_type = type_bytes.decode("ascii", errors="replace")
            data = f.read(length)
            f.read(4)  # skip CRC (not validated here)
            if len(data) < length:
                break  # truncated file: incomplete chunk data
            if chunk_type == "IEND":
                break
            if chunk_type == "tEXt":
                # Keyword and value separated by null byte
                null_pos = data.find(b"\x00")
                if null_pos < 0:
                    continue  # malformed chunk: no keyword separator
                key = data[:null_pos].decode("latin-1")
                chunks[key] = data[null_pos + 1:].decode("latin-1")
            elif chunk_type == "zTXt":
                # Keyword, null byte, compression method, compressed data
                null_pos = data.find(b"\x00")
                if null_pos < 0:
                    continue  # malformed chunk: no keyword separator
                key = data[:null_pos].decode("latin-1")
                # Skip compression method byte (always 0 = deflate)
                compressed = data[null_pos + 2:]
                try:
                    chunks[key] = zlib.decompress(compressed).decode("utf-8")
                except zlib.error:
                    continue  # corrupt deflate stream: skip chunk
            elif chunk_type == "iTXt":
                # Layout: keyword \0 compression-flag compression-method
                #         language-tag \0 translated-keyword \0 text (UTF-8)
                null_pos = data.find(b"\x00")
                if null_pos < 0 or len(data) < null_pos + 3:
                    continue  # malformed chunk: header too short
                key = data[:null_pos].decode("latin-1")
                compressed_flag = data[null_pos + 1]
                rest = data[null_pos + 3:]
                lang_end = rest.find(b"\x00")
                if lang_end < 0:
                    continue  # malformed chunk: no language tag terminator
                trans_end = rest.find(b"\x00", lang_end + 1)
                if trans_end < 0:
                    continue  # malformed chunk: no translated-keyword terminator
                text = rest[trans_end + 1:]
                if compressed_flag:
                    try:
                        text = zlib.decompress(text)
                    except zlib.error:
                        continue  # corrupt deflate stream: skip chunk
                chunks[key] = text.decode("utf-8", errors="replace")
    return chunks
# Usage: read the text chunks embedded in a ComfyUI-generated PNG.
chunks = read_png_text_chunks("ComfyUI_00042_.png")
# Show which metadata keys were found (typically 'workflow' and 'prompt').
print(list(chunks.keys()))
# Typical output: ['workflow', 'prompt']

Note: ComfyUI stores metadata in standard PNG text chunks. The "workflow" chunk contains the full node graph; the "prompt" chunk contains the serialized execution data.
Numonic: Numonic automatically extracts and indexes these chunks on upload — no manual parsing required.
The "workflow" chunk is a JSON string describing the full node graph. Parse it to access nodes, links, and settings.
import json
def parse_workflow(chunks: dict[str, str]) -> dict | None:
"""Parse the workflow JSON from PNG text chunks."""
raw = chunks.get("workflow")
if not raw:
return None
workflow = json.loads(raw)
# Top-level keys:
# - "nodes": list of node objects
# - "links": list of connection arrays
# - "groups": visual groups (cosmetic only)
# - "config": graph configuration
# - "extra": workspace metadata
# - "version": ComfyUI version (e.g. 0.4)
print(f"Nodes: {len(workflow.get('nodes', []))}")
print(f"Links: {len(workflow.get('links', []))}")
print(f"Version: {workflow.get('version')}")
return workflow
# Usage: parse the "workflow" chunk extracted by read_png_text_chunks.
workflow = parse_workflow(chunks)
# Example console output for a simple txt2img graph:
# Nodes: 12
# Links: 15
# Version: 0.4

Note: The workflow JSON describes the visual graph exactly as it appears in the ComfyUI editor. Each node has a "type" (e.g. "KSampler"), "widgets_values" (parameter settings), and connections.
Find the text prompt used for image generation by looking for CLIPTextEncode nodes.
def extract_positive_prompt(workflow: dict) -> str | None:
"""Find the positive prompt from CLIPTextEncode nodes."""
for node in workflow.get("nodes", []):
if node.get("type") != "CLIPTextEncode":
continue
# widgets_values[0] is typically the prompt text
values = node.get("widgets_values", [])
if not values:
continue
text = values[0]
if not isinstance(text, str) or not text.strip():
continue
# Heuristic: positive prompts are usually longer
# and don't start with common negative patterns
lower = text.lower()
if lower.startswith(("ugly", "bad", "worst", "low quality")):
continue # Likely a negative prompt
return text.strip()
return None
# Usage: pull the positive prompt out of the parsed workflow.
prompt = extract_positive_prompt(workflow)
# Prints the prompt text, or None if no suitable node was found.
print(prompt)
# "a serene mountain landscape at golden hour, ..."Note: This simple heuristic works for basic txt2img workflows. Complex workflows with multiple CLIPTextEncode nodes (e.g. regional prompting, ControlNet conditioning) require graph traversal to determine which prompt fed into which sampler.
Numonic: Numonic resolves the full execution graph to correctly attribute prompts to samplers — even in multi-pass workflows with dozens of prompt nodes.
Pull the generation seed, sampler name, steps, and CFG scale from KSampler nodes.
def extract_sampler_settings(workflow: dict) -> list[dict]:
    """Collect the key parameters of every KSampler-family node.

    Matches any node whose type contains "KSampler" and has at least
    the seven standard widgets, in this order:
    [seed, control_after_generate, steps, cfg, sampler, scheduler, denoise]
    """
    found: list[dict] = []
    for node in workflow.get("nodes", []):
        if "KSampler" not in node.get("type", ""):
            continue
        widgets = node.get("widgets_values", [])
        if len(widgets) < 7:
            continue
        # Unpack the standard widget layout by position.
        seed, _control, steps, cfg, sampler, scheduler, denoise = widgets[:7]
        found.append({
            "node_id": node.get("id"),
            "seed": seed,
            "steps": steps,
            "cfg": cfg,
            "sampler": sampler,
            "scheduler": scheduler,
            "denoise": denoise,
        })
    return found
# Usage: print the key settings of every sampler node found.
for s in extract_sampler_settings(workflow):
    print(f"Seed: {s['seed']}, Steps: {s['steps']}, "
          f"CFG: {s['cfg']}, Sampler: {s['sampler']}")
# Seed: 842395, Steps: 30, CFG: 7.5, Sampler: euler_ancestral

Note: Widget value order depends on the node type. KSampler and KSamplerAdvanced have different layouts. Custom sampler nodes from community packs may have entirely different parameter orders.
Numonic: Numonic normalizes sampler parameters across all known node types — including custom community samplers — into a consistent schema you can search and filter.
Find all LoRA models referenced in the workflow, along with their strength values.
def list_loras(workflow: dict) -> list[dict]:
    """Collect every LoRA reference from LoraLoader-family nodes.

    Matches any node whose type contains "LoraLoader". Widget order is
    [lora_name, strength_model, strength_clip]; missing strength
    values default to 1.0.
    """
    found = []
    for node in workflow.get("nodes", []):
        if "LoraLoader" not in node.get("type", ""):
            continue
        widgets = node.get("widgets_values", [])
        if not widgets:
            continue
        entry = {
            "name": widgets[0],
            "strength_model": 1.0,
            "strength_clip": 1.0,
        }
        if len(widgets) > 1:
            entry["strength_model"] = widgets[1]
        if len(widgets) > 2:
            entry["strength_clip"] = widgets[2]
        found.append(entry)
    return found
# Usage: list every LoRA with its model/clip strengths.
for lora in list_loras(workflow):
    print(f"{lora['name']} "
          f"(model: {lora['strength_model']}, "
          f"clip: {lora['strength_clip']})")
# Example output:
# add_detail.safetensors (model: 0.8, clip: 0.8)
# film_grain_v1.safetensors (model: 0.5, clip: 0.5)

Note: This catches standard LoraLoader nodes. Some custom node packs provide alternative LoRA loaders with different parameter layouts.
Numonic: Numonic detects LoRA references across all known loader node types and links them to your model library for tracking which LoRAs produced which outputs.
Identify which Stable Diffusion checkpoint was used for generation.
def find_checkpoints(workflow: dict) -> list[str]:
    """Return checkpoint/UNET model filenames referenced by the graph.

    Looks for the common loader node types and reads the first widget
    value, which holds the model filename.
    """
    loader_types = {
        "CheckpointLoaderSimple",
        "CheckpointLoader",
        "UNETLoader",
    }
    names = []
    for node in workflow.get("nodes", []):
        if node.get("type", "") not in loader_types:
            continue
        widgets = node.get("widgets_values", [])
        if widgets:
            names.append(widgets[0])
    return names
# Usage: list the checkpoint filenames referenced by the workflow.
models = find_checkpoints(workflow)
print(models)
# ['sd_xl_base_1.0.safetensors']

Note: Workflows can reference multiple checkpoints (e.g. base + refiner in SDXL, or separate models for different passes). The filename alone doesn't tell you the model version or hash.
Numonic: Numonic resolves model filenames to their full identity — including version, hash, and CivitAI/HuggingFace source — so you always know exactly which model produced each image.
Quick check whether a PNG file contains ComfyUI workflow metadata.
def is_comfyui_image(filepath: str) -> bool:
    """Return True when the PNG carries ComfyUI metadata chunks.

    ComfyUI embeds its data under the "workflow" and/or "prompt"
    text-chunk keywords; presence of either counts as a match.
    """
    text_chunks = read_png_text_chunks(filepath)  # from snippet #1
    return any(key in text_chunks for key in ("workflow", "prompt"))
# Usage
if is_comfyui_image("mystery_image.png"):
    print("This image was generated with ComfyUI")
else:
    print("No ComfyUI metadata found")

Note: This detects images with intact metadata. Images shared on social media, messaging apps, or certain cloud services may have their PNG chunks stripped during upload.
Numonic: Numonic identifies AI-generated images from multiple tools — not just ComfyUI — using advanced detection that goes far beyond chunk presence checks.
Extract the output resolution from EmptyLatentImage or similar sizing nodes.
def get_image_dimensions(workflow: dict) -> dict | None:
"""Extract image dimensions from EmptyLatentImage node."""
for node in workflow.get("nodes", []):
if node.get("type") != "EmptyLatentImage":
continue
values = node.get("widgets_values", [])
if len(values) >= 3:
return {
"width": values[0],
"height": values[1],
"batch_size": values[2],
}
return None
# Usage: report the latent resolution and batch size, if present.
dims = get_image_dimensions(workflow)
if dims:
    print(f"{dims['width']}x{dims['height']} "
          f"(batch: {dims['batch_size']})")
# 1024x1024 (batch: 1)

Note: This works for txt2img workflows. For img2img, the dimensions come from the input image loader node instead. Upscaler workflows may have multiple resolution stages.
These snippets handle basic cases. Real workflows are messy — custom nodes, multi-pass pipelines, regional prompting, community sampler packs. Numonic handles all of it automatically so you can focus on building, not parsing.
License: All code snippets on this page are released under the MIT License. You are free to use, copy, modify, and distribute them in personal and commercial projects without attribution.
Disclaimer: These snippets are provided "as is" without warranty of any kind, express or implied. They are educational examples for common ComfyUI metadata operations. Numonic Labs Ltd and its contributors are not liable for any damages arising from their use. Always test thoroughly in your own environment before deploying to production.