This commit is contained in:
2026-01-06 21:02:41 +00:00
parent 70e02704b3
commit c48b9cb1a4
716 changed files with 63900 additions and 72021 deletions

6
MaterialX/python/Scripts/README.md Normal file → Executable file
View File

@@ -1,3 +1,3 @@
# Python Code Examples
This folder contains example Python scripts that generate, process, and validate material content using the MaterialX API.
# Python Code Examples
This folder contains example Python scripts that generate, process, and validate material content using the MaterialX API.

145
MaterialX/python/Scripts/baketextures.py Normal file → Executable file
View File

@@ -1,73 +1,72 @@
#!/usr/bin/env python
'''
Generate a baked version of each material in the input document, using the TextureBaker class in the MaterialXRenderGlsl library.
'''
import sys, os, argparse
from sys import platform
import MaterialX as mx
from MaterialX import PyMaterialXRender as mx_render
from MaterialX import PyMaterialXRenderGlsl as mx_render_glsl
if platform == "darwin":
from MaterialX import PyMaterialXRenderMsl as mx_render_msl
def main():
    '''Command-line entry point: load data libraries, read and validate the
    input document, and bake every material in it to textures on disk.'''
    parser = argparse.ArgumentParser(description="Generate a baked version of each material in the input document.")
    parser.add_argument("--width", dest="width", type=int, default=1024, help="Specify the width of baked textures.")
    parser.add_argument("--height", dest="height", type=int, default=1024, help="Specify the height of baked textures.")
    parser.add_argument("--hdr", dest="hdr", action="store_true", help="Save images to hdr format.")
    parser.add_argument("--average", dest="average", action="store_true", help="Average baked images to generate constant values.")
    parser.add_argument("--path", dest="paths", action='append', nargs='+', help="An additional absolute search path location (e.g. '/projects/MaterialX')")
    parser.add_argument("--library", dest="libraries", action='append', nargs='+', help="An additional relative path to a custom data library folder (e.g. 'libraries/custom')")
    parser.add_argument('--writeDocumentPerMaterial', dest='writeDocumentPerMaterial', type=mx.stringToBoolean, default=True, help='Specify whether to write baked materials to separate MaterialX documents. Default is True')
    if platform == "darwin":
        # Fix: argparse type=bool treats any non-empty string (including
        # "False") as True; parse with mx.stringToBoolean instead, matching
        # --writeDocumentPerMaterial above.
        parser.add_argument("--glsl", dest="useGlslBackend", default=False, type=mx.stringToBoolean, help="Set to True to use GLSL backend (default = Metal).")
    parser.add_argument(dest="inputFilename", help="Filename of the input document.")
    parser.add_argument(dest="outputFilename", help="Filename of the output document.")
    opts = parser.parse_args()

    # Load standard and custom data libraries, searching next to the input
    # file plus any user-supplied --path locations.
    stdlib = mx.createDocument()
    searchPath = mx.getDefaultDataSearchPath()
    searchPath.append(os.path.dirname(opts.inputFilename))
    libraryFolders = []
    if opts.paths:
        for pathList in opts.paths:
            for path in pathList:
                searchPath.append(path)
    if opts.libraries:
        for libraryList in opts.libraries:
            for library in libraryList:
                libraryFolders.append(library)
    libraryFolders.extend(mx.getDefaultDataLibraryFolders())
    mx.loadLibraries(libraryFolders, searchPath, stdlib)

    # Read the source document, exiting gracefully if it cannot be found.
    doc = mx.createDocument()
    try:
        mx.readFromXmlFile(doc, opts.inputFilename)
        doc.setDataLibrary(stdlib)
    except mx.ExceptionFileMissing as err:
        print(err)
        sys.exit(0)

    # Validation problems are reported but do not abort the bake.
    valid, msg = doc.validate()
    if not valid:
        print("Validation warnings for input document:")
        print(msg)

    # Construct the texture baker: Metal on macOS unless --glsl was requested.
    baseType = mx_render.BaseType.FLOAT if opts.hdr else mx_render.BaseType.UINT8
    if platform == "darwin" and not opts.useGlslBackend:
        baker = mx_render_msl.TextureBaker.create(opts.width, opts.height, baseType)
    else:
        baker = mx_render_glsl.TextureBaker.create(opts.width, opts.height, baseType)

    # Bake all materials in the document to the output location.
    if opts.average:
        baker.setAverageImages(True)
    baker.writeDocumentPerMaterial(opts.writeDocumentPerMaterial)
    baker.bakeAllMaterials(doc, searchPath, opts.outputFilename)

if __name__ == '__main__':
    main()
#!/usr/bin/env python
'''
Generate a baked version of each material in the input document, using the TextureBaker class in the MaterialXRenderGlsl library.
'''
import sys, os, argparse
from sys import platform
import MaterialX as mx
from MaterialX import PyMaterialXRender as mx_render
from MaterialX import PyMaterialXRenderGlsl as mx_render_glsl
if platform == "darwin":
from MaterialX import PyMaterialXRenderMsl as mx_render_msl
def main():
    '''Command-line entry point: read the input document, load data libraries,
    validate, and bake every material to textures on disk.'''
    parser = argparse.ArgumentParser(description="Generate a baked version of each material in the input document.")
    parser.add_argument("--width", dest="width", type=int, default=1024, help="Specify the width of baked textures.")
    parser.add_argument("--height", dest="height", type=int, default=1024, help="Specify the height of baked textures.")
    parser.add_argument("--hdr", dest="hdr", action="store_true", help="Save images to hdr format.")
    parser.add_argument("--average", dest="average", action="store_true", help="Average baked images to generate constant values.")
    parser.add_argument("--path", dest="paths", action='append', nargs='+', help="An additional absolute search path location (e.g. '/projects/MaterialX')")
    parser.add_argument("--library", dest="libraries", action='append', nargs='+', help="An additional relative path to a custom data library folder (e.g. 'libraries/custom')")
    # Fix: "seprate" -> "separate" in the help text.
    parser.add_argument('--writeDocumentPerMaterial', dest='writeDocumentPerMaterial', type=mx.stringToBoolean, default=True, help='Specify whether to write baked materials to separate MaterialX documents. Default is True')
    if platform == "darwin":
        # Fix: argparse type=bool treats any non-empty string (including
        # "False") as True; parse with mx.stringToBoolean instead, matching
        # --writeDocumentPerMaterial above.
        parser.add_argument("--glsl", dest="useGlslBackend", default=False, type=mx.stringToBoolean, help="Set to True to use GLSL backend (default = Metal).")
    parser.add_argument(dest="inputFilename", help="Filename of the input document.")
    parser.add_argument(dest="outputFilename", help="Filename of the output document.")
    opts = parser.parse_args()

    # Read the source document, exiting gracefully if it cannot be found.
    doc = mx.createDocument()
    try:
        mx.readFromXmlFile(doc, opts.inputFilename)
    except mx.ExceptionFileMissing as err:
        print(err)
        sys.exit(0)

    # Load standard and custom data libraries, searching next to the input
    # file plus any user-supplied --path locations.
    stdlib = mx.createDocument()
    searchPath = mx.getDefaultDataSearchPath()
    searchPath.append(os.path.dirname(opts.inputFilename))
    libraryFolders = []
    if opts.paths:
        for pathList in opts.paths:
            for path in pathList:
                searchPath.append(path)
    if opts.libraries:
        for libraryList in opts.libraries:
            for library in libraryList:
                libraryFolders.append(library)
    libraryFolders.extend(mx.getDefaultDataLibraryFolders())
    mx.loadLibraries(libraryFolders, searchPath, stdlib)
    doc.importLibrary(stdlib)

    # Validation problems are reported but do not abort the bake.
    valid, msg = doc.validate()
    if not valid:
        print("Validation warnings for input document:")
        print(msg)

    # Construct the texture baker: Metal on macOS unless --glsl was requested.
    baseType = mx_render.BaseType.FLOAT if opts.hdr else mx_render.BaseType.UINT8
    if platform == "darwin" and not opts.useGlslBackend:
        baker = mx_render_msl.TextureBaker.create(opts.width, opts.height, baseType)
    else:
        baker = mx_render_glsl.TextureBaker.create(opts.width, opts.height, baseType)

    # Bake all materials in the document to the output location.
    if opts.average:
        baker.setAverageImages(True)
    baker.writeDocumentPerMaterial(opts.writeDocumentPerMaterial)
    baker.bakeAllMaterials(doc, searchPath, opts.outputFilename)

if __name__ == '__main__':
    main()

View File

@@ -1,923 +0,0 @@
#!/usr/bin/env python
'''
Compare node definitions between a specification Markdown document and a
data library MaterialX document.
Report any differences between the two in their supported node sets, typed
node signatures, and default values.
'''
import argparse
import re
from dataclasses import dataclass, field
from enum import Enum
from itertools import product
from pathlib import Path
import MaterialX as mx
# -----------------------------------------------------------------------------
# Type System
# -----------------------------------------------------------------------------
def loadStandardLibraries():
    '''Load the standard MaterialX data libraries into a fresh document and return it.'''
    library = mx.createDocument()
    mx.loadLibraries(mx.getDefaultDataLibraryFolders(), mx.getDefaultDataSearchPath(), library)
    return library
def getStandardTypes(stdlib):
    '''Return the set of type names declared by the library's TypeDefs.'''
    names = set()
    for typeDef in stdlib.getTypeDefs():
        names.add(typeDef.getName())
    return names
def buildTypeGroups(stdlib):
    '''
    Derive type groups from standard library TypeDef naming patterns:
    colorN/vectorN from e.g. color3/vector2, matrixNN from matrix33/matrix44.
    '''
    colorVectorPattern = re.compile(r'^(color|vector)(\d)$')
    # Backreference requires equal digits, so only square matrices qualify.
    matrixPattern = re.compile(r'^matrix(\d)\1$')
    groups = {}
    for typeDef in stdlib.getTypeDefs():
        typeName = typeDef.getName()
        cvMatch = colorVectorPattern.match(typeName)
        if cvMatch:
            groups.setdefault(cvMatch.group(1) + 'N', set()).add(typeName)
        elif matrixPattern.match(typeName):
            groups.setdefault('matrixNN', set()).add(typeName)
    return groups
def buildTypeGroupVariables(typeGroups):
    '''Map "must differ" variable names (e.g. colorM) to their base groups (colorN).

    Only single-N groups get a variable; matrixNN is excluded.
    '''
    return {
        groupName[:-1] + 'M': groupName
        for groupName in typeGroups
        if groupName.endswith('N') and not groupName.endswith('NN')
    }
def parseSpecTypes(typeStr):
    '''
    Parse a specification type string into (types, typeRef).

    Handles simple types ("float"), comma lists ("float, color3"), unions
    with "or" ("BSDF, EDF, or VDF"), and "Same as X [or Y]" references,
    where the referenced port name is returned as typeRef.
    '''
    if not typeStr or not typeStr.strip():
        return set(), None
    typeStr = typeStr.strip()
    # "Same as X" / "Same as X or Y": X becomes the typeRef, Y (if any) is
    # parsed recursively into explicit alternatives.
    refMatch = re.match(r'^Same as\s+`?(\w+)`?(?:\s+or\s+(.+))?$', typeStr, re.IGNORECASE)
    if refMatch:
        alternatives = refMatch.group(2)
        altTypes = parseSpecTypes(alternatives)[0] if alternatives else set()
        return altTypes, refMatch.group(1)
    # Treat "or" as just another separator, then split on commas.
    parts = re.sub(r',?\s+or\s+', ', ', typeStr).split(',')
    return {part.strip() for part in parts if part.strip()}, None
def expandTypeSet(types, typeGroups, typeGroupVariables):
    '''Expand group names to concrete types.

    Returns a list of (concreteType, groupName) tuples; groupName is None
    for types that belong to no group, and the variable name (e.g. colorM)
    for variable-group members.
    '''
    expanded = []
    for typeName in types:
        if typeName in typeGroups:
            expanded.extend((concrete, typeName) for concrete in typeGroups[typeName])
        elif typeName in typeGroupVariables:
            baseGroup = typeGroupVariables[typeName]
            expanded.extend((concrete, typeName) for concrete in typeGroups[baseGroup])
        else:
            expanded.append((typeName, None))
    return expanded
# -----------------------------------------------------------------------------
# Data Classes
# -----------------------------------------------------------------------------
class MatchType(Enum):
    '''Types of signature matches between spec and library.'''
    EXACT = 'exact'  # Identical inputs and outputs
    DIFFERENT_INPUTS = 'different_inputs'  # Same outputs but different inputs
class DiffType(Enum):
    '''Categories of differences between spec and library, with display labels.

    The enum value doubles as the section heading printed for each category.
    '''
    # Invalid specification entries
    SPEC_COLUMN_MISMATCH = 'Column Count Mismatches in Specification'
    SPEC_EMPTY_PORT_NAME = 'Empty Port Names in Specification'
    SPEC_UNRECOGNIZED_TYPE = 'Unrecognized Types in Specification'
    # Node-level differences
    NODE_MISSING_IN_LIBRARY = 'Nodes in Specification but not Data Library'
    NODE_MISSING_IN_SPEC = 'Nodes in Data Library but not Specification'
    # Signature-level differences
    SIGNATURE_DIFFERENT_INPUTS = 'Nodes with Different Input Sets'
    SIGNATURE_MISSING_IN_LIBRARY = 'Node Signatures in Specification but not Data Library'
    SIGNATURE_MISSING_IN_SPEC = 'Node Signatures in Data Library but not Specification'
    # Default value differences
    DEFAULT_MISMATCH = 'Default Value Mismatches'
@dataclass
class PortInfo:
    '''Information about an input or output port from the specification.'''
    name: str
    types: set = field(default_factory=set)  # Explicit type names listed in the spec
    typeRef: str = None  # Referenced port name for "Same as X" patterns
    default: str = None  # Spec default string (before type-specific expansion)
@dataclass(frozen=True)
class NodeSignature:
    '''A typed combination of inputs and outputs, corresponding to one nodedef.

    Equality and hashing use only the sorted input/output tuples, so two
    signatures with the same ports in different declaration order compare
    equal; the _display* fields keep the original port order for printing.
    '''
    inputs: tuple  # ((name, type), ...) sorted for hashing
    outputs: tuple  # ((name, type), ...) sorted for hashing
    _displayInputs: tuple = None  # Original-order inputs, used only by __str__
    _displayOutputs: tuple = None  # Original-order outputs, used only by __str__

    @classmethod
    def create(cls, inputs, outputs):
        '''Create a NodeSignature from input/output dicts of name -> type.'''
        return cls(
            inputs=tuple(sorted(inputs.items())),
            outputs=tuple(sorted(outputs.items())),
            _displayInputs=tuple(inputs.items()),
            _displayOutputs=tuple(outputs.items()),
        )

    def __hash__(self):
        # Hash ignores the display fields so order-insensitive duplicates
        # collapse when stored in sets.
        return hash((self.inputs, self.outputs))

    def __eq__(self, other):
        if not isinstance(other, NodeSignature):
            return False
        return self.inputs == other.inputs and self.outputs == other.outputs

    def __str__(self):
        insStr = ', '.join(f'{n}:{t}' for n, t in self._displayInputs)
        outsStr = ', '.join(f'{n}:{t}' for n, t in self._displayOutputs)
        return f'({insStr}) -> {outsStr}'
@dataclass
class NodeInfo:
    '''A node and its supported signatures.'''
    name: str
    signatures: set = field(default_factory=set)  # Set of NodeSignature
    _specInputs: dict = field(default_factory=dict)  # name -> PortInfo, for default value comparison
@dataclass
class Difference:
    '''A difference found between spec and data library.

    Only the fields relevant to diffType are populated; the rest stay None.
    '''
    diffType: DiffType
    node: str
    port: str = None
    signature: NodeSignature = None
    extraInLib: tuple = None  # Inputs present only in the library signature
    extraInSpec: tuple = None  # Inputs present only in the spec signature
    valueType: str = None  # Type used when comparing default values
    specDefault: str = None  # Formatted spec-side default
    libDefault: str = None  # Formatted library-side default
def formatDifference(diff):
    '''Format a Difference for display, returning a list of lines.'''
    # Default mismatch: show port, type, signature, and both default values.
    if diff.diffType == DiffType.DEFAULT_MISMATCH:
        return [
            f' {diff.node}.{diff.port} ({diff.valueType}):',
            f' Signature: {diff.signature}',
            f' Spec default: {diff.specDefault}',
            f' Data library default: {diff.libDefault}',
        ]
    # Different input sets: list the inputs unique to each side.
    if diff.diffType == DiffType.SIGNATURE_DIFFERENT_INPUTS:
        lines = [f' {diff.node}: {diff.signature}']
        if diff.extraInLib:
            extraStr = ', '.join(f'{n}:{t}' for n, t in diff.extraInLib)
            lines.append(f' Extra in library: {extraStr}')
        if diff.extraInSpec:
            extraStr = ', '.join(f'{n}:{t}' for n, t in diff.extraInSpec)
            lines.append(f' Extra in spec: {extraStr}')
        return lines
    # Signature mismatch (missing in spec or library)
    if diff.signature:
        return [f' {diff.node}: {diff.signature}']
    # Spec validation error with port
    if diff.port:
        return [f' {diff.node}.{diff.port}']
    # Node-level difference or simple spec validation error
    return [f' {diff.node}']
# -----------------------------------------------------------------------------
# Default Value Utilities
# -----------------------------------------------------------------------------
def buildGeompropNames(stdlib):
    '''Return the names of all GeomPropDefs declared in the library.'''
    names = set()
    for geomProp in stdlib.getGeomPropDefs():
        names.add(geomProp.getName())
    return names
def getComponentCount(typeName):
    '''Return the component count of a MaterialX type, or None if unrecognized.'''
    if typeName in ('float', 'integer', 'boolean'):
        return 1
    # colorN / vectorN carry their size in the final digit.
    cvMatch = re.match(r'^(color|vector)(\d)$', typeName)
    if cvMatch:
        return int(cvMatch.group(2))
    # matrixNN has rows * columns components.
    mMatch = re.match(r'^matrix(\d)(\d)$', typeName)
    if mMatch:
        return int(mMatch.group(1)) * int(mMatch.group(2))
    return None
def expandDefaultPlaceholder(placeholder, typeName):
    '''Expand a spec placeholder ('0', '1', '0.5') to a type-appropriate value string.

    Returns None when the type is unrecognized or the placeholder does not
    apply to it (e.g. 0.5 for integer/boolean).
    '''
    componentCount = getComponentCount(typeName)
    if componentCount is None:
        return None
    if placeholder == '0':
        return 'false' if typeName == 'boolean' else ', '.join(['0'] * componentCount)
    if placeholder == '1':
        if typeName == 'boolean':
            return 'true'
        # '1' means the identity matrix for matrix types, not all-ones.
        identityMatrices = {
            'matrix33': '1, 0, 0, 0, 1, 0, 0, 0, 1',
            'matrix44': '1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1',
        }
        if typeName in identityMatrices:
            return identityMatrices[typeName]
        return ', '.join(['1'] * componentCount)
    if placeholder == '0.5':
        # Half values make no sense for integral/boolean types.
        if typeName in ('integer', 'boolean'):
            return None
        return ', '.join(['0.5'] * componentCount)
    return None
def parseSpecDefault(value, specDefaultNotation):
    '''Normalize a spec default string via the notation table (e.g. __zero__ -> 0).'''
    if value is None:
        return None
    stripped = value.strip()
    # Unknown notation passes through unchanged.
    return specDefaultNotation.get(stripped, stripped)
def expandSpecDefaultToValue(specDefault, valueType, geompropNames):
    '''Convert a spec default to a typed MaterialX value. Returns (value, isGeomprop).

    Geomprop references are returned as name strings (isGeomprop=True);
    other defaults are parsed into typed values, or (None, False) on failure.
    '''
    if not specDefault:
        return None, False
    # Geomprop references stay as strings and are compared by name.
    if specDefault in geompropNames:
        return specDefault, True
    # Replace placeholder notation with a concrete per-component string.
    expanded = expandDefaultPlaceholder(specDefault, valueType)
    valueString = specDefault if expanded is None else expanded
    try:
        return mx.createValueFromStrings(valueString, valueType), False
    except Exception:
        return None, False
def formatDefaultValue(value, valueType, geompropNames):
    '''Render a default value using spec notation (__zero__, __one__, __half__, _geomprop_).'''
    if value is None:
        return 'None'
    if isinstance(value, str):
        # Geomprops display as _name_; the empty string as __empty__.
        if value in geompropNames:
            return f'_{value}_'
        if value == '':
            return '__empty__'
        return value
    # Try to recognize the standard constants for this type.
    for placeholder, label in (('0', '__zero__'), ('1', '__one__'), ('0.5', '__half__')):
        expansion = expandDefaultPlaceholder(placeholder, valueType)
        if expansion is None:
            continue
        try:
            if value == mx.createValueFromStrings(expansion, valueType):
                return label
        except Exception:
            pass
    # Fall back to the plain string representation.
    return str(value)
# -----------------------------------------------------------------------------
# Markdown Table Parsing
# -----------------------------------------------------------------------------
def parseMarkdownTable(lines, startIdx):
    '''Parse a markdown table starting at startIdx.

    Returns (rows, columnMismatchCount, endIndex), where rows are dicts
    keyed by lowercased header names and endIndex is the first line past
    the table. Returns ([], 0, startIdx) if no table starts here.
    '''
    def splitRow(text):
        # Drop the leading/trailing empty cells around the outer pipes and
        # strip backtick quoting from each cell.
        return [cell.strip().strip('`') for cell in text.split('|')[1:-1]]

    idx = startIdx
    if idx >= len(lines) or '|' not in lines[idx]:
        return [], 0, startIdx
    headers = splitRow(lines[idx].strip())
    idx += 1
    # The separator row (|---|---|) must immediately follow the header.
    if idx >= len(lines) or '|' not in lines[idx] or '-' not in lines[idx]:
        return [], 0, startIdx
    idx += 1
    rows = []
    mismatches = 0
    while idx < len(lines):
        line = lines[idx].strip()
        if not line or not line.startswith('|'):
            break
        cells = splitRow(line)
        if len(cells) == len(headers):
            rows.append({header.lower(): cell for header, cell in zip(headers, cells)})
        else:
            mismatches += 1
        idx += 1
    return rows, mismatches, idx
# -----------------------------------------------------------------------------
# Specification Document Parsing
# -----------------------------------------------------------------------------
def isValidTypeGroupAssignment(driverNames, combo, typeGroupVariables):
    '''
    Validate that a combination of concrete types satisfies group constraints:
    every use of one group must share a concrete type, and a variable group
    (colorM) must differ from its base group (colorN) when both are assigned.
    Returns (isValid, typeAssignment); typeAssignment is None when invalid.
    '''
    typeAssignment = {}
    groupAssignments = {}  # groupName -> concrete type chosen for it
    for portName, (concreteType, groupName) in zip(driverNames, combo):
        typeAssignment[portName] = concreteType
        # None types are resolved later via typeRef; ungrouped types are free.
        if concreteType is None or not groupName:
            continue
        # All uses of the same group must agree on one concrete type.
        previous = groupAssignments.get(groupName)
        if previous is not None and previous != concreteType:
            return False, None
        groupAssignments[groupName] = concreteType
        # A variable group must differ from its already-assigned base group.
        baseGroup = typeGroupVariables.get(groupName)
        if baseGroup is not None and groupAssignments.get(baseGroup) == concreteType:
            return False, None
    return True, typeAssignment
def expandSpecSignatures(inputs, outputs, typeGroups, typeGroupVariables):
    '''
    Expand spec port definitions into concrete NodeSignatures.

    Handles type groups (colorN), type group variables (colorM), and
    "Same as X or Y" patterns. Ports whose type is purely "Same as X" are
    not drivers: their type is filled in per combination from the referenced
    port by resolveTypeAssignment.
    '''
    allPorts = {**inputs, **outputs}
    # Identify driver ports and their type options
    # - Ports with explicit types (no typeRef): use those types
    # - Ports with both types AND typeRef ("Same as X or Y"): explicit types OR inherit from typeRef
    drivers = {}
    for name, port in allPorts.items():
        if port.types and not port.typeRef:
            # Normal driver: explicit types only
            drivers[name] = expandTypeSet(port.types, typeGroups, typeGroupVariables)
        elif port.types and port.typeRef:
            # "Same as X or Y" pattern: explicit types OR inherit from typeRef
            expanded = expandTypeSet(port.types, typeGroups, typeGroupVariables)
            expanded.append((None, None))  # None means "inherit from typeRef"
            drivers[name] = expanded
    if not drivers:
        return set()
    # Generate all combinations of driver types (names sorted for determinism)
    driverNames = sorted(drivers.keys())
    driverTypeLists = [drivers[n] for n in driverNames]
    signatures = set()
    for combo in product(*driverTypeLists):
        # Validate type group constraints (skip None values which will be resolved via typeRef)
        valid, typeAssignment = isValidTypeGroupAssignment(driverNames, combo, typeGroupVariables)
        if not valid:
            continue
        # Remove None assignments - these ports will be resolved via typeRef
        typeAssignment = {k: v for k, v in typeAssignment.items() if v is not None}
        # Resolve typeRefs for this combination
        resolved = resolveTypeAssignment(typeAssignment, allPorts)
        if resolved is None:
            continue
        # Build signature
        sigInputs = {name: resolved[name] for name in inputs if name in resolved}
        sigOutputs = {name: resolved[name] for name in outputs if name in resolved}
        signatures.add(NodeSignature.create(sigInputs, sigOutputs))
    return signatures
def resolveTypeAssignment(baseAssignment, allPorts):
    '''Complete a partial type assignment by following "Same as X" references.

    Returns the full port->type assignment, or None if some port could not
    be resolved (e.g. circular or dangling references).
    '''
    assignment = dict(baseAssignment)
    # Bounded iteration guards against circular reference chains.
    for _ in range(10):
        progressed = False
        for portName, port in allPorts.items():
            if portName in assignment or not port.typeRef:
                continue
            if port.typeRef in assignment:
                assignment[portName] = assignment[port.typeRef]
                progressed = True
        if not progressed:
            break
    # Every port must have ended up with a type.
    if set(assignment.keys()) != set(allPorts.keys()):
        return None
    return assignment
def resolvePortTypeRefs(ports):
    '''Copy types across "Same as X" references, clearing each resolved typeRef.

    Mutates ports in place; iteration is bounded to tolerate circular refs.
    '''
    for _ in range(10):
        progressed = False
        for port in ports.values():
            if not port.typeRef:
                continue
            referenced = ports.get(port.typeRef)
            if referenced is not None and referenced.types:
                port.types.update(referenced.types)
                port.typeRef = None
                progressed = True
        if not progressed:
            break
def parseSpecDocument(specPath, stdlib, geompropNames):
    '''Parse a specification markdown document. Returns (nodes, invalidEntries).

    nodes maps node name -> NodeInfo; invalidEntries collects Difference
    records for malformed spec rows (bad column counts, empty port names,
    unrecognized types). Node headers look like "### `nodename`" and each
    following table whose header contains "Port" defines one signature set.
    '''
    # Build type system data from stdlib
    standardTypes = getStandardTypes(stdlib)
    typeGroups = buildTypeGroups(stdlib)
    typeGroupVariables = buildTypeGroupVariables(typeGroups)
    # Build derived values for validation and parsing
    knownTypes = standardTypes | set(typeGroups.keys()) | set(typeGroupVariables.keys())
    # Maps spec placeholder notation to raw value strings; geomprops use _name_.
    specDefaultNotation = {
        '__zero__': '0',
        '__one__': '1',
        '__half__': '0.5',
        '__empty__': '',
    }
    for name in geompropNames:
        specDefaultNotation[f'_{name}_'] = name
    nodes = {}
    invalidEntries = []
    with open(specPath, 'r', encoding='utf-8') as f:
        content = f.read()
    lines = content.split('\n')
    currentNode = None
    currentTableInputs = {}
    currentTableOutputs = {}
    idx = 0

    def finalizeCurrentTable():
        '''Expand current table to signatures and add to node.'''
        nonlocal currentTableInputs, currentTableOutputs
        if currentNode and (currentTableInputs or currentTableOutputs):
            node = nodes[currentNode]
            # Expand to signatures (do NOT pre-resolve typeRefs - expansion handles them)
            tableSigs = expandSpecSignatures(currentTableInputs, currentTableOutputs, typeGroups, typeGroupVariables)
            node.signatures.update(tableSigs)
            # Merge input port info for default comparison (resolve types for defaults)
            allPorts = {**currentTableInputs, **currentTableOutputs}
            resolvePortTypeRefs(allPorts)
            for name, port in currentTableInputs.items():
                if name not in node._specInputs:
                    node._specInputs[name] = port
                else:
                    node._specInputs[name].types.update(port.types)
        currentTableInputs = {}
        currentTableOutputs = {}

    while idx < len(lines):
        line = lines[idx]
        # Look for node headers (### `nodename`)
        nodeMatch = re.match(r'^###\s+`([^`]+)`', line)
        if nodeMatch:
            # Finalize previous table before switching nodes
            finalizeCurrentTable()
            currentNode = nodeMatch.group(1)
            if currentNode not in nodes:
                nodes[currentNode] = NodeInfo(name=currentNode)
            idx += 1
            continue
        # Look for tables when we have a current node
        if currentNode and '|' in line and 'Port' in line:
            # Finalize previous table before starting new one
            finalizeCurrentTable()
            rows, columnMismatchCount, idx = parseMarkdownTable(lines, idx)
            # Track column count mismatches
            for _ in range(columnMismatchCount):
                invalidEntries.append(Difference(
                    diffType=DiffType.SPEC_COLUMN_MISMATCH,
                    node=currentNode,
                ))
            if rows:
                for row in rows:
                    portName = row.get('port', '').strip('`*')
                    # Track empty port names
                    if not portName:
                        invalidEntries.append(Difference(
                            diffType=DiffType.SPEC_EMPTY_PORT_NAME,
                            node=currentNode,
                        ))
                        continue
                    portType = row.get('type', '')
                    portDefault = row.get('default', '')
                    portDesc = row.get('description', '')
                    types, typeRef = parseSpecTypes(portType)
                    # Track unrecognized types
                    if types - knownTypes:
                        invalidEntries.append(Difference(
                            diffType=DiffType.SPEC_UNRECOGNIZED_TYPE,
                            node=currentNode,
                            port=portName,
                        ))
                    # Determine if this is an output port
                    isOutput = portName == 'out' or portDesc.lower().startswith('output')
                    target = currentTableOutputs if isOutput else currentTableInputs
                    # Create port info for this table
                    portInfo = target.setdefault(portName, PortInfo(
                        name=portName,
                        default=parseSpecDefault(portDefault, specDefaultNotation),
                    ))
                    portInfo.types.update(types)
                    if typeRef and not portInfo.typeRef:
                        portInfo.typeRef = typeRef
            continue
        idx += 1
    # Finalize the last table
    finalizeCurrentTable()
    return nodes, invalidEntries
# -----------------------------------------------------------------------------
# Data Library Loading
# -----------------------------------------------------------------------------
def loadDataLibrary(mtlxPath):
    '''Load a data library MTLX document.

    Returns (nodes, defaults): nodes maps node name -> NodeInfo, and
    defaults maps (nodeName, signature) -> {portName: (value, isGeomprop)}.
    '''
    document = mx.createDocument()
    mx.readFromXmlFile(document, str(mtlxPath))
    nodes = {}
    defaults = {}
    for nodeDef in document.getNodeDefs():
        nodeName = nodeDef.getNodeString()
        info = nodes.setdefault(nodeName, NodeInfo(name=nodeName))
        # Each nodedef contributes one typed signature.
        inputTypes = {port.getName(): port.getType() for port in nodeDef.getInputs()}
        outputTypes = {port.getName(): port.getType() for port in nodeDef.getOutputs()}
        signature = NodeSignature.create(inputTypes, outputTypes)
        info.signatures.add(signature)
        # Record per-signature input defaults; geomprops kept as name strings.
        portDefaults = {}
        for port in nodeDef.getInputs():
            if port.hasDefaultGeomPropString():
                portDefaults[port.getName()] = (port.getDefaultGeomPropString(), True)
            elif port.getValue() is not None:
                portDefaults[port.getName()] = (port.getValue(), False)
        if portDefaults:
            defaults[(nodeName, signature)] = portDefaults
    return nodes, defaults
# -----------------------------------------------------------------------------
# Comparison Logic
# -----------------------------------------------------------------------------
def compareSignatureDefaults(nodeName, signature, specNode, libDefaults, geompropNames):
    '''Compare default values for a matching signature. Returns list of Differences.

    Only inputs with a resolvable default on BOTH sides are compared;
    geomprop defaults compare by name, typed defaults by value equality.
    '''
    differences = []
    for portName, valueType in signature.inputs:
        specPort = specNode._specInputs.get(portName)
        if not specPort or not specPort.default:
            continue
        specValue, specIsGeomprop = expandSpecDefaultToValue(specPort.default, valueType, geompropNames)
        libValue, libIsGeomprop = libDefaults.get(portName, (None, False))
        # Skip if either value is unavailable
        if specValue is None or libValue is None:
            continue
        # Compare values (geomprops compare as strings, typed values use equality);
        # a geomprop on one side and a typed value on the other never match.
        valuesMatch = (specValue == libValue) if (specIsGeomprop == libIsGeomprop) else False
        if not valuesMatch:
            differences.append(Difference(
                diffType=DiffType.DEFAULT_MISMATCH,
                node=nodeName,
                port=portName,
                signature=signature,
                valueType=valueType,
                specDefault=formatDefaultValue(specValue, valueType, geompropNames),
                libDefault=formatDefaultValue(libValue, valueType, geompropNames),
            ))
    return differences
def findLibraryMatch(specSig, libSigs):
    '''Find a library signature matching a spec signature.

    Returns (matchType, libSig, extraInLib, extraInSpec); all None when no
    match is found. A DIFFERENT_INPUTS match requires identical outputs and
    identical types on any input names the two signatures share.
    '''
    specInputs = set(specSig.inputs)
    specOutputs = set(specSig.outputs)
    for libSig in libSigs:
        if set(libSig.outputs) != specOutputs:
            continue
        libInputs = set(libSig.inputs)
        if libInputs == specInputs:
            return MatchType.EXACT, libSig, None, None
        # Same outputs, different inputs: accept only when shared input
        # names agree on their types.
        sharedNames = {name for name, _ in specInputs} & {name for name, _ in libInputs}
        if sharedNames:
            specByName = dict(specSig.inputs)
            libByName = dict(libSig.inputs)
            if any(specByName[name] != libByName[name] for name in sharedNames):
                continue
        return (MatchType.DIFFERENT_INPUTS, libSig,
                tuple(sorted(libInputs - specInputs)),
                tuple(sorted(specInputs - libInputs)))
    return None, None, None, None
def compareNodes(specNodes, libNodes, libDefaults, geompropNames, compareDefaults=False):
    '''Compare nodes between spec and library. Returns list of Differences.

    Reports nodes missing on either side, then for each common node:
    signature matches (exact or same-outputs/different-inputs, optionally
    with default-value comparison) and signatures present on only one side.
    '''
    differences = []
    specNames = set(specNodes.keys())
    libNames = set(libNodes.keys())
    # Nodes in spec but not in library
    for nodeName in sorted(specNames - libNames):
        differences.append(Difference(
            diffType=DiffType.NODE_MISSING_IN_LIBRARY,
            node=nodeName))
    # Nodes in library but not in spec
    for nodeName in sorted(libNames - specNames):
        differences.append(Difference(
            diffType=DiffType.NODE_MISSING_IN_SPEC,
            node=nodeName))
    # Compare signatures for common nodes
    for nodeName in sorted(specNames & libNames):
        specNode = specNodes[nodeName]
        libNode = libNodes[nodeName]
        specSigs = specNode.signatures
        libSigs = libNode.signatures
        # Track which signatures have been matched
        matchedLibSigs = set()
        matchedSpecSigs = set()
        inputDiffMatches = []  # (specSig, libSig, extraInLib, extraInSpec)
        # For each spec signature, find matching library signature
        for specSig in specSigs:
            matchType, libSig, extraInLib, extraInSpec = findLibraryMatch(specSig, libSigs)
            if matchType == MatchType.EXACT:
                matchedLibSigs.add(libSig)
                matchedSpecSigs.add(specSig)
                # Compare defaults for exact matches
                if compareDefaults:
                    sigDefaults = libDefaults.get((nodeName, libSig), {})
                    differences.extend(compareSignatureDefaults(
                        nodeName, specSig, specNode, sigDefaults, geompropNames))
            elif matchType == MatchType.DIFFERENT_INPUTS:
                matchedLibSigs.add(libSig)
                matchedSpecSigs.add(specSig)
                inputDiffMatches.append((specSig, libSig, extraInLib, extraInSpec))
                # Compare defaults for different input matches too (for common ports)
                if compareDefaults:
                    sigDefaults = libDefaults.get((nodeName, libSig), {})
                    differences.extend(compareSignatureDefaults(
                        nodeName, specSig, specNode, sigDefaults, geompropNames))
        # Report different input set matches (sorted for stable output)
        for specSig, libSig, extraInLib, extraInSpec in sorted(inputDiffMatches, key=lambda x: str(x[0])):
            differences.append(Difference(
                diffType=DiffType.SIGNATURE_DIFFERENT_INPUTS,
                node=nodeName,
                signature=specSig,
                extraInLib=extraInLib,
                extraInSpec=extraInSpec,
            ))
        # Spec signatures not matched by any library signature
        for specSig in sorted(specSigs - matchedSpecSigs, key=str):
            differences.append(Difference(
                diffType=DiffType.SIGNATURE_MISSING_IN_LIBRARY,
                node=nodeName,
                signature=specSig,
            ))
        # Library signatures not matched by any spec signature
        for libSig in sorted(libSigs - matchedLibSigs, key=str):
            differences.append(Difference(
                diffType=DiffType.SIGNATURE_MISSING_IN_SPEC,
                node=nodeName,
                signature=libSig,
            ))
    return differences
# -----------------------------------------------------------------------------
# Output Formatting
# -----------------------------------------------------------------------------
def printDifferences(differences):
    '''Write a formatted comparison report for the given differences to standard output.'''
    if not differences:
        print("No differences found between specification and data library.")
        return
    # Bucket the differences by their DiffType.
    grouped = {}
    for entry in differences:
        grouped.setdefault(entry.diffType, []).append(entry)
    banner = '=' * 70
    print('\n' + banner)
    print(f"COMPARISON RESULTS: {len(differences)} difference(s) found")
    print(banner)
    # Emit each populated bucket in DiffType declaration order.
    for kind in DiffType:
        if kind in grouped:
            bucket = grouped[kind]
            print(f"\n{kind.value} ({len(bucket)}):")
            print("-" * 50)
            for entry in bucket:
                for text in formatDifference(entry):
                    print(text)
# -----------------------------------------------------------------------------
# Main Entry Point
# -----------------------------------------------------------------------------
def main():
    '''Command-line entry point: parse options, load the specification and the
    data library, compare their node definitions, and print a report.'''
    parser = argparse.ArgumentParser(
        description="Compare node definitions between a specification Markdown document and a data library MaterialX document.")
    parser.add_argument('--spec', dest='specFile',
                        help='Path to the specification Markdown document. Defaults to documents/Specification/MaterialX.StandardNodes.md')
    parser.add_argument('--mtlx', dest='mtlxFile',
                        help='Path to the data library MaterialX document. Defaults to libraries/stdlib/stdlib_defs.mtlx')
    parser.add_argument('--defaults', dest='compareDefaults', action='store_true',
                        help='Compare default values for inputs using MaterialX typed value comparison')
    parser.add_argument('--listNodes', dest='listNodes', action='store_true',
                        help='List all nodes and their node signature counts')
    opts = parser.parse_args()
    # Determine file paths, falling back to the repository's standard locations
    # (three levels up from this script).
    repoRoot = Path(__file__).resolve().parent.parent.parent
    specPath = Path(opts.specFile) if opts.specFile else repoRoot / 'documents' / 'Specification' / 'MaterialX.StandardNodes.md'
    mtlxPath = Path(opts.mtlxFile) if opts.mtlxFile else repoRoot / 'libraries' / 'stdlib' / 'stdlib_defs.mtlx'
    # Verify files exist before doing any parsing work.
    if not specPath.exists():
        raise FileNotFoundError(f"Specification document not found: {specPath}")
    if not mtlxPath.exists():
        raise FileNotFoundError(f"MTLX document not found: {mtlxPath}")
    print(f"Comparing:")
    print(f"  Specification: {specPath}")
    print(f"  Data Library: {mtlxPath}")
    # Load standard libraries used to resolve types and geomprops.
    stdlib = loadStandardLibraries()
    geompropNames = buildGeompropNames(stdlib)
    # Parse specification Markdown into per-node signature sets.
    print("\nParsing specification...")
    specNodes, invalidEntries = parseSpecDocument(specPath, stdlib, geompropNames)
    specSigCount = sum(len(n.signatures) for n in specNodes.values())
    print(f"  Found {len(specNodes)} nodes with {specSigCount} node signatures")
    if invalidEntries:
        print(f"  Found {len(invalidEntries)} invalid specification entries")
    # Load data library nodedefs and their input defaults.
    print("Loading data library...")
    libNodes, libDefaults = loadDataLibrary(mtlxPath)
    libSigCount = sum(len(n.signatures) for n in libNodes.values())
    print(f"  Found {len(libNodes)} nodes with {libSigCount} node signatures")
    # List nodes if requested (informational only; comparison still runs).
    if opts.listNodes:
        print("\nNodes in Specification:")
        for name in sorted(specNodes.keys()):
            node = specNodes[name]
            print(f"  {name}: {len(node.signatures)} signature(s)")
        print("\nNodes in Data Library:")
        for name in sorted(libNodes.keys()):
            node = libNodes[name]
            print(f"  {name}: {len(node.signatures)} signature(s)")
    # Compare node signatures between the two sources.
    print("\nComparing node signatures...")
    differences = compareNodes(specNodes, libNodes, libDefaults, geompropNames, opts.compareDefaults)
    # Invalid spec entries are reported first, ahead of comparison results.
    differences = invalidEntries + differences
    # Print the final report.
    printDifferences(differences)
# Allow the script to be executed directly from the command line.
if __name__ == '__main__':
    main()

536
MaterialX/python/Scripts/creatematerial.py Normal file → Executable file
View File

@@ -1,268 +1,268 @@
#!/usr/bin/env python
'''
Construct a MaterialX file from the textures in the given folder, using the standard data libraries
to build a mapping from texture filenames to shader inputs.
By default the standard_surface shading model is assumed, with the --shadingModel option used to
select any other shading model in the data libraries.
'''
import os
import re
import argparse
from difflib import SequenceMatcher
import MaterialX as mx
UDIM_TOKEN = '.<UDIM>.'
UDIM_REGEX = r'\.\d+\.'
TEXTURE_EXTENSIONS = [ "exr", "png", "jpg", "jpeg", "tif", "hdr" ]
INPUT_ALIASES = { "roughness": "specular_roughness" }
class UdimFilePath(mx.FilePath):
    '''A file path that recognizes UDIM-numbered textures and collects the full tile set.

    If the base name contains a ".<digits>." token (UDIM_REGEX), the path is flagged
    as a UDIM texture and the parent folder is scanned for every sibling tile that
    shares the same name pattern and extension.
    '''
    def __init__(self, pathString):
        super().__init__(pathString)
        self._isUdim = False  # True when the base name carries a UDIM number token
        self._udimFiles = []  # every file in the UDIM set (or just [self])
        self._udimRegex = re.compile(UDIM_REGEX)
        textureDir = self.getParentPath()
        textureName = self.getBaseName()
        textureExtension = self.getExtension()
        if not self._udimRegex.search(textureName):
            # Not a UDIM texture: the set is this single file.
            self._udimFiles = [self]
            return
        self._isUdim = True
        # Replace the concrete UDIM number in the name with the (escaped) UDIM
        # regex itself, producing a pattern that matches every sibling tile.
        fullNamePattern = self._udimRegex.sub(self._udimRegex.pattern.replace('\\', '\\\\'),
                                              textureName)
        udimFiles = filter(
            lambda f: re.search(fullNamePattern, f.asString()),
            textureDir.getFilesInDirectory(textureExtension)
        )
        self._udimFiles = [textureDir / f for f in udimFiles]
    def __str__(self):
        return self.asPattern()
    def asPattern(self):
        '''Return the path as a string with the UDIM number replaced by the <UDIM> token.'''
        if not self._isUdim:
            return self.asString()
        textureDir = self.getParentPath()
        textureName = self.getBaseName()
        pattern = textureDir / mx.FilePath(
            self._udimRegex.sub(UDIM_TOKEN, textureName))
        return pattern.asString()
    def isUdim(self):
        '''Return True when this path is one tile of a UDIM set.'''
        return self._isUdim
    def getUdimFiles(self):
        '''Return every file in the UDIM set (a single-entry list for non-UDIM paths).'''
        return self._udimFiles
    def getUdimNumbers(self):
        '''Return the UDIM number string extracted from each file in the set.

        NOTE(review): a file without a UDIM token maps to None here — presumably
        unreachable because the set was built from the UDIM pattern; confirm.
        '''
        def _extractUdimNumber(_file):
            pattern = self._udimRegex.search(_file.getBaseName())
            if pattern:
                return re.search(r"\d+", pattern.group()).group()
        return list(map(_extractUdimNumber, self._udimFiles))
    def getNameWithoutExtension(self):
        '''Return the base name without extension (and without any UDIM token),
        with punctuation runs replaced by underscores to form a valid node name.'''
        if self._isUdim:
            name = self._udimRegex.split(self.getBaseName())[0]
        else:
            name = self.getBaseName().rsplit('.', 1)[0]
        return re.sub(r'[^\w\s]+', '_', name)
def listTextures(textureDir, texturePrefix=None):
    '''
    Return the texture files in the given folder that match known extensions,
    optionally filtered by a case-insensitive filename prefix. Each UDIM set
    is represented by a single entry.
    '''
    prefix = (texturePrefix or "").lower()
    allTextures = []
    for ext in TEXTURE_EXTENSIONS:
        pending = [textureDir / f for f in textureDir.getFilesInDirectory(ext)
                   if f.asString().lower().startswith(prefix)]
        # Consume candidates front-to-back; each UdimFilePath strips every tile
        # of its own UDIM set so the set is recorded only once.
        while pending:
            texture = UdimFilePath(pending[0].asString())
            allTextures.append(texture)
            for tile in texture.getUdimFiles():
                pending.remove(tile)
    return allTextures
def findBestMatch(textureName, shadingModel):
    '''
    Given a texture name and a shading model nodedef, return the shader input
    whose name is the closest fuzzy match to the texture name.

    Returns None when the shading model has no active inputs or when no input
    scores at least 50% similarity.
    '''
    # Use the last underscore-delimited token as the candidate name, folding a
    # trailing 'color' token into its qualifier (e.g. 'base' + 'color').
    parts = textureName.rsplit("_")
    baseTexName = parts[-1]
    if baseTexName.lower() == 'color':
        baseTexName = ''.join(parts[-2:])
    # Apply aliases case-insensitively. Previously the membership test used the
    # original case while the lookup used lowercase, so mixed-case names (e.g.
    # "Roughness") missed the alias entirely.
    baseTexName = INPUT_ALIASES.get(baseTexName.lower(), baseTexName)
    shaderInputs = shadingModel.getActiveInputs()
    # Guard the empty case: max() on an empty list would raise ValueError.
    if not shaderInputs:
        return None
    # Normalize once, outside the loop: strip non-alphanumerics and lowercase.
    baseTexName = re.sub(r'[^a-zA-Z0-9\s]', '', baseTexName).lower()
    ratios = []
    for shaderInput in shaderInputs:
        inputName = re.sub(r'[^a-zA-Z0-9\s]', '', shaderInput.getName()).lower()
        sequenceScore = SequenceMatcher(None, inputName, baseTexName).ratio()
        ratios.append(sequenceScore * 100)
    highscore = max(ratios)
    # Reject weak matches so unrelated textures are skipped by the caller.
    if highscore < 50:
        return None
    return shaderInputs[ratios.index(highscore)]
def buildDocument(textureFiles, mtlxFile, shadingModel, colorspace, useTiledImage):
    '''
    Build a MaterialX document from the given textures and shading model.

    Each texture is matched to a shader input by name, wired through an image
    (or tiledimage) node inside a nodegraph, and connected to a new shader node.
    Returns the new document, or None if the shading model is unknown.
    '''
    # Find the default library nodedef, if any, for the requested shading model.
    stdlib = mx.createDocument()
    mx.loadLibraries(mx.getDefaultDataLibraryFolders(), mx.getDefaultDataSearchPath(), stdlib)
    matchingNodeDefs = stdlib.getMatchingNodeDefs(shadingModel)
    if not matchingNodeDefs:
        print('Shading model', shadingModel, 'not found in the MaterialX data libraries')
        return None
    shadingModelNodeDef = matchingNodeDefs[0]
    # Prefer the nodedef flagged as the default version of the shading model.
    for nodeDef in matchingNodeDefs:
        if nodeDef.getAttribute('isdefaultversion') == 'true':
            shadingModelNodeDef = nodeDef
    # Create content document: nodegraph, shader node, and material node named
    # after the output file.
    doc = mx.createDocument()
    materialName = mx.createValidName(mtlxFile.getBaseName().rsplit('.', 1)[0])
    nodeGraph = doc.addNodeGraph('NG_' + materialName)
    shaderNode = doc.addNode(shadingModel, 'SR_' + materialName, 'surfaceshader')
    doc.addMaterialNode('M_' + materialName, shaderNode)
    # Iterate over texture files.
    imageNodeCategory = 'tiledimage' if useTiledImage else 'image'
    udimNumbers = set()
    for textureFile in textureFiles:
        textureName = textureFile.getNameWithoutExtension()
        shaderInput = findBestMatch(textureName, shadingModelNodeDef)
        if not shaderInput:
            print('Skipping', textureFile.getBaseName(), 'which does not match any', shadingModel, 'input')
            continue
        inputName = shaderInput.getName()
        inputType = shaderInput.getType()
        # Skip inputs that have already been created, e.g. in multi-UDIM materials.
        if shaderNode.getInput(inputName) or nodeGraph.getChild(textureName):
            continue
        mtlInput = shaderNode.addInput(inputName)
        textureName = nodeGraph.createValidChildName(textureName)
        imageNode = nodeGraph.addNode(imageNodeCategory, textureName, inputType)
        # Set color space on color inputs only; data maps are left untagged.
        if shaderInput.isColorType():
            imageNode.setColorSpace(colorspace)
        # Set file path, stored relative to the output document's folder.
        filePathString = os.path.relpath(textureFile.asPattern(), mtlxFile.getParentPath().asString())
        imageNode.setInputValue('file', filePathString, 'filename')
        # Apply special cases for normal maps: insert a normalmap node for
        # standard_surface so tangent-space textures are decoded.
        inputNode = imageNode
        connNode = imageNode
        inBetweenNodes = []
        if inputName.endswith('normal') and shadingModel == 'standard_surface':
            inBetweenNodes = ["normalmap"]
        for inNodeName in inBetweenNodes:
            connNode = nodeGraph.addNode(inNodeName, textureName + '_' + inNodeName, inputType)
            connNode.setConnectedNode('in', inputNode)
            inputNode = connNode
        # Create output and route the nodegraph into the shader input.
        outputNode = nodeGraph.addOutput(textureName + '_output', inputType)
        outputNode.setConnectedNode(connNode)
        mtlInput.setConnectedOutput(outputNode)
        mtlInput.setType(inputType)
        if textureFile.isUdim():
            udimNumbers.update(set(textureFile.getUdimNumbers()))
    # Create udim set so consumers can resolve the <UDIM> filename token.
    if udimNumbers:
        geomInfoName = doc.createValidChildName('GI_' + materialName)
        geomInfo = doc.addGeomInfo(geomInfoName)
        geomInfo.setGeomPropValue('udimset', list(udimNumbers), "stringarray")
    # Return the new document
    return doc
def main():
    '''Command-line entry point: scan a folder for textures and emit a MaterialX document.'''
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--outputFilename', dest='outputFilename', type=str, help='Filename of the output MaterialX document.')
    parser.add_argument('--shadingModel', dest='shadingModel', type=str, default="standard_surface", help='The shading model used in analyzing input textures.')
    parser.add_argument('--colorSpace', dest='colorSpace', type=str, help='The colorspace in which input textures should be interpreted, defaulting to srgb_texture.')
    parser.add_argument('--texturePrefix', dest='texturePrefix', type=str, help='Filter input textures by the given prefix.')
    parser.add_argument('--tiledImage', dest='tiledImage', action="store_true", help='Request tiledimage nodes instead of image nodes.')
    parser.add_argument(dest='inputDirectory', nargs='?', help='Input folder that will be scanned for textures, defaulting to the current working directory.')
    options = parser.parse_args()
    # Resolve the texture folder, defaulting to the current working directory.
    texturePath = mx.FilePath.getCurrentPath()
    if options.inputDirectory:
        texturePath = mx.FilePath(options.inputDirectory)
        if not texturePath.isDirectory():
            print('Input folder not found:', texturePath)
            return
    # Default the output document to material.mtlx beside the textures.
    mtlxFile = texturePath / mx.FilePath('material.mtlx')
    if options.outputFilename:
        mtlxFile = mx.FilePath(options.outputFilename)
    textureFiles = listTextures(texturePath, texturePrefix=options.texturePrefix)
    if not textureFiles:
        print('No matching textures found in input folder.')
        return
    # Get shading model and color space, applying defaults when unset.
    shadingModel = 'standard_surface'
    colorspace = 'srgb_texture'
    if options.shadingModel:
        shadingModel = options.shadingModel
    if options.colorSpace:
        colorspace = options.colorSpace
    print('Analyzing textures in the', texturePath.asString(), 'folder for the', shadingModel, 'shading model.')
    # Create the MaterialX document.
    doc = buildDocument(textureFiles, mtlxFile, shadingModel, colorspace, options.tiledImage)
    if not doc:
        return
    if options.outputFilename:
        # Write the document to disk, creating the output folder if needed.
        if not mtlxFile.getParentPath().exists():
            mtlxFile.getParentPath().createDirectory()
        mx.writeToXmlFile(doc, mtlxFile.asString())
        print('Wrote MaterialX document to disk:', mtlxFile.asString())
    else:
        # Print the document to the standard output.
        print('Generated MaterialX document:')
        print(mx.writeToXmlString(doc))
# Allow the script to be executed directly from the command line.
if __name__ == '__main__':
    main()
#!/usr/bin/env python
'''
Construct a MaterialX file from the textures in the given folder, using the standard data libraries
to build a mapping from texture filenames to shader inputs.
By default the standard_surface shading model is assumed, with the --shadingModel option used to
select any other shading model in the data libraries.
'''
import os
import re
import argparse
from difflib import SequenceMatcher
import MaterialX as mx
UDIM_TOKEN = '.<UDIM>.'
UDIM_REGEX = r'\.\d+\.'
TEXTURE_EXTENSIONS = [ "exr", "png", "jpg", "jpeg", "tif", "hdr" ]
INPUT_ALIASES = { "roughness": "specular_roughness" }
class UdimFilePath(mx.FilePath):
    '''A file path that recognizes UDIM-numbered textures and collects the full tile set.

    If the base name contains a ".<digits>." token (UDIM_REGEX), the path is flagged
    as a UDIM texture and the parent folder is scanned for every sibling tile that
    shares the same name pattern and extension.
    '''
    def __init__(self, pathString):
        super().__init__(pathString)
        self._isUdim = False  # True when the base name carries a UDIM number token
        self._udimFiles = []  # every file in the UDIM set (or just [self])
        self._udimRegex = re.compile(UDIM_REGEX)
        textureDir = self.getParentPath()
        textureName = self.getBaseName()
        textureExtension = self.getExtension()
        if not self._udimRegex.search(textureName):
            # Not a UDIM texture: the set is this single file.
            self._udimFiles = [self]
            return
        self._isUdim = True
        # Replace the concrete UDIM number in the name with the (escaped) UDIM
        # regex itself, producing a pattern that matches every sibling tile.
        fullNamePattern = self._udimRegex.sub(self._udimRegex.pattern.replace('\\', '\\\\'),
                                              textureName)
        udimFiles = filter(
            lambda f: re.search(fullNamePattern, f.asString()),
            textureDir.getFilesInDirectory(textureExtension)
        )
        self._udimFiles = [textureDir / f for f in udimFiles]
    def __str__(self):
        return self.asPattern()
    def asPattern(self):
        '''Return the path as a string with the UDIM number replaced by the <UDIM> token.'''
        if not self._isUdim:
            return self.asString()
        textureDir = self.getParentPath()
        textureName = self.getBaseName()
        pattern = textureDir / mx.FilePath(
            self._udimRegex.sub(UDIM_TOKEN, textureName))
        return pattern.asString()
    def isUdim(self):
        '''Return True when this path is one tile of a UDIM set.'''
        return self._isUdim
    def getUdimFiles(self):
        '''Return every file in the UDIM set (a single-entry list for non-UDIM paths).'''
        return self._udimFiles
    def getUdimNumbers(self):
        '''Return the UDIM number string extracted from each file in the set.

        NOTE(review): a file without a UDIM token maps to None here — presumably
        unreachable because the set was built from the UDIM pattern; confirm.
        '''
        def _extractUdimNumber(_file):
            pattern = self._udimRegex.search(_file.getBaseName())
            if pattern:
                return re.search(r"\d+", pattern.group()).group()
        return list(map(_extractUdimNumber, self._udimFiles))
    def getNameWithoutExtension(self):
        '''Return the base name without extension (and without any UDIM token),
        with punctuation runs replaced by underscores to form a valid node name.'''
        if self._isUdim:
            name = self._udimRegex.split(self.getBaseName())[0]
        else:
            name = self.getBaseName().rsplit('.', 1)[0]
        return re.sub(r'[^\w\s]+', '_', name)
def listTextures(textureDir, texturePrefix=None):
    '''
    Return the texture files in the given folder that match known extensions,
    optionally filtered by a case-insensitive filename prefix. Each UDIM set
    is represented by a single entry.
    '''
    prefix = (texturePrefix or "").lower()
    allTextures = []
    for ext in TEXTURE_EXTENSIONS:
        pending = [textureDir / f for f in textureDir.getFilesInDirectory(ext)
                   if f.asString().lower().startswith(prefix)]
        # Consume candidates front-to-back; each UdimFilePath strips every tile
        # of its own UDIM set so the set is recorded only once.
        while pending:
            texture = UdimFilePath(pending[0].asString())
            allTextures.append(texture)
            for tile in texture.getUdimFiles():
                pending.remove(tile)
    return allTextures
def findBestMatch(textureName, shadingModel):
    '''
    Given a texture name and a shading model nodedef, return the shader input
    whose name is the closest fuzzy match to the texture name.

    Returns None when the shading model has no active inputs or when no input
    scores at least 50% similarity.
    '''
    # Use the last underscore-delimited token as the candidate name, folding a
    # trailing 'color' token into its qualifier (e.g. 'base' + 'color').
    parts = textureName.rsplit("_")
    baseTexName = parts[-1]
    if baseTexName.lower() == 'color':
        baseTexName = ''.join(parts[-2:])
    # Apply aliases case-insensitively. Previously the membership test used the
    # original case while the lookup used lowercase, so mixed-case names (e.g.
    # "Roughness") missed the alias entirely.
    baseTexName = INPUT_ALIASES.get(baseTexName.lower(), baseTexName)
    shaderInputs = shadingModel.getActiveInputs()
    # Guard the empty case: max() on an empty list would raise ValueError.
    if not shaderInputs:
        return None
    # Normalize once, outside the loop: strip non-alphanumerics and lowercase.
    baseTexName = re.sub(r'[^a-zA-Z0-9\s]', '', baseTexName).lower()
    ratios = []
    for shaderInput in shaderInputs:
        inputName = re.sub(r'[^a-zA-Z0-9\s]', '', shaderInput.getName()).lower()
        sequenceScore = SequenceMatcher(None, inputName, baseTexName).ratio()
        ratios.append(sequenceScore * 100)
    highscore = max(ratios)
    # Reject weak matches so unrelated textures are skipped by the caller.
    if highscore < 50:
        return None
    return shaderInputs[ratios.index(highscore)]
def buildDocument(textureFiles, mtlxFile, shadingModel, colorspace, useTiledImage):
    '''
    Build a MaterialX document from the given textures and shading model.

    Each texture is matched to a shader input by name, wired through an image
    (or tiledimage) node inside a nodegraph, and connected to a new shader node.
    Returns the new document, or None if the shading model is unknown.
    '''
    # Find the default library nodedef, if any, for the requested shading model.
    stdlib = mx.createDocument()
    mx.loadLibraries(mx.getDefaultDataLibraryFolders(), mx.getDefaultDataSearchPath(), stdlib)
    matchingNodeDefs = stdlib.getMatchingNodeDefs(shadingModel)
    if not matchingNodeDefs:
        print('Shading model', shadingModel, 'not found in the MaterialX data libraries')
        return None
    shadingModelNodeDef = matchingNodeDefs[0]
    # Prefer the nodedef flagged as the default version of the shading model.
    for nodeDef in matchingNodeDefs:
        if nodeDef.getAttribute('isdefaultversion') == 'true':
            shadingModelNodeDef = nodeDef
    # Create content document: nodegraph, shader node, and material node named
    # after the output file.
    doc = mx.createDocument()
    materialName = mx.createValidName(mtlxFile.getBaseName().rsplit('.', 1)[0])
    nodeGraph = doc.addNodeGraph('NG_' + materialName)
    shaderNode = doc.addNode(shadingModel, 'SR_' + materialName, 'surfaceshader')
    doc.addMaterialNode('M_' + materialName, shaderNode)
    # Iterate over texture files.
    imageNodeCategory = 'tiledimage' if useTiledImage else 'image'
    udimNumbers = set()
    for textureFile in textureFiles:
        textureName = textureFile.getNameWithoutExtension()
        shaderInput = findBestMatch(textureName, shadingModelNodeDef)
        if not shaderInput:
            print('Skipping', textureFile.getBaseName(), 'which does not match any', shadingModel, 'input')
            continue
        inputName = shaderInput.getName()
        inputType = shaderInput.getType()
        # Skip inputs that have already been created, e.g. in multi-UDIM materials.
        if shaderNode.getInput(inputName) or nodeGraph.getChild(textureName):
            continue
        mtlInput = shaderNode.addInput(inputName)
        textureName = nodeGraph.createValidChildName(textureName)
        imageNode = nodeGraph.addNode(imageNodeCategory, textureName, inputType)
        # Set color space on color inputs only; data maps are left untagged.
        if shaderInput.isColorType():
            imageNode.setColorSpace(colorspace)
        # Set file path, stored relative to the output document's folder.
        filePathString = os.path.relpath(textureFile.asPattern(), mtlxFile.getParentPath().asString())
        imageNode.setInputValue('file', filePathString, 'filename')
        # Apply special cases for normal maps: insert a normalmap node for
        # standard_surface so tangent-space textures are decoded.
        inputNode = imageNode
        connNode = imageNode
        inBetweenNodes = []
        if inputName.endswith('normal') and shadingModel == 'standard_surface':
            inBetweenNodes = ["normalmap"]
        for inNodeName in inBetweenNodes:
            connNode = nodeGraph.addNode(inNodeName, textureName + '_' + inNodeName, inputType)
            connNode.setConnectedNode('in', inputNode)
            inputNode = connNode
        # Create output and route the nodegraph into the shader input.
        outputNode = nodeGraph.addOutput(textureName + '_output', inputType)
        outputNode.setConnectedNode(connNode)
        mtlInput.setConnectedOutput(outputNode)
        mtlInput.setType(inputType)
        if textureFile.isUdim():
            udimNumbers.update(set(textureFile.getUdimNumbers()))
    # Create udim set so consumers can resolve the <UDIM> filename token.
    if udimNumbers:
        geomInfoName = doc.createValidChildName('GI_' + materialName)
        geomInfo = doc.addGeomInfo(geomInfoName)
        geomInfo.setGeomPropValue('udimset', list(udimNumbers), "stringarray")
    # Return the new document
    return doc
def main():
    '''Command-line entry point: scan a folder for textures and emit a MaterialX document.'''
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('--outputFilename', dest='outputFilename', type=str, help='Filename of the output MaterialX document.')
    parser.add_argument('--shadingModel', dest='shadingModel', type=str, default="standard_surface", help='The shading model used in analyzing input textures.')
    parser.add_argument('--colorSpace', dest='colorSpace', type=str, help='The colorspace in which input textures should be interpreted, defaulting to srgb_texture.')
    parser.add_argument('--texturePrefix', dest='texturePrefix', type=str, help='Filter input textures by the given prefix.')
    parser.add_argument('--tiledImage', dest='tiledImage', action="store_true", help='Request tiledimage nodes instead of image nodes.')
    parser.add_argument(dest='inputDirectory', nargs='?', help='Input folder that will be scanned for textures, defaulting to the current working directory.')
    options = parser.parse_args()
    # Resolve the texture folder, defaulting to the current working directory.
    texturePath = mx.FilePath.getCurrentPath()
    if options.inputDirectory:
        texturePath = mx.FilePath(options.inputDirectory)
        if not texturePath.isDirectory():
            print('Input folder not found:', texturePath)
            return
    # Default the output document to material.mtlx beside the textures.
    mtlxFile = texturePath / mx.FilePath('material.mtlx')
    if options.outputFilename:
        mtlxFile = mx.FilePath(options.outputFilename)
    textureFiles = listTextures(texturePath, texturePrefix=options.texturePrefix)
    if not textureFiles:
        print('No matching textures found in input folder.')
        return
    # Get shading model and color space, applying defaults when unset.
    shadingModel = 'standard_surface'
    colorspace = 'srgb_texture'
    if options.shadingModel:
        shadingModel = options.shadingModel
    if options.colorSpace:
        colorspace = options.colorSpace
    print('Analyzing textures in the', texturePath.asString(), 'folder for the', shadingModel, 'shading model.')
    # Create the MaterialX document.
    doc = buildDocument(textureFiles, mtlxFile, shadingModel, colorspace, options.tiledImage)
    if not doc:
        return
    if options.outputFilename:
        # Write the document to disk, creating the output folder if needed.
        if not mtlxFile.getParentPath().exists():
            mtlxFile.getParentPath().createDirectory()
        mx.writeToXmlFile(doc, mtlxFile.asString())
        print('Wrote MaterialX document to disk:', mtlxFile.asString())
    else:
        # Print the document to the standard output.
        print('Generated MaterialX document:')
        print(mx.writeToXmlString(doc))
# Allow the script to be executed directly from the command line.
if __name__ == '__main__':
    main()

412
MaterialX/python/Scripts/generateshader.py Normal file → Executable file
View File

@@ -1,204 +1,208 @@
#!/usr/bin/env python
'''
Generate shader code for each renderable element in a MaterialX document or folder.
The currently supported target languages are GLSL, ESSL, MSL, OSL, and MDL.
'''
import sys, os, argparse, subprocess
import MaterialX as mx
import MaterialX.PyMaterialXGenGlsl as mx_gen_glsl
import MaterialX.PyMaterialXGenMdl as mx_gen_mdl
import MaterialX.PyMaterialXGenMsl as mx_gen_msl
import MaterialX.PyMaterialXGenOsl as mx_gen_osl
import MaterialX.PyMaterialXGenSlang as mx_gen_slang
import MaterialX.PyMaterialXGenShader as mx_gen_shader
def validateCode(sourceCodeFile, codevalidator, codevalidatorArgs):
    '''Run the given validator executable on a source file and return its combined output.

    Returns an empty string when no validator is configured.
    '''
    if not codevalidator:
        return ""
    # Build the command line: validator tokens, then the file, then optional extra args.
    cmd = codevalidator.split()
    cmd.append(sourceCodeFile)
    if codevalidatorArgs:
        cmd.append(codevalidatorArgs)
    print('----- Run Validator: ' + ' '.join(cmd) + ' ')
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode(encoding='utf-8')
    except subprocess.CalledProcessError as err:
        # A failing validator still produces useful diagnostics; return them.
        return err.output.decode(encoding='utf-8')
def getMaterialXFiles(rootPath):
    '''
    Return the list of MaterialX document paths under rootPath.

    If rootPath is a directory it is walked recursively and every file with a
    ".mtlx" extension is collected; otherwise rootPath itself is returned as
    the single entry.
    '''
    if not os.path.isdir(rootPath):
        return [rootPath]
    filelist = []
    for subdir, dirs, files in os.walk(rootPath):
        # Require the full ".mtlx" extension; the previous bare endswith('mtlx')
        # also matched names such as "foomtlx" that are not MaterialX documents.
        filelist.extend(os.path.join(subdir, name)
                        for name in files if name.endswith('.mtlx'))
    return filelist
def main():
    '''Command-line entry point: load data libraries, then generate (and optionally
    validate) shader code for every renderable element in each input document.'''
    parser = argparse.ArgumentParser(description='Generate shader code for each renderable element in a MaterialX document or folder.')
    parser.add_argument('--path', dest='paths', action='append', nargs='+', help='An additional absolute search path location (e.g. "/projects/MaterialX")')
    parser.add_argument('--library', dest='libraries', action='append', nargs='+', help='An additional relative path to a custom data library folder (e.g. "libraries/custom")')
    parser.add_argument('--target', dest='target', default='glsl', help='Target shader generator to use (e.g. "glsl, osl, mdl, essl, vulkan, wgsl"). Default is glsl.')
    parser.add_argument('--outputPath', dest='outputPath', help='File path to output shaders to. If not specified, is the location of the input document is used.')
    parser.add_argument('--validator', dest='validator', nargs='?', const=' ', type=str, help='Name of executable to perform source code validation.')
    parser.add_argument('--validatorArgs', dest='validatorArgs', nargs='?', const=' ', type=str, help='Optional arguments for code validator.')
    parser.add_argument('--vulkanGlsl', dest='vulkanCompliantGlsl', default=False, type=bool, help='Set to True to generate Vulkan-compliant GLSL when using the genglsl target.')
    parser.add_argument('--shaderInterfaceType', dest='shaderInterfaceType', default=0, type=int, help='Set the type of shader interface to be generated')
    parser.add_argument(dest='inputFilename', help='Path to input document or folder containing input documents.')
    opts = parser.parse_args()
    # Load standard and custom data libraries (shared across all input documents).
    stdlib = mx.createDocument()
    searchPath = mx.getDefaultDataSearchPath()
    libraryFolders = []
    if opts.paths:
        for pathList in opts.paths:
            for path in pathList:
                searchPath.append(path)
    if opts.libraries:
        for libraryList in opts.libraries:
            for library in libraryList:
                libraryFolders.append(library)
    libraryFolders.extend(mx.getDefaultDataLibraryFolders())
    mx.loadLibraries(libraryFolders, searchPath, stdlib)
    # Generate shaders for each input document.
    for inputFilename in getMaterialXFiles(opts.inputFilename):
        doc = mx.createDocument()
        try:
            mx.readFromXmlFile(doc, inputFilename)
            doc.setDataLibrary(stdlib)
        except mx.ExceptionFileMissing as err:
            print('Generation failed: "', err, '"')
            sys.exit(-1)
        print('---------- Generate code for file: ', inputFilename, '--------------------')
        # Document validation problems are reported but do not abort generation.
        valid, msg = doc.validate()
        if not valid:
            print('Validation warnings for input document:')
            print(msg)
        gentarget = 'glsl'
        if opts.target:
            gentarget = opts.target
        # Select the shader generator for the requested target language.
        if gentarget == 'osl':
            shadergen = mx_gen_osl.OslShaderGenerator.create()
        elif gentarget == 'mdl':
            shadergen = mx_gen_mdl.MdlShaderGenerator.create()
        elif gentarget == 'essl':
            shadergen = mx_gen_glsl.EsslShaderGenerator.create()
        elif gentarget == 'vulkan':
            shadergen = mx_gen_glsl.VkShaderGenerator.create()
        elif gentarget == 'wgsl':
            shadergen = mx_gen_glsl.WgslShaderGenerator.create()
        elif gentarget == 'msl':
            shadergen = mx_gen_msl.MslShaderGenerator.create()
        elif gentarget == 'slang':
            shadergen = mx_gen_slang.SlangShaderGenerator.create()
        else:
            shadergen = mx_gen_glsl.GlslShaderGenerator.create()
        # NOTE(review): codeSearchPath aliases searchPath, so each document's
        # folder is appended to the shared search path and accumulates across
        # loop iterations — confirm whether an independent copy was intended.
        codeSearchPath = searchPath
        codeSearchPath.append(os.path.dirname(inputFilename))
        context = mx_gen_shader.GenContext(shadergen)
        context.registerSourceCodeSearchPath(codeSearchPath)
        shadergen.registerTypeDefs(doc);
        # If we're generating Vulkan-compliant GLSL then set the binding context
        if opts.vulkanCompliantGlsl:
            bindingContext = mx_gen_glsl.GlslResourceBindingContext.create(0,0)
            context.pushUserData('udbinding', bindingContext)
        # Configure the shader interface; out-of-range values fall back to COMPLETE.
        genoptions = context.getOptions()
        if opts.shaderInterfaceType == 0 or opts.shaderInterfaceType == 1:
            genoptions.shaderInterfaceType = mx_gen_shader.ShaderInterfaceType(opts.shaderInterfaceType)
        else:
            genoptions.shaderInterfaceType = mx_gen_shader.ShaderInterfaceType.SHADER_INTERFACE_COMPLETE
        print('- Set up CMS ...')
        cms = mx_gen_shader.DefaultColorManagementSystem.create(shadergen.getTarget())
        cms.loadLibrary(doc)
        shadergen.setColorManagementSystem(cms)
        print('- Set up Units ...')
        unitsystem = mx_gen_shader.UnitSystem.create(shadergen.getTarget())
        registry = mx.UnitConverterRegistry.create()
        distanceTypeDef = doc.getUnitTypeDef('distance')
        registry.addUnitConverter(distanceTypeDef, mx.LinearUnitConverter.create(distanceTypeDef))
        angleTypeDef = doc.getUnitTypeDef('angle')
        registry.addUnitConverter(angleTypeDef, mx.LinearUnitConverter.create(angleTypeDef))
        unitsystem.loadLibrary(stdlib)
        unitsystem.setUnitConverterRegistry(registry)
        shadergen.setUnitSystem(unitsystem)
        genoptions.targetDistanceUnit = 'meter'
        # Choose the shader output folder: --outputPath if it exists, otherwise
        # the input document's own folder.
        pathPrefix = ''
        if opts.outputPath and os.path.exists(opts.outputPath):
            pathPrefix = opts.outputPath + os.path.sep
        else:
            pathPrefix = os.path.dirname(os.path.abspath(inputFilename))
        print('- Shader output path: ' + pathPrefix)
        failedShaders = ""
        for elem in mx_gen_shader.findRenderableElements(doc):
            elemName = elem.getName()
            print('-- Generate code for element: ' + elemName)
            elemName = mx.createValidName(elemName)
            shader = shadergen.generate(elemName, elem, context)
            if shader:
                # Use extension of .vert and .frag as it's type is
                # recognized by glslangValidator
                if gentarget in ['glsl', 'essl', 'vulkan', 'msl', 'wgsl']:
                    pixelSource = shader.getSourceCode(mx_gen_shader.PIXEL_STAGE)
                    filename = pathPrefix + "/" + shader.getName() + "." + gentarget + ".frag"
                    print('--- Wrote pixel shader to: ' + filename)
                    file = open(filename, 'w+')
                    file.write(pixelSource)
                    file.close()
                    errors = validateCode(filename, opts.validator, opts.validatorArgs)
                    vertexSource = shader.getSourceCode(mx_gen_shader.VERTEX_STAGE)
                    filename = pathPrefix + "/" + shader.getName() + "." + gentarget + ".vert"
                    print('--- Wrote vertex shader to: ' + filename)
                    file = open(filename, 'w+')
                    file.write(vertexSource)
                    file.close()
                    errors += validateCode(filename, opts.validator, opts.validatorArgs)
                else:
                    # Single-stage targets (osl, mdl, slang) emit one file.
                    pixelSource = shader.getSourceCode(mx_gen_shader.PIXEL_STAGE)
                    filename = pathPrefix + "/" + shader.getName() + "." + gentarget
                    print('--- Wrote pixel shader to: ' + filename)
                    file = open(filename, 'w+')
                    file.write(pixelSource)
                    file.close()
                    errors = validateCode(filename, opts.validator, opts.validatorArgs)
                if errors != "":
                    print("--- Validation failed for element: ", elemName)
                    print("----------------------------")
                    print('--- Error log: ', errors)
                    print("----------------------------")
                    failedShaders += (elemName + ' ')
                else:
                    print("--- Validation passed for element:", elemName)
            else:
                # Generation returned no shader for this element.
                print("--- Validation failed for element:", elemName)
                failedShaders += (elemName + ' ')
        # Signal failure to the caller if any element failed to generate/validate.
        if failedShaders != "":
            sys.exit(-1)
# Allow the script to be executed directly from the command line.
if __name__ == '__main__':
    main()
#!/usr/bin/env python
'''
Utility to generate the shader for materials found in a MaterialX document. One file will be generated
for each material / shader found. The currently supported target languages are GLSL, OSL, MDL and ESSL.
'''
import sys, os, argparse, subprocess
import MaterialX as mx
import MaterialX.PyMaterialXGenGlsl as mx_gen_glsl
import MaterialX.PyMaterialXGenMdl as mx_gen_mdl
import MaterialX.PyMaterialXGenMsl as mx_gen_msl
import MaterialX.PyMaterialXGenOsl as mx_gen_osl
import MaterialX.PyMaterialXGenShader as mx_gen_shader
def validateCode(sourceCodeFile, codevalidator, codevalidatorArgs):
    '''Run the given validator executable on a source file and return its combined output.

    Returns an empty string when no validator is configured.
    '''
    if not codevalidator:
        return ""
    # Build the command line: validator tokens, then the file, then optional extra args.
    cmd = codevalidator.split()
    cmd.append(sourceCodeFile)
    if codevalidatorArgs:
        cmd.append(codevalidatorArgs)
    print('----- Run Validator: ' + ' '.join(cmd) + ' ')
    try:
        return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode(encoding='utf-8')
    except subprocess.CalledProcessError as err:
        # A failing validator still produces useful diagnostics; return them.
        return err.output.decode(encoding='utf-8')
def getMaterialXFiles(rootPath):
    '''
    Return the list of MaterialX document paths under rootPath.

    If rootPath is a directory it is walked recursively and every file with a
    ".mtlx" extension is collected; otherwise rootPath itself is returned as
    the single entry.
    '''
    if not os.path.isdir(rootPath):
        return [rootPath]
    filelist = []
    for subdir, dirs, files in os.walk(rootPath):
        # Require the full ".mtlx" extension; the previous bare endswith('mtlx')
        # also matched names such as "foomtlx" that are not MaterialX documents.
        filelist.extend(os.path.join(subdir, name)
                        for name in files if name.endswith('.mtlx'))
    return filelist
def main():
    """Command-line driver: generate (and optionally validate) shader source
    for every renderable element of each MaterialX document found at the
    input path."""
    parser = argparse.ArgumentParser(description='Generate shader code for each material / shader in a document.')
    parser.add_argument('--path', dest='paths', action='append', nargs='+', help='An additional absolute search path location (e.g. "/projects/MaterialX")')
    parser.add_argument('--library', dest='libraries', action='append', nargs='+', help='An additional relative path to a custom data library folder (e.g. "libraries/custom")')
    parser.add_argument('--target', dest='target', default='glsl', help='Target shader generator to use (e.g. "glsl, osl, mdl, essl, vulkan"). Default is glsl.')
    parser.add_argument('--outputPath', dest='outputPath', help='File path to output shaders to. If not specified, is the location of the input document is used.')
    parser.add_argument('--validator', dest='validator', nargs='?', const=' ', type=str, help='Name of executable to perform source code validation.')
    parser.add_argument('--validatorArgs', dest='validatorArgs', nargs='?', const=' ', type=str, help='Optional arguments for code validator.')
    parser.add_argument('--vulkanGlsl', dest='vulkanCompliantGlsl', default=False, type=bool, help='Set to True to generate Vulkan-compliant GLSL when using the genglsl target.')
    parser.add_argument('--shaderInterfaceType', dest='shaderInterfaceType', default=0, type=int, help='Set the type of shader interface to be generated')
    parser.add_argument(dest='inputFilename', help='Path to input document or folder containing input documents.')
    opts = parser.parse_args()

    filelist = getMaterialXFiles(opts.inputFilename)
    for inputFilename in filelist:
        # Read the source document; a missing file aborts the whole run.
        doc = mx.createDocument()
        try:
            mx.readFromXmlFile(doc, inputFilename)
        except mx.ExceptionFileMissing as err:
            print('Generation failed: "', err, '"')
            sys.exit(-1)

        print('---------- Generate code for file: ', inputFilename, '--------------------')

        # Build the data-library search path: defaults, the document's own
        # folder, then any user-supplied --path / --library entries.
        stdlib = mx.createDocument()
        searchPath = mx.getDefaultDataSearchPath()
        searchPath.append(os.path.dirname(inputFilename))
        libraryFolders = []
        if opts.paths:
            for pathList in opts.paths:
                for path in pathList:
                    searchPath.append(path)
        if opts.libraries:
            for libraryList in opts.libraries:
                for library in libraryList:
                    libraryFolders.append(library)
        libraryFolders.extend(mx.getDefaultDataLibraryFolders())
        try:
            mx.loadLibraries(libraryFolders, searchPath, stdlib)
            doc.importLibrary(stdlib)
        except Exception as err:
            print('Generation failed: "', err, '"')
            sys.exit(-1)

        # Document validation problems are reported but do not stop generation.
        valid, msg = doc.validate()
        if not valid:
            print('Validation warnings for input document:')
            print(msg)

        # Select the shader generator for the requested target (GLSL default).
        gentarget = 'glsl'
        if opts.target:
            gentarget = opts.target
        if gentarget == 'osl':
            shadergen = mx_gen_osl.OslShaderGenerator.create()
        elif gentarget == 'mdl':
            shadergen = mx_gen_mdl.MdlShaderGenerator.create()
        elif gentarget == 'essl':
            shadergen = mx_gen_glsl.EsslShaderGenerator.create()
        elif gentarget == 'vulkan':
            shadergen = mx_gen_glsl.VkShaderGenerator.create()
        elif gentarget == 'msl':
            shadergen = mx_gen_msl.MslShaderGenerator.create()
        else:
            shadergen = mx_gen_glsl.GlslShaderGenerator.create()
        context = mx_gen_shader.GenContext(shadergen)
        context.registerSourceCodeSearchPath(searchPath)

        # If we're generating Vulkan-compliant GLSL then set the binding context
        if opts.vulkanCompliantGlsl:
            bindingContext = mx_gen_glsl.GlslResourceBindingContext.create(0,0)
            context.pushUserData('udbinding', bindingContext)

        # 0/1 map directly onto the ShaderInterfaceType enum; anything else
        # falls back to the complete interface.
        genoptions = context.getOptions()
        if opts.shaderInterfaceType == 0 or opts.shaderInterfaceType == 1:
            genoptions.shaderInterfaceType = mx_gen_shader.ShaderInterfaceType(opts.shaderInterfaceType)
        else:
            genoptions.shaderInterfaceType = mx_gen_shader.ShaderInterfaceType.SHADER_INTERFACE_COMPLETE

        print('- Set up CMS ...')
        cms = mx_gen_shader.DefaultColorManagementSystem.create(shadergen.getTarget())
        cms.loadLibrary(doc)
        shadergen.setColorManagementSystem(cms)

        print('- Set up Units ...')
        unitsystem = mx_gen_shader.UnitSystem.create(shadergen.getTarget())
        registry = mx.UnitConverterRegistry.create()
        distanceTypeDef = doc.getUnitTypeDef('distance')
        registry.addUnitConverter(distanceTypeDef, mx.LinearUnitConverter.create(distanceTypeDef))
        angleTypeDef = doc.getUnitTypeDef('angle')
        registry.addUnitConverter(angleTypeDef, mx.LinearUnitConverter.create(angleTypeDef))
        unitsystem.loadLibrary(stdlib)
        unitsystem.setUnitConverterRegistry(registry)
        shadergen.setUnitSystem(unitsystem)
        genoptions.targetDistanceUnit = 'meter'

        # Look for renderable nodes, falling back to material nodes and then
        # surface shaders when the document declares none explicitly.
        nodes = mx_gen_shader.findRenderableElements(doc)
        if not nodes:
            nodes = doc.getMaterialNodes()
            if not nodes:
                nodes = doc.getNodesOfType(mx.SURFACE_SHADER_TYPE_STRING)

        pathPrefix = ''
        if opts.outputPath and os.path.exists(opts.outputPath):
            pathPrefix = opts.outputPath + os.path.sep
        else:
            pathPrefix = os.path.dirname(os.path.abspath(inputFilename))
        print('- Shader output path: ' + pathPrefix)

        failedShaders = ""
        for node in nodes:
            nodeName = node.getName()
            print('-- Generate code for node: ' + nodeName)
            nodeName = mx.createValidName(nodeName)
            shader = shadergen.generate(nodeName, node, context)
            if shader:
                # Use extension of .vert and .frag as it's type is
                # recognized by glslangValidator
                if gentarget in ['glsl', 'essl', 'vulkan', 'msl']:
                    pixelSource = shader.getSourceCode(mx_gen_shader.PIXEL_STAGE)
                    filename = pathPrefix + "/" + shader.getName() + "." + gentarget + ".frag"
                    print('--- Wrote pixel shader to: ' + filename)
                    file = open(filename, 'w+')
                    file.write(pixelSource)
                    file.close()
                    errors = validateCode(filename, opts.validator, opts.validatorArgs)

                    vertexSource = shader.getSourceCode(mx_gen_shader.VERTEX_STAGE)
                    filename = pathPrefix + "/" + shader.getName() + "." + gentarget + ".vert"
                    print('--- Wrote vertex shader to: ' + filename)
                    file = open(filename, 'w+')
                    file.write(vertexSource)
                    file.close()
                    errors += validateCode(filename, opts.validator, opts.validatorArgs)
                else:
                    # Single-stage targets (osl, mdl, ...) write one file.
                    pixelSource = shader.getSourceCode(mx_gen_shader.PIXEL_STAGE)
                    filename = pathPrefix + "/" + shader.getName() + "." + gentarget
                    print('--- Wrote pixel shader to: ' + filename)
                    file = open(filename, 'w+')
                    file.write(pixelSource)
                    file.close()
                    errors = validateCode(filename, opts.validator, opts.validatorArgs)

                # Any validator output is treated as a failure log.
                if errors != "":
                    print("--- Validation failed for node: ", nodeName)
                    print("----------------------------")
                    print('--- Error log: ', errors)
                    print("----------------------------")
                    failedShaders += (nodeName + ' ')
                else:
                    print("--- Validation passed for node:", nodeName)
            else:
                print("--- Validation failed for node:", nodeName)
                failedShaders += (nodeName + ' ')

        # Any failure for this document aborts the run with a non-zero code.
        if failedShaders != "":
            sys.exit(-1)

if __name__ == '__main__':
    main()

2030
MaterialX/python/Scripts/genmdl.py Normal file → Executable file

File diff suppressed because it is too large Load Diff

226
MaterialX/python/Scripts/mxdoc.py Normal file → Executable file
View File

@@ -1,113 +1,113 @@
#!/usr/bin/env python
'''
Print markdown documentation for each nodedef in the given document.
'''
import argparse
import sys
import MaterialX as mx
# Table column headers and (below) the port attributes queried for the
# trailing columns; the first three columns come from the port itself.
HEADERS = ('Name', 'Type', 'Default Value',
           'UI name', 'UI min', 'UI max', 'UI Soft Min', 'UI Soft Max', 'UI step', 'UI group', 'UI Advanced', 'Doc', 'Uniform')
ATTR_NAMES = ('uiname', 'uimin', 'uimax', 'uisoftmin', 'uisoftmax', 'uistep', 'uifolder', 'uiadvanced', 'doc', 'uniform' )

def main():
    """Print a Markdown (default) or HTML summary table for every nodedef in
    the input MaterialX document."""
    parser = argparse.ArgumentParser(description="Print documentation for each nodedef in the given document.")
    parser.add_argument(dest="inputFilename", help="Filename of the input MaterialX document.")
    parser.add_argument('--docType', dest='documentType', default='md', help='Document type. Default is "md" (Markdown). Specify "html" for HTML output')
    parser.add_argument('--showInherited', default=False, action='store_true', help='Show inherited inputs. Default is False')
    opts = parser.parse_args()

    doc = mx.createDocument()
    try:
        mx.readFromXmlFile(doc, opts.inputFilename)
    except mx.ExceptionFileMissing as err:
        print(err)
        sys.exit(0)

    for nd in doc.getNodeDefs():
        # HTML output
        if opts.documentType == "html":
            print('<head><style>')
            print('table, th, td {')
            print(' border-bottom: 1px solid; border-collapse: collapse; padding: 10px;')
            print('}')
            print('</style></head>')
            print('<ul>')
            print('<li> <em>Nodedef</em>: %s' % nd.getName())
            print('<li> <em>Type</em>: %s' % nd.getType())
            if len(nd.getNodeGroup()) > 0:
                print('<li> <em>Node Group</em>: %s' % nd.getNodeGroup())
            if len(nd.getVersionString()) > 0:
                print('<li> <em>Version</em>: %s. Is default: %s' % (nd.getVersionString(), nd.getDefaultVersion()))
            if len(nd.getInheritString()) > 0:
                print('<li> <em>Inherits From</em>: %s' % nd.getInheritString())
            print('<li> <em>Doc</em>: %s\n' % nd.getAttribute('doc'))
            print('</ul>')
            print('<table><tr>')
            for h in HEADERS:
                print('<th>' + h + '</th>')
            print('</tr>')
            # The Active* variants include ports inherited from ancestors.
            inputList = nd.getActiveInputs() if opts.showInherited else nd.getInputs()
            tokenList = nd.getActiveTokens() if opts.showInherited else nd.getTokens()
            outputList = nd.getActiveOutputs() if opts.showInherited else nd.getOutputs()
            totalList = inputList + tokenList + outputList;
            for port in totalList:
                print('<tr>')
                infos = []
                # Outputs render in italics, inputs in bold, tokens plain.
                if port in outputList:
                    infos.append('<em>'+ port.getName() + '</em>')
                elif port in tokenList:
                    infos.append(port.getName())
                else:
                    infos.append('<b>'+ port.getName() + '</b>')
                infos.append(port.getType())
                val = port.getValue()
                # Trim float noise for display. NOTE(review): assumes float
                # ports always carry a value -- round(None) would raise.
                if port.getType() == "float":
                    val = round(val, 6)
                infos.append(str(val))
                for attrname in ATTR_NAMES:
                    infos.append(port.getAttribute(attrname))
                for info in infos:
                    print('<td>' + info + '</td>')
                print('</tr>')
            print('</table>')
        # Markdown output
        else:
            print('- *Nodedef*: %s' % nd.getName())
            print('- *Type*: %s' % nd.getType())
            if len(nd.getNodeGroup()) > 0:
                print('- *Node Group*: %s' % nd.getNodeGroup())
            if len(nd.getVersionString()) > 0:
                print('- *Version*: %s. Is default: %s' % (nd.getVersionString(), nd.getDefaultVersion()))
            if len(nd.getInheritString()) > 0:
                print('- *Inherits From*: %s' % nd.getInheritString())
            print('- *Doc*: %s\n' % nd.getAttribute('doc'))
            print('| ' + ' | '.join(HEADERS) + ' |')
            print('|' + ' ---- |' * len(HEADERS) + '')
            inputList = nd.getActiveInputs() if opts.showInherited else nd.getInputs()
            tokenList = nd.getActiveTokens() if opts.showInherited else nd.getTokens()
            outputList = nd.getActiveOutputs() if opts.showInherited else nd.getOutputs()
            totalList = inputList + tokenList + outputList;
            for port in totalList:
                infos = []
                if port in outputList:
                    infos.append('*'+ port.getName() + '*')
                elif port in tokenList:
                    infos.append(port.getName())
                else:
                    infos.append('**'+ port.getName() + '**')
                infos.append(port.getType())
                val = port.getValue()
                # Same float rounding as the HTML branch.
                if port.getType() == "float":
                    val = round(val, 6)
                infos.append(str(val))
                for attrname in ATTR_NAMES:
                    infos.append(port.getAttribute(attrname))
                print('| ' + " | ".join(infos) + ' |')

if __name__ == '__main__':
    main()
#!/usr/bin/env python
'''
Print markdown documentation for each nodedef in the given document.
'''
import argparse
import sys
import MaterialX as mx
# Table column headers and (below) the port attributes queried for the
# trailing columns; the first three columns come from the port itself.
HEADERS = ('Name', 'Type', 'Default Value',
           'UI name', 'UI min', 'UI max', 'UI Soft Min', 'UI Soft Max', 'UI step', 'UI group', 'UI Advanced', 'Doc', 'Uniform')
ATTR_NAMES = ('uiname', 'uimin', 'uimax', 'uisoftmin', 'uisoftmax', 'uistep', 'uifolder', 'uiadvanced', 'doc', 'uniform' )

def main():
    """Print a Markdown (default) or HTML summary table for every nodedef in
    the input MaterialX document."""
    parser = argparse.ArgumentParser(description="Print documentation for each nodedef in the given document.")
    parser.add_argument(dest="inputFilename", help="Filename of the input MaterialX document.")
    parser.add_argument('--docType', dest='documentType', default='md', help='Document type. Default is "md" (Markdown). Specify "html" for HTML output')
    parser.add_argument('--showInherited', default=False, action='store_true', help='Show inherited inputs. Default is False')
    opts = parser.parse_args()

    doc = mx.createDocument()
    try:
        mx.readFromXmlFile(doc, opts.inputFilename)
    except mx.ExceptionFileMissing as err:
        print(err)
        sys.exit(0)

    for nd in doc.getNodeDefs():
        # HTML output
        if opts.documentType == "html":
            print('<head><style>')
            print('table, th, td {')
            print(' border-bottom: 1px solid; border-collapse: collapse; padding: 10px;')
            print('}')
            print('</style></head>')
            print('<ul>')
            print('<li> <em>Nodedef</em>: %s' % nd.getName())
            print('<li> <em>Type</em>: %s' % nd.getType())
            if len(nd.getNodeGroup()) > 0:
                print('<li> <em>Node Group</em>: %s' % nd.getNodeGroup())
            if len(nd.getVersionString()) > 0:
                print('<li> <em>Version</em>: %s. Is default: %s' % (nd.getVersionString(), nd.getDefaultVersion()))
            if len(nd.getInheritString()) > 0:
                print('<li> <em>Inherits From</em>: %s' % nd.getInheritString())
            print('<li> <em>Doc</em>: %s\n' % nd.getAttribute('doc'))
            print('</ul>')
            print('<table><tr>')
            for h in HEADERS:
                print('<th>' + h + '</th>')
            print('</tr>')
            # The Active* variants include ports inherited from ancestors.
            inputList = nd.getActiveInputs() if opts.showInherited else nd.getInputs()
            tokenList = nd.getActiveTokens() if opts.showInherited else nd.getTokens()
            outputList = nd.getActiveOutputs() if opts.showInherited else nd.getOutputs()
            totalList = inputList + tokenList + outputList;
            for port in totalList:
                print('<tr>')
                infos = []
                # Outputs render in italics, inputs in bold, tokens plain.
                if port in outputList:
                    infos.append('<em>'+ port.getName() + '</em>')
                elif port in tokenList:
                    infos.append(port.getName())
                else:
                    infos.append('<b>'+ port.getName() + '</b>')
                infos.append(port.getType())
                val = port.getValue()
                # Trim float noise for display. NOTE(review): assumes float
                # ports always carry a value -- round(None) would raise.
                if port.getType() == "float":
                    val = round(val, 6)
                infos.append(str(val))
                for attrname in ATTR_NAMES:
                    infos.append(port.getAttribute(attrname))
                for info in infos:
                    print('<td>' + info + '</td>')
                print('</tr>')
            print('</table>')
        # Markdown output
        else:
            print('- *Nodedef*: %s' % nd.getName())
            print('- *Type*: %s' % nd.getType())
            if len(nd.getNodeGroup()) > 0:
                print('- *Node Group*: %s' % nd.getNodeGroup())
            if len(nd.getVersionString()) > 0:
                print('- *Version*: %s. Is default: %s' % (nd.getVersionString(), nd.getDefaultVersion()))
            if len(nd.getInheritString()) > 0:
                print('- *Inherits From*: %s' % nd.getInheritString())
            print('- *Doc*: %s\n' % nd.getAttribute('doc'))
            print('| ' + ' | '.join(HEADERS) + ' |')
            print('|' + ' ---- |' * len(HEADERS) + '')
            inputList = nd.getActiveInputs() if opts.showInherited else nd.getInputs()
            tokenList = nd.getActiveTokens() if opts.showInherited else nd.getTokens()
            outputList = nd.getActiveOutputs() if opts.showInherited else nd.getOutputs()
            totalList = inputList + tokenList + outputList;
            for port in totalList:
                infos = []
                if port in outputList:
                    infos.append('*'+ port.getName() + '*')
                elif port in tokenList:
                    infos.append(port.getName())
                else:
                    infos.append('**'+ port.getName() + '**')
                infos.append(port.getType())
                val = port.getValue()
                # Same float rounding as the HTML branch.
                if port.getType() == "float":
                    val = round(val, 6)
                infos.append(str(val))
                for attrname in ATTR_NAMES:
                    infos.append(port.getAttribute(attrname))
                print('| ' + " | ".join(infos) + ' |')

if __name__ == '__main__':
    main()

155
MaterialX/python/Scripts/mxformat.py Normal file → Executable file
View File

@@ -1,90 +1,65 @@
#!/usr/bin/env python
'''
Reformat a folder of MaterialX documents in place, optionally upgrading
the documents to the latest version of the standard.
'''
import argparse
import os
import xml.etree.ElementTree as ET
import MaterialX as mx
def is_well_formed(xml_string):
    """Check that *xml_string* parses as well-formed XML.

    Returns an empty string when the document parses, otherwise the parser's
    error message. (Previously the result was computed but never returned,
    so every caller received None and the check was a no-op.)
    """
    error = ''
    try:
        ET.fromstring(xml_string)
    except ET.ParseError as e:
        error = str(e)
    return error
def main():
    """Rewrite every MaterialX document under *inputFolder* in place, with
    optional version upgrade, XML well-formedness checking and MaterialX
    validation."""
    parser = argparse.ArgumentParser(description="Reformat a folder of MaterialX documents in place.")
    parser.add_argument('-y', '--yes', dest='yes', action="store_true", help="Proceed without asking for confirmation from the user.")
    parser.add_argument('-u', '--upgrade', dest='upgrade', action="store_true", help='Upgrade documents to the latest version of the standard.')
    parser.add_argument('-v', '--validate', dest='validate', action="store_true", help='Perform MaterialX validation on documents after reformatting.')
    parser.add_argument('-x', '--xml_syntax', dest='xml_syntax', action="store_true", help='Check XML syntax after reformatting.')
    parser.add_argument(dest="inputFolder", help="An input folder to scan for MaterialX documents.")
    opts = parser.parse_args()

    # Collect every readable .mtlx document beneath the input folder.
    validDocs = dict()
    for root, dirs, files in os.walk(opts.inputFolder):
        for filename in files:
            fullpath = os.path.join(root, filename)
            if fullpath.endswith('.mtlx'):
                doc = mx.createDocument()
                try:
                    readOptions = mx.XmlReadOptions()
                    # Preserve comments and newlines so the rewrite is lossless.
                    readOptions.readComments = True
                    readOptions.readNewlines = True
                    readOptions.upgradeVersion = opts.upgrade
                    try:
                        mx.readFromXmlFile(doc, fullpath, mx.FileSearchPath(), readOptions)
                    except Exception as err:
                        print('Skipping "' + filename + '" due to exception: ' + str(err))
                        continue
                    validDocs[fullpath] = doc
                except mx.Exception:
                    pass

    if not validDocs:
        print('No MaterialX documents were found in "%s"' % (opts.inputFolder))
        return

    print('Found %s MaterialX files in "%s"' % (len(validDocs), opts.inputFolder))

    mxVersion = mx.getVersionIntegers()

    # Ask for confirmation unless --yes was given.
    if not opts.yes:
        if opts.upgrade:
            question = 'Would you like to upgrade all %i documents to MaterialX v%i.%i in place (y/n)?' % (len(validDocs), mxVersion[0], mxVersion[1])
        else:
            question = 'Would you like to reformat all %i documents in place (y/n)?' % len(validDocs)
        answer = input(question)
        if answer != 'y' and answer != 'Y':
            return

    validate = opts.validate
    if validate:
        print(f'- Validate documents')
    xml_syntax = opts.xml_syntax
    if xml_syntax:
        print(f'- Check XML syntax')

    for (filename, doc) in validDocs.items():
        if xml_syntax:
            xml_string = mx.writeToXmlString(doc)
            # NOTE(review): as written above, is_well_formed returns None, so
            # this warning can never fire -- it should return its error string.
            errors = is_well_formed(xml_string)
            if errors:
                print(f'- Warning: Document (unknown) is not well-formed XML: {errors}')
        if validate:
            is_valid, errors = doc.validate()
            if not is_valid:
                print(f'- Warning: Document (unknown) is invalid. Errors {errors}.')
        mx.writeToXmlFile(doc, filename)

    if opts.upgrade:
        print('Upgraded %i documents to MaterialX v%i.%i' % (len(validDocs), mxVersion[0], mxVersion[1]))
    else:
        print('Reformatted %i documents ' % len(validDocs))

if __name__ == '__main__':
    main()
#!/usr/bin/env python
'''
Reformat a folder of MaterialX documents in place, optionally upgrading
the documents to the latest version of the standard.
'''
import argparse
import os
import MaterialX as mx
def main():
    """Rewrite every MaterialX document under *inputFolder* in place,
    optionally upgrading each to the latest standard version."""
    parser = argparse.ArgumentParser(description="Reformat a folder of MaterialX documents in place.")
    parser.add_argument("--yes", dest="yes", action="store_true", help="Proceed without asking for confirmation from the user.")
    parser.add_argument('--upgrade', dest='upgrade', action="store_true", help='Upgrade documents to the latest version of the standard.')
    parser.add_argument(dest="inputFolder", help="An input folder to scan for MaterialX documents.")
    opts = parser.parse_args()

    # Collect every readable .mtlx document beneath the input folder.
    validDocs = dict()
    for root, dirs, files in os.walk(opts.inputFolder):
        for file in files:
            if file.endswith('.mtlx'):
                filename = os.path.join(root, file)
                doc = mx.createDocument()
                try:
                    readOptions = mx.XmlReadOptions()
                    # Preserve comments and newlines so the rewrite is lossless.
                    readOptions.readComments = True
                    readOptions.readNewlines = True
                    readOptions.upgradeVersion = opts.upgrade
                    try:
                        mx.readFromXmlFile(doc, filename, mx.FileSearchPath(), readOptions)
                    except Exception as err:
                        print('Skipping "' + file + '" due to exception: ' + str(err))
                        continue
                    validDocs[filename] = doc
                except mx.Exception:
                    pass

    if not validDocs:
        print('No MaterialX documents were found in "%s"' % (opts.inputFolder))
        return

    print('Found %s MaterialX files in "%s"' % (len(validDocs), opts.inputFolder))

    mxVersion = mx.getVersionIntegers()

    # Ask for confirmation unless --yes was given.
    if not opts.yes:
        if opts.upgrade:
            question = 'Would you like to upgrade all %i documents to MaterialX v%i.%i in place (y/n)?' % (len(validDocs), mxVersion[0], mxVersion[1])
        else:
            question = 'Would you like to reformat all %i documents in place (y/n)?' % len(validDocs)
        answer = input(question)
        if answer != 'y' and answer != 'Y':
            return

    # Writing back through the MaterialX serializer performs the reformat.
    for (filename, doc) in validDocs.items():
        mx.writeToXmlFile(doc, filename)

    if opts.upgrade:
        print('Upgraded %i documents to MaterialX v%i.%i' % (len(validDocs), mxVersion[0], mxVersion[1]))
    else:
        print('Reformatted %i documents ' % len(validDocs))

if __name__ == '__main__':
    main()

View File

@@ -1,363 +1,359 @@
#!/usr/bin/env python
'''
Verify that the given file is a valid MaterialX document.
'''
import argparse
import sys
import MaterialX as mx
def main():
    """Validate a MaterialX document and optionally print a summary of its
    contents (--verbose), resolving inheritance when --resolve is given."""
    parser = argparse.ArgumentParser(description="Verify that the given file is a valid MaterialX document.")
    parser.add_argument("--resolve", dest="resolve", action="store_true", help="Resolve inheritance and string substitutions.")
    parser.add_argument("--verbose", dest="verbose", action="store_true", help="Print summary of elements found in the document.")
    parser.add_argument("--stdlib", dest="stdlib", action="store_true", help="Import standard MaterialX libraries into the document.")
    parser.add_argument(dest="inputFilename", help="Filename of the input document.")
    opts = parser.parse_args()

    # Load standard libraries if requested.
    stdlib = None
    if opts.stdlib:
        stdlib = mx.createDocument()
        try:
            mx.loadLibraries(mx.getDefaultDataLibraryFolders(), mx.getDefaultDataSearchPath(), stdlib)
        except Exception as err:
            print(err)
            sys.exit(0)

    # Read and validate the source document.
    doc = mx.createDocument()
    try:
        mx.readFromXmlFile(doc, opts.inputFilename)
        if stdlib:
            doc.setDataLibrary(stdlib)
    except mx.ExceptionFileMissing as err:
        print(err)
        sys.exit(0)
    valid, message = doc.validate()
    if (valid):
        print("%s is a valid MaterialX document in v%s" % (opts.inputFilename, mx.getVersionString()))
    else:
        print("%s is not a valid MaterialX document in v%s" % (opts.inputFilename, mx.getVersionString()))
        print(message)

    # Generate verbose output if requested: one count-plus-contents line per
    # element category, formatted by listContents().
    if opts.verbose:
        nodegraphs = doc.getNodeGraphs()
        materials = doc.getMaterialNodes()
        looks = doc.getLooks()
        lookgroups = doc.getLookGroups()
        collections = doc.getCollections()
        nodedefs = doc.getNodeDefs()
        implementations = doc.getImplementations()
        geominfos = doc.getGeomInfos()
        geompropdefs = doc.getGeomPropDefs()
        typedefs = doc.getTypeDefs()
        propsets = doc.getPropertySets()
        variantsets = doc.getVariantSets()
        backdrops = doc.getBackdrops()
        print("----------------------------------")
        print("Document Version: {}.{:02d}".format(*doc.getVersionIntegers()))
        print("%4d Custom Type%s%s" % (len(typedefs), pl(typedefs), listContents(typedefs, opts.resolve)))
        print("%4d Custom GeomProp%s%s" % (len(geompropdefs), pl(geompropdefs), listContents(geompropdefs, opts.resolve)))
        print("%4d NodeDef%s%s" % (len(nodedefs), pl(nodedefs), listContents(nodedefs, opts.resolve)))
        print("%4d Implementation%s%s" % (len(implementations), pl(implementations), listContents(implementations, opts.resolve)))
        print("%4d Nodegraph%s%s" % (len(nodegraphs), pl(nodegraphs), listContents(nodegraphs, opts.resolve)))
        print("%4d VariantSet%s%s" % (len(variantsets), pl(variantsets), listContents(variantsets, opts.resolve)))
        print("%4d Material%s%s" % (len(materials), pl(materials), listContents(materials, opts.resolve)))
        print("%4d Collection%s%s" % (len(collections), pl(collections), listContents(collections, opts.resolve)))
        print("%4d GeomInfo%s%s" % (len(geominfos), pl(geominfos), listContents(geominfos, opts.resolve)))
        print("%4d PropertySet%s%s" % (len(propsets), pl(propsets), listContents(propsets, opts.resolve)))
        print("%4d Look%s%s" % (len(looks), pl(looks), listContents(looks, opts.resolve)))
        print("%4d LookGroup%s%s" % (len(lookgroups), pl(lookgroups), listContents(lookgroups, opts.resolve)))
        print("%4d Top-level backdrop%s%s" % (len(backdrops), pl(backdrops), listContents(backdrops, opts.resolve)))
        print("----------------------------------")
def listContents(elemlist, resolve):
    """Return a formatted description of each element in *elemlist*, prefixed
    with ':\\n\\t' and joined with '\\n\\t', dispatching on the element's
    MaterialX class.  *resolve* selects the inheritance-resolved ("active")
    lookups where applicable.  Returns '' for an empty list."""
    if len(elemlist) == 0:
        return ''
    names = []
    for elem in elemlist:
        if elem.isA(mx.NodeDef):
            outtype = elem.getType()
            outs = ""
            # Multi-output nodedefs list each output on its own line.
            if outtype == "multioutput":
                for ot in elem.getOutputs():
                    outs = outs + \
                        '\n\t %s output "%s"' % (ot.getType(), ot.getName())
            names.append('%s %s "%s"%s' %
                         (outtype, elem.getNodeString(), elem.getName(), outs))
            names.append(listNodedefInterface(elem))
        elif elem.isA(mx.Implementation):
            impl = elem.getName()
            targs = []
            if elem.hasTarget():
                targs.append("target %s" % elem.getTarget())
            if targs:
                impl = "%s (%s)" % (impl, ", ".join(targs))
            # Annotate with source file and/or entry-point function if known.
            if elem.hasFunction():
                if elem.hasFile():
                    impl = "%s [%s:%s()]" % (
                        impl, elem.getFile(), elem.getFunction())
                else:
                    impl = "%s [function %s()]" % (impl, elem.getFunction())
            elif elem.hasFile():
                impl = "%s [%s]" % (impl, elem.getFile())
            names.append(impl)
        elif elem.isA(mx.Backdrop):
            names.append('%s: contains "%s"' %
                         (elem.getName(), elem.getContainsString()))
        elif elem.isA(mx.NodeGraph):
            # Outputs are children too, so subtract them from the node count.
            nchildnodes = len(elem.getChildren()) - elem.getOutputCount()
            backdrops = elem.getBackdrops()
            nbackdrops = len(backdrops)
            outs = ""
            if nbackdrops > 0:
                for bd in backdrops:
                    outs = outs + '\n\t backdrop "%s"' % (bd.getName())
                    outs = outs + ' contains "%s"' % bd.getContainsString()
            # Describe each output and the upstream network feeding it.
            if elem.getOutputCount() > 0:
                for ot in elem.getOutputs():
                    outs = outs + '\n\t %s output "%s"' % (ot.getType(), ot.getName())
                    outs = outs + traverseInputs(ot, "", 0)
            nd = elem.getNodeDef()
            if nd:
                names.append('%s (implementation for nodedef "%s"): %d nodes%s' % (
                    elem.getName(), nd.getName(), nchildnodes, outs))
            else:
                names.append("%s: %d nodes, %d backdrop%s%s" % (
                    elem.getName(), nchildnodes, nbackdrops, pl(backdrops), outs))
        elif elem.isA(mx.Node, mx.SURFACE_MATERIAL_NODE_STRING):
            shaders = mx.getShaderNodes(elem)
            names.append("%s: %d connected shader node%s" % (elem.getName(), len(shaders), pl(shaders)))
            for shader in shaders:
                names.append('Shader node "%s" (%s), with bindings:%s' % (shader.getName(), shader.getCategory(), listShaderBindings(shader)))
        elif elem.isA(mx.GeomInfo):
            props = elem.getGeomProps()
            if props:
                propnames = " (Geomprops: " + ", ".join(map(
                    lambda x: "%s=%s" % (x.getName(), getConvertedValue(x)), props)) + ")"
            else:
                propnames = ""
            tokens = elem.getTokens()
            if tokens:
                tokennames = " (Tokens: " + ", ".join(map(
                    lambda x: "%s=%s" % (x.getName(), x.getValueString()), tokens)) + ")"
            else:
                tokennames = ""
            names.append("%s%s%s" % (elem.getName(), propnames, tokennames))
        elif elem.isA(mx.VariantSet):
            vars = elem.getVariants()
            if vars:
                varnames = " (variants " + ", ".join(map(
                    lambda x: '"' + x.getName()+'"', vars)) + ")"
            else:
                varnames = ""
            names.append("%s%s" % (elem.getName(), varnames))
        elif elem.isA(mx.PropertySet):
            props = elem.getProperties()
            if props:
                propnames = " (" + ", ".join(map(
                    lambda x: "%s %s%s" % (x.getType(), x.getName(), getTarget(x)), props)) + ")"
            else:
                propnames = ""
            names.append("%s%s" % (elem.getName(), propnames))
        elif elem.isA(mx.LookGroup):
            lks = elem.getLooks()
            if lks:
                names.append("%s (looks: %s)" % (elem.getName(), lks))
            else:
                names.append("%s (no looks)" % (elem.getName()))
        elif elem.isA(mx.Look):
            # Each assignment category honours *resolve* via Active* lookups.
            mas = ""
            if resolve:
                mtlassns = elem.getActiveMaterialAssigns()
            else:
                mtlassns = elem.getMaterialAssigns()
            for mtlassn in mtlassns:
                mas = mas + "\n\t MaterialAssign %s to%s" % (
                    mtlassn.getMaterial(), getGeoms(mtlassn, resolve))
            pas = ""
            if resolve:
                propassns = elem.getActivePropertyAssigns()
            else:
                propassns = elem.getPropertyAssigns()
            for propassn in propassns:
                propertyname = propassn.getAttribute("property")
                pas = pas + "\n\t PropertyAssign %s %s to%s" % (
                    propassn.getType(), propertyname, getGeoms(propassn, resolve))
            psas = ""
            if resolve:
                propsetassns = elem.getActivePropertySetAssigns()
            else:
                propsetassns = elem.getPropertySetAssigns()
            for propsetassn in propsetassns:
                propertysetname = propsetassn.getAttribute("propertyset")
                psas = psas + "\n\t PropertySetAssign %s to%s" % (
                    propertysetname, getGeoms(propsetassn, resolve))
            varas = ""
            if resolve:
                variantassns = elem.getActiveVariantAssigns()
            else:
                variantassns = elem.getVariantAssigns()
            for varassn in variantassns:
                varas = varas + "\n\t VariantAssign %s from variantset %s" % (
                    varassn.getVariantString(), varassn.getVariantSetString())
            visas = ""
            if resolve:
                visassns = elem.getActiveVisibilities()
            else:
                visassns = elem.getVisibilities()
            for vis in visassns:
                visstr = 'on' if vis.getVisible() else 'off'
                visas = visas + "\n\t Set %s visibility%s %s to%s" % (
                    vis.getVisibilityType(), getViewerGeoms(vis), visstr, getGeoms(vis, resolve))
            names.append("%s%s%s%s%s%s" %
                         (elem.getName(), mas, pas, psas, varas, visas))
        else:
            # Fallback for element types with no special formatting.
            names.append(elem.getName())
    return ":\n\t" + "\n\t".join(names)
def listShaderBindings(shader):
    """Return one '\\n\\t'-prefixed line per shader input, describing either
    its upstream connection (nodegraph/output) or its bound value."""
    lines = []
    for inp in shader.getInputs():
        bname = inp.getName()
        btype = inp.getType()
        if inp.hasOutputString():
            outname = inp.getOutputString()
            if inp.hasNodeGraphString():
                lines.append('\n\t %s "%s" -> nodegraph "%s" output "%s"' % (btype, bname, inp.getNodeGraphString(), outname))
            else:
                lines.append('\n\t %s "%s" -> output "%s"' % (btype, bname, outname))
        else:
            lines.append('\n\t %s "%s" = %s' % (btype, bname, getConvertedValue(inp)))
    return ''.join(lines)
def listNodedefInterface(nodedef):
    """Return a '\\n\\t'-separated summary of a nodedef's active inputs and
    tokens, one ' <type> input/token "<name>"' entry each."""
    entries = []
    for inp in nodedef.getActiveInputs():
        entries.append(' %s input "%s"' % (inp.getType(), inp.getName()))
    for tok in nodedef.getActiveTokens():
        entries.append(' %s token "%s"' % (tok.getType(), tok.getName()))
    return '\n\t'.join(entries)
def traverseInputs(node, port, depth):
    """Recursively describe the upstream network feeding *node*, one line per
    connection; *port* names the downstream input and *depth* controls the
    indentation produced by spc()."""
    s = ''
    if node.isA(mx.Output):
        # Outputs are transparent: describe whatever node they connect to.
        parent = node.getConnectedNode()
        s = s + traverseInputs(parent, "", depth+1)
    else:
        s = s + '%s%s -> %s %s "%s"' % (spc(depth), port,
                                        node.getType(), node.getCategory(), node.getName())
        # Inputs: interface reference, literal value, or upstream connection.
        ins = node.getActiveInputs()
        for i in ins:
            if i.hasInterfaceName():
                intname = i.getInterfaceName()
                s = s + \
                    '%s%s ^- %s interface "%s"' % (spc(depth+1),
                                                   i.getName(), i.getType(), intname)
            elif i.hasValueString():
                val = getConvertedValue(i)
                s = s + \
                    '%s%s = %s value %s' % (
                        spc(depth+1), i.getName(), i.getType(), val)
            else:
                parent = i.getConnectedNode()
                if parent:
                    s = s + traverseInputs(parent, i.getName(), depth+1)
        # Tokens are flagged with a [T] prefix; a token without a value or
        # interface is reported as an error.
        toks = node.getActiveTokens()
        for i in toks:
            if i.hasInterfaceName():
                intname = i.getInterfaceName()
                s = s + \
                    '%s[T]%s ^- %s interface "%s"' % (
                        spc(depth+1), i.getName(), i.getType(), intname)
            elif i.hasValueString():
                val = i.getValueString()
                s = s + \
                    '%s[T]%s = %s value "%s"' % (
                        spc(depth+1), i.getName(), i.getType(), val)
            else:
                s = s + \
                    '%s[T]%s error: no valueString' % (
                        spc(depth+1), i.getName())
    return s
def pl(elem):
    """Return the plural suffix for a collection: '' when it holds exactly
    one element, 's' otherwise."""
    return "" if len(elem) == 1 else "s"
def spc(depth):
    """Return the newline-plus-indent prefix for the given traversal depth."""
    return "\n\t " + depth * ": "
def getConvertedValue(elem):
    """Return the element's value string, reporting unit metadata for
    unit-bearing numeric types.

    Actual unit conversion is not implemented yet; for float/vector types
    with unit attributes this only prints diagnostic messages.
    """
    if elem.getType() in ["float", "vector2", "vector3", "vector4"]:
        if elem.hasUnit():
            print("[Unit for %s is %s]" % (elem.getName(), elem.getUnit()))
        if elem.hasUnitType():
            print("[Unittype for %s is %s]" % (elem.getName(), elem.getUnitType()))
        # NOTDONE: unit conversion itself still to be implemented.
    return elem.getValueString()
def getGeoms(elem, resolve):
    """Describe the geometry binding of *elem* as ' geom "..."' and/or
    ' collection "..."' fragments; *resolve* selects the active geom."""
    parts = []
    if elem.hasGeom():
        geom = elem.getActiveGeom() if resolve else elem.getGeom()
        parts.append(' geom "%s"' % geom)
    if elem.hasCollectionString():
        parts.append(' collection "%s"' % elem.getCollectionString())
    return "".join(parts)
def getViewerGeoms(elem):
    """Describe the viewer-geometry binding of *elem*; returns '' when it has
    neither a viewer geom nor a viewer collection, otherwise a string
    beginning with ' of'."""
    parts = []
    if elem.hasViewerGeom():
        parts.append(' viewergeom "%s"' % elem.getViewerGeom())
    if elem.hasViewerCollection():
        parts.append(' viewercollection "%s"' % elem.getViewerCollection())
    body = "".join(parts)
    return " of" + body if body else ""
def getTarget(elem):
    """Return ' [target "..."]' when the element declares a target, else ''."""
    if not elem.hasTarget():
        return ""
    return ' [target "%s"]' % elem.getTarget()
# Script entry point.
if __name__ == '__main__':
    main()
#!/usr/bin/env python
'''
Verify that the given file is a valid MaterialX document.
'''
import argparse
import sys
import MaterialX as mx
def main():
    """Parse command-line options, load and validate a MaterialX document,
    and optionally print a verbose summary of its contents."""
    parser = argparse.ArgumentParser(description="Verify that the given file is a valid MaterialX document.")
    parser.add_argument("--resolve", dest="resolve", action="store_true", help="Resolve inheritance and string substitutions.")
    parser.add_argument("--verbose", dest="verbose", action="store_true", help="Print summary of elements found in the document.")
    parser.add_argument("--stdlib", dest="stdlib", action="store_true", help="Import standard MaterialX libraries into the document.")
    parser.add_argument(dest="inputFilename", help="Filename of the input document.")
    opts = parser.parse_args()
    doc = mx.createDocument()
    try:
        mx.readFromXmlFile(doc, opts.inputFilename)
    except mx.ExceptionFileMissing as err:
        # NOTE(review): exits with status 0 on a missing file — a non-zero
        # exit code may be more appropriate for scripted use; confirm intent.
        print(err)
        sys.exit(0)
    if opts.stdlib:
        # Merge the standard MaterialX data libraries into the document so
        # that nodedef references resolve during validation.
        stdlib = mx.createDocument()
        try:
            mx.loadLibraries(mx.getDefaultDataLibraryFolders(), mx.getDefaultDataSearchPath(), stdlib)
        except Exception as err:
            print(err)
            sys.exit(0)
        doc.importLibrary(stdlib)
    (valid, message) = doc.validate()
    if (valid):
        print("%s is a valid MaterialX document in v%s" % (opts.inputFilename, mx.getVersionString()))
    else:
        print("%s is not a valid MaterialX document in v%s" % (opts.inputFilename, mx.getVersionString()))
        print(message)
    if opts.verbose:
        # Gather each element category once, then print a count plus a
        # per-element description via listContents().
        nodegraphs = doc.getNodeGraphs()
        materials = doc.getMaterialNodes()
        looks = doc.getLooks()
        lookgroups = doc.getLookGroups()
        collections = doc.getCollections()
        nodedefs = doc.getNodeDefs()
        implementations = doc.getImplementations()
        geominfos = doc.getGeomInfos()
        geompropdefs = doc.getGeomPropDefs()
        typedefs = doc.getTypeDefs()
        propsets = doc.getPropertySets()
        variantsets = doc.getVariantSets()
        backdrops = doc.getBackdrops()
        print("----------------------------------")
        print("Document Version: {}.{:02d}".format(*doc.getVersionIntegers()))
        print("%4d Custom Type%s%s" % (len(typedefs), pl(typedefs), listContents(typedefs, opts.resolve)))
        print("%4d Custom GeomProp%s%s" % (len(geompropdefs), pl(geompropdefs), listContents(geompropdefs, opts.resolve)))
        print("%4d NodeDef%s%s" % (len(nodedefs), pl(nodedefs), listContents(nodedefs, opts.resolve)))
        print("%4d Implementation%s%s" % (len(implementations), pl(implementations), listContents(implementations, opts.resolve)))
        print("%4d Nodegraph%s%s" % (len(nodegraphs), pl(nodegraphs), listContents(nodegraphs, opts.resolve)))
        print("%4d VariantSet%s%s" % (len(variantsets), pl(variantsets), listContents(variantsets, opts.resolve)))
        print("%4d Material%s%s" % (len(materials), pl(materials), listContents(materials, opts.resolve)))
        print("%4d Collection%s%s" % (len(collections), pl(collections), listContents(collections, opts.resolve)))
        print("%4d GeomInfo%s%s" % (len(geominfos), pl(geominfos), listContents(geominfos, opts.resolve)))
        print("%4d PropertySet%s%s" % (len(propsets), pl(propsets), listContents(propsets, opts.resolve)))
        print("%4d Look%s%s" % (len(looks), pl(looks), listContents(looks, opts.resolve)))
        print("%4d LookGroup%s%s" % (len(lookgroups), pl(lookgroups), listContents(lookgroups, opts.resolve)))
        print("%4d Top-level backdrop%s%s" % (len(backdrops), pl(backdrops), listContents(backdrops, opts.resolve)))
        print("----------------------------------")
def listContents(elemlist, resolve):
    """Return a human-readable description of every element in *elemlist*,
    dispatching on element type; entries are joined with ":\\n\\t" prefixes.

    resolve: when true, use the "active" (inherited/resolved) accessors.
    """
    if len(elemlist) == 0:
        return ''
    names = []
    for elem in elemlist:
        if elem.isA(mx.NodeDef):
            outtype = elem.getType()
            outs = ""
            # Multioutput nodedefs list each declared output explicitly.
            if outtype == "multioutput":
                for ot in elem.getOutputs():
                    outs = outs + \
                        '\n\t    %s output "%s"' % (ot.getType(), ot.getName())
            names.append('%s %s "%s"%s' %
                         (outtype, elem.getNodeString(), elem.getName(), outs))
            names.append(listNodedefInterface(elem))
        elif elem.isA(mx.Implementation):
            # Describe where the implementation lives: target, file, function.
            impl = elem.getName()
            targs = []
            if elem.hasTarget():
                targs.append("target %s" % elem.getTarget())
            if targs:
                impl = "%s (%s)" % (impl, ", ".join(targs))
            if elem.hasFunction():
                if elem.hasFile():
                    impl = "%s [%s:%s()]" % (
                        impl, elem.getFile(), elem.getFunction())
                else:
                    impl = "%s [function %s()]" % (impl, elem.getFunction())
            elif elem.hasFile():
                impl = "%s [%s]" % (impl, elem.getFile())
            names.append(impl)
        elif elem.isA(mx.Backdrop):
            names.append('%s: contains "%s"' %
                         (elem.getName(), elem.getContainsString()))
        elif elem.isA(mx.NodeGraph):
            # Outputs are children too, so subtract them from the node count.
            nchildnodes = len(elem.getChildren()) - elem.getOutputCount()
            backdrops = elem.getBackdrops()
            nbackdrops = len(backdrops)
            outs = ""
            if nbackdrops > 0:
                for bd in backdrops:
                    outs = outs + '\n\t    backdrop "%s"' % (bd.getName())
                    outs = outs + ' contains "%s"' % bd.getContainsString()
            if elem.getOutputCount() > 0:
                for ot in elem.getOutputs():
                    outs = outs + '\n\t    %s output "%s"' % (ot.getType(), ot.getName())
                    outs = outs + traverseInputs(ot, "", 0)
            nd = elem.getNodeDef()
            if nd:
                names.append('%s (implementation for nodedef "%s"): %d nodes%s' % (
                    elem.getName(), nd.getName(), nchildnodes, outs))
            else:
                names.append("%s: %d nodes, %d backdrop%s%s" % (
                    elem.getName(), nchildnodes, nbackdrops, pl(backdrops), outs))
        elif elem.isA(mx.Node, mx.SURFACE_MATERIAL_NODE_STRING):
            # Material nodes: list each connected shader and its bindings.
            shaders = mx.getShaderNodes(elem)
            names.append("%s: %d connected shader node%s" % (elem.getName(), len(shaders), pl(shaders)))
            for shader in shaders:
                names.append('Shader node "%s" (%s), with bindings:%s' % (shader.getName(), shader.getCategory(), listShaderBindings(shader)))
        elif elem.isA(mx.GeomInfo):
            props = elem.getGeomProps()
            if props:
                propnames = " (Geomprops: " + ", ".join(map(
                    lambda x: "%s=%s" % (x.getName(), getConvertedValue(x)), props)) + ")"
            else:
                propnames = ""
            tokens = elem.getTokens()
            if tokens:
                tokennames = " (Tokens: " + ", ".join(map(
                    lambda x: "%s=%s" % (x.getName(), x.getValueString()), tokens)) + ")"
            else:
                tokennames = ""
            names.append("%s%s%s" % (elem.getName(), propnames, tokennames))
        elif elem.isA(mx.VariantSet):
            vars = elem.getVariants()
            if vars:
                varnames = " (variants " + ", ".join(map(
                    lambda x: '"' + x.getName()+'"', vars)) + ")"
            else:
                varnames = ""
            names.append("%s%s" % (elem.getName(), varnames))
        elif elem.isA(mx.PropertySet):
            props = elem.getProperties()
            if props:
                propnames = " (" + ", ".join(map(
                    lambda x: "%s %s%s" % (x.getType(), x.getName(), getTarget(x)), props)) + ")"
            else:
                propnames = ""
            names.append("%s%s" % (elem.getName(), propnames))
        elif elem.isA(mx.LookGroup):
            lks = elem.getLooks()
            if lks:
                names.append("%s (looks: %s)" % (elem.getName(), lks))
            else:
                names.append("%s (no looks)" % (elem.getName()))
        elif elem.isA(mx.Look):
            # Looks aggregate several assignment kinds; build one sub-list
            # per kind (material, property, propertyset, variant, visibility).
            mas = ""
            if resolve:
                mtlassns = elem.getActiveMaterialAssigns()
            else:
                mtlassns = elem.getMaterialAssigns()
            for mtlassn in mtlassns:
                mas = mas + "\n\t    MaterialAssign %s to%s" % (
                    mtlassn.getMaterial(), getGeoms(mtlassn, resolve))
            pas = ""
            if resolve:
                propassns = elem.getActivePropertyAssigns()
            else:
                propassns = elem.getPropertyAssigns()
            for propassn in propassns:
                propertyname = propassn.getAttribute("property")
                pas = pas + "\n\t    PropertyAssign %s %s to%s" % (
                    propassn.getType(), propertyname, getGeoms(propassn, resolve))
            psas = ""
            if resolve:
                propsetassns = elem.getActivePropertySetAssigns()
            else:
                propsetassns = elem.getPropertySetAssigns()
            for propsetassn in propsetassns:
                propertysetname = propsetassn.getAttribute("propertyset")
                psas = psas + "\n\t    PropertySetAssign %s to%s" % (
                    propertysetname, getGeoms(propsetassn, resolve))
            varas = ""
            if resolve:
                variantassns = elem.getActiveVariantAssigns()
            else:
                variantassns = elem.getVariantAssigns()
            for varassn in variantassns:
                varas = varas + "\n\t    VariantAssign %s from variantset %s" % (
                    varassn.getVariantString(), varassn.getVariantSetString())
            visas = ""
            if resolve:
                visassns = elem.getActiveVisibilities()
            else:
                visassns = elem.getVisibilities()
            for vis in visassns:
                visstr = 'on' if vis.getVisible() else 'off'
                visas = visas + "\n\t    Set %s visibility%s %s to%s" % (
                    vis.getVisibilityType(), getViewerGeoms(vis), visstr, getGeoms(vis, resolve))
            names.append("%s%s%s%s%s%s" %
                         (elem.getName(), mas, pas, psas, varas, visas))
        else:
            # Fallback: just the element name.
            names.append(elem.getName())
    return ":\n\t" + "\n\t".join(names)
def listShaderBindings(shader):
    """Describe each input binding on a shader node, one per indented line.

    Connected inputs show their upstream nodegraph/output; unconnected
    inputs show their (unit-converted) value string.
    """
    lines = []
    for inp in shader.getInputs():
        itype = inp.getType()
        iname = inp.getName()
        if inp.hasOutputString():
            outname = inp.getOutputString()
            if inp.hasNodeGraphString():
                lines.append('\n\t    %s "%s" -> nodegraph "%s" output "%s"'
                             % (itype, iname, inp.getNodeGraphString(), outname))
            else:
                lines.append('\n\t    %s "%s" -> output "%s"' % (itype, iname, outname))
        else:
            lines.append('\n\t    %s "%s" = %s' % (itype, iname, getConvertedValue(inp)))
    return "".join(lines)
def listNodedefInterface(nodedef):
    """List the typed inputs then tokens declared on a nodedef,
    separated by newline-tab for nested display."""
    entries = []
    for inp in nodedef.getActiveInputs():
        entries.append('    %s input "%s"' % (inp.getType(), inp.getName()))
    for tok in nodedef.getActiveTokens():
        entries.append('    %s token "%s"' % (tok.getType(), tok.getName()))
    return '\n\t'.join(entries)
def traverseInputs(node, port, depth):
    """Recursively describe the upstream network feeding *node*.

    port:  name of the downstream input this node feeds ("" for roots).
    depth: current nesting level; controls the spc() indent prefix.
    Returns the accumulated multi-line description string.
    """
    s = ''
    if node.isA(mx.Output):
        # Outputs are transparent: recurse straight into the connected node.
        parent = node.getConnectedNode()
        s = s + traverseInputs(parent, "", depth+1)
    else:
        s = s + '%s%s -> %s %s "%s"' % (spc(depth), port,
                                        node.getType(), node.getCategory(), node.getName())
        # Each input is either bound to an interface, a literal value,
        # or connected upstream (in which case we recurse).
        ins = node.getActiveInputs()
        for i in ins:
            if i.hasInterfaceName():
                intname = i.getInterfaceName()
                s = s + \
                    '%s%s ^- %s interface "%s"' % (spc(depth+1),
                                                   i.getName(), i.getType(), intname)
            elif i.hasValueString():
                val = getConvertedValue(i)
                s = s + \
                    '%s%s = %s value %s' % (
                        spc(depth+1), i.getName(), i.getType(), val)
            else:
                parent = i.getConnectedNode()
                if parent:
                    s = s + traverseInputs(parent, i.getName(), depth+1)
        # Tokens are marked "[T]"; unlike inputs they cannot be connected,
        # so a token with no value string is reported as an error.
        toks = node.getActiveTokens()
        for i in toks:
            if i.hasInterfaceName():
                intname = i.getInterfaceName()
                s = s + \
                    '%s[T]%s ^- %s interface "%s"' % (
                        spc(depth+1), i.getName(), i.getType(), intname)
            elif i.hasValueString():
                val = i.getValueString()
                s = s + \
                    '%s[T]%s = %s value "%s"' % (
                        spc(depth+1), i.getName(), i.getType(), val)
            else:
                s = s + \
                    '%s[T]%s error: no valueString' % (
                        spc(depth+1), i.getName())
    return s
def pl(elem):
    """Return the plural suffix: "" for exactly one element, "s" otherwise."""
    return "" if len(elem) == 1 else "s"
def spc(depth):
    """Return the newline-plus-indent prefix for a tree entry at *depth*."""
    indent = "\n\t  "
    return indent + depth * ": "
# Return a value string for the element, converting units if appropriate
def getConvertedValue(elem):
    """Return elem's value string; report unit metadata for numeric types.

    Actual unit conversion is not implemented yet (see NOTDONE below).
    """
    if elem.getType() in ("float", "vector2", "vector3", "vector4"):
        if elem.hasUnit():
            print("[Unit for %s is %s]" % (elem.getName(), elem.getUnit()))
        if elem.hasUnitType():
            print("[Unittype for %s is %s]" % (elem.getName(), elem.getUnitType()))
    # NOTDONE...
    return elem.getValueString()
def getGeoms(elem, resolve):
    """Return a display string of elem's geom/collection bindings.

    When *resolve* is true the active (inherited/substituted) geom is used.
    """
    pieces = []
    if elem.hasGeom():
        geom = elem.getActiveGeom() if resolve else elem.getGeom()
        pieces.append(' geom "%s"' % geom)
    if elem.hasCollectionString():
        pieces.append(' collection "%s"' % elem.getCollectionString())
    return "".join(pieces)
def getViewerGeoms(elem):
    """Return viewer-geometry bindings, prefixed with " of" when non-empty."""
    pieces = []
    if elem.hasViewerGeom():
        pieces.append(' viewergeom "%s"' % elem.getViewerGeom())
    if elem.hasViewerCollection():
        pieces.append(' viewercollection "%s"' % elem.getViewerCollection())
    body = "".join(pieces)
    return " of" + body if body else body
def getTarget(elem):
    """Return a bracketed target annotation, or "" when no target is set."""
    return ' [target "%s"]' % elem.getTarget() if elem.hasTarget() else ""
# Script entry point: run validation only when invoked directly.
if __name__ == '__main__':
    main()

View File

@@ -1,429 +0,0 @@
"""
Sample MaterialX ImageLoader implementation using OpenImageIO package.
This module provides a MaterialX-compatible ImageLoader implementation using OpenImageIO (OIIO).
The test loads an image, saves it out, and optionally previews it.
Steps:
1. Create an OIIOLoader which is derived from the ImageLoader interface class.
2. Create a new ImageHandler and register the loader with it.
3. Request to acquire an image using the ImageHandler. An EXR image is requested.
4. OIIOLoader will return supported extensions and match the requested image format.
5. As such the OIIOLoader will be requested to load in the EXR image, convert the
data and return a MaterialX Image.
6. Try to acquire the image again. This should return the cached MaterialX Image.
7. Save the image back to disk in the original format.
The image can optionally be previewed after load before save.
- Python Dependencies:
- OpenImageIO (version 3.0.6.1)
- API Docs can be found here: https://openimageio.readthedocs.io/en/v3.0.6.1/)
- numpy : For numerical operations on image data
- matplotlib : If image preview is desired.
"""
import ctypes
import os
import argparse
import logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("OIIOLoad")
try:
import MaterialX as mx
import MaterialX.PyMaterialXRender as mx_render
except ImportError:
logger.error("Required modules not found. Please install MaterialX.")
raise
try:
import OpenImageIO as oiio
import numpy as np
except ImportError:
logger.error("Required modules not found. Please install OpenImageIO and numpy.")
raise
have_matplot = False
try:
import matplotlib.pyplot as plt
have_matplot = True
except ImportError:
logger.warning("matplotlib module not found. Image preview display is disabled.")
class OiioImageLoader(mx_render.ImageLoader):
    """
    A MaterialX ImageLoader implementation that uses OpenImageIO to read image files.
    Inherits from MaterialX.ImageLoader and implements the required interface methods.
    Supports common image formats like PNG, JPEG, TIFF, EXR, HDR, etc.
    """
    def __init__(self):
        """
        Initialize the OiioImageLoader and set supported extensions."""
        super().__init__()
        # Set all extensions supported by OpenImageIO. e.g.
        # openexr:exr,sxr,mxr;tiff:tif,tiff,tx,env,sm,vsm;jpeg:jpg,jpe,jpeg,jif,jfif,jfi;bmp:bmp,dib;cineon:cin;dds:dds;dpx:dpx;fits:fits;hdr:hdr,rgbe;ico:ico;iff:iff,z;null:null,nul;png:png;pnm:ppm,pgm,pbm,pnm,pfm;psd:psd,pdd,psb;rla:rla;sgi:sgi,rgb,rgba,bw,int,inta;softimage:pic;targa:tga,tpic;term:term;webp:webp;zfile:zfile
        self._extensions = set()
        oiio_extensions = oiio.get_string_attribute("extension_list")
        # Split string by ";"
        for group in oiio_extensions.split(";"):
            # Each group is like "openexr:exr,sxr,mxr"
            if ":" in group:
                _, exts = group.split(":", 1)
                self._extensions.update(ext.strip() for ext in exts.split(","))
            else:
                self._extensions.update(ext.strip() for ext in group.split(","))
        logger.debug(f"Cache supported extensions: {self._extensions}")
        # preview: when True, loaded/saved images are shown via matplotlib.
        self.preview = False
        self.identifier = "OpenImageIO Custom Image Loader"
        # color_space: maps loaded file path -> OIIO-reported color space name,
        # consulted again at save time for the preview title.
        self.color_space = {}
    def supportedExtensions(self):
        """
        Derived method to return a set of supported image file extensions.
        """
        logger.info(f"OIIO supported extensions: {self._extensions}")
        return self._extensions
    def set_preview(self, value):
        """
        Set whether to preview images when loading and saving
        @param value: Boolean indicating whether to enable preview
        """
        self.preview = value
    def get_identifier(self):
        # NOTE(review): returns a different string than self.identifier set in
        # __init__ ("OpenImageIO Custom Image Loader") — confirm which is intended.
        return "OIIO Custom Loader"
    def previewImage(self, title, data, width, height, nchannels, color_space):
        """
        Utility method to preview an image using matplotlib.
        Handles normalization and dtype for correct display.
        @param title: Title for the preview window
        @param data: Image data array
        @param width: Image width
        @param height: Image height
        @param nchannels: Number of image channels
        @param color_space: Color space of the image
        """
        # No-op unless preview was enabled and matplotlib imported successfully.
        if not self.preview:
            return
        if have_matplot:
            # If the image is float16 (half), convert to float32
            if data.dtype == np.float16:
                data = data.astype(np.float32)
            flat = data.reshape(height, width, nchannels)
            # Always display as RGB (first 3 channels or repeat if less)
            if nchannels >= 3:
                rgb = flat[..., :3]
            else:
                rgb = np.repeat(flat[..., :1], 3, axis=-1)
            # Determine if normalization is needed
            if np.issubdtype(flat.dtype, np.floating):
                # If float, normalize to [0, 1] for display
                rgb_disp = np.clip(rgb, 0.0, 1.0)
            elif np.issubdtype(flat.dtype, np.integer):
                # If integer, assume 8 or 16 bit, scale if needed
                if flat.dtype == np.uint8:
                    rgb_disp = rgb  # matplotlib expects [0,255] for uint8
                elif flat.dtype == np.uint16:
                    # Scale 16-bit to 8-bit for display
                    rgb_disp = (rgb / 257).astype(np.uint8)
                else:
                    # For other integer types, try to scale to [0,255]
                    rgb_disp = np.clip(rgb, 0, 255).astype(np.uint8)
            else:
                rgb_disp = rgb
            # Set title bar text for the preview window
            fig, ax = plt.subplots()
            ax.imshow(rgb_disp)
            ax.axis("off")
            #fig.patch.set_facecolor("black")
            fig.canvas.manager.set_window_title(title)
            info = f"Dimensions:({width}x{height}), {nchannels} channels, type={data.dtype}, colorspace={color_space}"
            fig.suptitle(title, fontsize=12)
            plt.title(info, fontsize=9)
            plt.show()
    def loadImage(self, filePath):
        """
        Load an image from the file system (MaterialX interface method).
        @param filePath (MaterialX.FilePath): Path to the image file
        @returns MaterialX.ImagePtr: MaterialX Image object or None if loading fails
        """
        file_path_str = filePath.asString()
        logger.info(f"Load using OIIO loader: {file_path_str}")
        if not os.path.exists(file_path_str):
            print(f"Error: File '{file_path_str}' does not exist")
            return None
        try:
            # Open the image file
            img_input = oiio.ImageInput.open(file_path_str)
            if not img_input:
                print(f"Error: Could not open '{file_path_str}' - {oiio.geterror()}")
                return None
            # Get image specifications
            spec = img_input.spec()
            color_space = spec.getattribute("oiio:ColorSpace")
            logger.info(f"ColorSpace: {color_space}")
            self.color_space[file_path_str] = color_space
            # Check channel count; MaterialX images carry at most RGBA,
            # so extra channels are dropped at read time.
            channels = spec.nchannels
            if channels > 4:
                channels = 4
            # Determine MaterialX base type from OIIO format
            base_type = self._oiio_to_materialx_type(spec.format.basetype)
            if base_type is None:
                img_input.close()
                print(f"Error: Unsupported image format for '{file_path_str}'")
                return None
            # Create MaterialX image
            mx_image = mx_render.Image.create(spec.width, spec.height, channels, base_type)
            mx_image.createResourceBuffer()
            logger.debug(f"Create buffer with width: {spec.width}, height: {spec.height}, channels: {spec.nchannels} -> {channels}")
            # Read the image data using the correct OIIO Python API (returns a bytes object)
            logger.debug(f"Reading image data from '{file_path_str}' with spec: {spec}")
            data = img_input.read_image(0, 0, 0, channels, spec.format)
            # NOTE(review): OIIO's read_image may return None on failure, in
            # which case len(data) raises (caught by the outer except) and
            # img_input is never closed on this early-return path — confirm.
            if len(data) > 0:
                logger.debug(f"Done Reading image data from '{file_path_str}' with spec: {spec}")
            else:
                logger.error(f"Could not read image data.")
                return None
            self.previewImage("Loaded MaterialX Image", data, spec.width, spec.height, channels, color_space)
            # Steps:
            # - Copy the OIIO data into the MaterialX image resource buffer
            resource_buffer_ptr = mx_image.getResourceBuffer()
            bytes_per_channel = spec.format.size()
            total_bytes = spec.width * spec.height * channels * bytes_per_channel
            logger.info(f"Total bytes read in: {total_bytes} (width: {spec.width}, height: {spec.height}, channels: {channels}, format: {spec.format})")
            try:
                ctypes.memmove(resource_buffer_ptr, (ctypes.c_char * total_bytes).from_buffer_copy(data), total_bytes)
            except Exception as e:
                logger.error(f"Failed to update image resource buffer: {e}")
            img_input.close()
            return mx_image
        except Exception as e:
            print(f"Error loading image from '{file_path_str}': {str(e)}")
            return None
        return None
    def saveImage(self, filePath, image, verticalFlip=False):
        """
        @brief Saves an image to disk using OpenImageIO (OIIO).
        @param filePath The file path where the image will be saved. Expected to have an asString() method.
        @param image The MaterialX image object to save.
        @param verticalFlip Whether to vertically flip the image before saving. (Currently unused.)
        @return True if the image was saved successfully, False otherwise.
        """
        filename = filePath.asString()
        width = image.getWidth()
        height = image.getHeight()
        # Clamp to RGBA
        src_channels = image.getChannelCount()
        channels = min(src_channels, 4)
        if src_channels > 4:
            logger.warning(f"Image has {src_channels} channels. Saving only first {channels} (RGBA).")
        mx_basetype = image.getBaseType()
        oiio_format = self._materialx_to_oiio_type(mx_basetype)
        logger.info(f"mx_basetype: {mx_basetype}, oiio_format: {oiio_format}, base_stride: {image.getBaseStride()}")
        if oiio_format is None:
            logger.error(f"Unsupported MaterialX base type for OIIO: {mx_basetype}")
            return False
        buffer_addr = image.getResourceBuffer()
        np_type = self._materialx_type_to_np_type(mx_basetype)
        if np_type is None:
            logger.error(f"No NumPy dtype mapping for base type: {mx_basetype}")
            return False
        try:
            # Steps:
            # - Maps the MaterialX base type to OIIO and NumPy types.
            # - Allocates a NumPy array for the pixel data.
            # - Copies the raw buffer from the image into the NumPy array.
            # - Optionally previews the image for debugging.
            # - Creates an OIIO ImageOutput and writes the image to disk.
            #
            base_stride = image.getBaseStride()  # bytes per channel element
            total_bytes = width * height * src_channels * base_stride
            buf_type = (ctypes.c_char * total_bytes)
            buf = buf_type.from_address(buffer_addr)
            np_buffer = np.frombuffer(buf, dtype=np_type)
            # Validate total elements before reshape to catch mismatches early
            expected_elems = width * height * src_channels
            if np_buffer.size != expected_elems:
                logger.error(f"Buffer element count mismatch: got {np_buffer.size}, expected {expected_elems}.")
                return False
            np_buffer = np_buffer.reshape((height, width, src_channels))
            # Keep only up to RGBA
            pixels = np_buffer[..., :channels].copy()
            if verticalFlip:
                logger.info("Applying vertical flip before saving image.")
                pixels = np.flipud(pixels)
            logger.info("Previewing image after load into Image and reload for save...")
            # Remove "saved_" prefix if present
            search_name = filename.replace("saved_", "")
            # Recover the color space recorded at load time by matching the
            # source basename against the save path.
            color_space = "Unknown"
            for key in self.color_space:
                value = self.color_space[key]
                path = os.path.basename(key)
                if path in search_name:
                    color_space = value
            logger.info(f"colorspace lookup for: {search_name}. list: {color_space}")
            self.previewImage("OpenImageIO Output Image", pixels, width, height, channels, color_space)
        except Exception as e:
            logger.error(f"Error copying buffer to pixels: {e}")
            return False
        out = oiio.ImageOutput.create(filename)
        if not out:
            logger.error("Failed to create OIIO ImageOutput.")
            return False
        try:
            spec = oiio.ImageSpec(width, height, channels, oiio_format)
            out.open(filename, spec)
            out.write_image(pixels)
            # NOTE(review): log message says "(unknown)" instead of the
            # filename — likely meant to interpolate `filename`; confirm.
            logger.info(f"Image saved to (unknown) (w={width}, h={height}, c={channels}, type={mx_basetype})")
            out.close()
            return True
        except Exception as e:
            logger.error(f"Failed to write image: {e}")
            # NOTE(review): `finally: pass` does NOT suppress an exception
            # raised by out.close(); `except Exception: pass` may be intended.
            try:
                out.close()
            finally:
                pass
            return False
    def _oiio_to_materialx_type(self, oiio_basetype):
        """Convert OIIO base type to MaterialX Image base type."""
        type_mapping = {
            oiio.UINT8: mx_render.BaseType.UINT8,
            oiio.INT8: mx_render.BaseType.INT8,
            oiio.UINT16: mx_render.BaseType.UINT16,
            oiio.INT16: mx_render.BaseType.INT16,
            oiio.HALF: mx_render.BaseType.HALF,
            oiio.FLOAT: mx_render.BaseType.FLOAT
        }
        return_val = type_mapping.get(oiio_basetype, None)
        logger.debug(f"OIIO to MaterialX type mapping: {return_val} from {oiio_basetype}")
        return return_val
    def _materialx_to_oiio_type(self, mx_basetype):
        """Convert MaterialX Image base type to OIIO type."""
        type_mapping = {
            mx_render.BaseType.UINT8: oiio.UINT8,
            mx_render.BaseType.UINT16: oiio.UINT16,
            mx_render.BaseType.INT8: oiio.INT8,
            mx_render.BaseType.INT16: oiio.INT16,
            mx_render.BaseType.HALF: oiio.HALF,
            mx_render.BaseType.FLOAT: oiio.FLOAT,
        }
        return_val = type_mapping.get(mx_basetype, None)
        logger.debug(f"MaterialX type mapping: {mx_basetype} to {return_val}")
        return return_val
    def _materialx_type_to_np_type(self, mx_basetype):
        """Map MaterialX base type to NumPy dtype with explicit widths."""
        type_mapping = {
            mx_render.BaseType.UINT8: np.uint8,
            mx_render.BaseType.UINT16: np.uint16,
            mx_render.BaseType.INT8: np.int8,
            mx_render.BaseType.INT16: np.int16,
            mx_render.BaseType.HALF: np.float16,
            mx_render.BaseType.FLOAT: np.float32,
        }
        return type_mapping.get(mx_basetype, None)
def test_load_save():
    """
    Example usage of the OiioImageLoader class with MaterialX ImageHandler.

    Loads the image given on the command line through the custom loader,
    re-acquires it (exercising the handler cache), then saves it back out
    with a "saved_" filename prefix.
    """
    parser = argparse.ArgumentParser(description="MaterialX OIIO Image Handler")
    parser.add_argument("path", help="Path to the image file")
    parser.add_argument("--flip", action="store_true", help="Flip the image vertically")
    parser.add_argument("--preview", action="store_true", help="Preview the image before saving")
    args = parser.parse_args()
    test_image_path = args.path
    if not os.path.exists(test_image_path):
        logger.error(f"Image file not found: {test_image_path}")
        return
    # Create MaterialX handler with custom OIIO image loader
    loader = OiioImageLoader()
    loader.set_preview(args.preview)
    handler = mx_render.ImageHandler.create(loader)
    logger.info(f"Created image handler with loader ({loader.get_identifier()}): {handler is not None}")
    handler.addLoader(loader)
    mx_filepath = mx.FilePath(test_image_path)
    # Load image using handler API
    logger.info('-'*45)
    logger.info(f"Loading image from path: {mx_filepath.asString()}")
    mx_image = handler.acquireImage(mx_filepath)
    if mx_image:
        # Q: How to check for failed image load as you
        # get back a 1x1 pixel image.
        if mx_image.getWidth() == 1 and mx_image.getHeight() == 1:
            logger.warning("Failed to load image. Got 1x1 pixel image returned")
            return
        logger.info(f"MaterialX Image loaded via Image Handler:")
        logger.info(f"  Dimensions: {mx_image.getWidth()}x{mx_image.getHeight()}")
        logger.info(f"  Channels: {mx_image.getChannelCount()}")
        logger.info(f"  Base type: {mx_image.getBaseType()}")
        # Save image using handler API (to a new file)
        logger.info('-'*45)
        # Retrieve cached image
        mx_image = handler.acquireImage(mx_filepath)
        if mx_image:
            out_path = mx.FilePath("saved_" + os.path.basename(test_image_path))
            if handler.saveImage(out_path, mx_image, verticalFlip=args.flip):
                logger.info(f"MaterialX Image saved to {out_path.asString()}")
            else:
                logger.error("Failed to save image.")
        else:
            logger.error("Failed to acquire image for saving.")
    else:
        logger.error("Failed to load image.")

View File

@@ -1,514 +0,0 @@
#!/usr/bin/env python
"""
pybind11 documentation insertion tool.
Extracts documentation from Doxygen XML and inserts it into pybind11 bindings
using string matching via signature lookup table.
Logic:
- Builds a multi-key lookup for all functions (MaterialX::, mx::, Class::method, method)
- Handles free functions without <qualifiedname> by assuming MaterialX namespace
- Adds class context tracking to correctly document lambda-based bindings
- Supports .def(...) and .def_static(...); skips .def_readonly_static(...)
"""
import argparse
import re
import json
import xml.etree.ElementTree as ET
from pathlib import Path
from typing import Dict, Optional
# Defaults (can be overridden by CLI)
DOXYGEN_XML_DIR = Path("build/documents/doxygen_xml")
PYBIND_DIR = Path("source/PyMaterialX")
class DocExtractor:
    """Extracts documentation from Doxygen XML files and builds a lookup table."""
    def __init__(self, xml_dir: Path):
        # Directory containing the Doxygen-generated XML output.
        self.xml_dir = xml_dir
        # Normalized qualified name -> doc text / doc dict.
        self.class_docs: Dict[str, str] = {}
        self.func_docs: Dict[str, Dict] = {}
        # Multi-key lookup: all name variants point to the same doc
        self.func_lookup: Dict[str, Dict] = {}
    def extract(self):
        """Walk every XML file once, then derive the variant lookup table."""
        if not self.xml_dir.exists():
            raise FileNotFoundError(f"Doxygen XML directory not found: {self.xml_dir}")
        for xml_file in self.xml_dir.glob("*.xml"):
            self._process_xml_file(xml_file)
        self._build_lookup_table()
        print(f"Extracted {len(self.class_docs)} classes and {len(self.func_docs)} functions")
        print(f"Built lookup table with {len(self.func_lookup)} keys")
    def _process_xml_file(self, xml_file: Path):
        # Collect class/struct docs and function docs from one Doxygen file.
        tree = ET.parse(xml_file)
        root = tree.getroot()
        # Class / struct documentation
        for compound in root.findall(".//compounddef[@kind='class']") + root.findall(".//compounddef[@kind='struct']"):
            self._extract_class_doc(compound)
        # Function documentation
        for member in root.findall(".//memberdef[@kind='function']"):
            self._extract_func_doc(member)
    def _extract_class_doc(self, compound):
        # Combine brief and detailed descriptions; skip classes with no docs.
        name = self._get_text(compound.find("compoundname"))
        brief = self._get_text(compound.find("briefdescription/para"))
        detail = self._extract_detail(compound.find("detaileddescription"))
        doc = "\n\n".join(filter(None, [brief, detail]))
        if doc:
            normalized = self._normalize_name(name)
            self.class_docs[normalized] = doc
    def _extract_func_doc(self, member):
        name = self._get_text(member.find("name"))
        qualified = self._get_text(member.find("qualifiedname"))
        # Many free functions have no <qualifiedname>; use the bare name
        # and normalize to MaterialX::name so lookups can resolve.
        if not qualified and name:
            qualified = name
        if not qualified:
            return
        brief = self._get_text(member.find("briefdescription/para"))
        detail = self._extract_detail(member.find("detaileddescription"))
        params = self._extract_params(member)
        returns = self._get_text(member.find(".//simplesect[@kind='return']"))
        normalized = self._normalize_name(qualified)
        self.func_docs[normalized] = {
            "brief": brief,
            "detail": detail,
            "params": params,
            "returns": returns,
        }
    def _build_lookup_table(self):
        # First writer wins: an existing variant entry is never overwritten,
        # so fully-qualified docs take precedence in iteration order.
        for qualified_name, doc in self.func_docs.items():
            for variant in self._generate_name_variants(qualified_name):
                if variant not in self.func_lookup:
                    self.func_lookup[variant] = doc
    def _generate_name_variants(self, qualified_name: str) -> list:
        """Return every lookup key this doc should be reachable under:
        full name, Class::method, bare method, and mx:: aliases."""
        variants = [qualified_name]
        parts = qualified_name.split("::")
        # Class::method
        if len(parts) >= 2:
            variants.append("::".join(parts[-2:]))
        # method
        if len(parts) >= 1:
            variants.append(parts[-1])
        # mx:: variant if MaterialX::
        if qualified_name.startswith("MaterialX::"):
            mx_variant = qualified_name.replace("MaterialX::", "mx::", 1)
            variants.append(mx_variant)
            if len(parts) >= 3:
                variants.append(f"mx::{parts[-2]}::{parts[-1]}")
        return variants
    def _normalize_name(self, name: str) -> str:
        # Prefix unqualified names with the MaterialX namespace.
        if not name:
            return name
        return name if name.startswith("MaterialX::") else f"MaterialX::{name}"
    def _get_text(self, elem) -> str:
        # Flatten all nested element text and collapse internal whitespace.
        if elem is None:
            return ""
        text = "".join(elem.itertext())
        return re.sub(r"\s+", " ", text).strip()
    def _extract_detail(self, elem, exclude_tags={"parameterlist", "simplesect"}) -> str:
        # NOTE(review): mutable default argument; benign here because it is
        # never mutated, but a frozenset default would be more defensive.
        if elem is None:
            return ""
        parts = []
        for para in elem.findall("para"):
            # Skip paragraphs that hold @param / @return lists — those are
            # extracted separately by _extract_params and the caller.
            if not any(para.find(tag) is not None for tag in exclude_tags):
                t = self._get_text(para)
                if t:
                    parts.append(t)
        return "\n\n".join(parts)
    def _extract_params(self, member) -> Dict[str, str]:
        # Map parameter name -> description from the Doxygen parameter list.
        params = {}
        for param_item in member.findall(".//parameterlist[@kind='param']/parameteritem"):
            name = self._get_text(param_item.find("parameternamelist/parametername"))
            desc = self._get_text(param_item.find("parameterdescription"))
            if name:
                params[name] = desc
        return params
class DocInserter:
    """Inserts documentation into pybind11 binding files.

    Scans each ``.cpp`` file under ``pybind_dir`` for ``py::class_<...>(...)``
    and ``.def(...)`` / ``.def_static(...)`` expressions, and appends (or, with
    ``force_replace``, overwrites) docstring arguments built from the
    documentation held by the given extractor.
    """

    def __init__(self, extractor: "DocExtractor", pybind_dir: Path, force_replace: bool = False):
        self.extractor = extractor
        self.pybind_dir = pybind_dir
        # When True, existing docstring arguments are rewritten in place.
        self.force_replace = force_replace
        self.class_pattern = re.compile(r"py::class_<")
        # Match .def and .def_static; skip .def_readonly_static (constants)
        # (the original assigned this pattern twice; the duplicate is removed).
        self.def_pattern = re.compile(r"\.def(?:_static)?\s*\(")
        self.skip_pattern = re.compile(r"\.def_readonly_static\s*\(")

    def process_all_files(self):
        """Patch every .cpp file under the pybind directory and print a summary."""
        cpp_files = list(self.pybind_dir.rglob("*.cpp"))
        patched = 0
        for cpp_file in cpp_files:
            if self._process_file(cpp_file):
                patched += 1
        print(f"\nProcessed {len(cpp_files)} files, patched {patched}")

    def _process_file(self, cpp_file: Path) -> bool:
        """Insert class and method docs into one file; return True if it changed."""
        content = cpp_file.read_text(encoding="utf-8")
        original = content
        content = self._insert_class_docs(content)
        content = self._insert_method_docs(content)
        if content != original:
            cpp_file.write_text(content, encoding="utf-8")
            print(f" - {cpp_file.relative_to(self.pybind_dir.parent)}")
            return True
        else:
            # NOTE(review): both branches intentionally print the same line so
            # every scanned file is reported; only changed files are rewritten.
            print(f" - {cpp_file.relative_to(self.pybind_dir.parent)}")
            return False

    def _insert_class_docs(self, content: str) -> str:
        """Return *content* with class docstrings appended to py::class_ calls.

        For each ``py::class_<T, ...>(module, "Name", ...)`` expression whose
        "Name" has extracted documentation, appends (or with force_replace,
        replaces the third argument with) an escaped docstring literal.
        Malformed expressions are passed through unchanged.
        """
        result = []
        pos = 0
        for match in self.class_pattern.finditer(content):
            result.append(content[pos:match.start()])
            start = match.start()
            template_end = self._find_template_end(content, start)
            if template_end == -1:
                result.append(content[start:match.end()])
                pos = match.end()
                continue
            paren_start = content.find('(', template_end)
            if paren_start == -1:
                result.append(content[start:match.end()])
                pos = match.end()
                continue
            paren_end = self._find_matching_paren(content, paren_start)
            if paren_end == -1:
                result.append(content[start:match.end()])
                pos = match.end()
                continue
            args_text = content[paren_start + 1:paren_end]
            class_name = self._extract_class_name(args_text)
            if class_name:
                doc = self.extractor.class_docs.get(self.extractor._normalize_name(class_name))
                if doc:
                    args = self._split_args(args_text)
                    # A third argument is assumed to be an existing docstring.
                    if len(args) >= 3 and not self.force_replace:
                        result.append(content[start:paren_end + 1])
                        pos = paren_end + 1
                        continue
                    escaped = self._escape_for_cpp(doc)
                    if len(args) >= 3 and self.force_replace:
                        new_args = args[:2] + [f'"{escaped}"'] + args[3:]
                        result.append(content[start:paren_start + 1])
                        result.append(", ".join(new_args))
                        result.append(")")
                    else:
                        result.append(content[start:paren_end])
                        result.append(f', "{escaped}")')
                    pos = paren_end + 1
                    continue
            result.append(content[start:paren_end + 1])
            pos = paren_end + 1
        result.append(content[pos:])
        return "".join(result)

    def _insert_method_docs(self, content: str) -> str:
        """Return *content* with docstrings appended to .def/.def_static calls.

        Uses the enclosing py::class_ context (by line number) to qualify
        method names when resolving documentation for lambdas and pointers.
        """
        # Build a map of line numbers to class contexts
        class_contexts = self._extract_class_contexts(content)
        result = []
        pos = 0
        for match in self.def_pattern.finditer(content):
            # Defensive: never touch .def_readonly_static constant bindings.
            if self.skip_pattern.match(content, match.start()):
                continue
            result.append(content[pos:match.start()])
            start = match.start()
            paren_start = content.find('(', start)
            if paren_start == -1:
                result.append(content[start:match.end()])
                pos = match.end()
                continue
            paren_end = self._find_matching_paren(content, paren_start)
            if paren_end == -1:
                result.append(content[start:match.end()])
                pos = match.end()
                continue
            args_text = content[paren_start + 1:paren_end]
            args = self._split_args(args_text)
            if len(args) < 2:
                result.append(content[start:paren_end + 1])
                pos = paren_end + 1
                continue
            has_doc = self._has_docstring(args)
            if has_doc and not self.force_replace:
                result.append(content[start:paren_end + 1])
                pos = paren_end + 1
                continue
            # args[1] is the bound callable (&Class::method or a lambda).
            callable_ref = args[1].strip()
            current_line = content[:start].count('\n')
            class_context = class_contexts.get(current_line)
            doc_entry = self._find_doc_for_callable(callable_ref, class_context)
            if doc_entry:
                docstring = self._build_docstring(doc_entry)
                escaped = self._escape_for_cpp(docstring)
                if has_doc and self.force_replace:
                    doc_idx = self._find_docstring_arg_index(args)
                    if doc_idx is not None:
                        new_args = args[:doc_idx] + [f'"{escaped}"'] + args[doc_idx + 1:]
                        result.append(content[start:paren_start + 1])
                        result.append(", ".join(new_args))
                        result.append(")")
                        pos = paren_end + 1
                        continue
                result.append(content[start:paren_end])
                result.append(f', "{escaped}")')
                pos = paren_end + 1
                continue
            result.append(content[start:paren_end + 1])
            pos = paren_end + 1
        result.append(content[pos:])
        return "".join(result)

    def _extract_class_contexts(self, content: str) -> Dict[int, str]:
        """Map each line number inside a py::class_ statement to its class name.

        The statement is taken to extend from the py::class_ token to the next
        ';'; the class name is the unqualified first template argument.
        """
        contexts = {}
        for match in self.class_pattern.finditer(content):
            start = match.start()
            template_end = self._find_template_end(content, start)
            if template_end == -1:
                continue
            template_start = content.find('<', start) + 1
            template_content = content[template_start:template_end - 1]
            class_type = template_content.split(',')[0].strip()
            class_name = class_type.split('::')[-1] if '::' in class_type else class_type
            start_line = content[:start].count('\n')
            end_pos = content.find(';', start)
            if end_pos != -1:
                end_line = content[:end_pos].count('\n')
                for line in range(start_line, end_line + 1):
                    contexts[line] = class_name
        return contexts

    def _find_doc_for_callable(self, callable_ref: str, class_context: Optional[str] = None) -> Optional[Dict]:
        """Resolve extracted docs for a bound callable expression, if any.

        Handles function pointers (``&Ns::Class::method``) directly and
        lambdas by spotting a ``obj.method(`` / ``obj->method(`` call inside
        the lambda body, qualified by *class_context* when available.
        """
        callable_ref = callable_ref.strip()
        # Function pointers like &mx::Class::method or &MaterialX::name
        if callable_ref.startswith('&'):
            name = callable_ref[1:].strip()
            name = re.sub(r'[,\s]+$', '', name)
            return self.extractor.func_lookup.get(name)
        # Lambdas: look for elem.method( or obj->method(
        method_match = re.search(r'[\.\->](\w+)\s*\(', callable_ref)
        if method_match:
            method_name = method_match.group(1)
            if class_context:
                for prefix in ("", "mx::", "MaterialX::"):
                    qualified = f"{prefix}{class_context}::{method_name}" if prefix else f"{class_context}::{method_name}"
                    doc = self.extractor.func_lookup.get(qualified)
                    if doc:
                        return doc
            return self.extractor.func_lookup.get(method_name)
        return None

    def _build_docstring(self, doc_entry: Dict) -> str:
        """Assemble brief/detail/Args/Returns sections into one docstring."""
        parts = []
        if doc_entry.get("brief"):
            parts.append(doc_entry["brief"])
        if doc_entry.get("detail"):
            parts.append(doc_entry["detail"])
        params = doc_entry.get("params", {})
        if params:
            param_lines = ["Args:"]
            for name, desc in params.items():
                param_lines.append(f"  {name}: {desc}" if desc else f"  {name}:")
            parts.append("\n".join(param_lines))
        if doc_entry.get("returns"):
            parts.append(f"Returns:\n  {doc_entry['returns']}")
        return "\n\n".join(parts)

    def _escape_for_cpp(self, s: str) -> str:
        """Escape backslashes, quotes and newlines for a C++ string literal."""
        if not s:
            return ""
        s = s.replace("\\", "\\\\").replace('"', '\\"')
        s = s.replace("\n", "\\n")
        return s

    def _find_template_end(self, content: str, start: int) -> int:
        """Return the index just past the '>' closing the template list at *start*.

        Tracks nested angle brackets; quotes toggle a string state so that
        bracket characters inside string literals are ignored. Returns -1 when
        no balanced close is found.
        """
        pos = content.find('<', start)
        if pos == -1:
            return -1
        depth = 1
        i = pos + 1
        in_string = False
        while i < len(content) and depth > 0:
            c = content[i]
            if c == '"' and content[i - 1] != '\\':
                in_string = not in_string
            elif not in_string:
                if c == '<':
                    depth += 1
                elif c == '>':
                    depth -= 1
            i += 1
        return i if depth == 0 else -1

    def _find_matching_paren(self, content: str, start: int) -> int:
        """Return the index of the ')' matching the '(' at/after *start*, or -1.

        String literals and backslash escapes are respected so parentheses
        inside quoted text do not affect the depth count.
        """
        depth = 0
        in_string = False
        escape = False
        for i in range(start, len(content)):
            c = content[i]
            if escape:
                escape = False
                continue
            if c == '\\':
                escape = True
                continue
            if c == '"':
                in_string = not in_string
                continue
            if not in_string:
                if c == '(':
                    depth += 1
                elif c == ')':
                    depth -= 1
                    if depth == 0:
                        return i
            return -1 if False else None or -1 if False else (lambda: None)() or -1 if False else None  # noqa
        return -1

    def _split_args(self, args_text: str) -> list:
        """Split a C++ argument list on top-level commas.

        Commas nested inside parentheses, angle brackets, or string literals
        are preserved; each returned argument is stripped of whitespace.
        """
        args = []
        current = []
        depth = 0
        in_string = False
        escape = False
        for c in args_text:
            if escape:
                current.append(c)
                escape = False
                continue
            if c == '\\':
                current.append(c)
                escape = True
                continue
            if c == '"':
                in_string = not in_string
                current.append(c)
                continue
            if not in_string:
                if c in '(<':
                    depth += 1
                elif c in ')>':
                    depth -= 1
                elif c == ',' and depth == 0:
                    args.append("".join(current).strip())
                    current = []
                    continue
            current.append(c)
        if current:
            args.append("".join(current).strip())
        return args

    def _extract_class_name(self, args_text: str) -> Optional[str]:
        """Return the unquoted Python-side class name (second py::class_ arg)."""
        args = self._split_args(args_text)
        if len(args) >= 2:
            return args[1].strip().strip('"')
        return None

    def _has_docstring(self, args: list) -> bool:
        """True if any argument after the callable is a bare string literal."""
        for arg in args[2:]:
            a = arg.strip()
            if not a.startswith("py::arg") and a.startswith('"'):
                return True
        return False

    def _find_docstring_arg_index(self, args: list) -> Optional[int]:
        """Index of the first bare string-literal argument after the callable."""
        for i, arg in enumerate(args[2:], start=2):
            a = arg.strip()
            if not a.startswith("py::arg") and a.startswith('"'):
                return i
        return None
def main():
    """Command-line entry point: extract Doxygen docs and patch the bindings.

    Returns 0 on success, 1 when either input directory is missing.
    """
    arg_parser = argparse.ArgumentParser(description="Extract Doxygen docs and insert into pybind11 bindings (simplified)")
    arg_parser.add_argument("-d", "--doxygen_xml_dir", type=Path, default=Path("build/documents/doxygen_xml"), help="Path to Doxygen XML output directory")
    arg_parser.add_argument("-p", "--pybind_dir", type=Path, default=Path("source/PyMaterialX"), help="Path to pybind11 bindings directory")
    arg_parser.add_argument("-f", "--force", action="store_true", help="Force replace existing docstrings")
    arg_parser.add_argument("-j", "--write_json", action="store_true", help="Write extracted docs to JSON files")
    opts = arg_parser.parse_args()

    # Guard clauses: both input directories must exist before doing any work.
    if not opts.doxygen_xml_dir.exists():
        print(f"Error: Doxygen XML directory not found: {opts.doxygen_xml_dir}")
        return 1
    if not opts.pybind_dir.exists():
        print(f"Error: Pybind directory not found: {opts.pybind_dir}")
        return 1

    print("Extracting documentation from Doxygen XML...")
    doc_extractor = DocExtractor(opts.doxygen_xml_dir)
    doc_extractor.extract()

    # Optionally dump the extracted tables for inspection.
    if opts.write_json:
        print("\nWriting JSON files...")
        Path("class_docs.json").write_text(json.dumps(doc_extractor.class_docs, indent=2), encoding="utf-8")
        Path("func_docs.json").write_text(json.dumps(doc_extractor.func_docs, indent=2), encoding="utf-8")
        print(" - class_docs.json")
        print(" - func_docs.json")

    mode = "Replacing" if opts.force else "Inserting"
    print(f"\n{mode} documentation in pybind11 files...")
    DocInserter(doc_extractor, opts.pybind_dir, opts.force).process_all_files()
    print("\nDone!")
    return 0
if __name__ == "__main__":
    # Raise SystemExit directly: the exit() builtin is injected by the `site`
    # module for interactive use and is not guaranteed under `python -S`.
    raise SystemExit(main())

202
MaterialX/python/Scripts/translateshader.py Normal file → Executable file
View File

@@ -1,101 +1,101 @@
#!/usr/bin/env python
'''
Generate a baked translated version of each material in the input document, using the ShaderTranslator class in the MaterialXShaderGen library
and the TextureBaker class in the MaterialXRenderGlsl library.
'''
import sys, os, argparse
from sys import platform
import MaterialX as mx
from MaterialX import PyMaterialXGenShader as mx_gen_shader
from MaterialX import PyMaterialXRender as mx_render
from MaterialX import PyMaterialXRenderGlsl as mx_render_glsl
if platform == "darwin":
from MaterialX import PyMaterialXRenderMsl as mx_render_msl
def main():
    """Translate every material in a MaterialX document to a destination
    shading model, then bake the translated materials to flat textures.

    Command-line driven (see the argparse options below). Exits with status 0
    early if the input file is missing or if shader translation fails.
    """
    parser = argparse.ArgumentParser(description="Generate a translated baked version of each material in the input document.")
    parser.add_argument("--width", dest="width", type=int, default=0, help="Specify an optional width for baked textures (defaults to the maximum image height in the source document).")
    parser.add_argument("--height", dest="height", type=int, default=0, help="Specify an optional height for baked textures (defaults to the maximum image width in the source document).")
    parser.add_argument("--hdr", dest="hdr", action="store_true", help="Bake images with high dynamic range (e.g. in HDR or EXR format).")
    parser.add_argument("--path", dest="paths", action='append', nargs='+', help="An additional absolute search path location (e.g. '/projects/MaterialX')")
    parser.add_argument("--library", dest="libraries", action='append', nargs='+', help="An additional relative path to a custom data library folder (e.g. 'libraries/custom')")
    parser.add_argument('--writeDocumentPerMaterial', dest='writeDocumentPerMaterial', type=mx.stringToBoolean, default=True, help='Specify whether to write baked materials to separate MaterialX documents. Default is True')
    # The Metal backend is only available (and the default) on macOS.
    if platform == "darwin":
        parser.add_argument("--glsl", dest="useGlslBackend", default=False, type=bool, help="Set to True to use GLSL backend (default = Metal).")
    parser.add_argument(dest="inputFilename", help="Filename of the input document.")
    parser.add_argument(dest="outputFilename", help="Filename of the output document.")
    parser.add_argument(dest="destShader", help="Destination shader for translation")
    opts = parser.parse_args()
    # Load standard and custom data libraries.
    stdlib = mx.createDocument()
    searchPath = mx.getDefaultDataSearchPath()
    searchPath.append(os.path.dirname(opts.inputFilename))
    libraryFolders = []
    if opts.paths:
        for pathList in opts.paths:
            for path in pathList:
                searchPath.append(path)
    if opts.libraries:
        for libraryList in opts.libraries:
            for library in libraryList:
                libraryFolders.append(library)
    libraryFolders.extend(mx.getDefaultDataLibraryFolders())
    mx.loadLibraries(libraryFolders, searchPath, stdlib)
    # Read and validate the source document.
    doc = mx.createDocument()
    try:
        mx.readFromXmlFile(doc, opts.inputFilename)
        doc.setDataLibrary(stdlib)
    except mx.ExceptionFileMissing as err:
        print(err)
        sys.exit(0)
    valid, msg = doc.validate()
    if not valid:
        # Warnings only; an invalid document is still processed.
        print("Validation warnings for input document:")
        print(msg)
    # Check the document for a UDIM set.
    udimSetValue = doc.getGeomPropValue(mx.UDIM_SET_PROPERTY)
    udimSet = udimSetValue.getData() if udimSetValue else []
    # Compute baking resolution from the source document.
    imageHandler = mx_render.ImageHandler.create(mx_render.StbImageLoader.create())
    imageHandler.setSearchPath(searchPath)
    if udimSet:
        # Resolve <UDIM> tokens using the first tile so images can be measured.
        resolver = doc.createStringResolver()
        resolver.setUdimString(udimSet[0])
        imageHandler.setFilenameResolver(resolver)
    imageVec = imageHandler.getReferencedImages(doc)
    bakeWidth, bakeHeight = mx_render.getMaxDimensions(imageVec)
    # Apply baking resolution settings.
    if opts.width > 0:
        bakeWidth = opts.width
    if opts.height > 0:
        bakeHeight = opts.height
    # Enforce a minimum 4x4 texture size.
    bakeWidth = max(bakeWidth, 4)
    bakeHeight = max(bakeHeight, 4)
    # Translate materials between shading models
    translator = mx_gen_shader.ShaderTranslator.create()
    try:
        translator.translateAllMaterials(doc, opts.destShader)
    except mx.Exception as err:
        print(err)
        sys.exit(0)
    # Bake translated materials to flat textures.
    baseType = mx_render.BaseType.FLOAT if opts.hdr else mx_render.BaseType.UINT8
    # Prefer the Metal baker on macOS unless GLSL was explicitly requested.
    if platform == "darwin" and not opts.useGlslBackend:
        baker = mx_render_msl.TextureBaker.create(bakeWidth, bakeHeight, baseType)
    else:
        baker = mx_render_glsl.TextureBaker.create(bakeWidth, bakeHeight, baseType)
    baker.writeDocumentPerMaterial(opts.writeDocumentPerMaterial)
    baker.bakeAllMaterials(doc, searchPath, opts.outputFilename)
if __name__ == '__main__':
    main()
#!/usr/bin/env python
'''
Generate a baked translated version of each material in the input document, using the ShaderTranslator class in the MaterialXShaderGen library
and the TextureBaker class in the MaterialXRenderGlsl library.
'''
import sys, os, argparse
from sys import platform
import MaterialX as mx
from MaterialX import PyMaterialXGenShader as mx_gen_shader
from MaterialX import PyMaterialXRender as mx_render
from MaterialX import PyMaterialXRenderGlsl as mx_render_glsl
if platform == "darwin":
from MaterialX import PyMaterialXRenderMsl as mx_render_msl
def main():
    """Translate every material in a MaterialX document to a destination
    shading model, then bake the translated materials to flat textures.

    Command-line driven (see the argparse options below). Exits with status 0
    early if the input file is missing or if shader translation fails.
    """
    parser = argparse.ArgumentParser(description="Generate a translated baked version of each material in the input document.")
    parser.add_argument("--width", dest="width", type=int, default=0, help="Specify an optional width for baked textures (defaults to the maximum image height in the source document).")
    parser.add_argument("--height", dest="height", type=int, default=0, help="Specify an optional height for baked textures (defaults to the maximum image width in the source document).")
    parser.add_argument("--hdr", dest="hdr", action="store_true", help="Bake images with high dynamic range (e.g. in HDR or EXR format).")
    parser.add_argument("--path", dest="paths", action='append', nargs='+', help="An additional absolute search path location (e.g. '/projects/MaterialX')")
    parser.add_argument("--library", dest="libraries", action='append', nargs='+', help="An additional relative path to a custom data library folder (e.g. 'libraries/custom')")
    # Typo fix in user-facing help text: "seprate" -> "separate".
    parser.add_argument('--writeDocumentPerMaterial', dest='writeDocumentPerMaterial', type=mx.stringToBoolean, default=True, help='Specify whether to write baked materials to separate MaterialX documents. Default is True')
    # The Metal backend is only available (and the default) on macOS.
    if platform == "darwin":
        parser.add_argument("--glsl", dest="useGlslBackend", default=False, type=bool, help="Set to True to use GLSL backend (default = Metal).")
    parser.add_argument(dest="inputFilename", help="Filename of the input document.")
    parser.add_argument(dest="outputFilename", help="Filename of the output document.")
    parser.add_argument(dest="destShader", help="Destination shader for translation")
    opts = parser.parse_args()
    # Read the source document.
    doc = mx.createDocument()
    try:
        mx.readFromXmlFile(doc, opts.inputFilename)
    except mx.ExceptionFileMissing as err:
        print(err)
        sys.exit(0)
    # Load standard and custom data libraries.
    stdlib = mx.createDocument()
    searchPath = mx.getDefaultDataSearchPath()
    searchPath.append(os.path.dirname(opts.inputFilename))
    libraryFolders = []
    if opts.paths:
        for pathList in opts.paths:
            for path in pathList:
                searchPath.append(path)
    if opts.libraries:
        for libraryList in opts.libraries:
            for library in libraryList:
                libraryFolders.append(library)
    libraryFolders.extend(mx.getDefaultDataLibraryFolders())
    mx.loadLibraries(libraryFolders, searchPath, stdlib)
    doc.importLibrary(stdlib)
    valid, msg = doc.validate()
    if not valid:
        # Warnings only; an invalid document is still processed.
        print("Validation warnings for input document:")
        print(msg)
    # Check the document for a UDIM set.
    udimSetValue = doc.getGeomPropValue(mx.UDIM_SET_PROPERTY)
    udimSet = udimSetValue.getData() if udimSetValue else []
    # Compute baking resolution from the source document.
    imageHandler = mx_render.ImageHandler.create(mx_render.StbImageLoader.create())
    imageHandler.setSearchPath(searchPath)
    if udimSet:
        # Resolve <UDIM> tokens using the first tile so images can be measured.
        resolver = doc.createStringResolver()
        resolver.setUdimString(udimSet[0])
        imageHandler.setFilenameResolver(resolver)
    imageVec = imageHandler.getReferencedImages(doc)
    bakeWidth, bakeHeight = mx_render.getMaxDimensions(imageVec)
    # Apply baking resolution settings.
    if opts.width > 0:
        bakeWidth = opts.width
    if opts.height > 0:
        bakeHeight = opts.height
    # Enforce a minimum 4x4 texture size.
    bakeWidth = max(bakeWidth, 4)
    bakeHeight = max(bakeHeight, 4)
    # Translate materials between shading models
    translator = mx_gen_shader.ShaderTranslator.create()
    try:
        translator.translateAllMaterials(doc, opts.destShader)
    except mx.Exception as err:
        print(err)
        sys.exit(0)
    # Bake translated materials to flat textures.
    baseType = mx_render.BaseType.FLOAT if opts.hdr else mx_render.BaseType.UINT8
    # Prefer the Metal baker on macOS unless GLSL was explicitly requested.
    if platform == "darwin" and not opts.useGlslBackend:
        baker = mx_render_msl.TextureBaker.create(bakeWidth, bakeHeight, baseType)
    else:
        baker = mx_render_glsl.TextureBaker.create(bakeWidth, bakeHeight, baseType)
    baker.writeDocumentPerMaterial(opts.writeDocumentPerMaterial)
    baker.bakeAllMaterials(doc, searchPath, opts.outputFilename)
if __name__ == '__main__':
    main()

View File

@@ -1,138 +1,138 @@
#!/usr/bin/env python
'''
Generate the "NodeGraphs.mtlx" example file programmatically.
'''
import MaterialX as mx
def main():
    """Programmatically build two example nodegraphs, validate them, and write
    the result to "myNodeGraphs.mtlx".
    """
    doc = mx.createDocument()
    #
    # Nodegraph example 1
    #
    ng1 = doc.addNodeGraph("NG_example1")
    img1 = ng1.addNode("image", "img1", "color3")
    # Because filenames look like string types, it is necessary to explicitly declare
    # this parameter value as type "filename".
    img1.setInputValue("file", "layer1.tif", "filename")
    img2 = ng1.addNode("image", "img2", "color3")
    img2.setInputValue("file", "layer2.tif", "filename")
    img3 = ng1.addNode("image", "img3", "float")
    img3.setInputValue("file", "mask1.tif", "filename")
    n0 = ng1.addNode("mix", "n0", "color3")
    # To connect an input to another node, you must first add the input with the expected
    # type, and then setConnectedNode() that input to the desired Node object.
    infg = n0.addInput("fg", "color3")
    infg.setConnectedNode(img1)
    inbg = n0.addInput("bg", "color3")
    inbg.setConnectedNode(img2)
    inmx = n0.addInput("mix", "float")
    inmx.setConnectedNode(img3)
    n1 = ng1.addNode("multiply", "n1", "color3")
    inp1 = n1.addInput("in1", "color3")
    inp1.setConnectedNode(n0)
    # The return value of setInputValue was previously bound to an unused
    # local (inp2); the binding is dropped here.
    n1.setInputValue("in2", 0.22)
    nout = ng1.addOutput("diffuse", "color3")
    nout.setConnectedNode(n1)
    #
    # Nodegraph example 3
    #
    ng3 = doc.addNodeGraph("NG_example3")
    img1 = ng3.addNode("image", "img1", "color3")
    img1.setInputValue("file", "<diff_albedo>", "filename")
    img2 = ng3.addNode("image", "img2", "color3")
    img2.setInputValue("file", "<dirt_albedo>", "filename")
    img3 = ng3.addNode("image", "img3", "float")
    img3.setInputValue("file", "<areamask>", "filename")
    img4 = ng3.addNode("image", "img4", "float")
    img4.setInputValue("file", "<noisemask>", "filename")
    n5 = ng3.addNode("constant", "n5", "color3")
    # For colorN, vectorN or matrix types, use the appropriate mx Type constructor.
    n5.setInputValue("value", mx.Color3(0.8,1.0,1.3))
    n6 = ng3.addNode("multiply", "n6", "color3")
    inp1 = n6.addInput("in1", "color3")
    inp1.setConnectedNode(n5)
    inp2 = n6.addInput("in2", "color3")
    inp2.setConnectedNode(img1)
    n7 = ng3.addNode("contrast", "n7", "color3")
    inp = n7.addInput("in", "color3")
    inp.setConnectedNode(img2)
    n7.setInputValue("amount", 0.2)
    n7.setInputValue("pivot", 0.5)
    n8 = ng3.addNode("mix", "n8", "color3")
    infg = n8.addInput("fg", "color3")
    infg.setConnectedNode(n7)
    inbg = n8.addInput("bg", "color3")
    inbg.setConnectedNode(n6)
    inmx = n8.addInput("mix", "float")
    inmx.setConnectedNode(img3)
    t1 = ng3.addNode("texcoord", "t1", "vector2")
    m1 = ng3.addNode("multiply", "m1", "vector2")
    inp1 = m1.addInput("in1", "vector2")
    inp1.setConnectedNode(t1)
    m1.setInputValue("in2", 0.003)
    # If limited floating-point precision results in output value strings like "0.00299999",
    # you could instead write this as a ValueString (must add the input to the node first):
    # inp2 = m1.addInput("in2", "float")
    # inp2.setValueString("0.003")
    n9 = ng3.addNode("noise2d", "n9", "color3")
    intx = n9.addInput("texcoord", "vector2")
    intx.setConnectedNode(m1)
    n9.setInputValue("amplitude", mx.Vector3(0.05,0.04,0.06))
    n10 = ng3.addNode("inside", "n10", "color3")
    inmask = n10.addInput("mask", "float")
    inmask.setConnectedNode(img4)
    inp = n10.addInput("in", "color3")
    inp.setConnectedNode(n9)
    n11 = ng3.addNode("add", "n11", "color3")
    inp1 = n11.addInput("in1", "color3")
    inp1.setConnectedNode(n10)
    inp2 = n11.addInput("in2", "color3")
    inp2.setConnectedNode(n8)
    nout1 = ng3.addOutput("albedo", "color3")
    nout1.setConnectedNode(n11)
    nout2 = ng3.addOutput("areamask", "float")
    nout2.setConnectedNode(img3)
    # It is not necessary to validate a document before writing but it's nice
    # to know for sure. And you can validate any element (and its children)
    # independently, not just the whole document.
    rc = ng1.validate()
    if (len(rc) >= 1 and rc[0]):
        print("Nodegraph %s is valid." % ng1.getName())
    else:
        print("Nodegraph %s is NOT valid: %s" % (ng1.getName(), str(rc[1])))
    rc = ng3.validate()
    if (len(rc) >= 1 and rc[0]):
        print("Nodegraph %s is valid." % ng3.getName())
    else:
        print("Nodegraph %s is NOT valid: %s" % (ng3.getName(), str(rc[1])))
    outfile = "myNodeGraphs.mtlx"
    mx.writeToXmlFile(doc, outfile)
    print("Wrote nodegraphs to %s" % outfile)
if __name__ == '__main__':
    main()
#!/usr/bin/env python
'''
Generate the "NodeGraphs.mtlx" example file programmatically.
'''
import MaterialX as mx
def main():
    """Programmatically build two example nodegraphs, validate them, and write
    the result to "myNodeGraphs.mtlx".
    """
    doc = mx.createDocument()
    #
    # Nodegraph example 1
    #
    ng1 = doc.addNodeGraph("NG_example1")
    img1 = ng1.addNode("image", "img1", "color3")
    # Because filenames look like string types, it is necessary to explicitly declare
    # this parameter value as type "filename".
    img1.setInputValue("file", "layer1.tif", "filename")
    img2 = ng1.addNode("image", "img2", "color3")
    img2.setInputValue("file", "layer2.tif", "filename")
    img3 = ng1.addNode("image", "img3", "float")
    img3.setInputValue("file", "mask1.tif", "filename")
    n0 = ng1.addNode("mix", "n0", "color3")
    # To connect an input to another node, you must first add the input with the expected
    # type, and then setConnectedNode() that input to the desired Node object.
    infg = n0.addInput("fg", "color3")
    infg.setConnectedNode(img1)
    inbg = n0.addInput("bg", "color3")
    inbg.setConnectedNode(img2)
    inmx = n0.addInput("mix", "float")
    inmx.setConnectedNode(img3)
    n1 = ng1.addNode("multiply", "n1", "color3")
    inp1 = n1.addInput("in1", "color3")
    inp1.setConnectedNode(n0)
    inp2 = n1.setInputValue("in2", 0.22)
    nout = ng1.addOutput("diffuse", "color3")
    nout.setConnectedNode(n1)
    #
    # Nodegraph example 3
    #
    ng3 = doc.addNodeGraph("NG_example3")
    img1 = ng3.addNode("image", "img1", "color3")
    img1.setInputValue("file", "<diff_albedo>", "filename")
    img2 = ng3.addNode("image", "img2", "color3")
    img2.setInputValue("file", "<dirt_albedo>", "filename")
    img3 = ng3.addNode("image", "img3", "float")
    img3.setInputValue("file", "<areamask>", "filename")
    img4 = ng3.addNode("image", "img4", "float")
    img4.setInputValue("file", "<noisemask>", "filename")
    n5 = ng3.addNode("constant", "n5", "color3")
    # For colorN, vectorN or matrix types, use the appropriate mx Type constructor.
    n5.setInputValue("value", mx.Color3(0.8,1.0,1.3))
    n6 = ng3.addNode("multiply", "n6", "color3")
    inp1 = n6.addInput("in1", "color3")
    inp1.setConnectedNode(n5)
    inp2 = n6.addInput("in2", "color3")
    inp2.setConnectedNode(img1)
    n7 = ng3.addNode("contrast", "n7", "color3")
    inp = n7.addInput("in", "color3")
    inp.setConnectedNode(img2)
    n7.setInputValue("amount", 0.2)
    n7.setInputValue("pivot", 0.5)
    n8 = ng3.addNode("mix", "n8", "color3")
    infg = n8.addInput("fg", "color3")
    infg.setConnectedNode(n7)
    inbg = n8.addInput("bg", "color3")
    inbg.setConnectedNode(n6)
    inmx = n8.addInput("mix", "float")
    inmx.setConnectedNode(img3)
    t1 = ng3.addNode("texcoord", "t1", "vector2")
    m1 = ng3.addNode("multiply", "m1", "vector2")
    inp1 = m1.addInput("in1", "vector2")
    inp1.setConnectedNode(t1)
    m1.setInputValue("in2", 0.003)
    # If limited floating-point precision results in output value strings like "0.00299999",
    # you could instead write this as a ValueString (must add the input to the node first):
    # inp2 = m1.addInput("in2", "float")
    # inp2.setValueString("0.003")
    n9 = ng3.addNode("noise2d", "n9", "color3")
    intx = n9.addInput("texcoord", "vector2")
    intx.setConnectedNode(m1)
    n9.setInputValue("amplitude", mx.Vector3(0.05,0.04,0.06))
    n10 = ng3.addNode("inside", "n10", "color3")
    inmask = n10.addInput("mask", "float")
    inmask.setConnectedNode(img4)
    inp = n10.addInput("in", "color3")
    inp.setConnectedNode(n9)
    n11 = ng3.addNode("add", "n11", "color3")
    inp1 = n11.addInput("in1", "color3")
    inp1.setConnectedNode(n10)
    inp2 = n11.addInput("in2", "color3")
    inp2.setConnectedNode(n8)
    nout1 = ng3.addOutput("albedo", "color3")
    nout1.setConnectedNode(n11)
    nout2 = ng3.addOutput("areamask", "float")
    nout2.setConnectedNode(img3)
    # It is not necessary to validate a document before writing but it's nice
    # to know for sure. And you can validate any element (and its children)
    # independently, not just the whole document.
    rc = ng1.validate()
    if (len(rc) >= 1 and rc[0]):
        print("Nodegraph %s is valid." % ng1.getName())
    else:
        print("Nodegraph %s is NOT valid: %s" % (ng1.getName(), str(rc[1])))
    rc = ng3.validate()
    if (len(rc) >= 1 and rc[0]):
        print("Nodegraph %s is valid." % ng3.getName())
    else:
        print("Nodegraph %s is NOT valid: %s" % (ng3.getName(), str(rc[1])))
    outfile = "myNodeGraphs.mtlx"
    mx.writeToXmlFile(doc, outfile)
    print("Wrote nodegraphs to %s" % outfile)
if __name__ == '__main__':
    main()