1
Fork 0

Compare commits

..

8 commits

Author SHA1 Message Date
f032838230
bug(import_bsi): Fix incorrect BSIZ parsing 2021-07-26 20:12:34 +02:00
64a15a0274
feat: Add SJSON library 2021-07-26 20:12:10 +02:00
2e7282956f
WIP: Import bones
Signed-off-by: Lucas Schwiderski <lucas@lschwiderski.de>
2021-04-08 23:23:55 +02:00
302e0e4863
feat(import_bsi): Implement TEXCOORD import
This imports UV maps.

Signed-off-by: Lucas Schwiderski <lucas@lschwiderski.de>
2021-04-07 19:12:37 +02:00
0c7613f735
feat: Implement object transformation
Signed-off-by: Lucas Schwiderski <lucas@lschwiderski.de>
2021-04-07 14:32:21 +02:00
888e8e48fa
feat: Use proper Vector data type
Signed-off-by: Lucas Schwiderski <lucas@lschwiderski.de>
2021-04-07 11:58:58 +02:00
1c6d160a1a
doc(import_bsi): Improve comments
Signed-off-by: Lucas Schwiderski <lucas@lschwiderski.de>
2021-04-07 01:41:07 +02:00
80a8e239d3
WIP: Add initial BSI import
So far, it's only capable of importing basic mesh information.

Signed-off-by: Lucas Schwiderski <lucas@lschwiderski.de>
2021-04-07 00:34:34 +02:00
7 changed files with 1240 additions and 80 deletions

View file

@ -4,7 +4,7 @@
## Install ## Install
Copy the `addons/bitsquid` directory to `$BLENDER/scripts/addons/bitsquid`, where `$BLENDER` is one of [Blender's configuration directories](https://docs.blender.org/manual/en/latest/advanced/blender_directory_layout.html#blender-directory-layout). Copy the `addons/bitsquid` directory as `$BLENDER/scripts/addons/bitsquid`, where `$BLENDER` is one of [Blender's configuration directories](https://docs.blender.org/manual/en/latest/advanced/blender_directory_layout.html#blender-directory-layout).
It should now show up in Blender's preferences as `Import-Export: Bitsquid Engine`. It should now show up in Blender's preferences as `Import-Export: Bitsquid Engine`.
### Development ### Development

View file

@ -13,7 +13,7 @@
# #
# You should have received a copy of the GNU General Public License # You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>. # along with this program. If not, see <https://www.gnu.org/licenses/>.
import os
bl_info = { bl_info = {
"name": "Bitsquid Engine", "name": "Bitsquid Engine",
@ -28,14 +28,15 @@ bl_info = {
# Reload sub modules if they are already loaded # Reload sub modules if they are already loaded
if "bpy" in locals(): if "bpy" in locals():
import importlib import importlib
if "unit_export" in locals(): if "export_unit" in locals():
importlib.reload(unit_export) importlib.reload(export_unit)
if "material_export" in locals(): if "export_material" in locals():
importlib.reload(material_export) importlib.reload(export_material)
if "import_bsi" in locals():
importlib.reload(import_bsi)
import bpy import bpy
from bpy.app.handlers import persistent
from bpy.types import ( from bpy.types import (
Panel, Panel,
Operator, Operator,
@ -48,13 +49,14 @@ from bpy.props import (
PointerProperty, PointerProperty,
) )
from bpy_extras.io_utils import ( from bpy_extras.io_utils import (
ExportHelper, ImportHelper,
axis_conversion, axis_conversion,
orientation_helper, orientation_helper,
path_reference_mode, path_reference_mode,
) )
from bitsquid.unit import export as unit_export from bitsquid.unit import export as export_unit
from bitsquid.material import export as material_export from bitsquid.material import export as export_material
from bitsquid import import_bsi
class BitsquidSettings(PropertyGroup): class BitsquidSettings(PropertyGroup):
@ -111,9 +113,9 @@ class OBJECT_OT_bitsquid_export(Operator):
object = context.active_object object = context.active_object
if object.bitsquid.export_materials: if object.bitsquid.export_materials:
for material_slot in object.material_slots.values(): for material_slot in object.material_slots.values():
material_export.save(self, context, material_slot.material) export_material.save(self, context, material_slot.material)
return unit_export.save(self, context, object) return export_unit.save(self, context, object)
class OBJECT_PT_bitsquid(Panel): class OBJECT_PT_bitsquid(Panel):
@ -159,8 +161,8 @@ class MATERIAL_OT_bitsquid_export(Operator):
return bpy.data.is_saved and context.active_object is not None return bpy.data.is_saved and context.active_object is not None
def execute(self, context): def execute(self, context):
material = context.material material = context.active_material
return material_export.save(self, context, material) return export_unit.save(self, context, material)
class MATERIAL_PT_bitsquid(Panel): class MATERIAL_PT_bitsquid(Panel):
@ -184,8 +186,42 @@ class MATERIAL_PT_bitsquid(Panel):
layout.operator("object.bitsquid_export_material", text="Export .material") layout.operator("object.bitsquid_export_material", text="Export .material")
@orientation_helper(axis_forward='-Z', axis_up='Y')
class ImportBSI(bpy.types.Operator, ImportHelper):
"""Load a Bitsquid .bsi File"""
bl_idname = "import_scene.bsi"
bl_label = "Import BSI"
bl_options = {'PRESET', 'UNDO'}
filename_ext = ".bsi"
filter_glob: StringProperty(
default="*.bsi;*.bsiz",
options={'HIDDEN'},
)
def execute(self, context):
keywords = self.as_keywords(ignore=("axis_forward",
"axis_up",
"filter_glob",
))
if bpy.data.is_saved and context.preferences.filepaths.use_relative_paths:
import os
keywords["relpath"] = os.path.dirname(bpy.data.filepath)
return import_bsi.load(self, context, **keywords)
def draw(self, context):
pass
def menu_func_import(self, context):
self.layout.operator(ImportBSI.bl_idname, text="Bitsquid Object (.bsi)")
# Register # Register
classes = [ classes = [
ImportBSI,
BitsquidSettings, BitsquidSettings,
SCENE_PT_bitsquid, SCENE_PT_bitsquid,
BitsquidObjectSettings, BitsquidObjectSettings,
@ -196,36 +232,18 @@ classes = [
MATERIAL_OT_bitsquid_export, MATERIAL_OT_bitsquid_export,
] ]
def import_template():
cwd = os.path.dirname(os.path.realpath(__file__))
resources_dir = "resources"
blendfile = "BitsquidPBR.blend"
section = "Material"
object = "Stingray Standard"
filepath = os.path.join(cwd, resources_dir, blendfile, section, object)
directory = os.path.join(cwd, resources_dir, blendfile, section)
filename = object
bpy.ops.wm.append(
filepath=filepath,
filename=filename,
directory=directory)
@persistent
def load_handler(dummy):
import_template()
def register(): def register():
from bpy.utils import register_class from bpy.utils import register_class
for cls in classes: for cls in classes:
register_class(cls) register_class(cls)
bpy.types.TOPBAR_MT_file_import.append(menu_func_import)
bpy.types.Scene.bitsquid = PointerProperty(type=BitsquidSettings) bpy.types.Scene.bitsquid = PointerProperty(type=BitsquidSettings)
bpy.types.Object.bitsquid = PointerProperty(type=BitsquidObjectSettings) bpy.types.Object.bitsquid = PointerProperty(type=BitsquidObjectSettings)
bpy.types.Material.bitsquid = PointerProperty(type=BitsquidMaterialSettings) bpy.types.Material.bitsquid = PointerProperty(type=BitsquidMaterialSettings)
bpy.app.handlers.load_post.append(load_handler)
def unregister(): def unregister():
del bpy.types.Scene.bitsquid del bpy.types.Scene.bitsquid
@ -234,7 +252,6 @@ def unregister():
for cls in reversed(classes): for cls in reversed(classes):
unregister_class(cls) unregister_class(cls)
bpy.app.handlers.load_post.remove(load_handler)
if __name__ == "__main__": if __name__ == "__main__":
register() register()

View file

@ -0,0 +1,598 @@
# Bitsquid Blender Tools
# Copyright (C) 2021 Lucas Schwiderski
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
import sys
import zlib
import json
import bpy
import math
import traceback
from mathutils import Vector, Matrix
from bpy_extras.io_utils import unpack_list
from bitsquid import sjson
def parse_sjson(file_path, skip_editor_data=True):
    """
    Translate Bitsquid .bsi SJSON data to plain JSON,
    then parse into a python dictionary.

    Taken from `bsi_import` in the Vermintide 2 SDK, but slightly
    modified to fix some issues and improve readability.

    :param file_path: path to a `.bsi`/`.bsiz` file (optionally zlib-compressed)
    :param skip_editor_data: when True, drop the `editor_metadata` block
    :returns: the parsed document as a dict
    """
    with open(file_path, 'rb') as f:
        data = f.read()

    # 'bsiz' magic marks a compressed file: 4-byte magic, 4-byte field
    # (presumably the uncompressed size — TODO confirm), then a zlib stream.
    if data[:4] == b'bsiz':
        data = zlib.decompress(data[8:])

    data = data.decode("utf-8")

    # Rebuild the document line by line as JSON text.
    file_lines = ['{\n']
    inside_list = False
    # State machine: 1 = watching for `editor_metadata`, 2 = currently
    # skipping its block, 0 = disabled/done.
    check_editor_data = 1 if skip_editor_data else 0

    for line in data.splitlines():
        if check_editor_data:
            if check_editor_data > 1:
                # Inside `editor_metadata = { ... }`; skip until the
                # closing brace at column 0.
                if line[0] == '}':
                    check_editor_data = 0
                continue
            elif line[:18] == 'editor_metadata = ':
                check_editor_data = 2
                continue

        if not line.strip():
            continue

        # Strip trailing whitespace,
        # including line break and carriage return
        line = line.rstrip()
        if line[-1] in ['\n', '\r']:
            line = line[:-1]

        if ' = ' in line:
            # `key = value` assignment; convert to `"key": value`.
            line_parts = line.split(' = ')
            if '[ ' in line_parts[-1] and ' ]' in line_parts[-1]:
                # Inline array value: insert commas between the
                # space-separated elements (and between nested arrays).
                new_end = ''
                end_parts = line_parts[-1].split(' ]')
                short_len = len(end_parts) - 1
                for i, end_part in enumerate(end_parts):
                    if not end_part:
                        if i < short_len:
                            new_end += ' ]'
                        continue
                    if '[ ' not in end_part:
                        new_end += end_part + ' ]'
                        continue
                    sub_part_pre = end_part.rpartition('[ ')
                    new_end = ''.join((
                        new_end,
                        sub_part_pre[0],
                        '[ ',
                        ', '.join(sub_part_pre[-1].split(' ')),
                        ' ]'
                    ))
                    if i < short_len and end_parts[i + 1] and end_parts[i + 1][:2] == ' [':
                        new_end += ','
                line_parts[-1] = new_end

            # Handle indentation
            if '\t' in line_parts[0]:
                line_start = line_parts[0].rpartition('\t')
            else:
                # Space-indented input: emulate rpartition('\t')'s
                # 3-tuple shape with a list.
                tab_len = len(line_parts[0]) - len(line_parts[0].lstrip())
                if tab_len > 4:
                    line_start = [line_parts[0][:tab_len - 4], '', line_parts[0].lstrip()]
                elif tab_len > 0:
                    line_start = ['', '', line_parts[0].lstrip()]
                else:
                    line_start = line_parts[0].rpartition('\t')

            # Quote the key unless it is already quoted.
            if line_start[-1][0] == '"':
                new_line = ''.join((
                    ''.join(line_start[:-1]),
                    line_start[-1],
                    ': ',
                    ''.join(line_parts[1:])
                ))
            else:
                new_line = ''.join((
                    ''.join(line_start[:-1]),
                    '"',
                    line_start[-1],
                    '": ',
                    ''.join(line_parts[1:])
                ))

            if not line_parts[-1][-1] == ',':
                new_line += ','
        elif ']' in line or '}' in line:
            # Closing bracket: it gets a trailing comma itself, and the
            # previous emitted line must not end in one.
            new_line = line + ','
            if '} {' in new_line:
                new_line = new_line.replace('} {', '}, {')
            if file_lines[-1][-2] == ',':
                file_lines[-1] = file_lines[-1][:-2] + '\n'
        elif inside_list and ']' not in line:
            # Bare list-element line: separate values with commas.
            tab_len = len(line) - len(line.lstrip())
            new_line = line[:tab_len] + ', '.join([x for x in line[tab_len:].split(' ') if x])
        else:
            new_line = line

        # No comma directly after an opening bracket.
        if new_line[-2:] in ['[,', '{,'] or new_line[-3:] in ['[ ,', '{ ,']:
            new_line = new_line[:-1]

        if inside_list:
            if ']' in line:
                inside_list = False
            elif not new_line[-1] in ['[', '{', ',']:
                new_line += ','
        elif '[' in line:
            inside_list = True

        file_lines.append(''.join(('\t', new_line.lstrip(), '\n')))

        # To save memory...
        if len(file_lines) > 100000:
            file_lines = [''.join(file_lines)]

    # Strip the trailing comma from the final entry.
    if file_lines[-1][-2] == ',':
        file_lines[-1] = file_lines[-1][:-2] + '\n'

    file_lines.append('}\n')

    return json.loads(''.join(file_lines))
def find(arr, f):
    """
    Return the first ``(value, key)`` pair of mapping ``arr`` for which
    the predicate ``f(value, key)`` is truthy, or ``None`` when no
    entry matches.
    """
    matches = (
        (value, key)
        for key, value in arr.items()
        if f(value, key)
    )
    return next(matches, None)
def create_mesh(self, context, name, node_data, geo_data):
    """
    Create a Blender mesh object from a BSI node definition
    and additional data from the file.

    :param self: the running operator (used for reporting)
    :param name: name for the new mesh datablock
    :param node_data: the BSI node definition (currently unused here)
    :param geo_data: geometry entry holding index and data streams
    :returns: the new ``bpy.types.Mesh``
    """
    # A list of 3-dimensional vectors. Each vector encodes a vertex position.
    vertices = []
    # A list of vectors, where each vector contains three indices into
    # `vertices`. Those three indices define the vertices that make up
    # the face.
    faces = []
    uv_name = "UVMap"
    uvs = []

    for i, index_stream in enumerate(geo_data["indices"]["streams"]):
        data_stream = geo_data["streams"][i]
        # NOTE(review): "stride" appears to be in bytes with 4-byte
        # elements, hence the division — confirm against the BSI format.
        stride = data_stream["stride"] / 4

        for channel in data_stream["channels"]:
            stream_data = data_stream["data"]
            if channel["name"] == 'POSITION':
                # NOTE: Do we need to handle `stride != 3`?
                # Since the value seems to be fixed per stream, a higher
                # stride would only be possible for objects that can be built
                # entirely from quads, which is very uncommon.
                if stride != 3:
                    raise NotImplementedError("stride != 3 cannot be handled")

                # Get vertex positions.
                # Iterate over data in sets of three values. Each set
                # represents `x`, `y` and `z`.
                for j in range(0, len(stream_data), 3):
                    vertices.append(Vector((
                        stream_data[j],
                        stream_data[j + 1],
                        stream_data[j + 2],
                    )))

                # Get face definitions. Values are vertex indices.
                # Iteration works just like vertices above.
                for j in range(0, len(index_stream), 3):
                    faces.append(Vector((
                        index_stream[j],
                        index_stream[j + 1],
                        index_stream[j + 2],
                    )))

                print(vertices)
                print(faces)
            elif channel["name"] == 'NORMAL':
                # Blender is able to create normals from the face definition
                # (i.e. the order in which the faces vertices were defined)
                # and that seems to be good enough
                # self.report({'INFO'}, "Ignoring custom normals")
                continue
            elif channel["name"] in {'ALPHA', 'COLOR'}:
                # Not sure if this can be intended as a primitive material,
                # but I'll assume it's not. And while Blender does feature
                # the concept of viewport-only vertex colors, that's rather
                # low priority to implement.
                # self.report({'INFO'}, "Ignoring vertex color data")
                continue
            elif channel["name"] == 'TEXCOORD':
                # UV map: two floats per entry, looked up per face-corner
                # through the index stream.
                uv_name = "UVMap{}".format(channel["index"] + 1)
                uv_data = data_stream["data"]
                uv_defs = [uv_data[j:j+2] for j in range(0, len(uv_data), 2)]
                uvs = [uv_defs[index_stream[j]] for j in range(0, len(index_stream))]
            else:
                # TODO: Implement other channel types
                # self.report(
                #     {'WARNING'},
                #     "Unknown channel type: {}".format(channel["name"])
                # )
                continue

    mesh = bpy.data.meshes.new(name)
    mesh.from_pydata(vertices, [], faces)

    if len(uvs) > 0:
        uv_layer = mesh.uv_layers.new(name=uv_name)
        uv_layer.data.foreach_set("uv", unpack_list(uvs))

    return mesh
def matrix_from_list(list):
    """
    Builds a square Matrix from a list of values in column order.

    When cross-referencing the `bsi_importer` and Maya's Python docs,
    it appears as though matrices stored in `.bsi` should be row ordered,
    but they are, in fact, column ordered.

    :param list: flat list of ``n*n`` values in column-major order
    :returns: an ``n x n`` ``mathutils.Matrix``
    """
    stride = int(math.sqrt(len(list)))
    rows = []
    for i in range(stride):
        # Element (row i, column j) of a column-major flat list lives at
        # index `i + j*stride`. The previous version hard-coded four
        # columns, which indexed out of range for anything but 4x4 input
        # even though `stride` was already computed; build `stride`
        # columns instead so 3x3 (and other square) matrices work too.
        rows.append(tuple(list[i + j * stride] for j in range(stride)))
    return Matrix(rows)
def import_joint(
    self,
    context,
    name,
    node_data,
    armature,
    parent_bone,
    parent_rotation,
    global_data
):
    """
    Imports a joint and all of its children.

    In BSI (similar to Maya) skeletons are defined as a series of joints,
    with bones added virtually in between, when needed.
    In Blender, skeletons are defined with bones, where each bone has a `head`
    and `tail`. So we need to convert the list of joints to a series of bones
    instead.

    The challenge here is the fact that joints have rotation data, whereas
    `head` and `tail` for bones don't. This changes how the position data has
    to be treated, as it is relative to the respective previous joint.
    Compared to the code that imports mesh objects, we can't just apply
    the matrix to the bone and have it position itself relative to its parent.
    Instead, we have to manually keep track of the parent's rotation.

    :returns: the created edit bone (or the reused parent bone for
        ``*_scale`` helper joints)
    """
    if "local" not in node_data:
        raise RuntimeError("No position value for joint '{}'".format(name))

    mat = matrix_from_list(node_data["local"])
    translation, rotation, _ = mat.decompose()

    if name.endswith("_scale"):
        # `*_scale` helper joints get no bone of their own; their
        # children attach to the parent's bone instead.
        # print("Skipping joint '{}'".format(name))
        bone = parent_bone
    else:
        bone = armature.edit_bones.new(name)

        if parent_bone:
            # The values for `bone.head` and `bone.tail` are relative to their
            # parent, so we need to apply that first.
            bone.parent = parent_bone
            bone.use_connect = True
            # bone.head = parent_bone.tail
        else:
            bone.head = Vector((0, 0, 0))

        if parent_rotation:
            print("[import_joint] {} Parent @ Local:".format(name), parent_rotation, parent_rotation @ translation)
            bone.tail = bone.head + (parent_rotation @ translation)
        else:
            bone.tail = bone.head + translation

        print("[import_joint] {} Local:".format(name), translation)
        print("[import_joint] {} Bone:".format(name), bone.head, bone.tail)

    if "children" in node_data:
        for child_name, child_data in node_data["children"].items():
            if child_data["parent"] != name:
                raise RuntimeError(
                    "Assigned parent '{}' doesn't match actual parent node '{}'"
                    .format(child_data["parent"], name)
                )

            if child_name.startswith("j_"):
                # Recurse; pass our own rotation so the child can orient
                # its translation relative to this joint.
                child_bone = import_joint(
                    self,
                    context,
                    child_name,
                    child_data,
                    armature,
                    bone,
                    rotation,
                    global_data
                )
                child_bone.parent = bone
            else:
                # DEBUG: ignoring these for now
                continue

                # NOTE(review): everything below is unreachable because of
                # the `continue` above; kept for when controller-node
                # import is picked up again.
                # Not entirely sure, but I think these are considered
                # "controller nodes" in Maya. Would make sense based on
                # name.
                if "children" in child_data:
                    raise RuntimeError(
                        "Controller node '{}' has children."
                        .format(child_name)
                    )

                if "geometries" not in child_data:
                    raise RuntimeError(
                        "Controller node '{}' has no geometry."
                        .format(child_name)
                    )

                child_obj = import_node(
                    self,
                    context,
                    child_name,
                    child_data,
                    global_data,
                )
                # TODO: How to parent to a bone?
                child_obj.parent = bone

    return bone
def import_armature(self, context, name, node_data, global_data):
    """
    Create an armature object for the joint hierarchy rooted at ``name``
    and import all ``j_*`` children as edit bones.

    :returns: the new armature object, linked to the current collection
    """
    armature = context.blend_data.armatures.new(name)
    # An armature itself cannot exist in the view layer.
    # We need to create an object from it
    obj = bpy.data.objects.new(name, armature)
    context.collection.objects.link(obj)

    # Armature needs to be in EDIT mode to allow adding bones
    context.view_layer.objects.active = obj
    bpy.ops.object.mode_set(mode='EDIT', toggle=False)

    if "local" not in node_data:
        raise RuntimeError("No position value for joint '{}'".format(name))

    mat = matrix_from_list(node_data["local"])
    obj.matrix_local = mat

    # DEBUG
    mat_loc, mat_rot, mat_scale = mat.decompose()
    print("[import_joint] {}".format(name), mat_loc, mat_rot)

    if "children" in node_data:
        for child_name, child_data in node_data["children"].items():
            if child_data["parent"] != name:
                raise RuntimeError(
                    "Assigned parent '{}' doesn't match actual parent node '{}'"
                    .format(child_data["parent"], name)
                )

            if child_name.startswith("j_"):
                # Root joints start without a parent bone or rotation.
                import_joint(
                    self,
                    context,
                    child_name,
                    child_data,
                    armature,
                    None,
                    None,
                    global_data
                )
            else:
                # DEBUG: Ignoring these for now
                continue

                # NOTE(review): everything below is unreachable because of
                # the `continue` above; kept for a future implementation.
                # Not entirely sure, but I think these are considered
                # "controller nodes" in Maya. Would make sense based on
                # name.
                if "children" in child_data:
                    raise RuntimeError(
                        "Controller node '{}' has children."
                        .format(child_name)
                    )

                child_obj = import_node(
                    self,
                    context,
                    child_name,
                    child_data,
                    global_data,
                )
                child_obj.parent = obj

    # Disable EDIT mode
    context.view_layer.objects.active = obj
    bpy.ops.object.mode_set(mode='OBJECT')

    return obj
def import_geometry(self, context, name, node_data, global_data):
    """
    Import a geometry node: build its mesh, apply its local transform,
    and recurse into child nodes.

    :returns: the new mesh object, linked to the current collection
    """
    if len(node_data["geometries"]) > 1:
        # Only the first geometry entry is imported below.
        self.report(
            {'WARNING'},
            "More than one geometry for node '{}'.".format(name)
        )

    geometry_name = node_data["geometries"][0]
    geo_data = global_data["geometries"][geometry_name]
    mesh = create_mesh(self, context, name, node_data, geo_data)

    obj = bpy.data.objects.new(mesh.name, mesh)
    obj.matrix_world = Matrix()

    # Check for a local offset
    if "local" in node_data:
        mat = matrix_from_list(node_data["local"])
        obj.matrix_local = mat

    # Recurse into child nodes and parent them to the current object
    if "children" in node_data:
        for child_name, child_data in node_data["children"].items():
            if child_data["parent"] != name:
                raise RuntimeError(
                    "Assigned parent '{}' doesn't match actual parent node '{}'"
                    .format(child_data["parent"], name)
                )

            child_obj = import_node(
                self,
                context,
                child_name,
                child_data,
                global_data,
            )

            if not isinstance(child_obj, bpy.types.Object):
                raise RuntimeError(
                    "Node of type '{}' cannot be child of a geometry node."
                    .format(type(child_obj))
                )

            child_obj.parent = obj

    # Make sure all objects are linked to the current collection.
    # Otherwise they won't show up in the outliner.
    collection = context.collection
    collection.objects.link(obj)

    return obj
def import_node(self, context, name, node_data, global_data):
    """Import a BSI node. Recurses into child nodes.

    Dispatches on the node kind: joints become armatures, nodes with
    geometry become mesh objects, anything else becomes an Empty.

    :returns: the created object (armature, mesh object, or Empty)
    """
    has_geometry = "geometries" in node_data
    is_joint = name in global_data["joint_index"]

    if is_joint:
        if has_geometry:
            raise RuntimeError("Found geometry data in joint '{}'".format(name))

        if not name.startswith("j_"):
            # Fixed: the message was missing the closing quote around the name.
            raise RuntimeError("Invalid name for joint: '{}'".format(name))

        self.report({'INFO'}, "Creating Armature '{}'".format(name))
        return import_armature(self, context, name, node_data, global_data)

    if has_geometry:
        return import_geometry(self, context, name, node_data, global_data)
    else:
        # Only the root node should be left now.
        # It needs slightly different treatment compared to a regular geometry
        # node
        if name != "root_point":
            self.report({'WARNING'}, "Unknown kind of node: '{}'. Falling back to Empty.".format(name))

        obj = bpy.data.objects.new(name, None)
        # Decrease axis size to prevent overcrowding in the viewport
        obj.empty_display_size = 0.1

        if "children" in node_data:
            for child_name, child_data in node_data["children"].items():
                if child_data["parent"] != name:
                    raise RuntimeError(
                        "Assigned parent '{}' doesn't match actual parent node '{}'"
                        .format(child_data["parent"], name)
                    )

                child_obj = import_node(
                    self,
                    context,
                    child_name,
                    child_data,
                    global_data,
                )
                child_obj.parent = obj

        # Make sure all objects are linked to the current collection.
        # Otherwise they won't show up in the outliner.
        collection = context.collection
        collection.objects.link(obj)

        return obj
def load(self, context, filepath, *, relpath=None):
    """
    Entry point for the `.bsi` import operator.

    :param self: the running operator (used for reporting)
    :param filepath: path of the `.bsi`/`.bsiz` file to import
    :param relpath: base directory for relative paths (currently unused here)
    :returns: ``{'FINISHED'}`` on success, ``{'CANCELLED'}`` otherwise
    """
    try:
        with open(filepath, 'rb') as f:
            data = f.read()

        # 'bsiz' magic marks a compressed file: 4-byte magic plus a
        # 4-byte header field, then a zlib stream.
        if data[:4] == b'bsiz':
            data = zlib.decompress(data[8:])

        data = data.decode("utf-8")
        global_data = sjson.loads(data)
    except Exception:
        # Best-effort: report and bail instead of raising into Blender.
        self.report({'ERROR'}, "Failed to parse SJSON: {}".format(filepath))
        traceback.print_exc(file=sys.stderr)
        return {'CANCELLED'}

    # Nothing to do if there are no nodes
    if "nodes" not in global_data:
        self.report({'WARNING'}, "No nodes to import in {}".format(filepath))
        return {'CANCELLED'}

    # Build joint index
    joint_index = []
    if "skins" in global_data:
        for v in global_data["skins"].values():
            for joint in v["joints"]:
                name = joint["name"]
                if name in global_data["geometries"]:
                    self.report({'ERROR'}, "Found joint with mesh data.")
                    return {'CANCELLED'}

                if name not in joint_index:
                    joint_index.append(joint['name'])

    global_data["joint_index"] = joint_index

    for name, node_data in global_data["nodes"].items():
        import_node(self, context, name, node_data, global_data)

    return {'FINISHED'}

View file

@ -29,54 +29,54 @@ variables = {
base_color = { base_color = {
type = "vector3" type = "vector3"
value = [ value = [
{{ base_color[0] }} 1
{{ base_color[1] }} 0.333333333333333
{{ base_color[2] }} 0
] ]
} }
emissive = { emissive = {
type = "vector3" type = "vector3"
value = [ value = [
{{ emissive[0] }} 0
{{ emissive[1] }} 0
{{ emissive[2] }} 0
] ]
} }
emissive_intensity = { emissive_intensity = {
type = "scalar" type = "scalar"
value = {{ emissive_intensity }} value = 1
} }
metallic = { metallic = {
type = "scalar" type = "scalar"
value = {{ metallic }} value = 0
} }
roughness = { roughness = {
type = "scalar" type = "scalar"
value = {{ roughness }} value = 0.91
} }
use_ao_map = { use_ao_map = {
type = "scalar" type = "scalar"
value = {{ use_ao_map }} value = 0
} }
use_color_map = { use_color_map = {
type = "scalar" type = "scalar"
value = {{ use_color_map }} value = 0
} }
use_emissive_map = { use_emissive_map = {
type = "scalar" type = "scalar"
value = {{ use_emissive_map }} value = 0
} }
use_metallic_map = { use_metallic_map = {
type = "scalar" type = "scalar"
value = {{ use_metallic_map }} value = 0
} }
use_normal_map = { use_normal_map = {
type = "scalar" type = "scalar"
value = {{ use_normal_map }} value = 0
} }
use_roughness_map = { use_roughness_map = {
type = "scalar" type = "scalar"
value = {{ use_roughness_map }} value = 0
} }
} }
@ -102,37 +102,7 @@ def save(self, context, material):
namespace = { namespace = {
'material': material, 'material': material,
'base_color': (1, 1, 1),
'roughness': 0.0,
'metallic': 0.0,
'emissive': (0, 0, 0),
'emissive_intensity': 0,
'use_color_map': 0,
'use_roughness_map': 0,
'use_metallic_map': 0,
'use_emissive_map': 0,
'use_ao_map': 0,
'use_normal_map': 0
} }
nodes = material.node_tree.nodes
try:
namespace['base_color'] = nodes["Base Color"].outputs[0].default_value
namespace['roughness'] = nodes["Roughness"].outputs[0].default_value
namespace['metallic'] = nodes["Metallic"].outputs[0].default_value
namespace['emissive'] = nodes["Emissive"].outputs[0].default_value
namespace['emissive_intensity'] = nodes["Emissive Intensity"].outputs[0].default_value
namespace['use_color_map'] = nodes["Use Color Map"].outputs[0].default_value
namespace['use_roughness_map'] = nodes["Use Roughness Map"].outputs[0].default_value
namespace['use_metallic_map'] = nodes["Use Metallic Map"].outputs[0].default_value
namespace['use_emissive_map'] = nodes["Use Emissive Map"].outputs[0].default_value
namespace['use_ao_map'] = nodes["Use AO Map"].outputs[0].default_value
namespace['use_normal_map'] = nodes["Use Normal Map"].outputs[0].default_value
except:
self.report({'WARNING'}, "Couldn't find Stingray Standard nodes")
content = step.Template(template, strip=False).expand(namespace) content = step.Template(template, strip=False).expand(namespace)
with open(filepath, "w", encoding="utf8", newline="\n") as f: with open(filepath, "w", encoding="utf8", newline="\n") as f:

View file

@ -0,0 +1,23 @@
Copyright (c) 2014-2018, Matthäus G. Chajdas
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View file

@ -0,0 +1,552 @@
"""Module to parse SJSON files."""
# coding=utf8
# @author: Matthäus G. Chajdas
# @license: 3-clause BSD
import collections.abc
import collections
import numbers
import string
import io
from enum import Enum
__version__ = '2.1.0'
class MemoryInputStream:
    """Input stream wrapper that reads from an in-memory bytes object."""

    def __init__(self, s):
        """
        s -- a bytes object.
        """
        self._stream = s
        self._current_index = 0
        self._length = len(s)

    def read(self, count=1):
        """read ``count`` bytes from the stream."""
        start = self._current_index
        stop = start + count
        if stop > self._length:
            _raise_end_of_file_exception(self)
        self._current_index = stop
        return self._stream[start:stop]

    def peek(self, count=1, allow_end_of_file=False):
        """peek ``count`` bytes from the stream. If ``allow_end_of_file`` is
        ``True``, no error will be raised if the end of the stream is reached
        while trying to peek."""
        stop = self._current_index + count
        if stop <= self._length:
            return self._stream[self._current_index:stop]
        if allow_end_of_file:
            return None
        _raise_end_of_file_exception(self)

    def skip(self, count=1):
        """skip ``count`` bytes."""
        self._current_index += count

    def get_location(self):
        """Get the current location in the stream as a (line, column) tuple."""
        loc = collections.namedtuple('Location', ['line', 'column'])
        consumed = self._stream[:self._current_index]
        # Lines are 1-based; the column is 1-based and resets at newlines.
        line = consumed.count(b'\n') + 1
        last_newline = consumed.rfind(b'\n')
        if last_newline < 0:
            column = self._current_index + 1
        else:
            column = self._current_index - last_newline
        return loc(line, column)
class ByteBufferInputStream:
    """Input stream wrapper for reading directly from a buffered I/O object."""

    def __init__(self, stream):
        self._stream = stream
        self._index = 0
        # Line/column are tracked incrementally while reading.
        self._line = 1
        self._column = 1

    def read(self, count=1):
        """read ``count`` bytes from the stream."""
        data = self._stream.read(count)
        if len(data) < count:
            _raise_end_of_file_exception(self)
        newline = ord('\n')
        # Iterating bytes yields ints, so compare against ord('\n').
        for byte in data:
            if byte == newline:
                self._line += 1
                self._column = 1
            else:
                self._column += 1
        return data

    def peek(self, count=1, allow_end_of_file=False):
        """peek ``count`` bytes from the stream. If ``allow_end_of_file`` is
        ``True``, no error will be raised if the end of the stream is reached
        while trying to peek."""
        buffered = self._stream.peek(count)
        if buffered:
            return buffered[:count]
        if allow_end_of_file:
            return None
        _raise_end_of_file_exception(self)

    def skip(self, count=1):
        """skip ``count`` bytes (reads and discards them)."""
        self.read(count)

    def get_location(self):
        """Get the current location in the stream."""
        loc = collections.namedtuple('Location', ['line', 'column'])
        return loc(self._line, self._column)
class ParseException(RuntimeError):
    """Raised when SJSON input cannot be parsed; carries the source location."""

    def __init__(self, msg, location):
        super().__init__(msg)
        self._msg = msg
        self._location = location

    def get_location(self):
        """Get the current location at which the exception occurred."""
        return self._location

    def __str__(self):
        line = self._location.line
        column = self._location.column
        return f'{self._msg} at line {line}, column {column}'
def _raise_end_of_file_exception(stream):
    """Raise a ParseException reporting an unexpected end of input,
    tagged with the stream's current location."""
    raise ParseException('Unexpected end-of-stream', stream.get_location())
def _consume(stream, what):
    """Skip leading whitespace, then consume the exact byte sequence
    ``what`` from ``stream``; raise ParseException on a mismatch."""
    _skip_whitespace(stream)
    expected_length = len(what)
    actual = stream.peek(expected_length)
    if actual != what:
        raise ParseException("Expected to read '{}'".format(what),
                             stream.get_location())
    stream.skip(expected_length)
def _skip_characters_and_whitespace(stream, num_char_to_skip):
    """Skip ``num_char_to_skip`` bytes, then any following whitespace.

    Returns _skip_whitespace's result: the next non-whitespace byte,
    or None at end of stream."""
    stream.skip(num_char_to_skip)
    return _skip_whitespace(stream)
# Single-byte values the tokenizer treats as whitespace.
_WHITESPACE_SET = {b' ', b'\t', b'\n', b'\r'}


def _is_whitespace(char):
    # `char` is a 1-byte bytes object as returned by stream.peek().
    return char in _WHITESPACE_SET
def _skip_c_style_comment(stream):
    """Skip over a C-style ``/* ... */`` comment.

    Raises ParseException (pointing at the comment start) when the
    closing ``*/`` is never found."""
    comment_start_location = stream.get_location()
    # skip the comment start
    stream.skip(2)
    # we don't support nested comments, so we're not going to
    # count the nesting level. Instead, skip ahead until we
    # find a closing */
    while True:
        next_char = stream.peek(1, allow_end_of_file=True)
        if next_char == b'*':
            comment_end = stream.peek(2, allow_end_of_file=True)
            if comment_end == b'*/':
                stream.skip(2)
                break
            else:
                stream.skip()
        elif next_char is None:
            raise ParseException("Could not find closing '*/' for comment",
                                 comment_start_location)
        # NOTE(review): on the '*'-without-'/' path above this is a second
        # skip() in the same iteration, advancing two bytes — confirm
        # against upstream sjson that input like '/***/' terminates
        # correctly.
        stream.skip()
def _skip_cpp_style_comment(stream):
    """Skip a C++-style ``//`` comment up to (but not consuming) the
    terminating newline, or up to end of input."""
    # Consume the leading '//' marker.
    stream.skip(2)
    next_char = stream.peek(allow_end_of_file=True)
    while next_char is not None and next_char != b'\n':
        stream.skip()
        next_char = stream.peek(allow_end_of_file=True)
def _skip_whitespace(stream):
    """skip whitespace. Returns the next character if a new position within the
    stream was found; returns None if the end of the stream was hit."""
    while True:
        next_char = stream.peek(allow_end_of_file=True)
        if not _is_whitespace(next_char):
            if next_char == b'/':
                # this could be a C or C++ style comment
                comment_start = stream.peek(2, allow_end_of_file=True)
                if comment_start == b'/*':
                    _skip_c_style_comment(stream)
                    # A comment may be followed by more whitespace/comments.
                    continue
                elif comment_start == b'//':
                    _skip_cpp_style_comment(stream)
                    continue
            break
        stream.skip()
    return next_char
_IDENTIFIER_SET = set(string.ascii_letters + string.digits + '_')
def _is_identifier(obj):
return chr(obj[0]) in _IDENTIFIER_SET
def _decode_escaped_character(char):
if char == b'b':
return b'\b'
elif char == b'n':
return b'\n'
elif char == b't':
return b'\t'
elif char == b'\\' or char == b'\"':
return char
else:
# If we get here, it's an invalid escape sequence. We will simply return
# it as-if it was not invalid (i.e. \l for instance will get turned
# into \\l)
return b'\\' + char
class RawQuoteStyle(Enum):
    """Raw (escape-free) string literal styles supported by the parser."""
    # Lua-style literal: [=[ ... ]=]
    Lua = 1
    # Python-style literal: """ ... """
    Python = 2
def _decode_string(stream, allow_identifier=False):
    '''Decode a string value and return it as a ``str``.

    Three quoting styles are handled: plain "..." with backslash escapes,
    raw Python-style """...""" literals and raw Lua-style [=[...]=]
    literals. If allow_identifier is True, a bare run of identifier
    characters ([A-Za-z0-9_]) is also accepted.

    Raises ParseException when quoting is required but absent, or when a
    '['-prefixed token is not a valid raw-literal opener.
    '''
    # When we enter here, we either start with " or [, or there is no quoting
    # enabled.
    _skip_whitespace(stream)
    result = bytearray()
    is_quoted = stream.peek() == b'\"' or stream.peek() == b'['
    if not allow_identifier and not is_quoted:
        raise ParseException('Quoted string expected', stream.get_location())
    raw_quotes = None
    # Try Python-style, """ delimited strings
    if is_quoted and stream.peek(3) == b'\"\"\"':
        stream.skip(3)
        raw_quotes = RawQuoteStyle.Python
    # Try Lua-style, [=[ delimited strings
    elif is_quoted and stream.peek(3) == b'[=[':
        stream.skip(3)
        raw_quotes = RawQuoteStyle.Lua
    elif is_quoted and stream.peek() == b'\"':
        # Plain quoted string; consume the opening '"'
        stream.skip()
    elif is_quoted:
        # Started with '[' but was not the '[=[' raw-literal opener
        raise ParseException('Invalid quoted string, must start with ",'
                             '""", or [=[',
                             stream.get_location())
    parse_as_identifier = not is_quoted
    while True:
        next_char = stream.peek()
        # A bare identifier ends at the first non-identifier character
        if parse_as_identifier and not _is_identifier(next_char):
            break
        if raw_quotes:
            # Raw literals copy bytes verbatim -- no escape processing
            if raw_quotes == RawQuoteStyle.Python and \
                    next_char == b'\"' and stream.peek(3) == b'\"\"\"':
                # This is a tricky case -- we're in a """ quoted string, and
                # we spotted three consecutive """. This could mean we're at
                # the end, but it doesn't have to be -- we actually need to
                # check all the cases below:
                # * """: simple case, just end here
                # * """": A single quote inside the string,
                #   followed by the end marker
                # * """"": A double double quote inside the string,
                #   followed by the end marker
                # Note that """""" is invalid, no matter what follows
                # afterwards, as the first group of three terminates the
                # string, and then we'd have an unrelated string afterwards.
                # We don't concat strings automatically so this will trigger
                # an error.
                # Start with longest match; as the other is a prefix, this
                # has to be the first check
                if stream.peek(5, allow_end_of_file=True) == b'\"\"\"\"\"':
                    result += b'\"\"'
                    stream.skip(5)
                    break
                elif stream.peek(4, allow_end_of_file=True) == b'\"\"\"\"':
                    result += next_char
                    stream.skip(4)
                    break
                stream.skip(3)
                break
            elif raw_quotes == RawQuoteStyle.Lua and \
                    next_char == b']' and stream.peek(3) == b']=]':
                # ']=]' closes the Lua-style literal
                stream.skip(3)
                break
            else:
                result += next_char
                stream.skip(1)
        else:
            if next_char == b'\"':
                # Closing quote: consume it and stop
                stream.read()
                break
            elif next_char == b'\\':
                # Escape sequence: decode the byte after the backslash
                stream.skip()
                result += _decode_escaped_character(stream.read())
            else:
                result += next_char
                stream.skip()
    return str(result, encoding='utf-8')
# Bytes that terminate a number literal: whitespace, ',', ']', '}' or
# end-of-stream (None, as returned by peek with allow_end_of_file=True).
_NUMBER_SEPARATOR_SET = _WHITESPACE_SET.union({b',', b']', b'}', None})
def _decode_number(stream, next_char):
    """Parse a number literal and return it as ``int`` or ``float``.

    next_char -- the next byte in the stream.

    Raises ValueError (via int()/float()) if the consumed bytes do not form
    a valid number.
    """
    digits = bytearray()
    seen_float_marker = False
    # Consume bytes until a separator (whitespace, ',', ']', '}' or EOF).
    while next_char not in _NUMBER_SEPARATOR_SET:
        if next_char in (b'.', b'e', b'E'):
            # A decimal point or exponent means this is a float.
            seen_float_marker = True
        digits += next_char
        stream.skip()
        next_char = stream.peek(allow_end_of_file=True)
    text = digits.decode('utf-8')
    return float(text) if seen_float_marker else int(text)
def _decode_dict(stream, delimited=False):
    """Parse a sequence of key/value pairs into an OrderedDict.

    delimited -- if ``True``, parsing will stop once the end-of-dictionary
                 delimiter has been reached (``}``). With ``False`` (the
                 top-level document case) parsing stops at end-of-stream.
    """
    from collections import OrderedDict
    result = OrderedDict()
    # The opening brace is optional (absent for a top-level document).
    if stream.peek() == b'{':
        stream.skip()
    next_char = _skip_whitespace(stream)
    while True:
        # End-of-stream terminates an undelimited (top-level) dictionary.
        if not delimited and next_char is None:
            break
        if next_char == b'}':
            stream.skip()
            break
        # Keys may be quoted strings or bare identifiers.
        key = _decode_string(stream, True)
        next_char = _skip_whitespace(stream)
        # We allow both '=' and ':' as separators inside maps
        if next_char == b'=' or next_char == b':':
            _consume(stream, next_char)
        value = _parse(stream)
        result[key] = value
        next_char = _skip_whitespace(stream)
        # Entries may optionally be separated by a comma.
        if next_char == b',':
            next_char = _skip_characters_and_whitespace(stream, 1)
    return result
def _parse_list(stream):
    """Parse a '['-delimited list of values and return it as a Python
    list. Elements may optionally be separated by commas."""
    items = []
    # consume the opening '['
    current = _skip_characters_and_whitespace(stream, 1)
    while current != b']':
        items.append(_parse(stream))
        current = _skip_whitespace(stream)
        if current == b',':
            current = _skip_characters_and_whitespace(stream, 1)
    # consume the closing ']'
    stream.skip()
    return items
def _parse(stream):
    """Parse a single SJSON value and return its Python equivalent.

    Raises ParseException when the next token is not a valid value.
    """
    current = _skip_whitespace(stream)
    if current == b't':
        _consume(stream, b'true')
        return True
    if current == b'f':
        _consume(stream, b'false')
        return False
    if current == b'n':
        _consume(stream, b'null')
        return None
    if current == b'{':
        return _decode_dict(stream, True)
    if current == b'\"':
        return _decode_string(stream)
    if current == b'[':
        # Disambiguate a list from a Lua-style [=[ ]=] raw string literal
        # by inspecting the second character.
        lookahead = stream.peek(2, allow_end_of_file=False)
        if lookahead[1:2] == b'=':
            return _decode_string(stream)
        return _parse_list(stream)
    # Anything else must be a number.
    try:
        return _decode_number(stream, current)
    except ValueError:
        raise ParseException('Invalid character', stream.get_location())
def load(stream):
    """Load a SJSON object from a (binary) stream."""
    buffered = io.BufferedReader(stream)
    return _decode_dict(ByteBufferInputStream(buffered))
def loads(text):
    """Load a SJSON object from a string."""
    encoded = text.encode('utf-8')
    return _decode_dict(MemoryInputStream(encoded))
def dumps(obj, indent=None):
    """Dump an object to a string. See ``dump`` for the meaning of
    *indent*."""
    import io
    buffer = io.StringIO()
    dump(obj, buffer, indent)
    return buffer.getvalue()
def dump(obj, fp, indent=None):
    """Dump an object to a stream.

    indent -- None/falsy for no indentation, a number for that many spaces
    per level (negative values are clamped to 0), or a string used verbatim
    as one indentation level.
    """
    # Renamed from '_indent' to avoid shadowing the module-level helper.
    if not indent:
        indent_text = ''
    elif isinstance(indent, numbers.Number):
        if indent < 0:
            indent = 0
        indent_text = ' ' * indent
    else:
        indent_text = indent
    for chunk in _encode(obj, indent=indent_text):
        fp.write(chunk)
# Characters that must be written as backslash escape sequences.
# NOTE: the backslash itself is handled separately in _escape_string and
# must NOT be added here -- it has to be escaped before these replacements.
_ESCAPE_CHARACTER_SET = {'\n': '\\n', '\b': '\\b', '\t': '\\t', '\"': '\\"'}


def _escape_string(obj, quote=True):
    """Escape a string, yielding the output fragments.

    If quote is set, the string will be returned with quotation marks at the
    beginning and end. If quote is set to false, quotation marks will be only
    added if needed (that is, if the string is not an identifier.)
    """
    # An empty string, or one containing non-identifier characters, cannot
    # be written as a bare identifier and must always be quoted.
    if not obj or any(c not in _IDENTIFIER_SET for c in obj):
        quote = True
    if quote:
        yield '"'
    # Escape backslashes first, so the backslashes introduced by the escape
    # sequences below are not themselves escaped a second time. Without this
    # a literal backslash round-trips incorrectly through
    # _decode_escaped_character.
    obj = obj.replace('\\', '\\\\')
    for key, value in _ESCAPE_CHARACTER_SET.items():
        obj = obj.replace(key, value)
    yield obj
    if quote:
        yield '"'
def _encode(obj, separators=(', ', '\n', ' = '), indent=0, level=0):
    """Encode *obj* as a stream of SJSON text fragments.

    Raises RuntimeError for types that cannot be represented.
    """
    if obj is None:
        yield 'null'
        return
    # True/False must be tested before Number: bool is a Number subclass,
    # and str(obj) would produce 'True'/'False' instead of 'true'/'false'.
    if obj is True:
        yield 'true'
    elif obj is False:
        yield 'false'
    elif isinstance(obj, numbers.Number):
        yield str(obj)
    elif isinstance(obj, str):
        # Strings are Sequences too, so this check must come before the
        # Sequence check to avoid encoding them as lists.
        yield from _escape_string(obj)
    elif isinstance(obj, collections.abc.Sequence):
        yield from _encode_list(obj, separators, indent, level)
    elif isinstance(obj, collections.abc.Mapping):
        yield from _encode_dict(obj, separators, indent, level)
    else:
        raise RuntimeError("Unsupported object type")
def _indent(level, indent):
return indent * level
def _encode_key(k):
    """Encode a dictionary key, adding quotation marks only when needed."""
    for fragment in _escape_string(k, False):
        yield fragment
def _encode_list(obj, separators, indent, level):
    """Encode a sequence as a '['-delimited, comma-separated SJSON list."""
    yield '['
    for index, element in enumerate(obj):
        if index > 0:
            yield separators[0]
        yield from _encode(element, separators, indent, level + 1)
    yield ']'
def _encode_dict(obj, separators, indent, level):
    """Encode a mapping as SJSON. The top-level map (level 0) is written
    without surrounding braces; nested maps are wrapped in '{' ... '}'."""
    if level > 0:
        yield '{\n'
    is_first = True
    for key, value in obj.items():
        if not is_first:
            yield '\n'
        is_first = False
        yield _indent(level, indent)
        yield from _encode_key(key)
        yield separators[2]
        yield from _encode(value, separators, indent, level + 1)
    yield '\n'
    yield _indent(level - 1, indent)
    if level > 0:
        yield '}'