
bug(import_bsi): Fix incorrect BSIZ parsing

Lucas Schwiderski 2021-07-26 20:12:34 +02:00
parent 64a15a0274
commit f032838230
Signed by: lucas
GPG key ID: AA12679AAA6DF4D8


@@ -15,13 +15,16 @@
 # along with this program. If not, see <https://www.gnu.org/licenses/>.

+import sys
 import zlib
 import json
 import bpy
 import math
+import traceback

 from mathutils import Vector, Matrix
 from bpy_extras.io_utils import unpack_list
+from bitsquid import sjson


 def parse_sjson(file_path, skip_editor_data=True):
@@ -32,140 +35,136 @@ def parse_sjson(file_path, skip_editor_data=True):
     Taken from `bsi_import` in the Vermintide 2 SDK, but slightly
     modified to fix some issues and improve readability.
     """
-    return_dict = {}
-    try:
-        with open(file_path, 'rb') as f:
-            data = f.read()
-        if data[:4] == 'bsiz':
-            data = zlib.decompress(data[8:])
-        data = data.decode("utf-8")
+    with open(file_path, 'rb') as f:
+        data = f.read()
+
+    if data[:4] == b'bsiz':
+        data = zlib.decompress(data[8:])
+
+    data = data.decode("utf-8")

     file_lines = ['{\n']
     inside_list = False
     check_editor_data = 1 if skip_editor_data else 0

     for line in data.splitlines():
         if check_editor_data:
             if check_editor_data > 1:
                 if line[0] == '}':
                     check_editor_data = 0
                 continue
             elif line[:18] == 'editor_metadata = ':
                 check_editor_data = 2
                 continue

         if not line.strip():
             continue

         # Strip trailing whitespace,
         # including line break and carriage return
         line = line.rstrip()
         if line[-1] in ['\n', '\r']:
             line = line[:-1]

         if ' = ' in line:
             line_parts = line.split(' = ')

             if '[ ' in line_parts[-1] and ' ]' in line_parts[-1]:
                 new_end = ''
                 end_parts = line_parts[-1].split(' ]')
                 short_len = len(end_parts) - 1

                 for i, end_part in enumerate(end_parts):
                     if not end_part:
                         if i < short_len:
                             new_end += ' ]'
                         continue

                     if '[ ' not in end_part:
                         new_end += end_part + ' ]'
                         continue

                     sub_part_pre = end_part.rpartition('[ ')
                     new_end = ''.join((
                         new_end,
                         sub_part_pre[0],
                         '[ ',
                         ', '.join(sub_part_pre[-1].split(' ')),
                         ' ]'
                     ))

                     if i < short_len and end_parts[i + 1] and end_parts[i + 1][:2] == ' [':
                         new_end += ','

                 line_parts[-1] = new_end

             # Handle indentation
             if '\t' in line_parts[0]:
                 line_start = line_parts[0].rpartition('\t')
             else:
                 tab_len = len(line_parts[0]) - len(line_parts[0].lstrip())
                 if tab_len > 4:
                     line_start = [line_parts[0][:tab_len - 4], '', line_parts[0].lstrip()]
                 elif tab_len > 0:
                     line_start = ['', '', line_parts[0].lstrip()]
                 else:
                     line_start = line_parts[0].rpartition('\t')

             if line_start[-1][0] == '"':
                 new_line = ''.join((
                     ''.join(line_start[:-1]),
                     line_start[-1],
                     ': ',
                     ''.join(line_parts[1:])
                 ))
             else:
                 new_line = ''.join((
                     ''.join(line_start[:-1]),
                     '"',
                     line_start[-1],
                     '": ',
                     ''.join(line_parts[1:])
                 ))

             if not line_parts[-1][-1] == ',':
                 new_line += ','
         elif ']' in line or '}' in line:
             new_line = line + ','
             if '} {' in new_line:
                 new_line = new_line.replace('} {', '}, {')
             if file_lines[-1][-2] == ',':
                 file_lines[-1] = file_lines[-1][:-2] + '\n'
         elif inside_list and ']' not in line:
             tab_len = len(line) - len(line.lstrip())
             new_line = line[:tab_len] + ', '.join([x for x in line[tab_len:].split(' ') if x])
         else:
             new_line = line

         if new_line[-2:] in ['[,', '{,'] or new_line[-3:] in ['[ ,', '{ ,']:
             new_line = new_line[:-1]

         if inside_list:
             if ']' in line:
                 inside_list = False
             elif not new_line[-1] in ['[', '{', ',']:
                 new_line += ','
         elif '[' in line:
             inside_list = True

         file_lines.append(''.join(('\t', new_line.lstrip(), '\n')))

         # To save memory...
         if len(file_lines) > 100000:
             file_lines = [''.join(file_lines)]

     if file_lines[-1][-2] == ',':
         file_lines[-1] = file_lines[-1][:-2] + '\n'

-        file_lines.append('}\n')
-        return_dict = json.loads(''.join(file_lines))
-    except ValueError:
-        print(file_path.replace('\\', '/') + ': SJSON file contains a syntax error')
-    return return_dict
+    file_lines.append('}\n')
+    return json.loads(''.join(file_lines))


 def find(arr, f):
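The essence of the fix, pulled out of the hunk above: the magic must be compared against a byte string before anything is decoded, since on Python 3 `data[:4] == 'bsiz'` compares bytes with str and is always False, so compressed files were never decompressed and the subsequent decode raised. A minimal standalone sketch; the helper name read_bsi_text is hypothetical, and the assumption that bytes 4..8 hold the uncompressed size is not confirmed by the diff:

import zlib

def read_bsi_text(file_path):
    """Return the decoded SJSON text of a (possibly compressed) BSI file."""
    with open(file_path, 'rb') as f:
        data = f.read()

    # Compare against bytes, not str, and decompress before decoding.
    if data[:4] == b'bsiz':
        # Skip the 4-byte magic plus 4 more bytes (presumably an
        # uncompressed-size field; assumption) to reach the zlib stream.
        data = zlib.decompress(data[8:])

    return data.decode("utf-8")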
@@ -180,14 +179,15 @@ def find(arr, f):
 def create_mesh(self, context, name, node_data, geo_data):
     """
-    Create a Blender object from a BSI node definition
+    Create a Blender mesh object from a BSI node definition
     and additional data from the file.
     """
-    # A list of vectors that represent vertex locations.
+    # A list of 3-dimensional vectors. Each vector encodes a vertex position.
     vertices = []
-    # A list of vectors, where each vector contains three indices into `vertices`.
-    # Those three indices define the vertices that make up the face.
+    # A list of vectors, where each vector contains three indices into
+    # `vertices`. Those three indices define the vertices that make up
+    # the face.
     faces = []
     uv_name = "UVMap"
     uvs = []
@@ -204,11 +204,11 @@ def create_mesh(self, context, name, node_data, geo_data):
             # stride would only be possible for objects that can be built
             # entirely from quads, which is very uncommon.
             if stride != 3:
-                raise RuntimeError("stride != 3 cannot be handled")
+                raise NotImplementedError("stride != 3 cannot be handled")

             # Get vertex positions.
-            # Iterate over data in sets of three values that represent
-            # `x`, `y` and `z`.
+            # Iterate over data in sets of three values. Each set
+            # represents `x`, `y` and `z`.
             for j in range(0, len(stream_data), 3):
                 vertices.append(Vector((
                     stream_data[j],
@@ -224,6 +224,9 @@ def create_mesh(self, context, name, node_data, geo_data):
                     index_stream[j + 1],
                     index_stream[j + 2],
                 )))
+
+            print(vertices)
+            print(faces)
         elif channel["name"] == 'NORMAL':
             # Blender is able to create normals from the face definition
             # (i.e. the order in which the face's vertices were defined)
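Condensed, the POSITION and INDEX handling above amounts to regrouping two flat streams into triples, which Blender can consume directly. A sketch under the stride-3 assumption; build_mesh is a hypothetical helper, not a function from this add-on:

import bpy
from mathutils import Vector

def build_mesh(name, stream_data, index_stream):
    # Regroup the flat float stream into (x, y, z) vertex positions.
    vertices = [Vector(stream_data[j:j + 3])
                for j in range(0, len(stream_data), 3)]
    # Regroup the flat index stream into triangles (stride 3).
    faces = [tuple(index_stream[j:j + 3])
             for j in range(0, len(index_stream), 3)]

    mesh = bpy.data.meshes.new(name)
    mesh.from_pydata(vertices, [], faces)
    mesh.update()
    return mesh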
@@ -464,10 +467,12 @@ def import_geometry(self, context, name, node_data, global_data):
     obj = bpy.data.objects.new(mesh.name, mesh)
     obj.matrix_world = Matrix()

+    # Check for a local offset
     if "local" in node_data:
         mat = matrix_from_list(node_data["local"])
         obj.matrix_local = mat

+    # Recurse into child nodes and parent them to the current object
     if "children" in node_data:
         for child_name, child_data in node_data["children"].items():
             if child_data["parent"] != name:
@@ -517,6 +522,9 @@ def import_node(self, context, name, node_data, global_data):
     if has_geometry:
         return import_geometry(self, context, name, node_data, global_data)
     else:
+        # Only the root node should be left now.
+        # It needs slightly different treatment compared to a regular
+        # geometry node.
         if name != "root_point":
             self.report({'WARNING'}, "Unknown kind of node: '{}'. Falling back to Empty.".format(name))
@@ -550,7 +558,19 @@ def import_node(self, context, name, node_data, global_data):
 def load(self, context, filepath, *, relpath=None):
-    global_data = parse_sjson(filepath)
+    try:
+        with open(filepath, 'rb') as f:
+            data = f.read()
+
+        if data[:4] == b'bsiz':
+            data = zlib.decompress(data[8:])
+
+        data = data.decode("utf-8")
+        global_data = sjson.loads(data)
+    except Exception:
+        self.report({'ERROR'}, "Failed to parse SJSON: {}".format(filepath))
+        traceback.print_exc(file=sys.stderr)
+        return {'CANCELLED'}

     # Nothing to do if there are no nodes
     if "nodes" not in global_data: