1
Fork 0

bug(import_bsi): Fix incorrect BSIZ parsing

This commit is contained in:
Lucas Schwiderski 2021-07-26 20:12:34 +02:00
parent 64a15a0274
commit f032838230
Signed by: lucas
GPG key ID: AA12679AAA6DF4D8

View file

@ -15,13 +15,16 @@
# along with this program. If not, see <https://www.gnu.org/licenses/>. # along with this program. If not, see <https://www.gnu.org/licenses/>.
import sys
import zlib import zlib
import json import json
import bpy import bpy
import math import math
import traceback
from mathutils import Vector, Matrix from mathutils import Vector, Matrix
from bpy_extras.io_utils import unpack_list from bpy_extras.io_utils import unpack_list
from bitsquid import sjson
def parse_sjson(file_path, skip_editor_data=True): def parse_sjson(file_path, skip_editor_data=True):
@ -32,140 +35,136 @@ def parse_sjson(file_path, skip_editor_data=True):
Taken from `bsi_import` in the Vermintide 2 SDK, but slightly Taken from `bsi_import` in the Vermintide 2 SDK, but slightly
modified to fix some issues and improve readability. modified to fix some issues and improve readability.
""" """
return_dict = {}
try:
with open(file_path, 'rb') as f:
data = f.read()
if data[:4] == 'bsiz': with open(file_path, 'rb') as f:
data = zlib.decompress(data[8:]) data = f.read()
data = data.decode("utf-8") if data[:4] == b'bsiz':
data = zlib.decompress(data[8:])
file_lines = ['{\n'] data = data.decode("utf-8")
inside_list = False
check_editor_data = 1 if skip_editor_data else 0
for line in data.splitlines(): file_lines = ['{\n']
if check_editor_data: inside_list = False
if check_editor_data > 1: check_editor_data = 1 if skip_editor_data else 0
if line[0] == '}':
check_editor_data = 0
continue for line in data.splitlines():
elif line[:18] == 'editor_metadata = ': if check_editor_data:
check_editor_data = 2 if check_editor_data > 1:
continue if line[0] == '}':
check_editor_data = 0
if not line.strip(): continue
elif line[:18] == 'editor_metadata = ':
check_editor_data = 2
continue continue
# Strip trailing whitespace, if not line.strip():
# including line break and carriage return continue
line = line.rstrip()
if line[-1] in ['\n', '\r']:
line = line[:-1]
if ' = ' in line: # Strip trailing whitespace,
line_parts = line.split(' = ') # including line break and carriage return
line = line.rstrip()
if line[-1] in ['\n', '\r']:
line = line[:-1]
if '[ ' in line_parts[-1] and ' ]' in line_parts[-1]: if ' = ' in line:
new_end = '' line_parts = line.split(' = ')
end_parts = line_parts[-1].split(' ]')
short_len = len(end_parts) - 1
for i, end_part in enumerate(end_parts): if '[ ' in line_parts[-1] and ' ]' in line_parts[-1]:
if not end_part: new_end = ''
if i < short_len: end_parts = line_parts[-1].split(' ]')
new_end += ' ]' short_len = len(end_parts) - 1
continue
if '[ ' not in end_part: for i, end_part in enumerate(end_parts):
new_end += end_part + ' ]' if not end_part:
continue if i < short_len:
new_end += ' ]'
continue
sub_part_pre = end_part.rpartition('[ ') if '[ ' not in end_part:
new_end = ''.join(( new_end += end_part + ' ]'
new_end, continue
sub_part_pre[0],
'[ ',
', '.join(sub_part_pre[-1].split(' ')),
' ]'
))
if i < short_len and end_parts[i + 1] and end_parts[i + 1][:2] == ' [': sub_part_pre = end_part.rpartition('[ ')
new_end += ',' new_end = ''.join((
new_end,
line_parts[-1] = new_end sub_part_pre[0],
'[ ',
# Handle indentation ', '.join(sub_part_pre[-1].split(' ')),
if '\t' in line_parts[0]: ' ]'
line_start = line_parts[0].rpartition('\t')
else:
tab_len = len(line_parts[0]) - len(line_parts[0].lstrip())
if tab_len > 4:
line_start = [line_parts[0][:tab_len - 4], '' , line_parts[0].lstrip()]
elif tab_len > 0:
line_start = ['', '', line_parts[0].lstrip()]
else:
line_start = line_parts[0].rpartition('\t')
if line_start[-1][0] == '"':
new_line = ''.join((
''.join(line_start[:-1]),
line_start[-1],
': ',
''.join(line_parts[1:])
))
else:
new_line = ''.join((
''.join(line_start[:-1]),
'"',
line_start[-1],
'": ',
''.join(line_parts[1:])
)) ))
if not line_parts[-1][-1] == ',': if i < short_len and end_parts[i + 1] and end_parts[i + 1][:2] == ' [':
new_line += ',' new_end += ','
elif ']' in line or '}' in line:
new_line = line + ',' line_parts[-1] = new_end
if '} {' in new_line:
new_line = new_line.replace('} {', '}, {') # Handle indentation
if file_lines[-1][-2] == ',': if '\t' in line_parts[0]:
file_lines[-1] = file_lines[-1][:-2] + '\n' line_start = line_parts[0].rpartition('\t')
elif inside_list and ']' not in line:
tab_len = len(line) - len(line.lstrip())
new_line = line[:tab_len] + ', '.join([x for x in line[tab_len:].split(' ') if x])
else: else:
new_line = line tab_len = len(line_parts[0]) - len(line_parts[0].lstrip())
if new_line[-2:] in ['[,', '{,'] or new_line[-3:] in ['[ ,', '{ ,']: if tab_len > 4:
new_line = new_line[:-1] line_start = [line_parts[0][:tab_len - 4], '' , line_parts[0].lstrip()]
elif tab_len > 0:
line_start = ['', '', line_parts[0].lstrip()]
else:
line_start = line_parts[0].rpartition('\t')
if inside_list: if line_start[-1][0] == '"':
if ']' in line: new_line = ''.join((
inside_list = False ''.join(line_start[:-1]),
elif not new_line[-1] in ['[', '{', ',']: line_start[-1],
new_line += ',' ': ',
elif '[' in line: ''.join(line_parts[1:])
inside_list = True ))
else:
new_line = ''.join((
''.join(line_start[:-1]),
'"',
line_start[-1],
'": ',
''.join(line_parts[1:])
))
file_lines.append(''.join(('\t', new_line.lstrip(), '\n'))) if not line_parts[-1][-1] == ',':
new_line += ','
elif ']' in line or '}' in line:
new_line = line + ','
if '} {' in new_line:
new_line = new_line.replace('} {', '}, {')
if file_lines[-1][-2] == ',':
file_lines[-1] = file_lines[-1][:-2] + '\n'
elif inside_list and ']' not in line:
tab_len = len(line) - len(line.lstrip())
new_line = line[:tab_len] + ', '.join([x for x in line[tab_len:].split(' ') if x])
else:
new_line = line
# To save memory... if new_line[-2:] in ['[,', '{,'] or new_line[-3:] in ['[ ,', '{ ,']:
if len(file_lines) > 100000: new_line = new_line[:-1]
file_lines = [''.join(file_lines)]
if file_lines[-1][-2] == ',': if inside_list:
file_lines[-1] = file_lines[-1][:-2] + '\n' if ']' in line:
inside_list = False
elif not new_line[-1] in ['[', '{', ',']:
new_line += ','
elif '[' in line:
inside_list = True
file_lines.append('}\n') file_lines.append(''.join(('\t', new_line.lstrip(), '\n')))
return_dict = json.loads(''.join(file_lines))
except ValueError: # To save memory...
print(file_path.replace('\\', '/') + ': SJSON file contains a syntax error') if len(file_lines) > 100000:
return return_dict file_lines = [''.join(file_lines)]
if file_lines[-1][-2] == ',':
file_lines[-1] = file_lines[-1][:-2] + '\n'
file_lines.append('}\n')
return json.loads(''.join(file_lines))
def find(arr, f): def find(arr, f):
@ -180,14 +179,15 @@ def find(arr, f):
def create_mesh(self, context, name, node_data, geo_data): def create_mesh(self, context, name, node_data, geo_data):
""" """
Create a Blender object from a BSI node definition Create a Blender mesh object from a BSI node definition
and additional data from the file. and additional data from the file.
""" """
# A list of vectors that represent vertex locations. # A list of 3-dimensional vectors. Each vector encodes a vertex position.
vertices = [] vertices = []
# A list of vectors, where each vector contains three indices into `vertices`. # A list of vectors, where each vector contains three indices into
# Those three indices define the vertices that make up the face. # `vertices`. Those three indices define the vertices that make up
# the face.
faces = [] faces = []
uv_name = "UVMap" uv_name = "UVMap"
uvs = [] uvs = []
@ -204,11 +204,11 @@ def create_mesh(self, context, name, node_data, geo_data):
# stride would only be possible for objects that can be built # stride would only be possible for objects that can be built
# entirely from quads, which is very uncommon. # entirely from quads, which is very uncommon.
if stride != 3: if stride != 3:
raise RuntimeError("stride != 3 cannot be handled") raise NotImplementedError("stride != 3 cannot be handled")
# Get vertex positions. # Get vertex positions.
# Iterate over data in sets of three values that represent # Iterate over data in sets of three values. Each set
# `x`, `y` and `z`. # represents `x`, `y` and `z`.
for j in range(0, len(stream_data), 3): for j in range(0, len(stream_data), 3):
vertices.append(Vector(( vertices.append(Vector((
stream_data[j], stream_data[j],
@ -224,6 +224,9 @@ def create_mesh(self, context, name, node_data, geo_data):
index_stream[j + 1], index_stream[j + 1],
index_stream[j + 2], index_stream[j + 2],
))) )))
print(vertices)
print(faces)
elif channel["name"] == 'NORMAL': elif channel["name"] == 'NORMAL':
# Blender is able to create normals from the face definition # Blender is able to create normals from the face definition
# (i.e. the order in which the faces vertices were defined) # (i.e. the order in which the faces vertices were defined)
@ -464,10 +467,12 @@ def import_geometry(self, context, name, node_data, global_data):
obj = bpy.data.objects.new(mesh.name, mesh) obj = bpy.data.objects.new(mesh.name, mesh)
obj.matrix_world = Matrix() obj.matrix_world = Matrix()
# Check for a local offset
if "local" in node_data: if "local" in node_data:
mat = matrix_from_list(node_data["local"]) mat = matrix_from_list(node_data["local"])
obj.matrix_local = mat obj.matrix_local = mat
# Recurse into child nodes and parent them to the current object
if "children" in node_data: if "children" in node_data:
for child_name, child_data in node_data["children"].items(): for child_name, child_data in node_data["children"].items():
if child_data["parent"] != name: if child_data["parent"] != name:
@ -517,6 +522,9 @@ def import_node(self, context, name, node_data, global_data):
if has_geometry: if has_geometry:
return import_geometry(self, context, name, node_data, global_data) return import_geometry(self, context, name, node_data, global_data)
else: else:
# Only the root node should be left now.
# It needs slightly different treatment compared to a regular geometry
# node
if name != "root_point": if name != "root_point":
self.report({'WARNING'}, "Unknown kind of node: '{}'. Falling back to Empty.".format(name)) self.report({'WARNING'}, "Unknown kind of node: '{}'. Falling back to Empty.".format(name))
@ -550,7 +558,19 @@ def import_node(self, context, name, node_data, global_data):
def load(self, context, filepath, *, relpath=None): def load(self, context, filepath, *, relpath=None):
global_data = parse_sjson(filepath) try:
with open(filepath, 'rb') as f:
data = f.read()
if data[:4] == b'bsiz':
data = zlib.decompress(data[8:])
data = data.decode("utf-8")
global_data = sjson.loads(data)
except Exception:
self.report({'ERROR'}, "Failed to parse SJSON: {}".format(filepath))
traceback.print_exc(file=sys.stderr)
return {'CANCELLED'}
# Nothing to do if there are no nodes # Nothing to do if there are no nodes
if "nodes" not in global_data: if "nodes" not in global_data: