author    Owl <isaclien9752@gmail.com>  2025-09-26 14:32:34 -0400
committer Owl <isaclien9752@gmail.com>  2025-09-26 14:32:34 -0400
commit    70e647076418d114111aa76b5d3639a5b4271e94 (patch)
tree      2e67e3c523818c7628497ff2f6e9e5c9645814d2
parent    45dc6171705fd074657b0ed5bde2502431b74c4b (diff)
download  blenxy-70e647076418d114111aa76b5d3639a5b4271e94.tar.gz
          blenxy-70e647076418d114111aa76b5d3639a5b4271e94.zip
bcsv and other stuff
-rw-r--r--  __init__.py                   88
-rw-r--r--  basic_settings.py             21
-rw-r--r--  bck_export.py                 21
-rw-r--r--  bck_funcs.py                 167
-rw-r--r--  bck_import.py                 20
-rw-r--r--  bcsv_editing.py             1391
-rw-r--r--  bcsv_funcs.py               1169
-rw-r--r--  bcsv_hashes.txt               72
-rw-r--r--  blender_funcs.py               6
-rw-r--r--  collada_superbmd_export.py     4
-rw-r--r--  file_ops.py                   23
-rw-r--r--  smg_common.py                  1
-rw-r--r--  start.bat                     95
-rwxr-xr-x  start.sh                      10
14 files changed, 2950 insertions, 138 deletions
diff --git a/__init__.py b/__init__.py
index 6628161..0e6316c 100644
--- a/__init__.py
+++ b/__init__.py
@@ -3,36 +3,23 @@
# https://web.archive.org/web/20210925181415/https://blenderbrew.com/custom-application-templates-in-blender/
# elemental python modules blenxy needs
-# that the python interpreter must have built in
-import bpy, importlib, sys, subprocess, os, shutil
-import math, mathutils, warnings, struct, site
+# that the python interpreter to be used must have built in
+# python interpreter to be used has to be an external one (I am sick of the internal one)
+import bpy, importlib, sys, subprocess, os, shutil, io, copy
+import math, mathutils, warnings, struct, site, re, random
from . import file_ops # uses os and shutil modules
# elemental python modules blenxy needs
# that the python interpreter probably does not have built in/needs to be updated
-pip_modules = ["pip", "wheel", "setuptools"]
-new_modules = ["lxml", "numpy"]
+NEEDED_MODULE_NAMES = ["pip", "lxml", "numpy"]
# check which of the new modules needs to be installed
def new_mod_check(mod_names):
# variable to return
rtn = []
- # traverse into all the new_modules
- for mod_name in mod_names:
- # skip numpy if the numpy_bad folder was created
- if (mod_name == "numpy"):
- for path in site.getsitepackages():
- numpy_path = path + "/numpy"
- numpy_bad_path = path + "/numpy_bad"
- # rename the existing numpy folder to numpy bad and install newer numpy
- if (("2.79" in path) and ("site-packages" in path)
- and (file_ops.f_exists(numpy_bad_path) == False)):
- file_ops.rename(numpy_path, numpy_bad_path)
- rtn.append("numpy")
- break
- continue
-
+ # traverse all the given modules
+ for mod_name in mod_names:
# try importing the module
try:
importlib.import_module(mod_name)
@@ -41,23 +28,29 @@ def new_mod_check(mod_names):
# done!
return rtn
-
+
+# find python interpreter
+def find_python_interpreter():
+ # start.bat/start.sh passes the python interpreter
+ # path to Blender as an argument (the only argument it sends)
+ return sys.argv[2]
+
# install given modules with pip but be sure to have pip first
def new_mod_install(mod_names):
# get python's interpreter binary path
- py_bin = bpy.app.binary_path_python
+ py_bin = find_python_interpreter()
pip_install = [py_bin, "-B", "-m", "pip", "install",
"--trusted-host", "pypi.python.org",
"--trusted-host", "files.pythonhosted.org",
"--trusted-host", "pypi.org",
- "-U", "--force-reinstall", "--only-binary", ":all:"]
+ "-U", "--only-binary", ":all:"]
# check if the pip module is available
try:
importlib.import_module("pip")
- except: # install pip
+ except: # install pip with ensurepip
subprocess.run([py_bin, "-B", "-m", "ensurepip"])
- subprocess.run(pip_install + pip_modules)
+ subprocess.run(pip_install + ["pip", "wheel", "setuptools"])
# install the rest of the modules
if (mod_names != []):
@@ -68,13 +61,14 @@ def new_mod_install(mod_names):
return False
# all blenxy custom modules
-blenxy_modules = ["basic_settings", # user settings blenxy has
+BLENXY_MODULES = ["basic_settings", # user settings blenxy has
"collada_superbmd_import", # custom importer for SuperBMD collada files
"collada_superbmd_export", # custom exporter for SuperBMD collada files
"obj_kcl_export", # custom exporter for OBJ files (collision)
"obj_neokclcreate_import", # custom importer for OBJ files (collision, NeoKCLCreate)
"bck_import", # custom importer for SMG BCK files
- "bck_export"] # custom exporter for SMG BCK files
+ "bck_export", # custom exporter for SMG BCK files
+ "bcsv_editing"] # custom interface for BCSV loading
# function used to refresh blenxy's custom modules
# so that they can be updated without closing blender
@@ -82,11 +76,11 @@ blenxy_modules = ["basic_settings", # user settings blenxy has
def unload_blenxy_stuff(dummy):
# execute the unregister functions from each module
# these functions won't die if the stuff to unregister isn't there
- for mod_name in blenxy_modules:
+ for mod_name in BLENXY_MODULES:
mod = importlib.import_module("." + mod_name, __package__)
mod.unregister()
# reload the modules in case they were modified (dev stuff)
- for mod_name in blenxy_modules:
+ for mod_name in BLENXY_MODULES:
mod = importlib.import_module("." + mod_name, __package__)
importlib.reload(mod)
@@ -94,20 +88,44 @@ def unload_blenxy_stuff(dummy):
def register():
# print the welcome
print("\nWelcome to Blenxy!")
- print("Running on Blender: %s\n" % (bpy.app.version.__str__()))
+ print("Running on Blender: %s" % (bpy.app.version.__str__()))
+ print("Using interpreter: %s\n" % (find_python_interpreter()))
+
+ # "fix" python3.7/lib/python3.7/ctypes/__init__.py
+ ctypes_path = importlib.__path__[0] # I assume this path is alright
+ ctypes_path = file_ops.get_path_str(ctypes_path.replace("importlib", "ctypes") + "/__init__.py")
+ f = open(ctypes_path, "r", encoding = "utf-8")
+ all_lines = f.readlines()
+ f.close()
+ # comment this line out
+ # https://projects.blender.org/blender/blender/issues/84752
+ needs_update = False
+ for i in range(len(all_lines)):
+ if ((all_lines[i].startswith("#") == False)
+ and ("CFUNCTYPE(c_int)(lambda: None)" in all_lines[i])):
+ all_lines[i] = "#" + all_lines[i]
+ needs_update = True
+ break
+ # save the file
+ if (needs_update):
+ print("Patching %s..." % (ctypes_path))
+ f = open(ctypes_path, "w", encoding = "utf-8")
+ for line in all_lines:
+ f.write(line)
+ f.close()
# checking python modules
- if (new_mod_install(new_mod_check(new_modules)) == True):
+ if (new_mod_install(new_mod_check(NEEDED_MODULE_NAMES)) == True):
print("New modules installed. Exiting...")
exit(0)
- for mod_name in new_modules:
+ for mod_name in NEEDED_MODULE_NAMES:
mod = importlib.import_module(mod_name)
print("%s %s is installed!" % (mod_name, mod.__version__))
# add this function to load_post first
bpy.app.handlers.load_post.append(unload_blenxy_stuff)
# add the register functions to load_post
- for mod_name in blenxy_modules:
+ for mod_name in BLENXY_MODULES:
mod = importlib.import_module("." + mod_name, __package__)
bpy.app.handlers.load_post.append(mod.register)
@@ -117,7 +135,7 @@ def unregister():
print("\nLeaving Blenxy!\n")
# remove the stuff added to the load_post list
bpy.app.handlers.load_post.remove(unload_blenxy_stuff)
- for mod_name in blenxy_modules:
+ for mod_name in BLENXY_MODULES:
mod = importlib.import_module("." + mod_name, __package__)
for func in bpy.app.handlers.load_post:
if (mod.__name__ in func.__module__):
@@ -125,6 +143,6 @@ def unregister():
break
# execute module's unregister functions
- for mod_name in blenxy_modules:
+ for mod_name in BLENXY_MODULES:
mod = importlib.import_module("." + mod_name, __package__)
mod.unregister()
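A note on the new bootstrap flow in __init__.py: start.bat/start.sh is assumed to launch Blender with the external interpreter's path as the one extra argument (hence sys.argv[2]), and pip gets bootstrapped through ensurepip before anything else is installed. A minimal standalone sketch of that flow, with a placeholder interpreter path:

    import importlib, subprocess

    py_bin = "/usr/bin/python3"  # stand-in for find_python_interpreter()
    try:
        importlib.import_module("pip")
    except ImportError:  # bootstrap pip first, like new_mod_install() does
        subprocess.run([py_bin, "-B", "-m", "ensurepip"])
        subprocess.run([py_bin, "-B", "-m", "pip", "install",
                        "-U", "--only-binary", ":all:",
                        "pip", "wheel", "setuptools"])
    # then install the modules blenxy needs
    subprocess.run([py_bin, "-B", "-m", "pip", "install", "lxml", "numpy"])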
diff --git a/basic_settings.py b/basic_settings.py
index c1036ef..2a33eb0 100644
--- a/basic_settings.py
+++ b/basic_settings.py
@@ -1,4 +1,5 @@
import bpy
+from . import blender_funcs
# SMG was made under the centimeter unit
# SuperBMD assumes everything is in meters so
@@ -10,6 +11,7 @@ import bpy
# register func
@bpy.app.handlers.persistent
def register(dummy):
+
# the holy variable
scene = bpy.context.scene
print("Setting length/rotation units...")
@@ -36,15 +38,16 @@ def register(dummy):
break
# import Mario's model from DAE file included in the template
- # get blenxy template location for that and select it
- # but first delete all objects in scene so it is clean
- bpy.ops.object.select_all(action = "SELECT")
- bpy.ops.object.delete()
+ # get blenxy's template location for that
+ bpy.ops.object.select_all(action = "DESELECT")
+ # remove the default cube if present
+ if ("Cube" in bpy.data.objects):
+ bpy.data.objects.remove(bpy.data.objects["Cube"])
print("Importing Mario's Model...")
blenxy_path = bpy.utils.user_resource("SCRIPTS", "startup/bl_app_templates_user/blenxy/")
bpy.ops.wm.collada_import(filepath = blenxy_path + "Mario.dae")
Mario = bpy.data.objects.get("Mario")
- Mario.select = True
+ blender_funcs.select_obj(Mario, False, "OBJECT")
# set a SMG axis for reference when doing models
# it is just a visual cue to show how the model will appear in game
@@ -86,11 +89,11 @@ def register(dummy):
scene.frame_preview_end = 100
# ~ bpy.context.user_preferences.view.use_mouse_depth_navigate = True
# ~ bpy.context.user_preferences.view.use_zoom_to_mouse = True
+ # ~ bpy.context.user_preferences.view.show_developer_ui = True
+ bpy.context.user_preferences.system.use_international_fonts = True
+ bpy.context.user_preferences.system.select_method = "GL_QUERY"
print("Done with the basic settings!\n")
# unregister func
def unregister():
- # the holy variable again
- scene = bpy.context.scene
- # remove the galaxy unit
- bpy.ops.scene.units_length_preset_add(remove_active = True)
+ return
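blender_funcs.select_obj() replaces the bare `Mario.select = True` above; its body is not part of this diff, so the following is only a guess at what a helper with that call shape does under the 2.79 API used throughout this commit:

    # hypothetical reconstruction of blender_funcs.select_obj(obj, extend, mode);
    # the real implementation lives in blender_funcs.py
    def select_obj(obj, extend, mode):
        if (extend == False):  # drop the current selection first
            bpy.ops.object.select_all(action = "DESELECT")
        obj.select = True  # 2.79-style selection flag
        bpy.context.scene.objects.active = obj  # make it the active object
        bpy.ops.object.mode_set(mode = mode)  # e.g. "OBJECT"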
diff --git a/bck_export.py b/bck_export.py
index 4ed7d4e..0fc9a20 100644
--- a/bck_export.py
+++ b/bck_export.py
@@ -108,7 +108,8 @@ def export_bck_func(options, context):
if (options.euler_mode != pose_bone.rotation_mode):
# generate the temp fcurves but first check if they are animation tracks named that way already
for fcurve in list(armature.animation_data.action.fcurves):
- if (fcurve.group.name == temp_fcurve_group
+ if (fcurve.group != None
+ and fcurve.group.name == temp_fcurve_group
and fcurve.data_path == temp_fcurve_data_path): # delete this animation track
armature.animation_data.action.fcurves.remove(fcurve)
for j in range(3):
@@ -206,7 +207,8 @@ def export_bck_func(options, context):
elif (k == 1 or k == 4 or k == 7):
value = round(new_mat.to_euler(options.euler_mode)[int((k - 1) / 3)], options.rounding_vec[1])
elif (k == 2 or k == 5 or k == 8):
- value = round(100 * new_mat.to_translation()[int((k - 2) / 3)], options.rounding_vec[2])
+ value = round((100 * options.scale_transl)
+ * new_mat.to_translation()[int((k - 2) / 3)], options.rounding_vec[2])
# 100 times because of blenxy's coordinates
bck_anim.anim_data[i].comp[k].value.append(value)
@@ -328,7 +330,7 @@ def export_bck_func(options, context):
# ~ print(bck_anim)
# create a raw bck struct and write the BCK file
- raw = bck_funcs.create_smg_bck_raw(bck_anim)
+ raw = bck_funcs.create_smg_bck_raw(bck_anim, options.use_std_pad_size)
# ~ print(raw)
endian_ch = ">" # big endian character for struct.unpack()
if (options.endian == "OPT_B"): # little character
@@ -430,7 +432,7 @@ class export_bck(Operator, ExportHelper):
)
)
mult_order = EnumProperty(
- name = "Scale/Rot/Transl mult order",
+ name = "Scale/Rot/Transl order",
description = "Export animations in the specified matrix multiplication order",
default = "SRT",
items = (
@@ -442,6 +444,17 @@ class export_bck(Operator, ExportHelper):
("SRT", "SRT", "Scaling first, Rotation second, Translation last")
)
)
+ scale_transl = FloatProperty(
+ name = "Scale translation values:",
+ description = "Scale up/down translation values in all animation tracks.",
+ default = 1,
+ min = 10e-9,
+ )
+ use_std_pad_size = BoolProperty(
+ name = "Keep \"standard\" padding sizes",
+ description = "Keep the usual binary padding sizes when building the BCK.",
+ default = True
+ )
# what the importer actually does
def execute(self, context):
return export_bck_func(self, context)
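The k loop above walks the nine animation components per bone; the index layout it relies on (matching the comp lists in bck_funcs.py) boils down to:

    # comp index k -> channel is k % 3 (0 scale, 1 rotation, 2 translation),
    # axis is k // 3 (0 X, 1 Y, 2 Z)
    def comp_meaning(k):
        return (["scale", "rot", "transl"][k % 3], "XYZ"[k // 3])

    # e.g. comp_meaning(5) == ("transl", "Y"); translation values additionally
    # get scaled by 100 * scale_transl (blenxy meters -> SMG centimeters)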
diff --git a/bck_funcs.py b/bck_funcs.py
index e8482f8..73fd9de 100644
--- a/bck_funcs.py
+++ b/bck_funcs.py
@@ -1,5 +1,6 @@
-import os, struct, math
+import struct, math
from . import smg_common
+from . import file_ops
# python file to read the important information out of a BCK file
# will try its best to decode the information either on big/little endian
@@ -41,11 +42,11 @@ class smg_bck_raw:
def __str__(self):
rtn = " ### HEADER START\n"
- rtn += " Magic: %s\n" % (self.magic.__str__())
- rtn += " File type: %s\n" % (self.ftype.__str__())
- rtn += " File size: %s\n" % (self.file_size.__str__())
- rtn += " Section count: %s\n" % (self.section_count.__str__())
- rtn += " Unknown 1: %s\n" % (self.unknown1.__str__())
+ rtn += " Magic: %s\n" % (self.magic.__str__())
+ rtn += " File type: %s\n" % (self.ftype.__str__())
+ rtn += " File size: %s\n" % (self.file_size.__str__())
+ rtn += " Section count: %s\n" % (self.section_count.__str__())
+ rtn += " Unknown 1: %s\n" % (self.unknown1.__str__())
rtn += " ### HEADER END\n"
return rtn
@@ -72,26 +73,26 @@ class smg_bck_raw:
def __str__(self):
rtn = " ### ANK1 - START\n"
- rtn += " Magic: %s\n" % (self.magic.__str__())
- rtn += " Section size: %s\n" % (self.size.__str__())
- rtn += " Loop mode: %s\n" % (self.loop_mode.__str__())
- rtn += " Rotation left shift: %s\n" % (self.rot_lshift.__str__())
- rtn += " Anim length: %s\n" % (self.anim_length.__str__())
- rtn += " Bone count: %s\n" % (self.bone_count.__str__())
- rtn += " Scale array length: %s\n" % (self.scale_arr_length.__str__())
- rtn += " Rotation array length: %s\n" % (self.rot_arr_length.__str__())
- rtn += " Translation array length: %s\n" % (self.transl_arr_length.__str__())
- rtn += " Anim data offset: %s\n" % (self.anim_data_offset.__str__())
- rtn += " Scale array offset: %s\n" % (self.scale_arr_offset.__str__())
- rtn += " Rotation array offset: %s\n" % (self.rot_arr_offset.__str__())
- rtn += " Translation array offset: %s\n" % (self.transl_arr_offset.__str__())
- rtn += " Animation data:\n"
+ rtn += " Magic: %s\n" % (self.magic.__str__())
+ rtn += " Section size: %s\n" % (self.size.__str__())
+ rtn += " Loop mode: %s\n" % (self.loop_mode.__str__())
+ rtn += " Rotation left shift: %s\n" % (self.rot_lshift.__str__())
+ rtn += " Anim length: %s\n" % (self.anim_length.__str__())
+ rtn += " Bone count: %s\n" % (self.bone_count.__str__())
+ rtn += " Scale array length: %s\n" % (self.scale_arr_length.__str__())
+ rtn += " Rotation array length: %s\n" % (self.rot_arr_length.__str__())
+ rtn += " Translation array length: %s\n" % (self.transl_arr_length.__str__())
+ rtn += " Anim data offset: %s\n" % (self.anim_data_offset.__str__())
+ rtn += " Scale array offset: %s\n" % (self.scale_arr_offset.__str__())
+ rtn += " Rotation array offset: %s\n" % (self.rot_arr_offset.__str__())
+ rtn += " Translation array offset: %s\n" % (self.transl_arr_offset.__str__())
+ rtn += " Animation data: kf count, data index, interp mode\n"
for i in range(len(self.anim_data)):
rtn += " Bone %d\n" % (i)
rtn += "%s" % (self.anim_data[i].__str__())
- rtn += " Scale array: %s\n" % (self.scale_arr.__str__())
- rtn += " Rotation array: %s\n" % (self.rot_arr.__str__())
- rtn += " Translation array: %s\n" % (self.transl_arr.__str__())
+ rtn += " Scale array: %s\n" % (self.scale_arr.__str__())
+ rtn += " Rotation array: %s\n" % (self.rot_arr.__str__())
+ rtn += " Translation array: %s\n" % (self.transl_arr.__str__())
rtn += " ### ANK1 - END\n"
return rtn
@@ -109,15 +110,15 @@ class smg_bck_raw:
self.comp()] # transl z
def __str__(self):
- rtn = " Scale X: %s" % (self.comp[0].__str__())
- rtn += " Rot X: %s" % (self.comp[1].__str__())
- rtn += " Transl X: %s" % (self.comp[2].__str__())
- rtn += " Scale Y: %s" % (self.comp[3].__str__())
- rtn += " Rot Y: %s" % (self.comp[4].__str__())
- rtn += " Transl Y: %s" % (self.comp[5].__str__())
- rtn += " Scale Z: %s" % (self.comp[6].__str__())
- rtn += " Rot Z: %s" % (self.comp[7].__str__())
- rtn += " Transl Z: %s" % (self.comp[8].__str__())
+ rtn = " Scale X: %s" % (self.comp[0].__str__())
+ rtn += " Rot X: %s" % (self.comp[1].__str__())
+ rtn += " Transl X: %s" % (self.comp[2].__str__())
+ rtn += " Scale Y: %s" % (self.comp[3].__str__())
+ rtn += " Rot Y: %s" % (self.comp[4].__str__())
+ rtn += " Transl Y: %s" % (self.comp[5].__str__())
+ rtn += " Scale Z: %s" % (self.comp[6].__str__())
+ rtn += " Rot Z: %s" % (self.comp[7].__str__())
+ rtn += " Transl Z: %s" % (self.comp[8].__str__())
return rtn
@@ -143,12 +144,12 @@ class smg_bck_anim:
def __str__(self):
rtn = "### SMG_BCK_ANIM START\n"
- rtn += " Loop mode: %s\n" % (self.loop_mode.__str__())
- rtn += " Animation length: %s\n" % (self.anim_length.__str__())
- rtn += " Bone count: %s\n" % (self.bone_count.__str__())
- rtn += " Animation data:\n"
+ rtn += "Loop mode: %s\n" % (self.loop_mode.__str__())
+ rtn += "Animation length: %s\n" % (self.anim_length.__str__())
+ rtn += "Bone count: %s\n" % (self.bone_count.__str__())
+ rtn += "Animation data:\n"
for i in range(len(self.anim_data)):
- rtn += " Bone %d\n" % (i)
+ rtn += " Bone %d\n" % (i)
rtn += "%s" % (self.anim_data[i].__str__())
rtn += "### SMG_BCK_ANIM END"
return rtn
@@ -167,15 +168,15 @@ class smg_bck_anim:
self.comp()] # transl z
def __str__(self):
- rtn = " Scale X: %s" % (self.comp[0].__str__())
- rtn += " Rot X: %s" % (self.comp[1].__str__())
- rtn += " Transl X: %s" % (self.comp[2].__str__())
- rtn += " Scale Y: %s" % (self.comp[3].__str__())
- rtn += " Rot Y: %s" % (self.comp[4].__str__())
- rtn += " Transl Y: %s" % (self.comp[5].__str__())
- rtn += " Scale Z: %s" % (self.comp[6].__str__())
- rtn += " Rot Z: %s" % (self.comp[7].__str__())
- rtn += " Transl Z: %s" % (self.comp[8].__str__())
+ rtn = " Scale X: %s" % (self.comp[0].__str__())
+ rtn += " Rot X: %s" % (self.comp[1].__str__())
+ rtn += " Transl X: %s" % (self.comp[2].__str__())
+ rtn += " Scale Y: %s" % (self.comp[3].__str__())
+ rtn += " Rot Y: %s" % (self.comp[4].__str__())
+ rtn += " Transl Y: %s" % (self.comp[5].__str__())
+ rtn += " Scale Z: %s" % (self.comp[6].__str__())
+ rtn += " Rot Z: %s" % (self.comp[7].__str__())
+ rtn += " Transl Z: %s" % (self.comp[8].__str__())
return rtn
# anim_data
@@ -190,10 +191,10 @@ class smg_bck_anim:
def __str__(self):
rtn = "%s %s\n" % (self.kf_count, self.interp_mode)
- rtn += " time: %s\n" % (self.time.__str__())
- rtn += " value: %s\n" % (self.value.__str__())
- rtn += " in slope: %s\n" % (self.in_slope.__str__())
- rtn += " out slope: %s\n" % (self.out_slope.__str__())
+ rtn += " time: %s\n" % (self.time.__str__())
+ rtn += " value: %s\n" % (self.value.__str__())
+ rtn += " in slope: %s\n" % (self.in_slope.__str__())
+ rtn += " out slope: %s\n" % (self.out_slope.__str__())
return rtn
@@ -201,7 +202,6 @@ class smg_bck_anim:
bck_raw_info = None
bck_raw_error_str = "bck-raw-error: "
bck_anim_error_str = "bck-anim-error: "
-pad_str = "hoot"
f = None
# main function
@@ -286,7 +286,8 @@ def read_bck_file(filepath):
def pre_read_bck_file(filepath):
# check its size first
- if (os.path.getsize(filepath) <= 32):
+ file_size = file_ops.get_file_size(filepath)
+ if (file_size <= 32):
return bck_raw_error_str + "file size"
# make global variables editable
@@ -325,7 +326,7 @@ def pre_read_bck_file(filepath):
# file size
bck_raw_info.header.file_size = struct.unpack(endian_ch + "I", f.read(4))[0]
- if (bck_raw_info.header.file_size != os.path.getsize(filepath)):
+ if (bck_raw_info.header.file_size != file_ops.get_file_size(filepath)):
return bck_raw_error_str + "file size"
# section count
@@ -375,23 +376,23 @@ def pre_read_bck_file(filepath):
# bone animation data offset
bck_raw_info.ank1.anim_data_offset = struct.unpack(endian_ch + "I", f.read(4))[0]
- if (bck_raw_info.ank1.anim_data_offset
- + (bck_raw_info.ank1.bone_count * 9 * 6) > bck_raw_info.ank1.size):
+ if ((bck_raw_info.ank1.anim_data_offset + (bck_raw_info.ank1.bone_count * 9 * 6) > bck_raw_info.ank1.size)
+ or (bck_raw_info.ank1.anim_data_offset % 4 != 0)):
return bck_raw_error_str + "ank1 bone animation data offset"
# scale array offset
bck_raw_info.ank1.scale_arr_offset = struct.unpack(endian_ch + "I", f.read(4))[0]
- if (bck_raw_info.ank1.scale_arr_offset
- + (bck_raw_info.ank1.scale_arr_length * 4) > bck_raw_info.ank1.size):
+ if ((bck_raw_info.ank1.scale_arr_offset + (bck_raw_info.ank1.scale_arr_length * 4) > bck_raw_info.ank1.size)
+ or (bck_raw_info.ank1.scale_arr_offset % 4 != 0)):
return bck_raw_error_str + "ank1 scale array offset"
# rotation array offset
bck_raw_info.ank1.rot_arr_offset = struct.unpack(endian_ch + "I", f.read(4))[0]
- if (bck_raw_info.ank1.rot_arr_offset
- + (bck_raw_info.ank1.rot_arr_length * 2) > bck_raw_info.ank1.size):
+ if ((bck_raw_info.ank1.rot_arr_offset + (bck_raw_info.ank1.rot_arr_length * 2) > bck_raw_info.ank1.size)
+ or (bck_raw_info.ank1.rot_arr_offset % 4 != 0)):
return bck_raw_error_str + "ank1 rotation array offset"
# translation array offset
bck_raw_info.ank1.transl_arr_offset = struct.unpack(endian_ch + "I", f.read(4))[0]
- if (bck_raw_info.ank1.transl_arr_offset
- + (bck_raw_info.ank1.transl_arr_length * 4) > bck_raw_info.ank1.size):
+ if ((bck_raw_info.ank1.transl_arr_offset + (bck_raw_info.ank1.transl_arr_length * 4) > bck_raw_info.ank1.size)
+ or (bck_raw_info.ank1.transl_arr_offset % 4 != 0)):
return bck_raw_error_str + "ank1 translation array offset"
# refer to the offsets to read the animation data always (SMG does this)
@@ -550,7 +551,7 @@ def check_smg_bck_anim(anim):
# assumes angles are in radians
# and that the timings between the keyframes are t0 = 0 and tf = 1
# (cubic hermite spline)
-def create_smg_bck_raw(anim):
+def create_smg_bck_raw(anim, use_std_pad_size):
# calls check_smg_bck_anim()
result = check_smg_bck_anim(anim)
@@ -716,21 +717,26 @@ def create_smg_bck_raw(anim):
raw.ank1.transl_arr = arr
# assign these variables now
- # dont be crazy with it an assign the data tables to the "standard offsets"
raw.ank1.scale_arr_length = len(raw.ank1.scale_arr)
raw.ank1.rot_arr_length = len(raw.ank1.rot_arr)
raw.ank1.transl_arr_length = len(raw.ank1.transl_arr)
pad_str = smg_common.padding()
- raw.ank1.anim_data_offset = 0x40 # yes
+
+ # decide to use standard/compact padding sizes
+ pad_byte_align = 32
+ raw.ank1.anim_data_offset = 0x40
+ if (use_std_pad_size == False):
+ pad_byte_align = 4
+ raw.ank1.anim_data_offset = 0x24
raw.ank1.scale_arr_offset = raw.ank1.anim_data_offset + (raw.ank1.bone_count * 9 * 6)
- raw.ank1.scale_arr_offset += len(pad_str.string_fill(32, raw.ank1.scale_arr_offset))
+ raw.ank1.scale_arr_offset += len(pad_str.string_fill(pad_byte_align, raw.ank1.scale_arr_offset))
raw.ank1.rot_arr_offset = raw.ank1.scale_arr_offset + (raw.ank1.scale_arr_length * 4)
- raw.ank1.rot_arr_offset += len(pad_str.string_fill(32, raw.ank1.rot_arr_offset))
+ raw.ank1.rot_arr_offset += len(pad_str.string_fill(pad_byte_align, raw.ank1.rot_arr_offset))
raw.ank1.transl_arr_offset = raw.ank1.rot_arr_offset + (raw.ank1.rot_arr_length * 2)
- raw.ank1.transl_arr_offset += len(pad_str.string_fill(32, raw.ank1.transl_arr_offset))
+ raw.ank1.transl_arr_offset += len(pad_str.string_fill(pad_byte_align, raw.ank1.transl_arr_offset))
# section size and file size
raw.ank1.size = raw.ank1.transl_arr_offset + (raw.ank1.transl_arr_length * 4)
- raw.ank1.size += len(pad_str.string_fill(32, raw.ank1.size))
+ raw.ank1.size += len(pad_str.string_fill(pad_byte_align, raw.ank1.size))
raw.header.file_size = 32 + raw.ank1.size
# done!
@@ -766,18 +772,27 @@ def write_smg_bck_raw(raw, filepath, endian_ch):
f.write(struct.pack(endian_ch + "I", raw.ank1.rot_arr_offset))
f.write(struct.pack(endian_ch + "I", raw.ank1.transl_arr_offset))
pad = smg_common.padding()
- f.write(pad.string_fill(32, 0x24))
+ i = 0
+ while (f.tell() - 32 != raw.ank1.anim_data_offset):
+ f.write(pad.string[i].encode("ascii"))
+ i += 1
# anim data
for i in range(raw.ank1.bone_count):
for j in range(9):
f.write(struct.pack(endian_ch + "H", raw.ank1.anim_data[i].comp[j].keyframe_count))
f.write(struct.pack(endian_ch + "H", raw.ank1.anim_data[i].comp[j].anim_data_index))
f.write(struct.pack(endian_ch + "H", raw.ank1.anim_data[i].comp[j].interpolation_mode))
- f.write(pad.string_fill(32, raw.ank1.anim_data_offset + (raw.ank1.bone_count * 9 * 6)))
+ i = 0
+ while (f.tell() - 32 != raw.ank1.scale_arr_offset):
+ f.write(pad.string[i].encode("ascii"))
+ i += 1
# scale array
for i in range(raw.ank1.scale_arr_length):
f.write(struct.pack(endian_ch + "f", raw.ank1.scale_arr[i]))
- f.write(pad.string_fill(32, raw.ank1.scale_arr_offset + (raw.ank1.scale_arr_length * 4)))
+ i = 0
+ while (f.tell() - 32 != raw.ank1.rot_arr_offset):
+ f.write(pad.string[i].encode("ascii"))
+ i += 1
# rotation array
for i in range(raw.ank1.rot_arr_length):
# check if the slope is larger in magnitude than the max representation container
@@ -788,11 +803,17 @@ def write_smg_bck_raw(raw, filepath, endian_ch):
elif (value < -0x7FFF):
value = -0x7FFF
f.write(struct.pack(endian_ch + "h", value))
- f.write(pad.string_fill(32, raw.ank1.rot_arr_offset + (raw.ank1.rot_arr_length * 2)))
+ i = 0
+ while (f.tell() - 32 != raw.ank1.transl_arr_offset):
+ f.write(pad.string[i].encode("ascii"))
+ i += 1
# translation array
for i in range(raw.ank1.transl_arr_length):
f.write(struct.pack(endian_ch + "f", raw.ank1.transl_arr[i]))
- f.write(pad.string_fill(32, raw.ank1.transl_arr_offset + (raw.ank1.transl_arr_length * 4)))
+ i = 0
+ while (f.tell() != raw.header.file_size):
+ f.write(pad.string[i].encode("ascii"))
+ i += 1
# done!
f.close()
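The keyframe tuples (time, value, in slope, out slope) read and written above describe a cubic Hermite spline, per the comment in create_smg_bck_raw() about t0 = 0 and tf = 1. A reference evaluator for one segment, assuming the slopes are already expressed over that normalized interval:

    def hermite(p0, m0, p1, m1, t):
        # cubic Hermite basis on t in [0, 1]; p0/p1 are keyframe values,
        # m0/m1 the out/in slopes at the segment's endpoints
        t2 = t * t
        t3 = t2 * t
        return ((2 * t3 - 3 * t2 + 1) * p0 + (t3 - 2 * t2 + t) * m0
                + (-2 * t3 + 3 * t2) * p1 + (t3 - t2) * m1)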
diff --git a/bck_import.py b/bck_import.py
index 398f18d..428cae6 100644
--- a/bck_import.py
+++ b/bck_import.py
@@ -160,7 +160,7 @@ def import_bck_func(context, options):
# convert translation values to blenxy's coordinate space
comp_value = bone_anim.comp[j].value[k]
if (j == 2 or j == 5 or j == 8):
- comp_value /= 100
+ comp_value = (comp_value / 100) * options.scale_transl
# create the keyframe
if (bone_anim.comp[j].time[k] == None): # consider 1 keyframe animation tracks
fcurve.keyframe_points.insert(lowest_anim_frame, comp_value)
@@ -179,8 +179,8 @@ def import_bck_func(context, options):
comp_out_slope = bone_anim.comp[j].out_slope[k]
if (j == 2 or j == 5 or j == 8):
if (bone_anim.comp[j].kf_count > 1):
- comp_in_slope = comp_in_slope / 100
- comp_out_slope = comp_out_slope / 100
+ comp_in_slope = (comp_in_slope / 100) * options.scale_transl
+ comp_out_slope = (comp_out_slope / 100) * options.scale_transl
# in slope, assign in-slopes only to keyframes after the first one
if (k != 0):
@@ -286,9 +286,9 @@ def import_bck_func(context, options):
# get the initial animation values
scale = [bone_anim.comp[0].value[0], bone_anim.comp[3].value[0], bone_anim.comp[6].value[0]]
rot = [bone_anim.comp[1].value[0], bone_anim.comp[4].value[0], bone_anim.comp[7].value[0]]
- transl = [bone_anim.comp[2].value[0] / 100,
- bone_anim.comp[5].value[0] / 100,
- bone_anim.comp[8].value[0] / 100]
+ transl = [(bone_anim.comp[2].value[0] / 100) * options.scale_transl,
+ (bone_anim.comp[5].value[0] / 100) * options.scale_transl,
+ (bone_anim.comp[8].value[0] / 100) * options.scale_transl]
# declare the current bone's fcurves list
fcurves = [None, None, None, # scale XYZ
None, None, None, # rot XYZ
@@ -370,7 +370,7 @@ def import_bck_func(context, options):
rot[int((k - 1) / 3)] = result
elif (k == 2 or k == 5 or k == 8): # transl
is_transl_anim = True
- transl[int((k - 2) / 3)] = result / 100
+ transl[int((k - 2) / 3)] = (result / 100) * options.scale_transl
# ~ print("frame S R T: %d %s %s %s" %(j, is_scale_anim, is_rot_anim, is_transl_anim))
# ~ print(scale)
@@ -582,6 +582,12 @@ class import_bck(Operator, ExportHelper):
("SRT", "SRT", "Scaling first, Rotation second, Translation last")
)
)
+ scale_transl = FloatProperty(
+ name = "Scale translation values:",
+ description = "Scale up/down translation values in all animation tracks.",
+ default = 1,
+ min = 10e-9,
+ )
# what the importer actually does
def execute(self, context):
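The "/ 100" followed by "* options.scale_transl" repeated above is the unit conversion from BCK translation values (SMG centimeters) to blenxy's meter-based space, plus the new user scale option; as a one-line helper it would read:

    def bck_to_blenxy_transl(value, scale_transl = 1.0):
        # BCK stores translations in centimeters; blenxy works in meters
        return (value / 100.0) * scale_transl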
diff --git a/bcsv_editing.py b/bcsv_editing.py
new file mode 100644
index 0000000..8aad5e8
--- /dev/null
+++ b/bcsv_editing.py
@@ -0,0 +1,1391 @@
+# file defining the structures and types
+# going to be used to display BCSV data in blender
+# add descriptions/names to all properties declared
+import bpy, bpy_extras, bpy_types
+import sys, random, io
+from . import blender_funcs
+from . import bcsv_funcs
+from . import file_ops
+
+# https://blenderartists.org/t/property-speed/606970/8
+# Blender's properties are slow to access/modify,
+# need a faster approach to store the BCSV table
+#
+# - Seems like I must store the BCSV table serialized in a custom
+# property as a bytes object (can be saved on the BLEND file).
+# - "Open" the stored table (deserialize it) and store the
+# resulting structure in a defined object property.
+# - Pull a section of the table data into a Blender's
+# properties structure to be able to modify it through UI.
+# - On UI modifications, store the updated values into the deserialized structure.
+# - Serialize ("Save") structure into the custom property. This save option will have to
+# be triggered manually by the user (inefficient if this happens for every little modification).
+# When saving the BLEND file a "Save" will be triggered on all "open" BCSV tables.
+# - "Close" option to release the memory used by the deserialized structure.
+# - Export/Import will also be defined.
+# - An undo/redo buffer for each table will also be defined
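+#
+# a minimal round-trip sketch of the save/open cycle described above, using
+# the same bcsv_funcs calls as the operators further down:
+# ~ raw_bytes = obj.smg_bcsv.table_raw # serialized table stored in the .blend
+# ~ table = bcsv_funcs.read_bcsv_file(io.BytesIO(raw_bytes), "BIG") # "open" (deserialize)
+# ~ raw = bcsv_funcs.create_smg_bcsv_raw(table, ">", False) # "save" (serialize)...
+# ~ obj.smg_bcsv.table_raw = bcsv_funcs.write_smg_bcsv_raw(raw, None) # ...back into the property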
+
+# constants
+DEFAULT_COL_COUNT = 7
+DEFAULT_ROW_COUNT = 10
+DEFAULT_VISIBLE_ROWS = 15
+DEFAULT_VISIBLE_COLS = 6
+
+# global variables
+ALLOW_UPDATE_CALLBACKS = True
+
+# set the properties area to be redrawn
+def redraw_properties_area(context):
+ for area in context.screen.areas:
+ if (area.type == "PROPERTIES"):
+ area.tag_redraw()
+
+# called when the type value of a col_info is changed
+def cols_info_type_interface_upd(self, context):
+ global ALLOW_UPDATE_CALLBACKS
+ if (ALLOW_UPDATE_CALLBACKS == False): return
+ table = context.object.smg_bcsv.get_table_from_address()
+ interf = context.object.smg_bcsv.interface
+ # type does not need to be checked but the rows_data
+ # columns will need to be updated on type change
+ interf_col_index = None # get the column index whose type was changed
+ for i in range(interf.visible_number_of_cols):
+ if (self == interf.cols_info[i]):
+ interf_col_index = i
+ break
+ # convert the interface col index to the real table column index
+ table_col_index = interf.col_slider + interf_col_index
+ # in the table check if all the elements on the column are good
+ # if one is not good, reset the whole column values
+ # LONG, LONG_2, SHORT, CHAR
+ python_type = int; def_value = 0
+ if (self.type == "FLOAT"): python_type = float; def_value = 0.0
+ elif (self.type in ["STRING", "STRING_OFFSET"]): python_type = str; def_value = "default"
+ for i in range(table.row_count):
+ if (type(table.rows_data[i][table_col_index]) != python_type):
+ for j in range(table.row_count):
+ table.rows_data[j][table_col_index] = def_value
+ break
+ # change the table col_info type manually
+ table.cols_info[table_col_index].type = self.type
+ # load the table section
+ load_section_table(context)
+
+# update the col_info values when modified
+def cols_info_data_interface_upd(self, context):
+ global ALLOW_UPDATE_CALLBACKS
+ if (ALLOW_UPDATE_CALLBACKS == False): return
+ interf = context.object.smg_bcsv.interface
+ # ensure the correct values are stored in the structure
+ # name or hash
+ try:
+ self.name_or_hash.encode("cp932")
+ if (self.name_or_hash.lower().startswith("0x")):
+ try:
+ num = int(self.name_or_hash, 16)
+ if (num < 0): self["name_or_hash"] = "0x00000000"
+ elif (num > 0xFFFFFFFF): self["name_or_hash"] = "0xFFFFFFFF"
+ except:
+ blender_funcs.disp_msg("Hash \"%s\" cannot be interpreted as hex string" % self.name_or_hash[ : 9])
+ except:
+ blender_funcs.disp_msg("Name or hash \"%s\" is not CP932 encodable" % self.name_or_hash[ : 9])
+ col_info["name_or_hash"] = "default"
+ # bitmask
+ try:
+ int(self.bitmask, 16)
+ if (self.bitmask.lower().startswith("0x")):
+ col_info["bitmask"] = self.bitmask[2 : ].upper()
+ except:
+ blender_funcs.disp_msg("Bitmask \"%s\" cannot be interpreted as hex string" % self.bitmask)
+ col_info["bitmask"] = "FFFFFFFF"
+ # rshift does not need to be checked
+ # type will be checked with another function
+ # load the changes into the bcsv table
+ apply_interf_to_table(context)
+ bpy.ops.ed.undo_push()
+
+# interface for cols_info data
+class smg_cols_info_interface(bpy.types.PropertyGroup):
+ name_or_hash = bpy.props.StringProperty(
+ name = "Name or hash",
+ description = "BCSVs hash their column name identifier. If the name is known, it will be printed, if not, the hash value (as a string) will be displayed",
+ default = "default",
+ update = cols_info_data_interface_upd
+ )
+ bitmask = bpy.props.StringProperty(
+ name = "Bitmask",
+ description = "Bitmask used when pulling the encoded column data from a BCSV. After this bitmask, a right shift is done.",
+ default = "FFFFFFFF",
+ maxlen = 8,
+ update = cols_info_data_interface_upd
+ )
+ rshift = bpy.props.IntProperty(
+ name = "Right shift",
+ description = "Right shift used after bitmasking the encoded value from a BCSV. The value after the right shift will be the actual value wanted to be stored",
+ min = 0,
+ max = 255,
+ default = 0,
+ update = cols_info_data_interface_upd
+ )
+ type = bpy.props.EnumProperty(
+ name = "Data type",
+ description = "BCSV column data type",
+ items = (
+ ("LONG", "Long", "Long integer type (4 bytes long)."),
+ ("STRING", "String", "String type (32 bytes long)."),
+ ("FLOAT", "Float", "Float type (4 bytes long)"),
+ ("LONG_2", "Long 2", "Long 2 integer type (4 bytes long)."),
+ ("SHORT", "Short", "Short integer type (2 bytes long)."),
+ ("CHAR", "Char", "Char integer type (1 byte long)."),
+ ("STRING_OFFSET", "String offset", "String offset type (4 bytes long)."),
+ ),
+ update = cols_info_type_interface_upd
+ )
+
+# update string/string_offset values when modified
+def rows_data_cell_interface_upd(self, context):
+ global ALLOW_UPDATE_CALLBACKS
+ if (ALLOW_UPDATE_CALLBACKS == False): return
+ # STRING/STRING_OFFSET must be CP932 encodable
+ # STRING encoded size must be 32 bytes or less
+ try:
+ enc = self.string.encode("CP932")
+ if (len(enc) >= 32):
+ blender_funcs.disp_msg("STRING type \"%s\" encoded type is larger than 31 bytes" % (self.string[ : 9]))
+ self["string"] = "default"
+ except:
+ blender_funcs.disp_msg("STRING type \"%s\" is not CP932 encodable" % (self.string[ : 9]))
+ self["string"] = "default"
+ try:
+ self.string_offset.encode("CP932")
+ except:
+ blender_funcs.disp_msg("STRING_OFFSET type \"%s\" is not CP932 encodable" % (self.string_offset[ : 9]))
+ self["string_offset"] = "default"
+ # load the changes into the bcsv table
+ apply_interf_to_table(context)
+ bpy.ops.ed.undo_push()
+
+# interface for rows_data rows
+class smg_rows_data_cell_interface(bpy.types.PropertyGroup):
+ char = bpy.props.IntProperty(
+ name = "CHAR",
+ description = "Value to be stored in a BCSV table if the column type is a CHAR type",
+ default = 0,
+ min = -128,
+ max = 127,
+ update = rows_data_cell_interface_upd
+ )
+ short = bpy.props.IntProperty(
+ name = "SHORT",
+ description = "Value to be stored in a BCSV table if the column type is a SHORT type",
+ default = 0,
+ min = -32768,
+ max = 32767,
+ update = rows_data_cell_interface_upd
+ )
+ long = bpy.props.IntProperty(
+ name = "LONG/LONG_2",
+ description = "Value to be stored in a BCSV table if the column type is a LONG/LONG_2 type",
+ default = 0,
+ min = -2147483648,
+ max = 2147483647,
+ update = rows_data_cell_interface_upd
+ )
+ float = bpy.props.FloatProperty(
+ name = "FLOAT",
+ description = "Value to be stored in a BCSV table if the column type is a FLOAT type",
+ default = 0.0,
+ update = rows_data_cell_interface_upd
+ )
+ string = bpy.props.StringProperty(
+ name = "STRING",
+ description = "Value to be stored in a BCSV table if the column type is a STRING type",
+ default = "default",
+ update = rows_data_cell_interface_upd
+ )
+ string_offset = bpy.props.StringProperty(
+ name = "STRING_OFFSET",
+ description = "Value to be stored in a BCSV table if the column type is a STRING_OFFSET type",
+ default = "default",
+ update = rows_data_cell_interface_upd
+ )
+
+# interface for rows_data
+class smg_rows_data_interface(bpy.types.PropertyGroup):
+ cells = bpy.props.CollectionProperty(
+ name = "Row cells",
+ description = "The cells in a row of a BCSV table",
+ type = smg_rows_data_cell_interface
+ )
+
+# function to load a section of the table into the table interface structure
+# assumes the interface data and the bcsv table are compatible
+def load_section_table(context):
+ # check params
+ interf = context.object.smg_bcsv.interface
+ table = context.object.smg_bcsv.get_table_from_address()
+
+ # disable some function callbacks
+ global ALLOW_UPDATE_CALLBACKS; ALLOW_UPDATE_CALLBACKS = False
+ # cols_info
+ interf.cols_info.clear()
+ for i in range(interf.col_slider, interf.col_slider + interf.visible_number_of_cols):
+ interf.cols_info.add()
+ interf.cols_info[-1].name_or_hash = table.cols_info[i].name_or_hash
+ interf.cols_info[-1].bitmask = hex(table.cols_info[i].bitmask)[2 : ].upper()
+ interf.cols_info[-1].rshift = table.cols_info[i].rshift
+ interf.cols_info[-1].type = table.cols_info[i].type
+ # rows_data
+ interf.rows_data.clear()
+ for i in range(interf.visible_number_of_rows):
+ interf.rows_data.add()
+ for i in range(interf.col_slider, interf.col_slider + interf.visible_number_of_cols):
+ attr = None
+ if (table.cols_info[i].type in ["LONG", "LONG_2"]): attr = "long"
+ elif (table.cols_info[i].type == "SHORT"): attr = "short"
+ elif (table.cols_info[i].type == "CHAR"): attr = "char"
+ elif (table.cols_info[i].type == "FLOAT"): attr = "float"
+ elif (table.cols_info[i].type == "STRING"): attr = "string"
+ elif (table.cols_info[i].type == "STRING_OFFSET"): attr = "string_offset"
+ for j in range(interf.row_slider, interf.row_slider + interf.visible_number_of_rows):
+ interf.rows_data[j - interf.row_slider].cells.add()
+ setattr(interf.rows_data[j - interf.row_slider].cells[-1], attr, table.rows_data[j][i])
+
+ # enable the disabled function callbacks
+ ALLOW_UPDATE_CALLBACKS = True
+ bpy.ops.ed.undo_push()
+
+# function to be called manually to apply the bcsv
+# interface data to the deserialized bcsv data table
+def apply_interf_to_table(context):
+ # save the section of the table shown in UI into the table in the bcsv table
+ table = context.object.smg_bcsv.get_table_from_address()
+ interf = context.object.smg_bcsv.interface
+
+ # cols_info
+ for i in range(interf.col_slider, interf.col_slider + interf.visible_number_of_cols):
+ self_index = i - interf.col_slider
+ table.cols_info[i].name_or_hash = interf.cols_info[self_index].name_or_hash
+ table.cols_info[i].bitmask = int(interf.cols_info[self_index].bitmask, 16)
+ table.cols_info[i].rshift = interf.cols_info[self_index].rshift
+ # check if the type changed (need to update the whole table column)
+ if (table.cols_info[i].type != interf.cols_info[self_index].type):
+ def_value = 0 # LONG, LONG_2, SHORT, CHAR
+ if (interf.cols_info[self_index].type == "FLOAT"):
+ def_value = 0.0
+ elif (interf.cols_info[self_index].type in ["STRING", "STRING_OFFSET"]):
+ def_value = "default"
+ # update the table column
+ for j in range(table.row_count):
+ table.rows_data[j][i] = def_value
+ # update the type
+ table.cols_info[i].type = interf.cols_info[self_index].type
+
+ # rows_data (update all UI visible values)
+ for i in range(interf.col_slider, interf.col_slider + interf.visible_number_of_cols):
+ self_col_index = i - interf.col_slider
+ attr = None
+ if (interf.cols_info[self_col_index].type in ["LONG", "LONG_2"]): attr = "long"
+ elif (interf.cols_info[self_col_index].type == "SHORT"): attr = "short"
+ elif (interf.cols_info[self_col_index].type == "CHAR"): attr = "char"
+ elif (interf.cols_info[self_col_index].type == "FLOAT"): attr = "float"
+ elif (interf.cols_info[self_col_index].type == "STRING"): attr = "string"
+ elif (interf.cols_info[self_col_index].type == "STRING_OFFSET"): attr = "string_offset"
+ for j in range(interf.row_slider, interf.row_slider + interf.visible_number_of_rows):
+ table.rows_data[j][i] = getattr(interf.rows_data[j - interf.row_slider].cells[self_col_index], attr)
+
+ # push an undo
+ bpy.ops.ed.undo_push()
+
+# assign a custom property directly or assign the attribute
+# function to help the function assignments below
+def try_assign_custom_prop(obj, data_path, value):
+ if (data_path in obj): obj[data_path] = value
+ else: setattr(obj, data_path, value)
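+# e.g. try_assign_custom_prop(interf, "row_slider", 0): once "row_slider" exists
+# as an ID property, writing it directly (obj[...] = value) bypasses the RNA
+# update callback, so interf_row_col_data_upd() cannot recurse into itself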
+
+# sync all the row/col related values from the bcsv interface with the bcsv table
+def interf_row_col_data_upd(self, context, data_path):
+ global ALLOW_UPDATE_CALLBACKS
+ if (ALLOW_UPDATE_CALLBACKS == False): return
+ # table and interface
+ table = context.object.smg_bcsv.get_table_from_address()
+ interf = context.object.smg_bcsv.interface
+ # decide whether to update or not the BCSV interface
+ update_interf = False
+
+ # row/column count
+ if (data_path in ["row_count", "col_count",
+ "row_slider", "visible_number_of_rows",
+ "col_slider", "visible_number_of_cols"]):
+ try_assign_custom_prop(self, "row_count", table.row_count)
+ try_assign_custom_prop(self, "col_count", table.col_count)
+ # row slider, visible number of rows
+ if (data_path in ["row_count", "row_slider", "visible_number_of_rows"]):
+ if (self.row_slider + self.visible_number_of_rows > table.row_count):
+ row_slider_tmp = self.row_slider
+ visible_number_of_rows_tmp = self.visible_number_of_rows
+ while (row_slider_tmp + visible_number_of_rows_tmp > table.row_count):
+ if (row_slider_tmp > 0):
+ row_slider_tmp -= 1
+ elif (visible_number_of_rows_tmp > 0):
+ visible_number_of_rows_tmp -= 1
+ try_assign_custom_prop(self, "row_slider", row_slider_tmp)
+ try_assign_custom_prop(self, "visible_number_of_rows", visible_number_of_rows_tmp)
+ # interface must update now
+ update_interf = True
+ # column slider, visible number of columns
+ elif (data_path in ["col_count", "col_slider", "visible_number_of_cols"]):
+ if (self.col_slider + self.visible_number_of_cols > table.col_count):
+ col_slider_tmp = self.col_slider
+ visible_number_of_cols_tmp = self.visible_number_of_cols
+ while (col_slider_tmp + visible_number_of_cols_tmp > table.col_count):
+ if (col_slider_tmp > 0):
+ col_slider_tmp -= 1
+ elif (visible_number_of_cols_tmp > 0):
+ visible_number_of_cols_tmp -= 1
+ try_assign_custom_prop(self, "col_slider", col_slider_tmp)
+ try_assign_custom_prop(self, "visible_number_of_cols", visible_number_of_cols_tmp)
+ # interface must update now
+ update_interf = True
+ # active row index
+ elif (data_path == "active_row_index"):
+ if (self.active_row_index[0] > table.row_count): self["active_row_index"][0] = table.row_count
+ if (self.active_row_index[1] >= table.row_count): self["active_row_index"][1] = table.row_count - 1
+ # active column index
+ elif (data_path == "active_col_index"):
+ if (self.active_col_index[0] > table.col_count): self["active_col_index"][0] = table.col_count
+ if (self.active_col_index[1] >= table.col_count): self["active_col_index"][1] = table.col_count - 1
+
+ # update the bcsv interface?
+ if (update_interf):
+ load_section_table(context)
+ bpy.ops.ed.undo_push()
+
+# structure to serve as an interface to the actual bcsv table data stored in a custom property
+class smg_bcsv_table_interface(bpy.types.PropertyGroup):
+ # smg bcsv table interface
+ row_count = bpy.props.IntProperty(
+ name = "Number of rows",
+ description = "Number of rows in the BCSV table",
+ min = 0,
+ default = 0,
+ update = (lambda self, context: interf_row_col_data_upd(self, context, "row_count"))
+ )
+ col_count = bpy.props.IntProperty(
+ name = "Number of columns",
+ description = "Number of columns in the BCSV table",
+ min = 0,
+ default = 0,
+ update = (lambda self, context: interf_row_col_data_upd(self, context, "col_count"))
+ )
+ cols_info = bpy.props.CollectionProperty(
+ name = "Columns information",
+ description = "Array with the variables that characterize the columns of a BCSV",
+ type = smg_cols_info_interface
+ )
+ rows_data = bpy.props.CollectionProperty(
+ name = "Rows data",
+ description = "2D array with the data going to be stored into a BCSV table.",
+ type = smg_rows_data_interface
+ )
+
+ # blender specific data
+ show_col_info = bpy.props.BoolProperty(
+ name = "Show column info",
+ description = "Show the BCSV column information in the UI",
+ default = False
+ )
+ visible_number_of_rows = bpy.props.IntProperty(
+ name = "Visible number of rows",
+ description = "Max number of BCSV rows to be drawn in the UI",
+ min = 0,
+ default = 0,
+ update = (lambda self, context: interf_row_col_data_upd(self, context, "visible_number_of_rows"))
+ )
+ visible_number_of_cols = bpy.props.IntProperty(
+ name = "Visible number of columns",
+ description = "Max number of BCSV columns to be drawn in the UI",
+ min = 0,
+ default = 0,
+ update = (lambda self, context: interf_row_col_data_upd(self, context, "visible_number_of_cols"))
+ )
+ row_slider = bpy.props.IntProperty(
+ name = "Row slider",
+ description = "Slider to be used as a scroller for rows to be able to navigate the BCSV table (Ctrl + scroll over the property)",
+ min = 0,
+ default = 0,
+ update = (lambda self, context: interf_row_col_data_upd(self, context, "row_slider"))
+ )
+ col_slider = bpy.props.IntProperty(
+ name = "Column slider",
+ description = "Slider to be used as a scroller for columns to be able to navigate the BCSV table (Ctrl + scroll over the property)",
+ min = 0,
+ default = 0,
+ update = (lambda self, context: interf_row_col_data_upd(self, context, "col_slider"))
+ )
+ active_row_index = bpy.props.IntVectorProperty(
+ name = "Active row index",
+ description = "Value used to specify an operation with the row associated with the index (insert / move / remove). Value at index 0 will be used for inserting/removing a row at the specified index. On moving, value at index 0 represents the current index row to move and value at index 1 represents the new row index position",
+ default = (0, 0),
+ size = 2,
+ min = 0,
+ update = (lambda self, context: interf_row_col_data_upd(self, context, "active_row_index"))
+ )
+ active_col_index = bpy.props.IntVectorProperty(
+ name = "Active column index",
+ description = "Value used to specify an operation with the column associated with the index (insert / move / remove). Value at index 0 will be used for inserting/removing a column at the specified index. On moving, value at index 0 represents the current index column to move and value at index 1 represents the new column index position",
+ default = (0, 0),
+ size = 2,
+ min = 0,
+ update = (lambda self, context: interf_row_col_data_upd(self, context, "active_col_index"))
+ )
+ hash_generator_str = bpy.props.StringProperty(
+ name = "Hash generator string",
+ description = "String variable to be used with the hash generator",
+ default = "",
+ update = (lambda self, context: undo_push_wrapper())
+ )
+
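+# the hash generator string above presumably feeds the usual JMap/BCSV
+# field-name hash; a sketch of that hash (bcsv_funcs.py is authoritative):
+# ~ def bcsv_field_hash(name):
+# ~ h = 0
+# ~ for b in name.encode("cp932"):
+# ~ h = (h * 31 + b) & 0xFFFFFFFF
+# ~ return h
+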
+# ~ # undo/redo list
+# ~ # single operations:
+# ~ # insert/move/remove a row/col at a certain index
+# ~ # change a cell value rows_data/cols_info
+# ~ # change a cols_info[index].type value (can change all values of the respective column)
+# ~ #
+# ~ # check bcsv_funcs.py to see the operation definitions.
+# ~ #
+# ~ # when doing undos, the redos will be kept only if no new operations are added
+# ~ # doing something different will erase the following redos (it is the most logical thing to do)
+# ~ # (imagine keeping the undo/redo branches)
+
+# ~ DEFAULT_UNDO_REDO_BUFFER_SIZE = 25
+
+# BCSV table buffer
+class table_info:
+ def __init__(self):
+ self.table = None
+ self.undo_redo_list = None
+
+BCSV_TABLE_BUFFER_LENGTH = 25
+BCSV_TABLE_BUFFER = []
+
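+# note: the "address" linking an object to its open table (table_address below)
+# is hex(id(table)); CPython's id() is only stable while the object is alive,
+# which is why this buffer must keep a reference to every open table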
+# add a table to the buffer
+def add_table_to_buffer(table):
+ if ("all good" not in bcsv_funcs.check_smg_bcsv_table(table)): return
+ global BCSV_TABLE_BUFFER
+ new_table_info = table_info()
+ new_table_info.table = table
+ new_table_info.undo_redo_list = []
+ BCSV_TABLE_BUFFER.append(new_table_info)
+ while (len(BCSV_TABLE_BUFFER) > BCSV_TABLE_BUFFER_LENGTH):
+ BCSV_TABLE_BUFFER.remove(BCSV_TABLE_BUFFER[0])
+
+# function to get the "open" bcsv table in buffer
+def get_table_from_address(self):
+ # check params
+ if (type(self) != smg_bcsv): return None
+ global BCSV_TABLE_BUFFER
+ for table_info in BCSV_TABLE_BUFFER:
+ if (id(table_info.table) == int(self.table_address, 16)):
+ return table_info.table
+ # nothing was found
+ return None
+
+# function to remove an "open" bcsv table from the buffer
+def remove_table_from_buffer(address):
+ global BCSV_TABLE_BUFFER
+ for table_info in BCSV_TABLE_BUFFER:
+ if (id(table_info.table) == address):
+ BCSV_TABLE_BUFFER.remove(table_info)
+ break
+
+# when the table address is modified
+def table_address_upd(self, context):
+ try:
+ int(self.table_address, 16)
+ except:
+ self["table_address"] = "0xABCDEF"
+
+# wrapper for undo_push()
+def undo_push_wrapper():
+ bpy.ops.ed.undo_push()
+
+# structure to store all the information needed for
+# BCSV table to be useful into blender
+class smg_bcsv(bpy.types.PropertyGroup):
+ # the table interface
+ interface = bpy.props.PointerProperty(
+ name = "SMG BCSV Table Interface",
+ description = "Interface used to modify the SMG BCSV table through Blender's UI",
+ type = smg_bcsv_table_interface
+ )
+
+ # hex string of the address of the table
+ table_address = bpy.props.StringProperty(
+ name = "SMG BCSV table address string",
+ description = "Hex string address of the deserialized SMG BCSV table",
+ default = "0xABCDEF",
+ update = table_address_upd
+ )
+
+ # storage for when serializing the table
+ table_raw = bpy.props.StringProperty(
+ name = "Raw SMG BCSV table",
+ description = "Serialized BCSV table storage",
+ subtype = "BYTE_STRING",
+ default = "",
+ update = (lambda self, context: undo_push_wrapper())
+ )
+
+ # function to get the "opened" table of an object
+ get_table_from_address = (lambda self: get_table_from_address(self))
+
+ # undo/redo buffer size
+ bcsv_table_buffer_size = bpy.props.IntProperty(
+ name = "BCSV table buffer size",
+ description = "Size of the buffer with \"open\" smg BCSV tables",
+ default = BCSV_TABLE_BUFFER_LENGTH,
+ min = 1
+ )
+
+# Save a BCSV table into an object
+class DATA_OT_smg_bcsv_table_save(bpy.types.Operator):
+ """Save BCSV table into an object"""
+ bl_idname = "object.smg_bcsv_table_save"
+ bl_label = "Save BCSV table"
+
+ # save if an object is active/selected
+ # and it has valid deserialized table data
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ if (obj == None): return False
+ if (obj.smg_bcsv.get_table_from_address() == None): return False
+ return True
+
+ # what the operator does
+ def execute(self, context):
+ # apply interface values to table
+ apply_interf_to_table(context)
+ # serialize the table (big endian) and store it into the object's custom property slot
+ obj = context.object
+ raw = bcsv_funcs.create_smg_bcsv_raw(obj.smg_bcsv.get_table_from_address(), ">", False)
+ if (raw != None):
+ obj.smg_bcsv.table_raw = bcsv_funcs.write_smg_bcsv_raw(raw, None)
+ return {"FINISHED"}
+
+# Open a BCSV table stored in an object
+class DATA_OT_smg_bcsv_table_open(bpy.types.Operator):
+ """Open a BCSV table stored in an object"""
+ bl_idname = "object.smg_bcsv_table_open"
+ bl_label = "Open BCSV table"
+
+ # if an object is active/selected
+ # it has a smg_bcsv_table custom property
+ # it has a table loaded in the buffer
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ if (obj == None): return False
+ stream = io.BytesIO(obj.smg_bcsv.table_raw)
+ if ("all good" not in bcsv_funcs.check_bcsv_file(stream, ">")): return False
+ if (obj.smg_bcsv.get_table_from_address() != None): return False
+ return True
+
+ # what the operator does
+ def execute(self, context):
+ # check if the serialized struct is valid
+ obj = context.object
+ table = bcsv_funcs.read_bcsv_file(io.BytesIO(obj.smg_bcsv.table_raw), "BIG")
+ if (type(table) != bcsv_funcs.smg_bcsv_table):
+ blender_funcs.disp_msg("Cannot open BCSV table, it is malformed. Removing it.")
+ obj.smg_bcsv.table_raw = bytes()
+ return {"FINISHED"}
+
+ # load the deserialized table data into the buffer
+ add_table_to_buffer(table)
+ obj.smg_bcsv.table_address = hex(id(table))
+
+ # assign the respective values to the smg_bcsv_table_interface
+ # to be able to see the table through the UI
+ interf = obj.smg_bcsv.interface
+ global ALLOW_UPDATE_CALLBACKS; ALLOW_UPDATE_CALLBACKS = False # disable update callbacks
+ interf.row_count = table.row_count
+ interf.col_count = table.col_count
+ interf.show_col_info = False
+ interf.visible_number_of_rows = (DEFAULT_VISIBLE_ROWS
+ if (table.row_count > DEFAULT_VISIBLE_ROWS)
+ else table.row_count)
+ interf.visible_number_of_cols = (DEFAULT_VISIBLE_COLS
+ if (table.col_count > DEFAULT_VISIBLE_COLS)
+ else table.col_count)
+ interf.row_slider = 0
+ interf.col_slider = 0
+ interf.active_row_index = (0, 0)
+ interf.active_col_index = (0, 0)
+ # re-enable the callbacks and load the default section of the table
+ ALLOW_UPDATE_CALLBACKS = True
+ load_section_table(context)
+ return {"FINISHED"}
+
+# Close a BCSV table from an object (does not save it)
+class DATA_OT_smg_bcsv_table_close(bpy.types.Operator):
+ """Close a BCSV table from an object (does not save it)"""
+ bl_idname = "object.smg_bcsv_table_close"
+ bl_label = "Close BCSV table"
+
+ # if an object is active/selected
+ # it has a valid deserialized smg bcsv table
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ if (obj == None): return False
+ if (obj.smg_bcsv.get_table_from_address() == None): return False
+ return True
+
+ # what the operator does
+ def execute(self, context):
+ obj = context.object
+ remove_table_from_buffer(int(obj.smg_bcsv.table_address, 16))
+ return {"FINISHED"}
+
+# create a BCSV table for an object in blender (overrides previously stored table)
+class DATA_OT_smg_bcsv_table_create(bpy.types.Operator):
+ """Create a new BCSV table from scratch"""
+ bl_idname = "object.smg_bcsv_table_create"
+ bl_label = "Create BCSV table"
+
+ # variables exclusive to the operator
+ row_count = bpy.props.IntProperty(
+ name = "Number of rows",
+ description = "Number of rows in the BCSV table",
+ default = DEFAULT_ROW_COUNT,
+ min = 0
+ )
+ col_count = bpy.props.IntProperty(
+ name = "Number of columns",
+ description = "Number of columns in the BCSV table",
+ default = DEFAULT_COL_COUNT,
+ min = 0
+ )
+
+ # create if an object is active/selected
+ @classmethod
+ def poll(cls, context):
+ if (context.object == None): return False
+ return True
+
+ # what will be drawn by the operator on the floating window
+ def draw(self, context):
+ layout = self.layout
+ layout.prop(self, "row_count", text = "Row count")
+ layout.prop(self, "col_count", text = "Column count")
+
+ # invoke() is needed so the props dialog gets shown before execute() runs
+ def invoke(self, context, event):
+ return context.window_manager.invoke_props_dialog(self)
+
+ # what the operator does
+ def execute(self, context):
+ # create, save, and open a "default" BCSV table
+ new = bcsv_funcs.smg_bcsv_table()
+ new.row_count = self.row_count
+ new.col_count = self.col_count
+ for i in range(new.col_count):
+ new.cols_info.append(bcsv_funcs.smg_bcsv_table.cols_info())
+ new.cols_info[-1].name_or_hash = "default"
+ new.cols_info[-1].bitmask = 0xFFFFFFFF
+ new.cols_info[-1].rshift = 0
+ new.cols_info[-1].type = "LONG"
+ for i in range(new.row_count):
+ new.rows_data.append([])
+ for j in range(new.col_count):
+ new.rows_data[-1].append(0)
+ # load the serialized/deserialized data
+ add_table_to_buffer(new)
+ context.object.smg_bcsv.table_address = hex(id(new))
+ raw = bcsv_funcs.create_smg_bcsv_raw(new, ">", False)
+ context.object.smg_bcsv.table_raw = bcsv_funcs.write_smg_bcsv_raw(raw, None)
+ bpy.ops.ed.undo_push()
+ # disable table updating
+ global ALLOW_UPDATE_CALLBACKS; ALLOW_UPDATE_CALLBACKS = False
+ interf = context.object.smg_bcsv.interface
+ interf.row_count = new.row_count
+ interf.col_count = new.col_count
+ interf.visible_number_of_rows = (DEFAULT_VISIBLE_ROWS
+ if (new.row_count > DEFAULT_VISIBLE_ROWS)
+ else new.row_count)
+ interf.visible_number_of_cols = (DEFAULT_VISIBLE_COLS
+ if (new.col_count > DEFAULT_VISIBLE_COLS)
+ else new.col_count)
+ interf.active_row_index = (0, 0)
+ interf.active_col_index = (0, 0)
+ # enable table updating
+ ALLOW_UPDATE_CALLBACKS = True
+ load_section_table(context)
+ return {"FINISHED"}
+
+# Remove all BCSV table data from an object
+class DATA_OT_smg_bcsv_table_remove(bpy.types.Operator):
+ """Remove a BCSV table from an object"""
+ bl_idname = "object.smg_bcsv_table_remove"
+ bl_label = "Remove BCSV table"
+
+ # if an object is active/selected
+ # it has a smg_bcsv_table custom property or
+ # it has valid deserialized table data loaded
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ if (obj == None): return False
+ return True
+
+ # what the operator does
+ def execute(self, context):
+ # remove whatever the object has completely
+ obj = context.object
+ obj.smg_bcsv.table_raw = bytes()
+ remove_table_from_buffer(int(obj.smg_bcsv.table_address, 16))
+ return {"FINISHED"}
+
+# import a BCSV table data
+class DATA_OT_smg_bcsv_table_import(bpy.types.Operator, bpy_extras.io_utils.ImportHelper):
+ """Import a BCSV table into the object's data"""
+ bl_idname = "object.smg_bcsv_table_import"
+ bl_label = "Import BCSV table"
+
+ # importer options
+ endian_mode = bpy.props.EnumProperty(
+ name = "Endian mode",
+ description = "Specify the Endianness of the file",
+ default = "BIG",
+ items = (
+ ("BIG", "Big", "Read the file as Big Endian"),
+            ("LITTLE", "Little", "Read the file as Little Endian"),
+            ("AUTO", "Auto-detect", "Read the file as Big Endian first and, if that fails, try reading it as Little Endian")
+ )
+ )
+
+    # allow import only if an object is active/selected
+ @classmethod
+ def poll(cls, context):
+ if (context.object == None): return False
+ return True
+
+ # what the operator does
+ def execute(self, context):
+ # try importing the BCSV table
+ result = bcsv_funcs.read_bcsv_file(self.properties.filepath, self.endian_mode)
+ # check how the reading went
+ if (type(result) == str):
+ blender_funcs.disp_msg(result)
+ return {"FINISHED"}
+
+ # append the table to the buffer and save it
+ obj = context.object
+ add_table_to_buffer(result)
+ obj.smg_bcsv.table_address = hex(id(result))
+
+ # assign the respective values to the smg_bcsv_table_interface
+ # to be able to see the table through the UI
+ interf = obj.smg_bcsv.interface
+ # disable some callbacks
+ global ALLOW_UPDATE_CALLBACKS; ALLOW_UPDATE_CALLBACKS = False
+ interf.row_slider = 0
+ interf.col_slider = 0
+ interf.row_count = result.row_count
+ interf.col_count = result.col_count
+ interf.show_col_info = False
+ interf.visible_number_of_rows = (DEFAULT_VISIBLE_ROWS
+ if (result.row_count > DEFAULT_VISIBLE_ROWS)
+ else result.row_count)
+ interf.visible_number_of_cols = (DEFAULT_VISIBLE_COLS
+ if (result.col_count > DEFAULT_VISIBLE_COLS)
+ else result.col_count)
+ interf.active_row_index = (0, 0)
+ interf.active_col_index = (0, 0)
+ # table can update now
+ ALLOW_UPDATE_CALLBACKS = True
+ # load the default section of the table
+ load_section_table(context)
+ # save the table
+ bpy.ops.object.smg_bcsv_table_save()
+ return {'FINISHED'}
+
+# export the data from a deserialized BCSV table
+class DATA_OT_smg_bcsv_table_export(bpy.types.Operator, bpy_extras.io_utils.ExportHelper):
+ """Save the BCSV table from the object's data into a file (auto-saves the table)"""
+ bl_idname = "object.smg_bcsv_table_export"
+ bl_label = "Export BCSV table"
+
+ filename_ext = ""
+ filter_glob = bpy.props.StringProperty(default = "*", options = {"HIDDEN"}, maxlen = 255)
+
+ # exporter options
+ endian = bpy.props.EnumProperty(
+ name = "Endian",
+ description = "Way in which the table data will be written",
+ default = "BIG",
+ items = (
+ ("BIG", "Big", "Write data in the big endian byte ordering"),
+ ("LITTLE", "Little", "Write data in the little endian byte ordering")
+ )
+ )
+ use_std_pad_size = bpy.props.BoolProperty(
+ name = "Use standard padding sizes",
+ description = "Use the usual padding sizes when making the BCSV file",
+ default = True
+ )
+
+ # object is selected
+ # it has a valid deserialized BCSV table
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ if (obj == None): return False
+ if (obj.smg_bcsv.get_table_from_address() == None): return False
+ return True
+
+    # what the exporter actually does
+ def execute(self, context):
+ # save the table first
+ bpy.ops.object.smg_bcsv_table_save()
+ # create a raw BCSV struct with bcsv_funcs.create_smg_bcsv_raw()
+ obj = context.object
+ endian_ch = ">" if (self.endian == "BIG") else "<"
+ raw = bcsv_funcs.create_smg_bcsv_raw(obj.smg_bcsv.get_table_from_address(), endian_ch, self.use_std_pad_size)
+        # then write it to a real file with bcsv_funcs.write_smg_bcsv_raw()
+ bcsv_funcs.write_smg_bcsv_raw(raw, self.filepath)
+ blender_funcs.disp_msg("BCSV file \"%s\" written." % (file_ops.get_file_name(self.filepath)))
+ return {"FINISHED"}
+
+# insert a row in a BCSV table
+class DATA_OT_smg_bcsv_table_insert_row(bpy.types.Operator):
+ """Insert a row in a BCSV table"""
+ bl_idname = "object.smg_bcsv_table_insert_row"
+ bl_label = "Insert row at active index"
+
+ # object is selected
+ # it has a valid deserialized BCSV table
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ if (obj == None): return False
+ if (obj.smg_bcsv.get_table_from_address() == None): return False
+ return True
+
+ # what the operator does
+ def execute(self, context):
+ # get all the variables
+ obj = context.object
+ table = obj.smg_bcsv.get_table_from_address()
+ interf = obj.smg_bcsv.interface
+
+ # insert a new row on the table at the specified index
+ row_to_insert_values = []
+ for i in range(table.col_count):
+ value = 0 # LONG, LONG_2, SHORT, CHAR
+ if (table.cols_info[i].type == "FLOAT"): value = 0.0
+ elif (table.cols_info[i].type in ["STRING", "STRING_OFFSET"]): value = "default"
+ row_to_insert_values.append(value)
+ bcsv_funcs.exec_table_cmd(table, "INSERT", "ROW", [interf.active_row_index[0], row_to_insert_values])
+ # trigger a load section table
+ interf.row_count = table.row_count
+
+ # done!
+ print("Row inserted at index %s" % (interf.active_row_index[0]))
+ return {"FINISHED"}
+
+# remove a row in a BCSV table
+class DATA_OT_smg_bcsv_table_remove_row(bpy.types.Operator):
+ """Remove a row from a BCSV table"""
+ bl_idname = "object.smg_bcsv_table_remove_row"
+ bl_label = "Remove row at active index"
+
+ # object is selected
+ # it has a valid deserialized BCSV table
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ if (obj == None): return False
+ if (obj.smg_bcsv.get_table_from_address() == None): return False
+ return True
+
+ # what the operator does
+ def execute(self, context):
+ obj = context.object
+ table = obj.smg_bcsv.get_table_from_address()
+ interf = obj.smg_bcsv.interface
+ if (interf.row_count == 0):
+            blender_funcs.disp_msg("Cannot remove rows from a BCSV table with 0 rows")
+ return {"FINISHED"}
+
+ # get the real index to be removed
+ row_to_remove_index = interf.active_row_index[0]
+ if (row_to_remove_index >= interf.row_count):
+ row_to_remove_index = interf.row_count - 1
+ interf.active_row_index[0] = row_to_remove_index
+ # remove the row at the index
+ row_to_remove_values = table.rows_data[row_to_remove_index]
+ bcsv_funcs.exec_table_cmd(table, "REMOVE", "ROW", [row_to_remove_index, row_to_remove_values])
+ # trigger a load table
+ interf.row_slider = interf.row_slider
+
+ # done!
+ print("Row at index %s removed" % (row_to_remove_index))
+ return {"FINISHED"}
+
+# move a row in a BCSV table
+class DATA_OT_smg_bcsv_table_move_row(bpy.types.Operator):
+ """Move a row in a BCSV table to another position"""
+ bl_idname = "object.smg_bcsv_table_move_row"
+ bl_label = "Move row at active index"
+
+ # object is selected
+ # it has a valid deserialized BCSV table
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ if (obj == None): return False
+ if (obj.smg_bcsv.get_table_from_address() == None): return False
+ return True
+
+ # what the operator does
+ def execute(self, context):
+ obj = context.object
+ table = obj.smg_bcsv.get_table_from_address()
+ interf = obj.smg_bcsv.interface
+ if (interf.row_count == 0):
+            blender_funcs.disp_msg("Cannot move rows in a BCSV table with 0 rows")
+ return {"FINISHED"}
+
+ # get the indexes
+ old_index = interf.active_row_index[0]
+ new_index = interf.active_row_index[1]
+ if (old_index >= interf.row_count):
+ old_index = interf.row_count - 1
+ interf.active_row_index[0] = old_index
+ # move the specified row
+ bcsv_funcs.exec_table_cmd(table, "MOVE", "ROW", [old_index, new_index])
+ # trigger a load table
+ load_section_table(context)
+
+ # done!
+ print("Row at index %s moved to index %s" % (old_index, new_index))
+ return {"FINISHED"}
+
+# insert a column in a BCSV table
+class DATA_OT_smg_bcsv_table_insert_col(bpy.types.Operator):
+ """Insert a column in a BCSV table"""
+ bl_idname = "object.smg_bcsv_table_insert_col"
+ bl_label = "Insert column at active index"
+
+ # object is selected
+ # it has a valid deserialized BCSV table
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ if (obj == None): return False
+ if (obj.smg_bcsv.get_table_from_address() == None): return False
+ return True
+
+ # what the operator does
+ def execute(self, context):
+ # get all the variables
+ obj = context.object
+ table = obj.smg_bcsv.get_table_from_address()
+ interf = obj.smg_bcsv.interface
+
+ # insert a new col on the table at the specified index
+ insert_index = interf.active_col_index[0]
+ col_info_values = ["default", 0xFFFFFFFF, 0, "LONG"]
+ col_to_insert_values = [0 for i in range(table.row_count)]
+ bcsv_funcs.exec_table_cmd(table, "INSERT", "COLUMN", [insert_index, col_info_values, col_to_insert_values])
+ # trigger a load section table
+ interf.col_count = table.col_count
+
+ # done!
+ print("Column inserted at index %s" % (interf.active_col_index[0]))
+ return {"FINISHED"}
+
+# remove a column in a BCSV table
+class DATA_OT_smg_bcsv_table_remove_col(bpy.types.Operator):
+ """Remove a column from a BCSV table"""
+ bl_idname = "object.smg_bcsv_table_remove_col"
+ bl_label = "Remove column at active index"
+
+ # object is selected
+ # it has a valid deserialized BCSV table
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ if (obj == None): return False
+ if (obj.smg_bcsv.get_table_from_address() == None): return False
+ return True
+
+ # what the operator does
+ def execute(self, context):
+ obj = context.object
+ table = obj.smg_bcsv.get_table_from_address()
+ interf = obj.smg_bcsv.interface
+ if (interf.col_count == 1):
+            blender_funcs.disp_msg("Cannot remove the last column of a 1 column BCSV table")
+ return {"FINISHED"}
+
+ # get the real index to be removed
+ col_to_remove_index = interf.active_col_index[0]
+ if (col_to_remove_index >= interf.col_count):
+ col_to_remove_index = interf.col_count - 1
+ interf.active_col_index[0] = col_to_remove_index
+ # remove the column at the index
+ col_info = table.cols_info[col_to_remove_index]
+ col_info_values = [col_info.name_or_hash, col_info.bitmask, col_info.rshift, col_info.type]
+ col_to_remove_values = []
+ for i in range(table.row_count):
+ col_to_remove_values.append(table.rows_data[i][col_to_remove_index])
+ bcsv_funcs.exec_table_cmd(table, "REMOVE", "COLUMN", [col_to_remove_index,
+ col_info_values,
+ col_to_remove_values])
+ # trigger a load table
+ interf.col_slider = interf.col_slider
+
+ # done!
+ print("Column at index %s removed" % (col_to_remove_index))
+ return {"FINISHED"}
+
+
+# move a column in a BCSV table
+class DATA_OT_smg_bcsv_table_move_col(bpy.types.Operator):
+ """Move a column in a BCSV table to another position"""
+ bl_idname = "object.smg_bcsv_table_move_col"
+ bl_label = "Move column at active index"
+
+ # object is selected
+ # it has a valid deserialized BCSV table
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ if (obj == None): return False
+ if (obj.smg_bcsv.get_table_from_address() == None): return False
+ return True
+
+ # what the operator does
+ def execute(self, context):
+ obj = context.object
+ table = obj.smg_bcsv.get_table_from_address()
+ interf = obj.smg_bcsv.interface
+
+ # get the indexes
+ old_index = interf.active_col_index[0]
+ new_index = interf.active_col_index[1]
+ if (old_index >= interf.col_count):
+ old_index = interf.col_count - 1
+ interf.active_col_index[0] = old_index
+ # move the specified column
+ bcsv_funcs.exec_table_cmd(table, "MOVE", "COLUMN", [old_index, new_index])
+ # trigger a load table
+ load_section_table(context)
+
+ # done!
+ print("Column at index %s moved to index %s" % (old_index, new_index))
+ return {"FINISHED"}
+
+# add a hash string as a known hash string
+class DATA_OT_smg_bcsv_table_interface_add_new_known_hash(bpy.types.Operator):
+ """Add the current string as a new known hash string"""
+ bl_idname = "object.smg_bcsv_table_interface_add_new_known_hash"
+ bl_label = "Add new known string hash"
+
+ # only if the string is CP932 encodable
+ @classmethod
+ def poll(cls, context):
+ # ensure correct encoding
+ try:
+ context.object.smg_bcsv.interface.hash_generator_str.encode("cp932")
+ return True
+ except:
+ return False
+
+ # what the operator does
+ def execute(self, context):
+ interf = context.object.smg_bcsv.interface
+ enc = interf.hash_generator_str.encode("cp932")
+ bcsv_funcs.add_new_known_hash(enc)
+ print("Hash string \"%s\" added as a known hash!" % (interf.hash_generator_str))
+ return {"FINISHED"}
+
+# copy the hash of a valid string
+class DATA_OT_smg_bcsv_table_interface_copy_str_hash(bpy.types.Operator):
+ """Copy the generated hash of a string"""
+ bl_idname = "object.smg_bcsv_table_interface_copy_str_hash"
+ bl_label = "Copy string hash"
+
+ # only if the string is CP932 encodable
+ @classmethod
+ def poll(cls, context):
+ # ensure correct encoding
+ try:
+ context.object.smg_bcsv.interface.hash_generator_str.encode("cp932")
+ return True
+ except:
+ return False
+
+ # what the operator does
+ def execute(self, context):
+ string = context.object.smg_bcsv.interface.hash_generator_str
+ hash_string = bcsv_funcs.calc_bytes_hash(string.encode("cp932"))
+ hash_string = "0x%08X" % (hash_string)
+ context.window_manager.clipboard = hash_string
+ print("Hash string \"%s\" copied!" % (hash_string))
+ return {"FINISHED"}
+
+# panel to be drawn on an object's properties
+class DATA_PT_smg_bcsv_table_interface(bpy.types.Panel):
+ bl_space_type = "PROPERTIES"
+ bl_region_type = "WINDOW"
+ bl_context = "data"
+ bl_label = "BCSV table"
+
+    # panel will be drawn only when
+    # an object is selected in the context
+ @classmethod
+ def poll(cls, context):
+ obj = context.object
+ if (obj == None): return False
+ return True
+
+ # draw the panel
+ def draw(self, context):
+ # layout stuff
+ layout = self.layout
+ obj = context.object
+
+ # draw these buttons
+ row = layout.row()
+ row.operator("object.smg_bcsv_table_create")
+ row.operator("object.smg_bcsv_table_remove")
+ row.operator("object.smg_bcsv_table_open")
+ row.operator("object.smg_bcsv_table_close")
+ row = layout.row()
+ row.operator("object.smg_bcsv_table_save")
+ row.operator("object.smg_bcsv_table_import")
+ row.operator("object.smg_bcsv_table_export")
+
+ # draw table
+ table = obj.smg_bcsv.get_table_from_address()
+ interf = obj.smg_bcsv.interface
+
+ # cols_info
+ if (table != None):
+ if (interf.show_col_info):
+ box = layout.box()
+ row = box.row()
+ row.label("Columns information: [Name or hash / Bitmask / Right shift / Data Type]")
+
+ # display the columns information
+ if (interf.visible_number_of_cols != 0):
+ for i in range(interf.visible_number_of_cols):
+ # first UI row
+ if (i == 0):
+ row = box.row(align = True)
+ # display column data
+ col = row.column(align = True)
+ col.label("C%s" % (interf.col_slider + i))
+ col.prop(interf.cols_info[i], "name_or_hash", text = "")
+ col.prop(interf.cols_info[i], "bitmask", text = "")
+ col.prop(interf.cols_info[i], "rshift", text = "")
+ col.prop(interf.cols_info[i], "type", text = "")
+
+ # show the column slider
+ row = box.row()
+ row.prop(interf, "col_slider", icon_only = True)
+ else:
+ row.label("No columns displayed.")
+
+ # show column info, visible cols per row and visible rows buttons
+ box = layout.box()
+ row = box.row()
+ row.label("Table display")
+ row = box.row()
+ row.prop(interf, "show_col_info", text = "Display columns information")
+ row = box.row()
+            row.prop(interf, "visible_number_of_rows", text = "Visible number of rows")
+            row.prop(interf, "visible_number_of_cols", text = "Visible number of columns")
+
+ # show the number of rows/cols
+ # show the active row/column property
+ # show the insert/remove row/column buttons
+ box = layout.box()
+ row = box.row()
+ row.label("Table operations")
+ row = box.row()
+ col = row.column(align = True)
+ col.label("Row count: %s" % (interf.row_count))
+ row_tmp = col.row(align = False)
+ row_tmp.prop(interf, "active_row_index", text = "Active row index")
+ col.operator("object.smg_bcsv_table_insert_row")
+ col.operator("object.smg_bcsv_table_move_row")
+ col.operator("object.smg_bcsv_table_remove_row")
+ col = row.column(align = True)
+ col.label("Column count: %s" % (interf.col_count))
+ row_tmp = col.row(align = False)
+ row_tmp.prop(interf, "active_col_index", text = "Active column index")
+ col.operator("object.smg_bcsv_table_insert_col")
+ col.operator("object.smg_bcsv_table_move_col")
+ col.operator("object.smg_bcsv_table_remove_col")
+
+ # box containing the rows data
+ box = layout.box()
+
+ # draw the table
+ row = box.row(align = True)
+ row.label("Rows information:")
+ row = box.row(align = True)
+
+ # check if there are rows to display
+ if (interf.row_count == 0):
+ row.label("BCSV table has no rows.")
+ elif (interf.visible_number_of_rows == 0):
+ row.label("No rows displayed.")
+ # ready to start
+ else:
+ # draw the column slider
+ row = box.row(align = True)
+ row.prop(interf, "col_slider", icon_only = True)
+ row.scale_x = 10
+ row.scale_y = 1
+
+ # draw the container for the rows (part for the rows, part for the row scroller)
+ table_row = box.row(align = True)
+ rows_data = table_row.column(align = True)
+ # draw each table column
+ for i in range(interf.visible_number_of_cols):
+ # first UI row
+ if (i == 0):
+ row = rows_data.row(align = True)
+ col = row.column(align = True)
+ col.scale_x = 1 / 2.4
+ col.label("R/C")
+ for j in range(interf.visible_number_of_rows):
+ col.label("R%s" % (interf.row_slider + j))
+ # create the column data
+ col = row.column(align = True)
+ data_path = "long"
+ if (interf.cols_info[i].type == "SHORT"): data_path = "short"
+ elif (interf.cols_info[i].type == "CHAR"): data_path = "char"
+ elif (interf.cols_info[i].type == "FLOAT"): data_path = "float"
+ elif (interf.cols_info[i].type == "STRING"): data_path = "string"
+ elif (interf.cols_info[i].type == "STRING_OFFSET"): data_path = "string_offset"
+ for j in range(interf.visible_number_of_rows):
+ if (j == 0): col.label("C%s: %s" % (interf.col_slider + i, interf.cols_info[i].name_or_hash))
+ col.prop(interf.rows_data[j].cells[i], data_path, text = "")
+
+ # draw the row slider
+ col = table_row.column(align = True)
+ col.label("")
+ col.scale_x = 1 / 18
+ col = table_row.column(align = True)
+ col.prop(interf, "row_slider", icon_only = True)
+ col.scale_x = 1 / 9
+ col.scale_y = interf.visible_number_of_rows + 1
+
+ # draw the column slider
+ row = box.row(align = True)
+ row.prop(interf, "col_slider", icon_only = True)
+ row.scale_x = 10
+ row.scale_y = 1
+
+ # hash generator (just like the one in whitehole >:])
+ box = layout.box()
+ row = box.row()
+ row.label("Hash generator:")
+ row_ops = box.row()
+ row_ops.operator("object.smg_bcsv_table_interface_add_new_known_hash", icon = "NEW")
+ row_ops.operator("object.smg_bcsv_table_interface_copy_str_hash", icon = "COPY_ID")
+ row = box.row()
+ row.prop(interf, "hash_generator_str", text = "Input")
+ try:
+ enc = interf.hash_generator_str.encode("cp932")
+ row.label("Hash: 0x%08X" % (bcsv_funcs.calc_bytes_hash(enc)))
+ except:
+ row.label("Hash: String not CP932 encodable.")
+
+ # add some extra padding to be able to "center" the table
+ for i in range(8):
+ layout.label("")
+
+# new classes to register
+classes = (
+ smg_cols_info_interface,
+ smg_rows_data_cell_interface,
+ smg_rows_data_interface,
+ smg_bcsv_table_interface,
+ smg_bcsv,
+ DATA_OT_smg_bcsv_table_save,
+ DATA_OT_smg_bcsv_table_open,
+ DATA_OT_smg_bcsv_table_close,
+ DATA_OT_smg_bcsv_table_create,
+ DATA_OT_smg_bcsv_table_remove,
+ DATA_OT_smg_bcsv_table_import,
+ DATA_OT_smg_bcsv_table_export,
+ DATA_OT_smg_bcsv_table_insert_row,
+ DATA_OT_smg_bcsv_table_remove_row,
+ DATA_OT_smg_bcsv_table_move_row,
+ DATA_OT_smg_bcsv_table_insert_col,
+ DATA_OT_smg_bcsv_table_remove_col,
+ DATA_OT_smg_bcsv_table_move_col,
+ DATA_OT_smg_bcsv_table_interface_add_new_known_hash,
+ DATA_OT_smg_bcsv_table_interface_copy_str_hash,
+ DATA_PT_smg_bcsv_table_interface,
+)
+
+# function to be executed:
+# - before a Blender undo/redo
+# - before saving the BLEND
+# undo_post also executes undo_pre, so a flag is used as a little trick
+# https://projects.blender.org/blender/blender/issues/60247
+WAS_UNDO_PRE_EXECUTED = False
+@bpy.app.handlers.persistent
+def smg_bcsv_table_undo_redo_save_pre_post(dummy):
+ override = bpy.context.copy()
+ global WAS_UNDO_PRE_EXECUTED
+ # save all the open tables and close them
+ if (WAS_UNDO_PRE_EXECUTED == False):
+ for scene in bpy.data.scenes:
+ for obj in scene.objects:
+ if (obj.smg_bcsv.get_table_from_address() == None): continue
+ override["object"] = obj
+ bpy.ops.object.smg_bcsv_table_save(override)
+ bpy.ops.object.smg_bcsv_table_close(override)
+ WAS_UNDO_PRE_EXECUTED = True
+    # the tables were closed by the "pre" pass; open them again
+ else:
+ for scene in bpy.data.scenes:
+ for obj in scene.objects:
+ stream = io.BytesIO(obj.smg_bcsv.table_raw)
+ if ("all good" not in bcsv_funcs.check_bcsv_file(stream, ">")): continue
+ override["object"] = obj
+ bpy.ops.object.smg_bcsv_table_open(override)
+ WAS_UNDO_PRE_EXECUTED = False
+
+# register and unregister functions
+@bpy.app.handlers.persistent
+def register(dummy):
+ try:
+ for c in classes:
+ bpy.utils.register_class(c)
+ bpy.types.Object.smg_bcsv = bpy.props.PointerProperty(type = smg_bcsv)
+ # ~ bpy.app.handlers.undo_pre.append(smg_bcsv_table_undo_redo_save_pre_post)
+ # ~ bpy.app.handlers.redo_pre.append(smg_bcsv_table_undo_redo_save_pre_post)
+ # ~ bpy.app.handlers.save_pre.append(smg_bcsv_table_undo_redo_save_pre_post)
+ except:
+ return
+
+def unregister():
+ try:
+ for c in classes:
+ bpy.utils.unregister_class(c)
+ del bpy.types.Object.smg_bcsv
+ # ~ bpy.app.handlers.undo_pre.remove(smg_bcsv_table_undo_redo_save_pre_post)
+ # ~ bpy.app.handlers.redo_pre.remove(smg_bcsv_table_undo_redo_save_pre_post)
+ # ~ bpy.app.handlers.save_pre.remove(smg_bcsv_table_undo_redo_save_pre_post)
+ except:
+ return
diff --git a/bcsv_funcs.py b/bcsv_funcs.py
new file mode 100644
index 0000000..26686e0
--- /dev/null
+++ b/bcsv_funcs.py
@@ -0,0 +1,1169 @@
+import struct, math, re, io, copy
+from . import file_ops
+
+# python file to read the important information out of a BCSV file
+# it will try its best to decode the information in either big or little endian
+# https://humming-owl.neocities.org/smg-stuff/pages/tutorials/bcsv
+
+# what this file will do is the following:
+# check_bcsv_file() takes the first look into the BCSV and it checks if the file is correct.
+# On the way it assigns all the variables to bcsv_raw_info (smg_bcsv_raw struct).
+# If the file is correct, then read_bcsv_file() will assign the actually useful variables
+# to a smg_bcsv_table structure and return that structure
+#
+# in case of any error check_bcsv_file() returns a string that can
+# be read by a human to identify what is wrong with the BCSV file
+# if all is good it will return exactly that (as a string)
+
+TYPE_INT_TO_STRING = ["LONG", "STRING", "FLOAT",
+ "LONG_2", "SHORT", "CHAR",
+ "STRING_OFFSET"]
+TYPE_STRING_TO_INT = {"LONG": 0, "STRING": 1, "FLOAT": 2,
+ "LONG_2": 3, "SHORT": 4, "CHAR": 5,
+ "STRING_OFFSET": 6}
+TYPE_INT = [0, 1, 2, 3, 4, 5, 6]
+TYPE_INT_TO_SIZE = [4, 32, 4, 4, 2, 1, 4]
+TYPE_INT_TO_STD_BITMASK = [0xFFFFFFFF, 0x0, 0xFFFFFFFF,
+ 0xFFFFFFFF, 0xFFFF, 0xFF,
+ 0xFFFFFFFF]
+# ^ the STRING type is said to be "deprecated" and
+# I don't know of a BCSV file in SMG that uses it
+TYPE_INT_TO_STRUCT_CH = ["I", "32s", "f", "I", "H", "B", "I"]
+# ^ for python struct.unpack/pack funcs
+TYPE_INT_TO_PYTHON_TYPE = [int, str, float, int, int, int, str]
+# ^ the last one is actually an integer offset, but this
+# list maps the python types used in a smg_bcsv_table
+
+# path to the hash file
+hashes_path = file_ops.get_base_path(__file__, True) + "/bcsv_hashes.txt"
+
+# get the byte size of a data type
+def get_data_type_size(type_int):
+ if (type_int in TYPE_INT):
+ return TYPE_INT_TO_SIZE[type_int]
+ return None
+
+# get the python struct char to read the data type
+def get_struct_read_char(type_int):
+ if (type_int in TYPE_INT):
+ return TYPE_INT_TO_STRUCT_CH[type_int]
+ return None
+
+# calculate the BCSV hash of a byte string
+def calc_bytes_hash(bytes_array):
+ # check
+ if (type(bytes_array) != bytes):
+ return None
+ try:
+ # BCSV strings are CP932 encoded
+ # I am not certain about this but I will assume it for now
+ bytes_array.decode("cp932")
+ except:
+ return None
+
+ # return the hash
+ result = 0
+ string_byte_size = len(bytes_array) - 1
+ if (bytes_array[-1] == 0):
+ string_byte_size -= 1
+ for i in range(string_byte_size + 1):
+ # ~ print(string_byte_size - i)
+ result = struct.unpack(">b", bytes_array[i : i + 1])[0] + (31 * result)
+ return 0xFFFFFFFF & result
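+
+# a minimal standalone sketch of the same hash, for reference: it is the
+# Java-style "31 * h + c" rolling hash over the signed CP932 bytes,
+# truncated to 32 bits ("name" is just an illustrative input string)
+def _example_bcsv_hash(string):
+    h = 0
+    for b in string.encode("cp932"):
+        h = (31 * h + (b - 0x100 if (b > 0x7F) else b)) & 0xFFFFFFFF
+    return h
+# _example_bcsv_hash("name") == calc_bytes_hash(b"name") == 0x00337A8B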
+
+# add the new hash to the bcsv_hashes.txt file
+def add_new_known_hash(bytes_array):
+ # check if it is a CP932 encoded string
+ str_hash = calc_bytes_hash(bytes_array)
+ if (str_hash == None):
+ return "Not a CP932 decodable string."
+
+ # get all the previous hashes
+ try:
+ f = open(hashes_path, "r+", encoding = "cp932")
+ except:
+ return "\"bcsv_hashes.txt\" is not a CP932 encoded file."
+ string_list = []
+ first_non_comment_line_pos = 0
+ for line in f.readlines():
+ # avoid comments
+ if (line.startswith("#") == False):
+ # append the string
+ string_list.append(re.search("^.* ", line).group()[:-1])
+ else:
+ first_non_comment_line_pos += len(line.encode("cp932"))
+
+ # append the new string and sort string_list
+ # only if the string is not already present in the list
+ if ((bytes_array.decode("cp932") in string_list) == False):
+ string_list.append(bytes_array.decode("cp932"))
+ string_list.sort()
+
+ # generate all the hashes again
+ hash_list = []
+ for string in string_list:
+ hash_list.append("0x%08X" % (calc_bytes_hash(string.encode("cp932"))))
+
+ # from the first non comment line, delete everything
+ # and start filling the hash list again -> string space hash
+ f.truncate(first_non_comment_line_pos)
+ f.seek(first_non_comment_line_pos)
+ for i in range(len(string_list)):
+ f.write(string_list[i])
+ f.write(" ")
+ f.write(hash_list[i])
+ f.write("\n")
+
+ # done!
+ f.close()
+ return True
+
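+# a minimal usage sketch; "pos_x" is a hypothetical column name: this appends
+# a "pos_x 0x########" line to bcsv_hashes.txt, kept in sorted order
+def _example_add_new_hash():
+    add_new_known_hash("pos_x".encode("cp932"))
+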
+# return the known string for a hash value (or the hash itself as a hex string)
+# I hope there are no name collisions!
+def get_hash_string(hash_value):
+ rtn = "0x%08X" % (hash_value)
+ # ~ print(hashes_path)
+ f = open(hashes_path, "r", encoding = "cp932")
+    for line in f.readlines():
+        # skip comments and malformed/blank lines
+        if (line.startswith("#") or (len(line.split()) == 0)): continue
+        name = line.split()[0]
+        if (calc_bytes_hash(name.encode("cp932")) == hash_value):
+            rtn = name
+            break
+ f.close()
+ return rtn
+
+# return the type as a string
+def get_type_string(type_int):
+ if (type_int in TYPE_INT):
+ return TYPE_INT_TO_STRING[type_int]
+ return None
+
+# return the type as a python type
+def get_type_python_type(type_int):
+ if (type_int in TYPE_INT):
+ return TYPE_INT_TO_PYTHON_TYPE[type_int]
+ return None
+
+# all the raw variables on a BCSV file
+class smg_bcsv_raw:
+ def __init__(self):
+ self.endian = None
+ self.header = self.header()
+ self.columns_info = []
+ self.data_pool = bytes()
+ self.string_pool = bytes()
+ def __str__(self):
+ rtn = "### SMG_BCSV_RAW - START\n"
+ rtn += "Endian: %s\n" % (self.endian)
+ rtn += self.header.__str__()
+ rtn += "Column data info: hash, bitmask, offset, right-shift, type\n"
+ for i in range(len(self.columns_info)):
+ rtn += " Col[%s]: " % (i.__str__())
+ rtn += self.columns_info[i].__str__()
+ rtn += "Data pool (size = %s):\n " % (len(self.data_pool).__str__())
+ for i in range(len(self.data_pool)):
+ if (i % 16 == 0 and i != 0):
+ rtn += "\n "
+ rtn += " %02X" % self.data_pool[i]
+ rtn += "\nString pool (size = %s):\n " % (len(self.string_pool).__str__())
+ for i in range(len(self.string_pool)):
+ if (i % 16 == 0 and i != 0):
+ rtn += "\n "
+ rtn += " %02X" % self.string_pool[i]
+ rtn += "\n### SMG_BCSV_RAW - END\n"
+ return rtn
+
+ # header
+ class header:
+ def __init__(self):
+ self.row_count = None
+ self.col_count = None
+ self.data_pool_offset = None
+ self.row_data_length = None
+ def __str__(self):
+ rtn = "Row count: %s\n" % (self.row_count.__str__())
+ rtn += "Column count: %s\n" % (self.col_count.__str__())
+ rtn += "Data pool offset (hex): %s\n" % (self.data_pool_offset.__str__())
+ rtn += "Row data length (bytes): %s\n" % (self.row_data_length.__str__())
+ return rtn
+
+ # cell data info
+ class col_cells_data_info:
+ def __init__(self):
+ self.name_hash = None
+ self.data_bitmask = None
+ self.data_offset = None
+ self.data_rshift = None
+ self.data_type = None
+ def __str__(self):
+ rtn = "0x%08X, 0x%08X, %s, %s, %s " % (self.name_hash,
+ self.data_bitmask,
+ self.data_offset.__str__(),
+ self.data_rshift.__str__(),
+ self.data_type.__str__())
+ # visible type string
+ rtn += "(%s)\n" % (get_type_string(self.data_type))
+ return rtn
+
+# structure with the data from the BCSV that actually matters
+class smg_bcsv_table:
+ def __init__(self):
+ self.row_count = None
+ self.col_count = None
+ self.cols_info = [] # see cols_info
+ self.rows_data = [] # all the other data
+ def __str__(self):
+ rtn = "### SMG_BCSV_TABLE - START\n"
+ rtn += "Row count: %s\n" % (self.row_count)
+ rtn += "Column count: %s\n" % (self.col_count)
+ rtn += "Columns info: hash or name, bitmask, right-shift, type\n"
+ for i in range(len(self.cols_info)):
+ rtn += " Col[%d]: " % (i)
+ rtn += "%s" % (self.cols_info[i])
+ rtn += "Row data:\n"
+ for i in range(len(self.rows_data)):
+ rtn += " Row[%s]:" % (i.__str__())
+ for data in self.rows_data[i]:
+ rtn += " %s," % (data.__str__())
+ rtn = rtn[: -1] + "\n"
+ rtn += "### SMG_BCSV_TABLE - END\n"
+ return rtn
+
+ # column info struct
+ class cols_info:
+ # keep the same column info
+ def __init__(self):
+            self.name_or_hash = None # if the hash is known, its name; if not, the hash as a hex string
+ self.bitmask = None
+ self.rshift = None
+            self.type = None # the type as a string (not the integer id)
+ def __str__(self):
+ rtn = "%s, 0x%08X, %d, %s\n" % (self.name_or_hash,
+ self.bitmask,
+ self.rshift,
+ self.type)
+ return rtn
+
+
+
+# global variables to hold temporary information
+bcsv_raw_info = None
+bcsv_raw_error_str = "bcsv-raw-error: "
+bcsv_table_error_str = "bcsv-table-error: "
+f = None
+
+# main function, it will read and check while reading
+# as BCSVs have no magic number, it has to check if the file is well formatted
+# in big endian and, if that fails, try checking it in
+# little endian; if both checks fail the file is bad (or I have a reading skill issue)
+def read_bcsv_file(filepath_or_stream, endian):
+ # check params
+ if (((type(filepath_or_stream) != io.BytesIO) and (type(filepath_or_stream) != str))
+ or (endian not in ["BIG", "LITTLE", "AUTO"])):
+ result = bcsv_raw_error_str + "function parameters"
+ print(result)
+ return result
+
+ # make global variables editable
+ global f
+ global bcsv_raw_info
+
+ # "pre read" the file
+ result_str = ""
+ if (endian == "BIG"):
+ result_str = check_bcsv_file(filepath_or_stream, ">")
+ print("big endian: %s" % (result_str))
+ elif (endian == "LITTLE"):
+ result_str = check_bcsv_file(filepath_or_stream, "<")
+ print("little endian: %s" % (result_str))
+ elif (endian == "AUTO"):
+ result_str = check_bcsv_file(filepath_or_stream, ">")
+ print("big endian: %s" % (result_str))
+        if (result_str != bcsv_raw_error_str + "all good"):
+ result_str = check_bcsv_file(filepath_or_stream, "<")
+ print("little endian: %s" % (result_str))
+
+    # failure trying to identify the BCSV table
+    # return the human readable error string so callers can display it
+    if ("all good" not in result_str):
+        return result_str
+
+ # get the BCSV useful data out of that prison
+ # ~ print(bcsv_raw_info)
+ bcsv_table_info = smg_bcsv_table()
+
+ # row and col count
+ bcsv_table_info.row_count = bcsv_raw_info.header.row_count
+ bcsv_table_info.col_count = bcsv_raw_info.header.col_count
+ # get the hash names/hex string (if known)
+ # and the column properties
+ for i in range(bcsv_table_info.col_count):
+ string = get_hash_string(bcsv_raw_info.columns_info[i].name_hash)
+ bcsv_table_info.cols_info.append(smg_bcsv_table.cols_info())
+ bcsv_table_info.cols_info[-1].name_or_hash = string
+ bcsv_table_info.cols_info[-1].bitmask = bcsv_raw_info.columns_info[i].data_bitmask
+ bcsv_table_info.cols_info[-1].rshift = bcsv_raw_info.columns_info[i].data_rshift
+ bcsv_table_info.cols_info[-1].type = get_type_string(bcsv_raw_info.columns_info[i].data_type)
+
+ # assign the row slots
+ for i in range(bcsv_table_info.row_count):
+ bcsv_table_info.rows_data.append([])
+
+ # get all the cell items
+ # iterate over the columns then the rows
+ # each column at a time
+ endian_ch = ">" if (bcsv_raw_info.endian == "BIG") else "<"
+ for i in range(bcsv_table_info.col_count):
+ # get the type, offset, endian
+ base_offset = bcsv_raw_info.columns_info[i].data_offset
+ data_type = bcsv_table_info.cols_info[i].type
+
+ for j in range(bcsv_table_info.row_count):
+ value_offset = base_offset + (j * bcsv_raw_info.header.row_data_length)
+ # grab the specific datatype
+ value = None
+            # crazy as it sounds, the bitmask and right shift are applied even to floats
+ # treat integer variables as signed, it is actually a bit more readable
+ if (data_type == "LONG" or data_type == "LONG_2" or data_type == "STRING_OFFSET" or data_type == "FLOAT"):
+ value = struct.unpack(endian_ch + "I", bcsv_raw_info.data_pool[value_offset : value_offset + 4])[0]
+ value = (value & bcsv_table_info.cols_info[i].bitmask) >> bcsv_table_info.cols_info[i].rshift
+ if (data_type == "LONG" or data_type == "LONG_2"):
+ value = struct.unpack(">i", struct.pack(">I", value))[0]
+ elif (data_type == "STRING"):
+ value = bcsv_raw_info.data_pool[value_offset : value_offset + 32].decode("cp932").replace("\0", "")
+ elif (data_type == "SHORT"):
+ value = struct.unpack(endian_ch + "H", bcsv_raw_info.data_pool[value_offset : value_offset + 2])[0]
+ value = (value & bcsv_table_info.cols_info[i].bitmask) >> bcsv_table_info.cols_info[i].rshift
+ value = struct.unpack(">h", struct.pack(">H", value))[0]
+ elif (data_type == "CHAR"):
+ value = struct.unpack(endian_ch + "B", bcsv_raw_info.data_pool[value_offset : value_offset + 1])[0]
+ value = (value & bcsv_table_info.cols_info[i].bitmask) >> bcsv_table_info.cols_info[i].rshift
+ value = struct.unpack(">b", struct.pack(">B", value))[0]
+
+ # check if the data type was a string offset or a float
+ if (data_type == "FLOAT"):
+ value = struct.unpack(">f", struct.pack(">I", value))[0]
+ elif (data_type == "STRING_OFFSET"):
+ string_offset = value
+ string_length = 0
+ while (bcsv_raw_info.string_pool[string_offset + string_length] != 0):
+ string_length += 1
+ value = bcsv_raw_info.string_pool[string_offset : string_offset + string_length].decode("cp932")
+
+ # assign the value
+ bcsv_table_info.rows_data[j].append(value)
+
+ f.close()
+ # ~ print(bcsv_table_info)
+ return bcsv_table_info
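+
+# a minimal usage sketch of the reader; "/tmp/objinfo.bcsv" is a
+# hypothetical path, not a file shipped with blenxy
+def _example_read_bcsv():
+    result = read_bcsv_file("/tmp/objinfo.bcsv", "AUTO")
+    if (type(result) == smg_bcsv_table):
+        print(result) # dump the whole table
+    else:
+        print("read failed: %s" % (result))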
+
+# function to check a BCSV file before getting its full information out
+def check_bcsv_file(filepath_or_stream, endian_ch):
+ # check its size first
+ file_size = file_ops.get_file_size(filepath_or_stream)
+ if (file_size <= 16):
+ return bcsv_raw_error_str + "file size - header"
+
+ # make global variables editable
+ global f
+ global bcsv_raw_info
+
+ # open the file if it is a filepath
+ if (type(filepath_or_stream) == str):
+ f = open(filepath_or_stream, "rb")
+ else:
+ f = filepath_or_stream
+ f.seek(0)
+
+ # holder for variables
+    bcsv_raw_info = smg_bcsv_raw()
+
+ # header
+
+ # row count, col count, row data offset, row data length
+ bcsv_raw_info.header.row_count = struct.unpack(endian_ch + "I", f.read(4))[0]
+ bcsv_raw_info.header.col_count = struct.unpack(endian_ch + "I", f.read(4))[0]
+ bcsv_raw_info.header.data_pool_offset = struct.unpack(endian_ch + "I", f.read(4))[0]
+ bcsv_raw_info.header.row_data_length = struct.unpack(endian_ch + "I", f.read(4))[0]
+ # row_count can be 0
+ # StageData/AsteroidBlockZone.arc/stage/jmp/childobj/common/childobjinfo
+ if (bcsv_raw_info.header.col_count == 0):
+ return bcsv_raw_error_str + "col count"
+    # the data pool offset will be read and used by the game; I don't know if offsets
+    # that are not 4 byte aligned work. I know the game crashes when reading a float
+    # that is not 4 byte aligned, so it is better to keep the 4 byte alignment
+ if (bcsv_raw_info.header.row_count != 0):
+ if ((bcsv_raw_info.header.data_pool_offset >= file_size)
+ or (bcsv_raw_info.header.data_pool_offset % 4 != 0)):
+ return bcsv_raw_error_str + "row data offset"
+ else: # BCSVs with 0 rows
+ if (bcsv_raw_info.header.data_pool_offset % 4 != 0):
+ return bcsv_raw_error_str + "row data offset"
+
+    # "several data cells can reference data on the same byte field"
+    # so I can't expect row_data_length to be directly related to col_count
+ if (bcsv_raw_info.header.row_data_length == 0):
+ return bcsv_raw_error_str + "row data length"
+
+ # check file size again
+
+ # considering the column data info
+ if (bcsv_raw_info.header.row_count != 0):
+ if (file_size <= (16 + bcsv_raw_info.header.col_count * 12)):
+ return bcsv_raw_error_str + "file size - row/col count"
+ else:
+ if (file_size < (16 + bcsv_raw_info.header.col_count * 12)):
+ return bcsv_raw_error_str + "file size - row/col count"
+ # considering the data pool offset + data row length
+ if (bcsv_raw_info.header.row_count != 0):
+ if (file_size
+ < (bcsv_raw_info.header.data_pool_offset
+ + (bcsv_raw_info.header.row_count * bcsv_raw_info.header.row_data_length))):
+ return bcsv_raw_error_str + "file size - data pool offset/row count/row data length"
+
+ # column data info
+
+ # read each column data info
+ max_data_pool_size = 0
+ for i in range(bcsv_raw_info.header.col_count):
+ bcsv_raw_info.columns_info.append(bcsv_raw_info.col_cells_data_info())
+ # check offset and data type
+ bcsv_raw_info.columns_info[-1].name_hash = struct.unpack(endian_ch + "I", f.read(4))[0]
+ bcsv_raw_info.columns_info[-1].data_bitmask = struct.unpack(endian_ch + "I", f.read(4))[0]
+ bcsv_raw_info.columns_info[-1].data_offset = struct.unpack(endian_ch + "H", f.read(2))[0]
+ bcsv_raw_info.columns_info[-1].data_rshift = struct.unpack(endian_ch + "B", f.read(1))[0]
+ bcsv_raw_info.columns_info[-1].data_type = struct.unpack(endian_ch + "B", f.read(1))[0]
+
+ # check data type
+ if (bcsv_raw_info.columns_info[-1].data_type > 6):
+ return bcsv_raw_error_str + "data type"
+ # check offset (BCSVs without rows are valid)
+ if ((bcsv_raw_info.header.row_count != 0)
+ and
+ (file_size < (bcsv_raw_info.header.data_pool_offset
+ + bcsv_raw_info.columns_info[-1].data_offset
+ + ((bcsv_raw_info.header.row_count - 1) * bcsv_raw_info.header.row_data_length)
+ + get_data_type_size(bcsv_raw_info.columns_info[-1].data_type)))):
+ return bcsv_raw_error_str + "data cell offset"
+ # if it is a float type, check if the float offset is a multiple of 4
+ # I think this is the only alignment restriction
+ if ((bcsv_raw_info.columns_info[-1].data_type == 2)
+ and (bcsv_raw_info.columns_info[-1].data_offset % 4 != 0)):
+ return bcsv_raw_error_str + "float value offset"
+ # get the updated max data pool size
+ tmp = (bcsv_raw_info.columns_info[-1].data_offset
+ + ((bcsv_raw_info.header.row_count - 1) * bcsv_raw_info.header.row_data_length)
+ + get_data_type_size(bcsv_raw_info.columns_info[-1].data_type))
+ if (tmp > max_data_pool_size):
+ max_data_pool_size = tmp
+
+    # interesting, max_data_pool_size does not necessarily match row_count * row_data_length
+    # but the latter is the actual data pool length
+    # StageData/AsteroidBlockZone.arc/stage/jmp/childobj/layerb/childobjinfo
+    max_data_pool_size = bcsv_raw_info.header.row_count * bcsv_raw_info.header.row_data_length
+    # there are a lot of 4 byte alignments in these files, like, a lot (for floats)
+
+ # check if the data pool overflows
+ if (file_size < bcsv_raw_info.header.data_pool_offset + max_data_pool_size):
+ return bcsv_raw_error_str + "data pool size"
+
+    # go through the string offset values to check for overflow
+ max_string_pool_size = 0
+ for cols_info in bcsv_raw_info.columns_info:
+ for i in range(bcsv_raw_info.header.row_count): # iterate through each row
+ if (cols_info.data_type == 6): # string offset
+ # get the offset value from the data pool
+ f.seek(bcsv_raw_info.header.data_pool_offset
+ + cols_info.data_offset
+ + (i * bcsv_raw_info.header.row_data_length))
+ string_offset = struct.unpack(endian_ch + "I", f.read(4))[0]
+ # get the string size
+ f.seek(bcsv_raw_info.header.data_pool_offset + max_data_pool_size + string_offset)
+                string_size = 1 # count the null character beforehand
+                tmp_byte = f.read(1) # pre-read the first character
+                while (tmp_byte != b"\x00"): # strings must be 0x00 terminated
+ if (tmp_byte == b""): # end of file reached
+ return bcsv_raw_error_str + "string offset"
+ string_size += 1
+ tmp_byte = f.read(1)
+ # update the max string pool size
+ tmp = string_offset + string_size
+ if (tmp > max_string_pool_size):
+ max_string_pool_size = tmp
+
+    # whether or not there is a data pool, there can
+    # be a floating and unreferenced string pool
+    # or just unreferenced strings attached to the string pool
+ f.seek(bcsv_raw_info.header.data_pool_offset
+ + max_data_pool_size
+ + max_string_pool_size)
+ tmp = f.read(1)
+ while (tmp != b""):
+ max_string_pool_size += 1
+ tmp = f.read(1)
+
+ # get the data pool
+ f.seek(bcsv_raw_info.header.data_pool_offset)
+ bcsv_raw_info.data_pool = f.read(max_data_pool_size)
+ # get the string pool
+ bcsv_raw_info.string_pool = f.read(max_string_pool_size)
+
+ # check the data pool and the string pool for the string types
+ # ensure they are CP932 decodable
+ for i in range(bcsv_raw_info.header.col_count):
+ for j in range(bcsv_raw_info.header.row_count):
+ # STRING or STRING_OFFSET types
+ string_offset = (bcsv_raw_info.columns_info[i].data_offset
+ + (j * bcsv_raw_info.header.row_data_length))
+ string = b""
+ if (bcsv_raw_info.columns_info[i].data_type == 1):
+ string = bcsv_raw_info.data_pool[string_offset : string_offset + 32]
+ if (string[-1] != 0): # null terminator (it is for safety)
+ return bcsv_raw_error_str + "string type not null terminated"
+ elif (bcsv_raw_info.columns_info[i].data_type == 6):
+ string_offset = struct.unpack(endian_ch + "I", bcsv_raw_info.data_pool[string_offset
+ : string_offset + 4])[0]
+ k = 0
+ while (bcsv_raw_info.string_pool[string_offset + k] != 0):
+ string += bcsv_raw_info.string_pool[string_offset + k
+ : string_offset + k + 1]
+ k += 1
+ # try decoding the string
+ try:
+ string.decode("cp932")
+ except:
+ return bcsv_raw_error_str + "string encoding"
+
+    # if everything is good so far then the endian choice was probably right!
+ bcsv_raw_info.endian = "BIG" if (endian_ch == ">") else "LITTLE"
+ return bcsv_raw_error_str + "all good"
+
+# check if a smg_bcsv_table structure is good
+def check_smg_bcsv_table(table):
+
+ # check if the information in the smg_bcsv_table struct is valid
+
+ # the only stuff I can check is:
+ # row/column count
+ # data types must be the known data types
+ # all the cells in a BCSV table must exist (not sure if this is the case always)
+ # type checking each of the columns against the type specified
+ # strings must be CP932 encoded (unsure but will do this anyways)
+
+ # enforce structure types
+ if (type(table) != smg_bcsv_table):
+ return bcsv_table_error_str + "smg_bcsv_table struct"
+
+ # row/col count
+ if (type(table.row_count) != int
+ or type(table.col_count) != int
+ # or table.row_count <= 0 # row_count can be 0
+ or table.col_count <= 0):
+ return bcsv_table_error_str + "row/col count"
+
+ # check cols_info
+ if (table.col_count != len(table.cols_info)):
+ return bcsv_table_error_str + "cols_info size"
+ for cols_info in table.cols_info:
+ # check cols_info struct
+ if (type(cols_info) != smg_bcsv_table.cols_info):
+ return bcsv_table_error_str + "cols_info struct"
+ # name or hash
+ if (type(cols_info.name_or_hash) != str):
+ return bcsv_table_error_str + "column name or hash"
+ try:
+ cols_info.name_or_hash.encode("cp932")
+ if (cols_info.name_or_hash.startswith("0x") or cols_info.name_or_hash.startswith("0X")):
+ number = int(cols_info.name_or_hash, 16)
+ if (number > 0xFFFFFFFF):
+ return bcsv_table_error_str + "column name or hash"
+ except:
+ return bcsv_table_error_str + "column name or hash"
+ # bitmask
+ if (type(cols_info.bitmask) != int or cols_info.bitmask < 0):
+ return bcsv_table_error_str + "column bitmask"
+ # right shift
+ if (type(cols_info.rshift) != int or cols_info.rshift < 0):
+ return bcsv_table_error_str + "column right shift"
+ # type
+ if (type(cols_info.type) != str or (cols_info.type in TYPE_INT_TO_STRING) == False):
+ return bcsv_table_error_str + "column data type"
+
+ # check rows_data and enforce the types
+ if (type(table.rows_data) != list or len(table.rows_data) != table.row_count):
+ return bcsv_table_error_str + "rows_data list (row)"
+ for row in table.rows_data:
+ if (type(row) != list or len(row) != table.col_count):
+ return bcsv_table_error_str + "rows_data list (column)"
+ # actually check the data now
+ for i in range(table.col_count):
+ type_to_compare = TYPE_INT_TO_PYTHON_TYPE[TYPE_STRING_TO_INT[table.cols_info[i].type]]
+ for j in range(table.row_count):
+ # check the type
+ if (type(table.rows_data[j][i]) != type_to_compare):
+ return bcsv_table_error_str + "incorrect cell datatype"
+ # check string encoding
+ string_size = 0
+ if (type_to_compare == str):
+ try:
+ string_size = len(table.rows_data[j][i].encode("cp932"))
+ except:
+ return bcsv_table_error_str + "string with incorrect encoding"
+ # if it is the STRING type, check if its encoded representation can fit in 32 bytes
+ # include the null terminator, although you could have out of bounds strings if you want (I think)
+ if (TYPE_STRING_TO_INT[table.cols_info[i].type] == 1 and string_size >= 32):
+ return bcsv_table_error_str + "STRING type overflow"
+
+ # all is good (hopefully)
+ return bcsv_table_error_str + "all good"
+
+# create smg_bcsv_raw from smg_bcsv_table
+# will only attempt to "compress data" into byte fields on "non-standard" bitmask/rshift values
+# this "compression" will only be done on consecutive data cells
+def create_smg_bcsv_raw(table, endian_ch, use_std_pad_size):
+
+ # calls check_smg_bcsv_table()
+ result = check_smg_bcsv_table(table)
+ print(result)
+ if (result != bcsv_table_error_str + "all good"):
+ return None
+
+ # build a new raw structure and return it
+ raw = smg_bcsv_raw()
+ raw.endian = "BIG"
+ if (endian_ch == "<"):
+ raw.endian = "LITTLE"
+
+ # assign the easy variables
+ raw.header.row_count = table.row_count
+ raw.header.col_count = table.col_count
+ raw.header.data_pool_offset = 16 + table.col_count * 12
+ # ^ lame calculation, this offset can be
+ # different and the game will read and use it >:]
+
+ # calculate row_data_length while filling the column data
+ raw.header.row_data_length = 0
+ i = 0
+ # iterate over the column data
+ # do not enforce "field order" for now (because I think it is unnecessary)
+ # the only enforcement I will add for now is that floats need to be 4 byte aligned
+    # hopefully the other integer types don't need alignment (to be verified experimentally)
+ accumulated_bitmasks = 0
+ while (i < table.col_count):
+ # generate new column info
+ raw.columns_info.append(smg_bcsv_raw.col_cells_data_info())
+ # name hash
+ if (table.cols_info[i].name_or_hash.startswith("0x")):
+ raw.columns_info[-1].name_hash = int(table.cols_info[i].name_or_hash, 16)
+ else:
+ raw.columns_info[-1].name_hash = calc_bytes_hash(table.cols_info[i].name_or_hash.encode("cp932"))
+ # bitmask, data offset, rshift and data type
+ raw.columns_info[-1].data_bitmask = table.cols_info[i].bitmask
+ raw.columns_info[-1].data_offset = 0 # to update in the following if-else
+ raw.columns_info[-1].data_rshift = table.cols_info[i].rshift
+ raw.columns_info[-1].data_type = TYPE_STRING_TO_INT[table.cols_info[i].type]
+
+ # can be compressed?
+        # if it uses a weird bitmask, the shift value is surely read and used by the game
+ # all types will be considered except for the "STRING" type and the "FLOAT" type
+ if ((raw.columns_info[-1].data_bitmask != TYPE_INT_TO_STD_BITMASK[raw.columns_info[-1].data_type])
+ and ((accumulated_bitmasks & raw.columns_info[-1].data_bitmask) == 0)
+ and (i != 0)
+ and (raw.columns_info[-1].data_type != 1)
+ and (raw.columns_info[-1].data_type != 2)):
+ # update the accumulated_bitmasks
+ accumulated_bitmasks |= raw.columns_info[-1].data_bitmask
+ # grab the previous column data_offset
+ raw.columns_info[-1].data_offset = raw.columns_info[-2].data_offset
+ # do not update raw.header.row_data_length
+ # pack the data normally
+ else:
+ # reset the accumulated bitmask to this exact column bitmask
+ if (raw.columns_info[-1].data_type == 2): # adjust offset for float
+ while (raw.header.row_data_length % 4 != 0):
+ raw.header.row_data_length += 1
+ accumulated_bitmasks = raw.columns_info[-1].data_bitmask
+ raw.columns_info[-1].data_offset = raw.header.row_data_length
+ raw.header.row_data_length += TYPE_INT_TO_SIZE[raw.columns_info[-1].data_type]
+
+ # increase i for the next loop
+ i += 1
+
+    # populate the data pool; its actual length is row_count * row_data_length
+    # (see the note in check_bcsv_file), so allocate a zeroed bytearray of that size
+    raw.data_pool = bytearray(raw.header.row_count * raw.header.row_data_length)
+
+ # with the offsets defined, store the data
+ string_pool_strings_pos = {}
+ string_pool_offset_pos = 0
+ for i in range(table.row_count):
+ for j in range(table.col_count):
+ tmp = None
+ # only for integers
+ type_ch = None
+ # LONG or LONG_2
+ if (raw.columns_info[j].data_type == 0 or raw.columns_info[j].data_type == 3): type_ch = "I"
+ # SHORT
+ elif (raw.columns_info[j].data_type == 4): type_ch = "H"
+ # CHAR
+ elif (raw.columns_info[j].data_type == 5): type_ch = "B"
+
+ # LONG, LONG_2, SHORT or CHAR
+ if (type_ch == "I" or type_ch == "H" or type_ch == "B"):
+ # ~ print((table.rows_data[i][j] << raw.columns_info[j].data_rshift) & raw.columns_info[j].data_bitmask)
+ tmp = struct.pack(endian_ch + type_ch,
+ (table.rows_data[i][j] << raw.columns_info[j].data_rshift) & raw.columns_info[j].data_bitmask)
+ # STRING
+ elif (raw.columns_info[j].data_type == 1):
+ tmp = table.rows_data[i][j].encode("cp932")
+ # FLOAT
+ elif (raw.columns_info[j].data_type == 2):
+ tmp = struct.pack(endian_ch + "f", table.rows_data[i][j])
+ # STRING_OFFSET
+ elif (raw.columns_info[j].data_type == 6):
+ # search if the string is already in the string pool
+ if (table.rows_data[i][j] in string_pool_strings_pos):
+ tmp = struct.pack(endian_ch + "I", string_pool_strings_pos[table.rows_data[i][j]])
+ else:
+ encoded_string = table.rows_data[i][j].encode("cp932") + b"\x00"
+ tmp = struct.pack(endian_ch + "I", string_pool_offset_pos)
+ raw.string_pool += encoded_string
+ string_pool_strings_pos.update({table.rows_data[i][j] : string_pool_offset_pos})
+ string_pool_offset_pos += len(encoded_string)
+
+ # write the data
+ for k in range(len(tmp)):
+ raw.data_pool[raw.columns_info[j].data_offset + (i * raw.header.row_data_length) + k] |= tmp[k]
+
+    # freeze the data pool back into an immutable bytes object
+ raw.data_pool = bytes(raw.data_pool)
+ # append the last padding
+ pad_size = 4
+ if (use_std_pad_size):
+ pad_size = 32
+ tmp_file_size = 16 + (raw.header.col_count * 12) + len(raw.data_pool) + len(raw.string_pool)
+ while ((tmp_file_size % pad_size) != 0):
+ raw.string_pool += b"@"
+ tmp_file_size += 1
+
+ # done!
+ print(raw)
+ return raw
+
+# write a smg_bcsv_raw struct to a file, or return it as a bytes object if filepath == None
+def write_smg_bcsv_raw(raw, filepath):
+ # create the bytes object
+ data = bytes()
+ # get endian_ch
+ endian_ch = ">"
+ if (raw.endian == "LITTLE"):
+ endian_ch = "<"
+ # header
+ data += struct.pack(endian_ch + "I", raw.header.row_count)
+ data += struct.pack(endian_ch + "I", raw.header.col_count)
+ data += struct.pack(endian_ch + "I", raw.header.data_pool_offset)
+ data += struct.pack(endian_ch + "I", raw.header.row_data_length)
+ # column info
+ for i in range(raw.header.col_count):
+ data += struct.pack(endian_ch + "I", raw.columns_info[i].name_hash)
+ data += struct.pack(endian_ch + "I", raw.columns_info[i].data_bitmask)
+ data += struct.pack(endian_ch + "H", raw.columns_info[i].data_offset)
+ data += struct.pack(endian_ch + "B", raw.columns_info[i].data_rshift)
+ data += struct.pack(endian_ch + "B", raw.columns_info[i].data_type)
+ # data pool
+ data += raw.data_pool
+ # string pool
+ data += raw.string_pool
+ # done!
+ if (filepath != None):
+ f = open(file_ops.get_path_str(filepath), "wb")
+ f.write(data)
+ f.close()
+ else:
+ return data
+
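+# a minimal round-trip sketch: build a 1x1 LONG table by hand and
+# serialize it to big endian bytes in memory (no real file involved)
+def _example_serialize_table():
+    table = smg_bcsv_table()
+    table.row_count = 1
+    table.col_count = 1
+    table.cols_info.append(smg_bcsv_table.cols_info())
+    table.cols_info[-1].name_or_hash = "name"
+    table.cols_info[-1].bitmask = 0xFFFFFFFF
+    table.cols_info[-1].rshift = 0
+    table.cols_info[-1].type = "LONG"
+    table.rows_data.append([42])
+    raw = create_smg_bcsv_raw(table, ">", True)
+    return write_smg_bcsv_raw(raw, None) # a 32 byte BCSV as a bytes object
+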
+# valid table operations
+# single operations:
+# insert/move/remove a row/col at a certain index
+# change a cell value rows_data/cols_info
+# change a cols_info[index].type value (can change all values of the respective column)
+#
+# what a command needs so that it can be executed
+# operation / type of element operated / list of values needed for the operation
+#
+# "INSERT" / "ROW" / [10, [row to insert values]]
+# insert a row at index 10
+#
+# "INSERT" / "COLUMN" / [7, [col_info to insert values], [column to insert values]]
+# insert a column at index 7
+#
+# "MOVE" / "COLUMN" / [9, 3]
+# move a column from index 9 to index 3
+#
+# "REMOVE" / "ROW" / [0, [row to remove values]]
+# remove the row at index 0
+#
+# "REMOVE" / "COL" / [7, [col_info to remove values], [column to remove values]]
+# remove the column at index 7
+#
+# "EDIT" / "CELL" / ["cols_info", 3, "bitmask", "FFFF", "ABAB"]
+# edit the cell cols_info[3].bitmask value from "FFFF" to "ABAB"
+#
+# "EDIT" / "CELL" / ["rows_data", 3, 4, "LMAO", "OAML"]
+# edit the cell rows_data[3][4] value from "LMAO" to "OAML"
+#
+# "EDIT" / "CELL" / ["cols_info", 0, "type", "LONG", "STRING", [old column values], [new column values]]
+# edit the cell cols_info[0].type value from "LONG" to "STRING"
+
+COMMAND_LIST = ["INSERT", "MOVE", "REMOVE", "EDIT"]
+ELEMENT_TO_OP = ["ROW", "COLUMN", "CELL"]
+
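+# a minimal usage sketch of the command format above, assuming "table" is a
+# valid single column LONG smg_bcsv_table: insert the row [42] at index 0
+def _example_insert_row_cmd(table):
+    exec_table_cmd(table, "INSERT", "ROW", [0, [42]])
+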
+# determines if a type is correct for a specific value
+def cell_data_is_type(type_string, value):
+ # check params
+ if (type_string not in TYPE_INT_TO_STRING):
+ print("value check: type is not valid")
+ return False
+
+ # check value
+ if (type_string in ["LONG", "LONG_2"]): # LONG, LONG_2
+        if (type(value) != int or value < -0x80000000 or value > 0x7FFFFFFF):
+ print("value check: value is not a LONG/LONG_2 type")
+ return False
+ elif (type_string == "SHORT"): # SHORT
+        if (type(value) != int or value < -0x8000 or value > 0x7FFF):
+ print("value check: value is not a SHORT type")
+ return False
+ elif (type_string == "CHAR"): # CHAR
+        if (type(value) != int or value < -0x80 or value > 0x7F):
+ print("value check: value is not a CHAR type")
+ return False
+ elif (type_string == "FLOAT"): # FLOAT
+ if (type(value) != float):
+ print("value check: value is not a FLOAT type")
+ return False
+ elif (type_string == "STRING"): # STRING
+ if (type(value) != str):
+ print("value check: value is not a STRING type")
+ return False
+ try:
+ enc = value.encode("cp932")
+ if (len(enc) >= 32):
+ print("value check: STRING type encoded representation larger than 32 bytes")
+ return False
+ except:
+ print("value check: STRING type cannot be encoded into CP932")
+ return False
+ elif (type_string == "STRING_OFFSET"): # STRING_OFFSET
+ if (type(value) != str):
+ print("value check: value is not a STRING_OFFSET type")
+ return False
+ try:
+ enc = value.encode("cp932")
+ except:
+ print("value check: STRING_OFFSET type cannot be encoded into CP932")
+ return False
+
+ # all good
+ return True
+
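+# a quick sanity sketch of the checker above: 200 fits in a SHORT
+# but is out of range for a signed CHAR
+def _example_cell_type_checks():
+    assert cell_data_is_type("SHORT", 200) == True
+    assert cell_data_is_type("CHAR", 200) == False
+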
+# determines if a col_info list of values is valid
+def check_col_info_values(col_info_values):
+ # check params
+ if ((type(col_info_values) != list)
+ or (len(col_info_values) != 4)):
+ print("col info check: invalid col info value list")
+ return False
+
+ # hash or name
+ if (type(col_info_values[0]) != str):
+ print("col info check: name or hash is not a string")
+ return False
+ try:
+ col_info_values[0].encode("cp932")
+ if (col_info_values[0].upper().startswith("0X")):
+ number = int(col_info_values[0], 16)
+ if (number > 0xFFFFFFFF):
+ print("col info check: hash value larger than expected")
+ return False
+ except:
+ print("col info check: name is not CP932 encodable/hash cannot be interpreted as a hex string")
+ return False
+ # bitmask
+    if ((type(col_info_values[1]) != int) or (col_info_values[1] < 0) or (col_info_values[1] > 0xFFFFFFFF)):
+ print("col info check: invalid bitmask value")
+ return False
+ # rshift
+ if ((type(col_info_values[2]) != int) or (col_info_values[2] < 0) or (col_info_values[2] > 0xFF)):
+ print("col info check: invalid rshift value")
+ return False
+ # type
+ if (col_info_values[3] not in TYPE_INT_TO_STRING):
+ print("col info check: invalid type string value")
+ return False
+
+ # all good
+ return True
+
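+# e.g. check_col_info_values(["pos_x", 0xFFFFFFFF, 0, "FLOAT"]) is True,
+# as is the raw hash form check_col_info_values(["0x065E794D", 0xFFFFFFFF, 0, "FLOAT"])
+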
+# check a smg bcsv table command
+def check_table_cmd(table, operation, element, rest_of_values):
+ # check the table
+ if ("all good" not in check_smg_bcsv_table(table)): return False
+ # check the command, operation and element
+ if ((operation not in COMMAND_LIST) or (element not in ELEMENT_TO_OP)): return False
+ # rest_of_values must be a list
+ if (type(rest_of_values) != list): return False
+
+ # insert a row at a specific index with some row values
+ if (operation == "INSERT" and element == "ROW"):
+ # rest of values checking
+ if (len(rest_of_values) != 2): return False
+ insert_index = rest_of_values[0]
+ insert_row_values = rest_of_values[1]
+ if (type(insert_index) != int or insert_index < 0 or insert_index > table.row_count): return False
+ if (type(insert_row_values) != list or len(insert_row_values) != table.col_count): return False
+ # check if the elements on the row match the type of the column
+ for i in range(table.col_count):
+ if (cell_data_is_type(table.cols_info[i].type, insert_row_values[i]) == False): return False
+
+ # insert a column at a specific index with some col_info values and some column values
+ elif (operation == "INSERT" and element == "COLUMN"):
+ # rest of values checking
+ if (len(rest_of_values) != 3): return False
+ insert_index = rest_of_values[0]
+ insert_col_info = rest_of_values[1]
+ insert_col_values = rest_of_values[2]
+ if (type(insert_index) != int or insert_index < 0 or insert_index > table.col_count): return False
+ if (check_col_info_values(insert_col_info) == False): return False
+ if (type(insert_col_values) != list or len(insert_col_values) != table.row_count): return False
+ # check if the elements on the column match the type of the column
+ for i in range(table.row_count):
+ if (cell_data_is_type(insert_col_info[3], insert_col_values[i]) == False): return False
+
+ # remove a row from a specific index, specify the row values to be removed
+ elif (operation == "REMOVE" and element == "ROW"):
+ # rest of values checking
+ if (len(rest_of_values) != 2): return False
+ remove_index = rest_of_values[0]
+ remove_row_values = rest_of_values[1]
+ if (type(remove_index) != int or remove_index < 0 or remove_index >= table.row_count): return False
+ if (type(remove_row_values) != list or len(remove_row_values) != table.col_count): return False
+ # check if the elements on the row match the type of the column
+ for i in range(table.col_count):
+ if (cell_data_is_type(table.cols_info[i].type, remove_row_values[i]) == False): return False
+ # check that the given row values match the actual row being removed
+ if (remove_row_values != table.rows_data[remove_index]): return False
+
+ # remove a column from a specific index, specify the col_info and the column values to be removed
+ elif (operation == "REMOVE" and element == "COLUMN"):
+ # rest of values checking
+ if (len(rest_of_values) != 3): return False
+ remove_index = rest_of_values[0]
+ remove_col_info = rest_of_values[1]
+ remove_col_values = rest_of_values[2]
+ if (type(remove_index) != int or remove_index < 0 or remove_index >= table.col_count): return False
+ if (check_col_info_values(remove_col_info) == False): return False
+ # check if the given col_info values match the actual col_info being removed
+ if ((remove_col_info[0] != table.cols_info[remove_index].name_or_hash)
+ or (remove_col_info[1] != table.cols_info[remove_index].bitmask)
+ or (remove_col_info[2] != table.cols_info[remove_index].rshift)
+ or (remove_col_info[3] != table.cols_info[remove_index].type)): return False
+ if (type(remove_col_values) != list or len(remove_col_values) != table.row_count): return False
+ # check if the elements on the column match the type of the column
+ for i in range(table.row_count):
+ if (cell_data_is_type(remove_col_info[3], remove_col_values[i]) == False): return False
+
+ # move a row from an index to another index
+ elif(operation == "MOVE" and element == "ROW"):
+ # rest of values checking
+ if (len(rest_of_values) != 2): return False
+ old_index = rest_of_values[0]
+ new_index = rest_of_values[1]
+ if (type(old_index) != int or old_index < 0 or old_index >= table.row_count): return False
+ if (type(new_index) != int or new_index < 0 or new_index >= table.row_count): return False
+
+ # move a column from an index to another index
+ elif(operation == "MOVE" and element == "COLUMN"):
+ # rest of values checking
+ if (len(rest_of_values) != 2): return False
+ old_index = rest_of_values[0]
+ new_index = rest_of_values[1]
+ if (type(old_index) != int or old_index < 0 or old_index >= table.col_count): return False
+ if (type(new_index) != int or new_index < 0 or new_index >= table.col_count): return False
+
+ # edit
+ elif (operation == "EDIT" and element == "CELL"):
+ # rest_of_values check
+ if (len(rest_of_values) < 5): return False
+ data_path = rest_of_values[0]
+ if (data_path not in ["cols_info", "rows_data"]): return False
+ # rows_data
+ if (data_path == "rows_data"):
+ if (len(rest_of_values) != 5): return False
+ row_index = rest_of_values[1]
+ col_index = rest_of_values[2]
+ if (type(row_index) != int or row_index < 0 or row_index >= table.row_count): return False
+ if (type(col_index) != int or col_index < 0 or col_index >= table.col_count): return False
+ old_value = rest_of_values[3]
+ new_value = rest_of_values[4]
+ if (old_value != table.rows_data[row_index][col_index]): return False
+ if (cell_data_is_type(table.cols_info[col_index].type, new_value) == False): return False
+ # cols_info
+ elif (data_path == "cols_info"):
+ col_index = rest_of_values[1]
+ if (type(col_index) != int or col_index < 0 or col_index >= table.col_count): return False
+ inner_data_path = rest_of_values[2]
+ if (inner_data_path not in ["name_or_hash", "bitmask", "rshift", "type"]): return False
+ # type
+ if (inner_data_path == "type"):
+ if (len(rest_of_values) != 7): return False
+ old_value = rest_of_values[3]
+ new_value = rest_of_values[4]
+ if (old_value != table.cols_info[col_index].type): return False
+ if (new_value not in TYPE_INT_TO_STRING): return False
+ # reuse this same function cleverly (recursive lets goooo)
+ tmp = table.cols_info[col_index]
+ tmp_col_info = [tmp.name_or_hash, tmp.bitmask, tmp.rshift, tmp.type]
+ old_column_values = rest_of_values[5]
+ tmp_rest_of_values = [col_index, tmp_col_info, old_column_values]
+ if (check_table_cmd(table, "REMOVE", "COLUMN", tmp_rest_of_values) == False): return False
+ tmp_col_info[3] = new_value
+ new_column_values = rest_of_values[6]
+ tmp_rest_of_values[2] = new_column_values
+ if (check_table_cmd(table, "INSERT", "COLUMN", tmp_rest_of_values) == False): return False
+ # name_or_hash, bitmask, rshift
+ else:
+ if (len(rest_of_values) != 5): return False
+ # reuse check_col_info_values()
+ old_value = rest_of_values[3]
+ new_value = rest_of_values[4]
+ if (old_value != getattr(table.cols_info[col_index], inner_data_path)): return False
+ if (inner_data_path == "name_or_hash"):
+ if (check_col_info_values([new_value, 0, 0, "LONG"]) == False): return False
+ elif (inner_data_path == "bitmask"):
+ if (check_col_info_values(["a", new_value, 0, "LONG"]) == False): return False
+ elif (inner_data_path == "rshift"):
+ if (check_col_info_values(["a", 0, new_value, "LONG"]) == False): return False
+
+ # all good
+ return True
+
+# execute a table command
+def exec_table_cmd(table, operation, element, rest_of_values):
+ # check the command
+ if (check_table_cmd(table, operation, element, rest_of_values) == False):
+ return None
+
+ # execute the operation on the table
+ if (operation == "INSERT"):
+ insert_index = rest_of_values[0]
+ # insert a row
+ if (element == "ROW"):
+ table.row_count += 1
+ insert_list = rest_of_values[1]
+ # rows_data
+ table.rows_data = table.rows_data[ : insert_index] + [insert_list] + table.rows_data[insert_index : ]
+ # insert a column
+ elif (element == "COLUMN"):
+ table.col_count += 1
+ insert_col_info = smg_bcsv_table.cols_info()
+ insert_col_info.name_or_hash = rest_of_values[1][0]
+ insert_col_info.bitmask = rest_of_values[1][1]
+ insert_col_info.rshift = rest_of_values[1][2]
+ insert_col_info.type = rest_of_values[1][3]
+ insert_col_values = rest_of_values[2]
+ # col_info
+ table.cols_info = (table.cols_info[ : insert_index]
+ + [insert_col_info]
+ + table.cols_info[insert_index : ])
+ # rows_data
+ for i in range(table.row_count):
+ table.rows_data[i] = (table.rows_data[i][ : insert_index]
+ + [insert_col_values[i]]
+ + table.rows_data[i][insert_index : ])
+ elif (operation == "MOVE"):
+ # indexes
+ old_index = rest_of_values[0]
+ new_index = rest_of_values[1]
+ # move a row
+ if (element == "ROW"):
+ # rows_data
+ to_move = table.rows_data[old_index]
+ table.rows_data = table.rows_data[ : old_index] + table.rows_data[old_index + 1 : ]
+ table.rows_data = table.rows_data[ : new_index] + [to_move] + table.rows_data[new_index : ]
+ # move a column
+ elif (element == "COLUMN"):
+ # cols_info
+ col_info_to_move = table.cols_info[old_index]
+ table.cols_info = table.cols_info[ : old_index] + table.cols_info[old_index + 1 : ]
+ table.cols_info = table.cols_info[ : new_index] + [col_info_to_move] + table.cols_info[new_index : ]
+ # rows_data
+ for i in range(table.row_count):
+ value_to_move = table.rows_data[i][old_index]
+ table.rows_data[i] = table.rows_data[i][ : old_index] + table.rows_data[i][old_index + 1 : ]
+ table.rows_data[i] = table.rows_data[i][ : new_index] + [value_to_move] + table.rows_data[i][new_index : ]
+ elif (operation == "REMOVE"):
+ remove_index = rest_of_values[0]
+ # remove a row
+ if (element == "ROW"):
+ table.row_count -= 1
+ # rows_data
+ table.rows_data = table.rows_data[ : remove_index] + table.rows_data[remove_index + 1 : ]
+ # remove a column
+ elif (element == "COLUMN"):
+ table.col_count -= 1
+ # cols_info
+ table.cols_info = table.cols_info[ : remove_index] + table.cols_info[remove_index + 1: ]
+ # rows_data
+ for i in range(table.row_count):
+ table.rows_data[i] = table.rows_data[i][ : remove_index] + table.rows_data[i][remove_index + 1 : ]
+ elif (operation == "EDIT"):
+ # edit a cell
+ if (element == "CELL"):
+ data_path = rest_of_values[0]
+ # rows_data cell
+ if (data_path == "rows_data"):
+ row_index = rest_of_values[1]
+ col_index = rest_of_values[2]
+ table.rows_data[row_index][col_index] = rest_of_values[4]
+ # cols_info cell
+ elif (data_path == "cols_info"):
+ col_index = rest_of_values[1]
+ inner_data_path = rest_of_values[2]
+ # type
+ if (inner_data_path == "type"):
+ table.cols_info[col_index].type = rest_of_values[4]
+ for i in range(table.row_count):
+ table.rows_data[i][col_index] = rest_of_values[6][i]
+ # name_or_hash, bitmask, rshift
+ else:
+ if (inner_data_path == "name_or_hash"): table.cols_info[col_index].name_or_hash = rest_of_values[4]
+ elif (inner_data_path == "bitmask"): table.cols_info[col_index].bitmask = rest_of_values[4]
+ elif (inner_data_path == "rshift"): table.cols_info[col_index].rshift = rest_of_values[4]
+
+ # all good, return the command list
+ return [operation, element, rest_of_values]
+
+# assign the values of one table to another table (deep copy)
+def assign_table_values(src, dest):
+ # src must be valid
+ if ("all good" not in check_smg_bcsv_table(src)
+ or type(dest) != smg_bcsv_table
+ or type(dest.cols_info) != list
+ or type(dest.rows_data) != list):
+ return False
+
+ # assign the values
+ dest.row_count = src.row_count
+ dest.col_count = src.col_count
+ dest.cols_info.clear()
+ dest.rows_data.clear()
+ for i in range(src.col_count):
+ dest.cols_info.append(copy.deepcopy(src.cols_info[i]))
+ for i in range(src.row_count):
+ dest.rows_data.append(src.rows_data[i].copy())
+
+ # done!
+ return True
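+
+# usage sketch: snapshot a table before editing it (this assumes the
+# smg_bcsv_table() constructor initializes cols_info/rows_data as the
+# empty lists check_smg_bcsv_table() expects)
+#
+#     backup = smg_bcsv_table()
+#     if (assign_table_values(table, backup) == True):
+#         exec_table_cmd(table, "REMOVE", "ROW", [0, table.rows_data[0]])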
diff --git a/bcsv_hashes.txt b/bcsv_hashes.txt
new file mode 100644
index 0000000..56926e4
--- /dev/null
+++ b/bcsv_hashes.txt
@@ -0,0 +1,72 @@
+# known BCSV hashes used in SMG (only the ones I've seen)
+# character encoding used in the file is CP932
+# "‚‹‚š_KoopaPannel" should look similar to "kz_KoopaPannel"
+# hashes in here are ordered by ASCII value (uppercase names first)
+#
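+# a sketch of how these hashes are derived from the CP932-encoded
+# field names (checked against several of the entries below):
+#
+#     hash = 0
+#     for byte in name.encode("cp932"):
+#         hash = (hash * 31 + byte) & 0xFFFFFFFF
+#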
+BloomFlag 0xC0029FEF
+CameraSetId 0xDD7658D8
+Camera_through 0xB506CBCB
+CastId 0x77E19CDA
+ClippingGroupId 0x4B5830B8
+CommonPath_ID 0x4B700AEA
+DemoGroupId 0x8E34C877
+Floor_code 0x1B5BC660
+ForceLowScenarioName0 0x2253AB4C
+ForceLowScenarioName1 0x2253AB4D
+ForceLowScenarioName2 0x2253AB4E
+ForceLowScenarioName3 0x2253AB4F
+ForceLowScenarioName4 0x2253AB50
+ForceLowScenarioName5 0x2253AB51
+ForceLowScenarioName6 0x2253AB52
+ForceLowScenarioName7 0x2253AB53
+GroupId 0x74B5F3DA
+IndirectFlag 0x393DE9FA
+LowFlag 0x7871E4C0
+MapParts_ID 0x81497C36
+MessageId 0x219D4362
+MiddleFlag 0x460956C1
+Obj_arg0 0x08E9C302
+Obj_arg1 0x08E9C303
+Obj_arg2 0x08E9C304
+Obj_arg3 0x08E9C305
+Obj_arg4 0x08E9C306
+Obj_arg5 0x08E9C307
+Obj_arg6 0x08E9C308
+Obj_arg7 0x08E9C309
+ParentID 0x49E5F365
+PlanetName 0xADF2B063
+SW_A 0x00270D26
+SW_APPEAR 0x749DFBD0
+SW_B 0x00270D27
+SW_DEAD 0xC075815F
+SW_SLEEP 0x4F11491C
+ShapeModelNo 0x1176D409
+Sound_code 0x6260CB3D
+ViewGroupId 0x74550D75
+Wall_code 0xCE698322
+WaterFlag 0x6AA7F503
+ZoneName 0x3666C077
+camera_id 0xEB9DA075
+default 0x5C13D641
+dir_x 0x05B2A146
+dir_y 0x05B2A147
+dir_z 0x05B2A148
+dx 0x00000C94
+dy 0x00000C95
+dz 0x00000C96
+l_id 0x003289CE
+mass 0x003306F4
+name 0x00337A8B
+pos_x 0x065E794D
+pos_y 0x065E794E
+pos_z 0x065E794F
+priority 0xBA8879A4
+radius 0xC80E6C92
+range 0x0674393D
+scale_x 0x71E5EAC3
+scale_y 0x71E5EAC4
+scale_z 0x71E5EAC5
+tx 0x00000E84
+ty 0x00000E85
+type 0x00368F3A
+tz 0x00000E86
diff --git a/blender_funcs.py b/blender_funcs.py
index a4a15aa..09e7db0 100644
--- a/blender_funcs.py
+++ b/blender_funcs.py
@@ -34,7 +34,7 @@ def disp_msg(string):
# select object and its children in the interaction mode specified
def select_obj(obj, recursive, interact_mode):
- # get the object scene
+ # get the scene
scene = obj.users_scene[0]
# make it the only object selected and active
@@ -46,13 +46,17 @@ def select_obj(obj, recursive, interact_mode):
# select the new object and set it to the selected mode 2 times lol
scene.objects.active = obj
obj.select = True
+ # unhide it if hidden
+ obj.hide = False
bpy.ops.object.mode_set(mode = interact_mode)
bpy.ops.object.mode_set(mode = interact_mode)
# select the children objects as well, if on object mode
+ # unhide them as well
if (recursive == True):
for child in obj.children:
child.select = True
+ child.hide = False
# duplicate a selected object with its children objects
diff --git a/collada_superbmd_export.py b/collada_superbmd_export.py
index 2f92fa3..e574dc4 100644
--- a/collada_superbmd_export.py
+++ b/collada_superbmd_export.py
@@ -30,7 +30,9 @@ def write_bmd_bdl_collada(context, filepath, triangulate):
blender_funcs.select_obj(armature, False, "OBJECT")
# check if the armature contains only mesh objects inside
+ # unhide all objects
for child in armature.children:
+ child.hide = False
if (child.type != "MESH"):
blender_funcs.disp_msg("\"%s\": contains non-mesh object (%s)." % (armature.name, child.name))
return {"FINISHED"}
@@ -164,7 +166,7 @@ def write_bmd_bdl_collada(context, filepath, triangulate):
bpy.ops.wm.collada_export(filepath = filepath, use_blender_profile = False,
selected = True, include_children = True,
triangulate = triangulate)
-
+
# delete the duplicate object
blender_funcs.select_obj(armature, True, "OBJECT")
bpy.ops.object.delete(use_global = False)
diff --git a/file_ops.py b/file_ops.py
index bbad813..8c7178d 100644
--- a/file_ops.py
+++ b/file_ops.py
@@ -1,6 +1,6 @@
# my functions for file creation/deletion operations
# also for file path string stuff
-import os, shutil
+import os, shutil, io
# function to format a path string correctly:
# - no duplicate slashes
@@ -10,7 +10,7 @@ def get_path_str(path):
# check params
if (type(path) != str):
- return ""
+ return None
# else work on the string
rtn_str = ""
@@ -233,12 +233,19 @@ def rm_folder(path):
return True
# function to get the size of a file
-def get_file_size(path):
-
- # check params
- if (is_file(path) == False):
- return 0
- return os.path.getsize(get_path_str(path))
+def get_file_size(path_or_stream):
+ # check params and get the size if the
+ # thing is a file or a stream object
+ if (type(path_or_stream) == str and is_file(path_or_stream) == True):
+ return os.path.getsize(get_path_str(path_or_stream))
+ elif (type(path_or_stream) == io.BytesIO):
+ # seek to the end, take the offset as the size, then rewind
+ path_or_stream.seek(0, io.SEEK_END)
+ size = path_or_stream.tell()
+ path_or_stream.seek(0)
+ return size
+ return 0
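+
+# usage sketch (paths are illustrative): both forms report a size
+# get_file_size("./some_file.bcsv")
+# get_file_size(io.BytesIO(b"\x00" * 16)) # -> 16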
# function to rename a folder or a file
def rename(current_name, new_name):
diff --git a/smg_common.py b/smg_common.py
index 4fa4017..be26960 100644
--- a/smg_common.py
+++ b/smg_common.py
@@ -5,6 +5,7 @@ class padding:
# the fill string
def __init__(self):
+ # keep this an ascii-only string please
self.string = "This is padding data to alignme"
# return the padding string to align data at a
diff --git a/start.bat b/start.bat
new file mode 100644
index 0000000..a96bcd2
--- /dev/null
+++ b/start.bat
@@ -0,0 +1,95 @@
+@echo off
+setlocal EnableDelayedExpansion
+
+:: check if the script is at the correct place
+echo.
+if not exist ".\blender.exe" (
+ echo ERROR:
+ echo Script is not in the correct location...
+ echo Put it in the same folder as the "blender.exe" file.
+ echo.
+ pause
+ exit
+)
+
+:: check blender's python version
+if exist ".\2.79\python" (
+ for /f "delims=" %%i in ('.\2.79\python\bin\python.exe -c "import sys; print(sys.version[:3])"') do set blender_python_version=%%i
+)
+if exist ".\2.79\python_original" (
+ for /f "delims=" %%i in ('.\2.79\python_original\bin\python.exe -c "import sys; print(sys.version[:3])"') do set blender_python_version=%%i
+)
+echo.
+echo Blender Python version: %blender_python_version%
+
+:: check if the manually installed version exists
+
+:: Python 3.5
+echo.
+if "%blender_python_version%" == "3.5" (
+ py -3.5 --version
+ if !errorlevel! geq 1 (
+ echo.
+ echo ERROR:
+ echo Python 3.5 is not installed...
+ echo Be sure to add it to PATH.
+ echo.
+ pause
+ exit
+ )
+ goto all_is_setup
+)
+:: Python 3.7
+if "%blender_python_version%" == "3.7" (
+ py -3.7 --version
+ if !errorlevel! geq 1 (
+ echo.
+ echo ERROR:
+ echo Python 3.7 is not installed...
+ echo Be sure to add it to PATH.
+ echo.
+ pause
+ exit
+ )
+ goto all_is_setup
+)
+echo.
+echo ERROR:
+echo Blender has a different Python version than the one expected.
+echo.
+pause
+exit
+
+:: surely nothing else can go wrong
+:all_is_setup
+echo Correct Python version found.
+
+:: will rename the original python folder to python_original
+echo Renaming some files...
+ren .\2.79\python python_original
+:: will also rename the python35.dll/python37.dll
+if "%blender_python_version%" == "3.5" (
+ ren python35.dll python35_original.dll
+)
+if "%blender_python_version%" == "3.7" (
+ ren python37.dll python37_original.dll
+)
+
+:: set the BLENDER_SYSTEM_PYTHON variable and get the python interpreter path
+for /f "delims=" %%i in ('py -%blender_python_version% -c "import sys; print(sys.exec_prefix)"') do set python_base_path=%%i
+for /f "delims=" %%i in ('py -%blender_python_version% -c "import sys; print(sys.executable)"') do set python_interpreter_path=%%i
+set BLENDER_SYSTEM_PYTHON=%python_base_path%
+echo %BLENDER_SYSTEM_PYTHON%
+:: run Blender, passing the python interpreter path as the argument after "--"
+blender.exe -- "%python_interpreter_path%"
+
+:: rename the files back to their original names
+:: so that blender can be used without Blenxy as well
+echo Restoring original file names...
+ren .\2.79\python_original python
+if "%blender_python_version%" == "3.5" (
+ ren python35_original.dll python35.dll
+)
+if "%blender_python_version%" == "3.7" (
+ ren python37_original.dll python37.dll
+)
+pause
diff --git a/start.sh b/start.sh
new file mode 100755
index 0000000..c8c83c5
--- /dev/null
+++ b/start.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+# file to be put where the "blender" binary is located
+# it launches blender with the external python install
+# the add-on is expected to run with
+# the path below must be the absolute path to the python interpreter
+
+PYTHON_INTERPRETER_PATH=/YOUR/ABSOLUTE/PYTHON/INTERPRETER/PATH
+echo "$PYTHON_INTERPRETER_PATH"
+BLENDER_SYSTEM_PYTHON=$("$PYTHON_INTERPRETER_PATH" -c 'import sys; print(sys.exec_prefix)')
+echo "$BLENDER_SYSTEM_PYTHON"
+BLENDER_SYSTEM_PYTHON=$BLENDER_SYSTEM_PYTHON ./blender -- "$PYTHON_INTERPRETER_PATH"