summaryrefslogtreecommitdiff
path: root/training/export_cnn_v2_weights.py
diff options
context:
space:
mode:
author    skal <pascal.massimino@gmail.com>  2026-02-13 12:41:35 +0100
committer skal <pascal.massimino@gmail.com>  2026-02-13 12:41:35 +0100
commit a101d76e3eab4ee4d93357d71e2c7d4e0114f56f (patch)
tree   54fa8bcb50bfcfa401fb58fcb2c6ad69c54a6788 /training/export_cnn_v2_weights.py
parent 561d1dc446db7d1d3e02b92b43abedf1a5017850 (diff)
CNN v2: Restore per-layer kernel sizes support
Training:
- train_cnn_v2.py: Accept --kernel-sizes as comma-separated list
- CNNv2 model: Per-layer kernel sizes (e.g., [1,3,5])
- Single value replicates across layers (e.g., "3" → [3,3,3])

Export:
- export_cnn_v2_weights.py: Backward compatible with old checkpoints
- Handles both kernel_size (old) and kernel_sizes (new) format

Documentation:
- CNN_V2.md: Updated code examples and config format
- HOWTO.md: Updated training examples to show comma-separated syntax

Binary format: Already supports per-layer kernel sizes (no changes)

Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
Diffstat (limited to 'training/export_cnn_v2_weights.py')
-rwxr-xr-x  training/export_cnn_v2_weights.py  18
1 file changed, 14 insertions(+), 4 deletions(-)
diff --git a/training/export_cnn_v2_weights.py b/training/export_cnn_v2_weights.py
index 07254fc..bbe94dd 100755
--- a/training/export_cnn_v2_weights.py
+++ b/training/export_cnn_v2_weights.py
@@ -45,11 +45,20 @@ def export_weights_binary(checkpoint_path, output_path):
state_dict = checkpoint['model_state_dict']
config = checkpoint['config']
- kernel_size = config.get('kernel_size', 3)
- num_layers = config.get('num_layers', 3)
+ # Support both old (kernel_size) and new (kernel_sizes) format
+ if 'kernel_sizes' in config:
+ kernel_sizes = config['kernel_sizes']
+ elif 'kernel_size' in config:
+ kernel_size = config['kernel_size']
+ num_layers = config.get('num_layers', 3)
+ kernel_sizes = [kernel_size] * num_layers
+ else:
+ kernel_sizes = [3, 3, 3] # fallback
+
+ num_layers = config.get('num_layers', len(kernel_sizes))
print(f"Configuration:")
- print(f" Kernel size: {kernel_size}×{kernel_size}")
+ print(f" Kernel sizes: {kernel_sizes}")
print(f" Layers: {num_layers}")
print(f" Architecture: uniform 12D→4D (bias=False)")
@@ -65,6 +74,7 @@ def export_weights_binary(checkpoint_path, output_path):
layer_weights = state_dict[layer_key].detach().numpy()
layer_flat = layer_weights.flatten()
+ kernel_size = kernel_sizes[i]
layers.append({
'kernel_size': kernel_size,
@@ -76,7 +86,7 @@ def export_weights_binary(checkpoint_path, output_path):
all_weights.extend(layer_flat)
weight_offset += len(layer_flat)
- print(f" Layer {i}: 12D→4D, {len(layer_flat)} weights")
+ print(f" Layer {i}: 12D→4D, {kernel_size}×{kernel_size}, {len(layer_flat)} weights")
# Convert to f16
# TODO: Use 8-bit quantization for 2× size reduction