From c878631f24ddb7514dd4db3d7ace6a0a296d4157 Mon Sep 17 00:00:00 2001 From: skal Date: Thu, 12 Feb 2026 11:48:02 +0100 Subject: Fix: CNN v2 training - handle variable image sizes MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Training script now resizes all images to fixed size before batching. Issue: RuntimeError when batching variable-sized images - Images had different dimensions (376x626 vs 344x361) - PyTorch DataLoader requires uniform tensor sizes for batching Solution: - Add --image-size parameter (default: 256) - Resize all images to target_size using LANCZOS interpolation - Preserves aspect ratio independent training Changes: - train_cnn_v2.py: ImagePairDataset now resizes to fixed dimensions - train_cnn_v2_full.sh: Added IMAGE_SIZE=256 configuration Tested: 8 image pairs, variable sizes → uniform 256×256 batches Co-Authored-By: Claude Sonnet 4.5 --- LOG.txt | 43 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 43 insertions(+) create mode 100644 LOG.txt (limited to 'LOG.txt') diff --git a/LOG.txt b/LOG.txt new file mode 100644 index 0000000..50b77ea --- /dev/null +++ b/LOG.txt @@ -0,0 +1,43 @@ +=== CNN v2 Complete Training Pipeline === +Input: training/input +Target: training/target_2 +Epochs: 10000 +Checkpoint interval: 500 + +[1/4] Training CNN v2 model... +Training on cpu +Loaded 8 image pairs +Model: [16, 8, 4] channels, [1, 3, 5] kernels, 3456 weights + +Training for 10000 epochs... +Traceback (most recent call last): + File "/Users/skal/demo/training/train_cnn_v2.py", line 217, in + main() + File "/Users/skal/demo/training/train_cnn_v2.py", line 213, in main + train(args) + File "/Users/skal/demo/training/train_cnn_v2.py", line 157, in train + for static_feat, target in dataloader: + File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/dataloader.py", line 741, in __next__ + data = self._next_data() + ^^^^^^^^^^^^^^^^^ + File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/dataloader.py", line 801, in _next_data + data = self._dataset_fetcher.fetch(index) # may raise StopIteration + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/_utils/fetch.py", line 57, in fetch + return self.collate_fn(data) + ^^^^^^^^^^^^^^^^^^^^^ + File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/_utils/collate.py", line 401, in default_collate + return collate(batch, collate_fn_map=default_collate_fn_map) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/_utils/collate.py", line 214, in collate + return [ + ^ + File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/_utils/collate.py", line 215, in + collate(samples, collate_fn_map=collate_fn_map) + File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/_utils/collate.py", line 155, in collate + return collate_fn_map[elem_type](batch, collate_fn_map=collate_fn_map) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + File "/Library/Frameworks/Python.framework/Versions/3.11/lib/python3.11/site-packages/torch/utils/data/_utils/collate.py", line 275, in collate_tensor_fn + return torch.stack(batch, 0, out=out) + ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +RuntimeError: stack expects each tensor to be equal size, but got [8, 376, 626] at entry 0 and [8, 344, 361] at entry 1 -- cgit v1.2.3