Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
32 changes: 32 additions & 0 deletions faststack/ChangeLog.md
Original file line number Diff line number Diff line change
@@ -1,7 +1,39 @@
# ChangeLog

## [0.5.0] - 2025-11-03

### Added
- Load full-resolution images when zooming in for maximum detail.
- Call Helicon Focus for each defined stack when multiple stacks are present.

### Changed
- The filesystem watcher is now less sensitive to spurious modification events, reducing unnecessary refreshes.
- The preloading process now shares the same thread pool as the prefetcher for better resource utilization.
- Stacks are now cleared automatically after being sent to Helicon Focus.

### Fixed
- Corrected a `ValueError` in `PyTurboJPEG` caused by unsupported scaling factors.
- Resolved an `AttributeError` in the JPEG scaling factor calculation.
- Fixed an issue where panning the image did not work correctly.
- Addressed a bug where panning speed was incorrect at high zoom levels.
- Ensured that stale prefetcher futures are cancelled when the display size changes.

### Performance
- Improved image decoding performance by using `PyTurboJPEG` for resized decoding.
- Tuned the number of prefetcher thread pool workers based on system CPU cores.
- Replaced synchronous file reads with memory-mapped I/O for faster image loading.
- Optimized image resizing by using `BILINEAR` resampling for large downscales.
- Debounced display size change notifications to reduce redundant UI updates.

## Version 0.4

### Todo

Make it use the full res image when zooming in
When multiple stacks are selected, call Helicon multiple times
After Helicon is called, clear the stacks
Fix S key - I guess it should remove an image from the stack? Clarify what it does now.

### New Features
- **Two-tier caching system:** Implemented a two-tier caching system to prefetch display-sized images, significantly improving performance and reducing GPU memory usage.
- **"Preload All Images" feature:** Added a new menu option under "Actions" to preload all images in the current directory into the cache, ensuring quick access even for unviewed images.
Expand Down
133 changes: 67 additions & 66 deletions faststack/faststack/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,7 @@ def __init__(self, image_dir: Path, engine: QQmlApplicationEngine):
self.display_width = 0
self.display_height = 0
self.display_generation = 0
self.is_zoomed = False

# -- Backend Components --
self.watcher = Watcher(self.image_dir, self.refresh_image_list)
Expand All @@ -61,7 +62,6 @@ def __init__(self, image_dir: Path, engine: QQmlApplicationEngine):
prefetch_radius=config.getint('core', 'prefetch_radius', 4),
get_display_info=self.get_display_info
)
self.preload_executor = concurrent.futures.ThreadPoolExecutor(max_workers=1, thread_name_prefix="PreloadAll")

# -- UI State --
self.ui_state = UIState(self)
Expand All @@ -73,9 +73,8 @@ def __init__(self, image_dir: Path, engine: QQmlApplicationEngine):
self.selected_raws: set[Path] = set()

def get_display_info(self):
    """Return the (width, height, generation) tuple for the current display target.

    When the user is zoomed in, report a 0x0 size so downstream decoding
    loads the full-resolution image instead of a display-sized one; the
    generation counter still invalidates stale cached entries.

    Note: the pasted diff contained two consecutive definitions of this
    method (old and new merged); only this final, zoom-aware version is kept.
    """
    if self.is_zoomed:
        # 0x0 signals "no downscaling" to the resized decoder (full decode).
        return 0, 0, self.display_generation
    return self.display_width, self.display_height, self.display_generation

def on_display_size_changed(self, width: int, height: int):
Expand All @@ -87,9 +86,22 @@ def on_display_size_changed(self, width: int, height: int):
self.display_height = height
self.display_generation += 1
self.image_cache.clear()
self.prefetcher.cancel_all() # Clear existing prefetch tasks
self.prefetcher.update_prefetch(self.current_index)
self.sync_ui_state() # To refresh the image

def set_zoomed(self, zoomed: bool):
    """Record a zoom-state transition and rebuild all cached imagery.

    Entering or leaving zoom changes the target decode size, so every cached
    image and pending prefetch task is stale and must be regenerated; the UI
    is then notified of the new state.
    """
    if self.is_zoomed != zoomed:
        self.is_zoomed = zoomed
        log.info(f"Zoom state changed to: {zoomed}")
        # Bump the generation so in-flight decodes for the old size are dropped.
        self.display_generation += 1
        self.image_cache.clear()
        self.prefetcher.cancel_all()
        self.prefetcher.update_prefetch(self.current_index)
        self.sync_ui_state()
        self.ui_state.isZoomedChanged.emit()

def eventFilter(self, watched: QObject, event: QEvent) -> bool:
if watched == self.main_window and event.type() == QEvent.Type.KeyPress:
handled = self.keybinder.handle_key_press(event)
Expand Down Expand Up @@ -246,47 +258,53 @@ def toggle_selection(self):

def launch_helicon(self):
    """Launches Helicon Focus with selected RAWs or all RAWs in defined stacks.

    An explicit RAW selection takes priority; otherwise each defined stack is
    dispatched to Helicon Focus as its own run. Stacks are cleared after
    being dispatched, and the UI is refreshed either way.

    Note: the pasted diff merged the old single-launch implementation with
    the new per-stack implementation; only the final flow is kept here.
    """
    if self.selected_raws:
        log.info(f"Launching Helicon with {len(self.selected_raws)} selected RAW files.")
        # Sort for a consistent, reproducible file order.
        self._launch_helicon_with_files(sorted(list(self.selected_raws)))
        self.selected_raws.clear()

    elif self.stacks:
        log.info(f"Launching Helicon for {len(self.stacks)} defined stacks.")
        for start, end in self.stacks:
            raw_files_to_process = []
            for idx in range(start, end + 1):
                # Guard against stale stack ranges after the directory changed.
                if idx < len(self.image_files) and self.image_files[idx].raw_pair:
                    raw_files_to_process.append(self.image_files[idx].raw_pair)

            if raw_files_to_process:
                self._launch_helicon_with_files(raw_files_to_process)
            else:
                log.warning(f"No valid RAW files found for stack [{start}, {end}].")

        self.clear_all_stacks()

    else:
        log.warning("No selection or stacks defined to launch Helicon Focus.")
        return

    self.sync_ui_state()

def _launch_helicon_with_files(self, raw_files: List[Path]):
    """Launch Helicon Focus on *raw_files* and record stacking metadata.

    Duplicates are removed and the list sorted so repeated launches are
    deterministic. On success the temporary file handed to Helicon is deleted
    after a short delay, and each stacked image's sidecar metadata is marked
    with today's date.
    """
    log.info(f"Launching Helicon Focus with {len(raw_files)} RAW files.")
    unique_raw_files = sorted(set(raw_files))
    success, tmp_path = launch_helicon_focus(unique_raw_files)
    if success and tmp_path:
        # Give Helicon time to read the temp file list before deleting it.
        QTimer.singleShot(5000, lambda: self._delete_temp_file(tmp_path))

        # Build a RAW-path -> image-file map once, keeping the first match
        # (as the original inner loop did), instead of re-scanning the whole
        # image list for every RAW file (was O(raws * images)).
        raw_to_image = {}
        for img_file in self.image_files:
            if img_file.raw_pair and img_file.raw_pair not in raw_to_image:
                raw_to_image[img_file.raw_pair] = img_file

        # Record stacking metadata
        today = date.today().isoformat()
        for raw_path in unique_raw_files:
            img_file = raw_to_image.get(raw_path)
            if img_file is None:
                continue
            meta = self.sidecar.get_metadata(img_file.path.stem)
            meta.stacked = True
            meta.stacked_date = today
        self.sidecar.save()

def _delete_temp_file(self, tmp_path: Path):
if tmp_path.exists():
Expand Down Expand Up @@ -371,38 +389,22 @@ def preload_all_images(self):
self.reporter.progress_updated.connect(self._update_preload_progress)
self.reporter.finished.connect(self._finish_preloading)

def _preload_and_report_progress():
log.info(f"Preloading images.")

futures = []
for i in range(len(self.image_files)):
future = self.prefetcher.submit_task(i, self.prefetcher.generation)
if future:
futures.append(future)

num_futures = len(futures)
if num_futures == 0:
# Use existing prefetch executor (better resource utilization)
total = len(self.image_files)
completed = 0

def _on_done(future):
nonlocal completed
completed += 1
progress = int((completed / total) * 100)
self.reporter.progress_updated.emit(progress)
if completed == total:
self.reporter.finished.emit()
return

log.info(f"Submitted {num_futures} preloading tasks.")
completed_count = 0
lock = threading.Lock()

def _on_future_done(future):
nonlocal completed_count
with lock:
completed_count += 1
progress = int((completed_count / num_futures) * 100)
self.reporter.progress_updated.emit(progress)

if completed_count == num_futures:
self.reporter.finished.emit()

for future in futures:
future.add_done_callback(_on_future_done)

self.preload_executor.submit(_preload_and_report_progress)

for i in range(total):
future = self.prefetcher.submit_task(i, self.prefetcher.generation)
if future:
future.add_done_callback(_on_done)

Comment thread
AlanRockefeller marked this conversation as resolved.
def _update_preload_progress(self, progress: int):
log.debug(f"Updating preload progress in UI: {progress}%")
Expand All @@ -422,7 +424,6 @@ def shutdown(self):

self.watcher.stop()
self.prefetcher.shutdown()
self.preload_executor.shutdown(wait=False)
self.sidecar.set_last_index(self.current_index)
self.sidecar.save()

Expand Down
71 changes: 58 additions & 13 deletions faststack/faststack/imaging/jpeg.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ def decode_jpeg_rgb(jpeg_bytes: bytes) -> Optional[np.ndarray]:
# The flags prevent upsampling of chroma channels, which is faster.
return jpeg_decoder.decode(jpeg_bytes, pixel_format=TJPF_RGB, flags=TJFLAG_FASTDCT)
except Exception as e:
log.error(f"PyTurboJPEG failed to decode image: {e}. Trying Pillow.")
log.exception(f"PyTurboJPEG failed to decode image: {e}. Trying Pillow.")
# Fall through to Pillow fallback

# Fallback to Pillow
Expand All @@ -35,7 +35,7 @@ def decode_jpeg_rgb(jpeg_bytes: bytes) -> Optional[np.ndarray]:
img = Image.open(BytesIO(jpeg_bytes)).convert("RGB")
return np.array(img)
except Exception as e:
log.error(f"Pillow also failed to decode image: {e}")
log.exception(f"Pillow also failed to decode image: {e}")
return None

def decode_jpeg_thumb_rgb(
Expand All @@ -53,7 +53,7 @@ def decode_jpeg_thumb_rgb(

return jpeg_decoder.decode(jpeg_bytes, scaling_factor=scaling_factor, pixel_format=TJPF_RGB, flags=TJFLAG_FASTDCT)
except Exception as e:
log.error(f"PyTurboJPEG failed to decode thumbnail: {e}. Trying Pillow.")
log.exception(f"PyTurboJPEG failed to decode thumbnail: {e}. Trying Pillow.")

# Fallback to Pillow
try:
Expand All @@ -62,32 +62,77 @@ def decode_jpeg_thumb_rgb(
img.thumbnail((max_dim, max_dim))
return np.array(img.convert("RGB"))
except Exception as e:
log.error(f"Pillow also failed to decode thumbnail: {e}")
log.exception(f"Pillow also failed to decode thumbnail: {e}")
return None

def _get_turbojpeg_scaling_factor(width: int, height: int, max_dim: int) -> Optional[Tuple[int, int]]:
    """Finds the best libjpeg-turbo scaling factor to get a thumbnail <= max_dim.

    Queries the decoder's own supported factors (rather than assuming the
    classic N/8 set) and returns the largest factor whose scaled width and
    height both fit within max_dim. Falls back to the smallest supported
    factor when even that does not fit, and to None when TurboJPEG is
    unavailable.

    Note: the pasted diff left the removed N/8 loop in front of this body,
    making it unreachable; only the final implementation is kept.
    """
    if not TURBO_AVAILABLE or not jpeg_decoder:
        return None

    # Largest scale first so we keep as much resolution as allowed.
    supported_factors = sorted(
        jpeg_decoder.scaling_factors,
        key=lambda factor: factor[0] / factor[1],
        reverse=True,
    )

    for num, den in supported_factors:
        if (width * num / den) <= max_dim and (height * num / den) <= max_dim:
            return (num, den)

    # Image too large for every factor: use the most aggressive downscale.
    return supported_factors[-1] if supported_factors else None


def decode_jpeg_resized(
    jpeg_bytes: bytes, width: int, height: int
) -> Optional[np.ndarray]:
    """Decodes and resizes a JPEG to fit within the given dimensions.

    Tries PyTurboJPEG first, decoding at the largest DCT scaling factor that
    fits, then fine-tunes with Pillow only when the result is still larger
    than the target. Falls back to a pure-Pillow decode on any TurboJPEG
    failure. Returns an RGB ndarray, or None when both decoders fail.

    Note: the pasted diff left the removed unconditional LANCZOS thumbnail
    line inside the Pillow fallback, which would resize before the downscale
    ratio is computed (double-resizing); only the final flow is kept.
    """
    if width == 0 or height == 0:
        # Size unspecified (e.g. zoomed view): decode at full resolution.
        return decode_jpeg_rgb(jpeg_bytes)

    if TURBO_AVAILABLE and jpeg_decoder:
        try:
            # Read only the header to learn the source dimensions.
            img_width, img_height, _, _ = jpeg_decoder.decode_header(jpeg_bytes)

            # Best coarse scaling factor TurboJPEG supports (e.g. 1/2, 1/4, 1/8).
            scale_factor = _get_turbojpeg_scaling_factor(img_width, img_height, max(width, height))

            if scale_factor:
                decoded = jpeg_decoder.decode(
                    jpeg_bytes,
                    scaling_factor=scale_factor,
                    pixel_format=TJPF_RGB,
                    flags=TJFLAG_FASTDCT
                )

                # DCT scaling is coarse; only use Pillow for the final
                # exact-fit resize when the result is still oversized.
                if decoded.shape[0] > height or decoded.shape[1] > width:
                    img = Image.fromarray(decoded)
                    img.thumbnail((width, height), Image.Resampling.LANCZOS)
                    return np.array(img)
                return decoded
        except Exception as e:
            log.exception(f"PyTurboJPEG failed: {e}")

    # Fallback to Pillow
    try:
        from io import BytesIO

        img = Image.open(BytesIO(jpeg_bytes))

        scale_factor_ratio = min(img.width / width, img.height / height)

        # Use faster BILINEAR for large downscales, LANCZOS for smaller
        if scale_factor_ratio > 4:
            resampling = Image.Resampling.BILINEAR  # Much faster
        else:
            resampling = Image.Resampling.LANCZOS  # Higher quality

        img.thumbnail((width, height), resampling)
        return np.array(img.convert("RGB"))
    except Exception as e:
        log.exception(f"Pillow failed to decode and resize image: {e}")
        return None
11 changes: 9 additions & 2 deletions faststack/faststack/imaging/prefetch.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
import os
from concurrent.futures import ThreadPoolExecutor, Future
from typing import List, Dict, Optional, Callable
import mmap

from faststack.models import ImageFile, DecodedImage
from faststack.imaging.jpeg import decode_jpeg_rgb, decode_jpeg_resized
Expand All @@ -16,8 +17,12 @@ def __init__(self, image_files: List[ImageFile], cache_put: Callable, prefetch_r
self.cache_put = cache_put
self.prefetch_radius = prefetch_radius
self.get_display_info = get_display_info
# Use CPU count for I/O-bound JPEG decoding
# Rule of thumb: 2x CPU cores for I/O bound, 1x for CPU bound
optimal_workers = min((os.cpu_count() or 1) * 2, 8) # Cap at 8

self.executor = ThreadPoolExecutor(
max_workers=min(4, os.cpu_count() or 1),
max_workers=optimal_workers,
thread_name_prefix="Prefetcher"
)
self.futures: Dict[int, Future] = {}
Expand Down Expand Up @@ -72,8 +77,10 @@ def _decode_and_cache(self, image_file: ImageFile, index: int, generation: int,
return None

try:
# Memory-mapped file reading (faster than traditional read)
with open(image_file.path, "rb") as f:
jpeg_bytes = f.read()
with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as mmapped:
jpeg_bytes = mmapped[:]

buffer = decode_jpeg_resized(jpeg_bytes, display_width, display_height)
if buffer is not None:
Expand Down
Loading