Split processing into geometry phase (crop/resize) and adjustment phase. Cache the post-geometry pixels keyed by image fingerprint + crop/resize config. When only adjustments change, skip expensive decode/crop/resize operations. Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
460 lines · 12 KiB · TypeScript
// Web Worker for image processing using Photon WASM
|
|
|
|
import type { Device, PipelineConfig } from '../types';
|
|
|
|
let photon: typeof import('@silvia-odwyer/photon') | null = null;
|
|
|
|
// Cache for post-crop/resize data to speed up adjustment-only changes
|
|
type CachedGeometry = {
|
|
pixels: Uint8ClampedArray;
|
|
width: number;
|
|
height: number;
|
|
};
|
|
const geometryCache: Map<string, CachedGeometry> = new Map();
|
|
const MAX_CACHE_SIZE = 10; // Keep last 10 unique geometry states
|
|
|
|
async function initPhoton() {
|
|
if (photon) return photon;
|
|
const module = await import('@silvia-odwyer/photon');
|
|
await module.default();
|
|
photon = module;
|
|
return photon;
|
|
}
|
|
|
|
function buildGeometryCacheKey(
|
|
imageData: ArrayBuffer,
|
|
device: Device,
|
|
config: PipelineConfig
|
|
): string {
|
|
// Use image size + first 1000 bytes as fingerprint
|
|
const view = new Uint8Array(imageData);
|
|
const fingerprint = Array.from(view.slice(0, 1000)).join(',');
|
|
const imageKey = `${imageData.byteLength}:${fingerprint}`;
|
|
|
|
// Include geometry-affecting config
|
|
const cropKey = config.crop.enabled
|
|
? `crop:${config.crop.mode}:${JSON.stringify(config.crop.region ?? null)}`
|
|
: 'crop:off';
|
|
const resizeKey = config.resize.enabled ? `resize:${device.width}x${device.height}` : 'resize:off';
|
|
|
|
return `${imageKey}|${cropKey}|${resizeKey}`;
|
|
}
|
|
|
|
function pruneCache(): void {
|
|
if (geometryCache.size > MAX_CACHE_SIZE) {
|
|
// Remove oldest entries (first in map)
|
|
const keysToDelete = Array.from(geometryCache.keys()).slice(
|
|
0,
|
|
geometryCache.size - MAX_CACHE_SIZE
|
|
);
|
|
for (const key of keysToDelete) {
|
|
geometryCache.delete(key);
|
|
}
|
|
}
|
|
}
|
|
|
|
function getPhoton() {
|
|
if (!photon) throw new Error('Photon not initialized');
|
|
return photon;
|
|
}
|
|
|
|
function getImageData(img: ReturnType<typeof getPhoton.prototype.PhotonImage>): {
|
|
data: Uint8ClampedArray;
|
|
width: number;
|
|
height: number;
|
|
} {
|
|
const p = getPhoton();
|
|
const width = img.get_width();
|
|
const height = img.get_height();
|
|
const rawPixels = img.get_raw_pixels();
|
|
return { data: new Uint8ClampedArray(rawPixels), width, height };
|
|
}
|
|
|
|
function fromImageData(data: Uint8ClampedArray, width: number, height: number) {
|
|
const p = getPhoton();
|
|
return new p.PhotonImage(new Uint8Array(data.buffer), width, height);
|
|
}
|
|
|
|
function quantizePixels(data: Uint8ClampedArray, levels: number): void {
|
|
const step = 255 / (levels - 1);
|
|
for (let i = 0; i < data.length; i += 4) {
|
|
const grey = data[i];
|
|
const quantized = Math.round(grey / step) * step;
|
|
data[i] = quantized;
|
|
data[i + 1] = quantized;
|
|
data[i + 2] = quantized;
|
|
}
|
|
}
|
|
|
|
function autoLevels(
|
|
data: Uint8ClampedArray,
|
|
width: number,
|
|
height: number,
|
|
clipPercent: number
|
|
): void {
|
|
const histogram = new Array(256).fill(0);
|
|
for (let i = 0; i < data.length; i += 4) {
|
|
histogram[data[i]]++;
|
|
}
|
|
|
|
const totalPixels = width * height;
|
|
const clipPixels = Math.floor(totalPixels * (clipPercent / 100));
|
|
|
|
let minLevel = 0,
|
|
count = 0;
|
|
for (let i = 0; i < 256; i++) {
|
|
count += histogram[i];
|
|
if (count > clipPixels) {
|
|
minLevel = i;
|
|
break;
|
|
}
|
|
}
|
|
|
|
let maxLevel = 255;
|
|
count = 0;
|
|
for (let i = 255; i >= 0; i--) {
|
|
count += histogram[i];
|
|
if (count > clipPixels) {
|
|
maxLevel = i;
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (maxLevel <= minLevel) maxLevel = minLevel + 1;
|
|
const range = maxLevel - minLevel;
|
|
|
|
for (let i = 0; i < data.length; i += 4) {
|
|
const stretched = Math.round(((data[i] - minLevel) / range) * 255);
|
|
const clamped = Math.max(0, Math.min(255, stretched));
|
|
data[i] = clamped;
|
|
data[i + 1] = clamped;
|
|
data[i + 2] = clamped;
|
|
}
|
|
}
|
|
|
|
function orderedDither(data: Uint8ClampedArray, width: number, levels: number): void {
|
|
const bayer = [
|
|
[0, 8, 2, 10],
|
|
[12, 4, 14, 6],
|
|
[3, 11, 1, 9],
|
|
[15, 7, 13, 5]
|
|
];
|
|
const step = 255 / (levels - 1);
|
|
|
|
for (let i = 0; i < data.length; i += 4) {
|
|
const pixelIdx = i / 4;
|
|
const x = pixelIdx % width;
|
|
const y = Math.floor(pixelIdx / width);
|
|
const threshold = (bayer[y % 4][x % 4] / 16 - 0.5) * step;
|
|
const grey = data[i] + threshold;
|
|
const quantized = Math.round(grey / step) * step;
|
|
const clamped = Math.max(0, Math.min(255, quantized));
|
|
data[i] = clamped;
|
|
data[i + 1] = clamped;
|
|
data[i + 2] = clamped;
|
|
}
|
|
}
|
|
|
|
function atkinsonDither(
|
|
data: Uint8ClampedArray,
|
|
width: number,
|
|
height: number,
|
|
levels: number
|
|
): void {
|
|
const step = 255 / (levels - 1);
|
|
|
|
for (let y = 0; y < height; y++) {
|
|
for (let x = 0; x < width; x++) {
|
|
const idx = (y * width + x) * 4;
|
|
const oldVal = data[idx];
|
|
const newVal = Math.round(oldVal / step) * step;
|
|
const error = (oldVal - newVal) / 8;
|
|
|
|
data[idx] = newVal;
|
|
data[idx + 1] = newVal;
|
|
data[idx + 2] = newVal;
|
|
|
|
const neighbors = [
|
|
[x + 1, y],
|
|
[x + 2, y],
|
|
[x - 1, y + 1],
|
|
[x, y + 1],
|
|
[x + 1, y + 1],
|
|
[x, y + 2]
|
|
];
|
|
for (const [nx, ny] of neighbors) {
|
|
if (nx >= 0 && nx < width && ny >= 0 && ny < height) {
|
|
const ni = (ny * width + nx) * 4;
|
|
data[ni] = Math.max(0, Math.min(255, data[ni] + error));
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
for (let i = 0; i < data.length; i += 4) {
|
|
data[i + 1] = data[i];
|
|
data[i + 2] = data[i];
|
|
}
|
|
}
|
|
|
|
async function decodeImage(
|
|
imageData: ArrayBuffer
|
|
): Promise<{ pixels: Uint8Array; width: number; height: number }> {
|
|
const blob = new Blob([imageData]);
|
|
const bitmap = await createImageBitmap(blob);
|
|
|
|
const canvas = new OffscreenCanvas(bitmap.width, bitmap.height);
|
|
const ctx = canvas.getContext('2d');
|
|
if (!ctx) throw new Error('Could not get 2d context');
|
|
|
|
ctx.drawImage(bitmap, 0, 0);
|
|
bitmap.close();
|
|
|
|
const imgData = ctx.getImageData(0, 0, canvas.width, canvas.height);
|
|
return {
|
|
pixels: new Uint8Array(imgData.data.buffer),
|
|
width: canvas.width,
|
|
height: canvas.height
|
|
};
|
|
}
|
|
|
|
async function processImage(
|
|
imageData: ArrayBuffer,
|
|
device: Device,
|
|
config: PipelineConfig
|
|
): Promise<{ blob: Blob; dataUrl: string }> {
|
|
await initPhoton();
|
|
const p = getPhoton();
|
|
|
|
// Check geometry cache first
|
|
const cacheKey = buildGeometryCacheKey(imageData, device, config);
|
|
const cached = geometryCache.get(cacheKey);
|
|
|
|
let geometryPixels: Uint8ClampedArray;
|
|
let geometryWidth: number;
|
|
let geometryHeight: number;
|
|
|
|
if (cached) {
|
|
// Use cached post-geometry data - skip expensive crop/resize
|
|
geometryPixels = new Uint8ClampedArray(cached.pixels);
|
|
geometryWidth = cached.width;
|
|
geometryHeight = cached.height;
|
|
} else {
|
|
// Run geometry phase: decode → crop → resize
|
|
let img;
|
|
try {
|
|
const { pixels, width, height } = await decodeImage(imageData);
|
|
img = new p.PhotonImage(pixels, width, height);
|
|
} catch (e) {
|
|
throw new Error(`Failed to load image: ${e}`);
|
|
}
|
|
|
|
try {
|
|
// Crop
|
|
if (config.crop.enabled) {
|
|
const width = img.get_width();
|
|
const height = img.get_height();
|
|
const targetAspect = device.width / device.height;
|
|
|
|
let cropX1: number, cropY1: number, cropX2: number, cropY2: number;
|
|
|
|
if (config.crop.mode === 'manual' && config.crop.region) {
|
|
// Manual crop: use the provided region
|
|
const region = config.crop.region;
|
|
cropX1 = Math.max(0, Math.round(region.x));
|
|
cropY1 = Math.max(0, Math.round(region.y));
|
|
cropX2 = Math.min(width, Math.round(region.x + region.width));
|
|
cropY2 = Math.min(height, Math.round(region.y + region.height));
|
|
} else {
|
|
// Auto crop modes: center, top, bottom
|
|
const currentAspect = width / height;
|
|
|
|
if (currentAspect > targetAspect) {
|
|
const newWidth = Math.max(1, Math.floor(height * targetAspect));
|
|
let xOffset: number;
|
|
if (config.crop.mode === 'top' || config.crop.mode === 'bottom') {
|
|
xOffset = Math.floor((width - newWidth) / 2);
|
|
} else {
|
|
xOffset = Math.floor((width - newWidth) / 2);
|
|
}
|
|
cropX1 = Math.max(0, xOffset);
|
|
cropY1 = 0;
|
|
cropX2 = Math.min(width, cropX1 + newWidth);
|
|
cropY2 = height;
|
|
} else {
|
|
const newHeight = Math.max(1, Math.floor(width / targetAspect));
|
|
let yOffset: number;
|
|
if (config.crop.mode === 'top') {
|
|
yOffset = 0;
|
|
} else if (config.crop.mode === 'bottom') {
|
|
yOffset = height - newHeight;
|
|
} else {
|
|
yOffset = Math.floor((height - newHeight) / 2);
|
|
}
|
|
cropX1 = 0;
|
|
cropY1 = Math.max(0, yOffset);
|
|
cropX2 = width;
|
|
cropY2 = Math.min(height, cropY1 + newHeight);
|
|
}
|
|
}
|
|
|
|
if (cropX2 > cropX1 && cropY2 > cropY1) {
|
|
const cropped = p.crop(img, cropX1, cropY1, cropX2, cropY2);
|
|
img.free();
|
|
img = cropped;
|
|
}
|
|
}
|
|
|
|
// Resize
|
|
if (config.resize.enabled) {
|
|
const resized = p.resize(img, device.width, device.height, p.SamplingFilter.Lanczos3);
|
|
img.free();
|
|
img = resized;
|
|
}
|
|
|
|
// Extract pixel data after geometry phase
|
|
const imgData = getImageData(img);
|
|
geometryPixels = imgData.data;
|
|
geometryWidth = imgData.width;
|
|
geometryHeight = imgData.height;
|
|
|
|
// Cache this geometry result
|
|
geometryCache.set(cacheKey, {
|
|
pixels: new Uint8ClampedArray(geometryPixels),
|
|
width: geometryWidth,
|
|
height: geometryHeight
|
|
});
|
|
pruneCache();
|
|
} finally {
|
|
try {
|
|
img.free();
|
|
} catch {
|
|
/* ignore */
|
|
}
|
|
}
|
|
}
|
|
|
|
// Adjustment phase: work on a copy of geometry pixels
|
|
let data = geometryPixels;
|
|
const width = geometryWidth;
|
|
const height = geometryHeight;
|
|
let img = fromImageData(data, width, height);
|
|
|
|
try {
|
|
// Adjustments
|
|
if (config.brightness.enabled && config.brightness.value !== 0) {
|
|
p.adjust_brightness(img, Math.round(config.brightness.value * 0.5));
|
|
}
|
|
if (config.contrast.enabled && config.contrast.value !== 0) {
|
|
p.adjust_contrast(img, config.contrast.value * 0.5);
|
|
}
|
|
if (config.gamma.enabled && config.gamma.value !== 1.0) {
|
|
p.gamma_correction(img, config.gamma.value);
|
|
}
|
|
if (config.greyscale.enabled) {
|
|
p.grayscale(img);
|
|
}
|
|
if (config.sharpen.enabled && config.sharpen.amount > 0) {
|
|
p.sharpen(img);
|
|
}
|
|
|
|
// Get pixel data for manual operations
|
|
const imgData = getImageData(img);
|
|
data = imgData.data;
|
|
|
|
// Auto-levels
|
|
if (config.autoLevels.enabled) {
|
|
autoLevels(data, width, height, config.autoLevels.clipPercent);
|
|
}
|
|
|
|
// Dithering or quantization
|
|
const levels = config.quantize.enabled ? config.quantize.levels : device.greyLevels;
|
|
|
|
if (config.dither.enabled) {
|
|
if (config.dither.algorithm === 'floyd-steinberg') {
|
|
img.free();
|
|
img = fromImageData(data, width, height);
|
|
const depth = Math.max(1, Math.round(Math.log2(levels)));
|
|
p.dither(img, depth);
|
|
} else if (config.dither.algorithm === 'ordered') {
|
|
orderedDither(data, width, levels);
|
|
img.free();
|
|
img = fromImageData(data, width, height);
|
|
} else if (config.dither.algorithm === 'atkinson') {
|
|
atkinsonDither(data, width, height, levels);
|
|
img.free();
|
|
img = fromImageData(data, width, height);
|
|
}
|
|
} else if (config.quantize.enabled) {
|
|
quantizePixels(data, levels);
|
|
img.free();
|
|
img = fromImageData(data, width, height);
|
|
}
|
|
|
|
// Output
|
|
const outWidth = img.get_width();
|
|
const outHeight = img.get_height();
|
|
const outPixels = img.get_raw_pixels();
|
|
|
|
const canvas = new OffscreenCanvas(outWidth, outHeight);
|
|
const ctx = canvas.getContext('2d');
|
|
if (!ctx) throw new Error('Could not get output context');
|
|
|
|
const outImageData = new ImageData(new Uint8ClampedArray(outPixels), outWidth, outHeight);
|
|
ctx.putImageData(outImageData, 0, 0);
|
|
|
|
const outputBlob = await canvas.convertToBlob({ type: 'image/png' });
|
|
|
|
const arrayBuffer = await outputBlob.arrayBuffer();
|
|
const bytes = new Uint8Array(arrayBuffer);
|
|
let binary = '';
|
|
for (let i = 0; i < bytes.length; i++) {
|
|
binary += String.fromCharCode(bytes[i]);
|
|
}
|
|
const base64 = btoa(binary);
|
|
const dataUrl = `data:image/png;base64,${base64}`;
|
|
|
|
return { blob: outputBlob, dataUrl };
|
|
} finally {
|
|
try {
|
|
img.free();
|
|
} catch {
|
|
/* ignore */
|
|
}
|
|
}
|
|
}
|
|
|
|
self.onmessage = async (e: MessageEvent) => {
|
|
const { id, type, payload } = e.data;
|
|
|
|
if (type !== 'process') return;
|
|
|
|
try {
|
|
const { imageData, device, config } = payload;
|
|
const result = await processImage(imageData, device, config);
|
|
|
|
const blobArrayBuffer = await result.blob.arrayBuffer();
|
|
|
|
self.postMessage(
|
|
{
|
|
id,
|
|
success: true,
|
|
result: {
|
|
blobData: blobArrayBuffer,
|
|
blobType: result.blob.type,
|
|
dataUrl: result.dataUrl
|
|
}
|
|
},
|
|
[blobArrayBuffer]
|
|
);
|
|
} catch (error) {
|
|
self.postMessage({
|
|
id,
|
|
success: false,
|
|
error: error instanceof Error ? error.message : String(error)
|
|
});
|
|
}
|
|
};
|
|
|
|
export {};
|