/*
 * Full pipeline processing off the main thread:
 *  - image decoding via createImageBitmap + OffscreenCanvas
 *  - all adjustments and dithering algorithms
 *  - Blob output with data URL conversion
 */
// Web Worker for image processing using Photon WASM
import type { Device, PipelineConfig } from '../types';

// Lazily-initialized Photon WASM module; stays null until initPhoton() resolves.
// All helpers below must only touch it through getPhoton().
let photon: typeof import('@silvia-odwyer/photon') | null = null;
async function initPhoton() {
|
|
if (photon) return photon;
|
|
const module = await import('@silvia-odwyer/photon');
|
|
await module.default();
|
|
photon = module;
|
|
return photon;
|
|
}
|
|
|
|
function getPhoton() {
|
|
if (!photon) throw new Error('Photon not initialized');
|
|
return photon;
|
|
}
|
|
|
|
function getImageData(img: ReturnType<typeof getPhoton.prototype.PhotonImage>): {
|
|
data: Uint8ClampedArray;
|
|
width: number;
|
|
height: number;
|
|
} {
|
|
const p = getPhoton();
|
|
const width = img.get_width();
|
|
const height = img.get_height();
|
|
const rawPixels = img.get_raw_pixels();
|
|
return { data: new Uint8ClampedArray(rawPixels), width, height };
|
|
}
|
|
|
|
function fromImageData(data: Uint8ClampedArray, width: number, height: number) {
|
|
const p = getPhoton();
|
|
return new p.PhotonImage(new Uint8Array(data.buffer), width, height);
|
|
}
|
|
|
|
function quantizePixels(data: Uint8ClampedArray, levels: number): void {
|
|
const step = 255 / (levels - 1);
|
|
for (let i = 0; i < data.length; i += 4) {
|
|
const grey = data[i];
|
|
const quantized = Math.round(grey / step) * step;
|
|
data[i] = quantized;
|
|
data[i + 1] = quantized;
|
|
data[i + 2] = quantized;
|
|
}
|
|
}
|
|
|
|
function autoLevels(
|
|
data: Uint8ClampedArray,
|
|
width: number,
|
|
height: number,
|
|
clipPercent: number
|
|
): void {
|
|
const histogram = new Array(256).fill(0);
|
|
for (let i = 0; i < data.length; i += 4) {
|
|
histogram[data[i]]++;
|
|
}
|
|
|
|
const totalPixels = width * height;
|
|
const clipPixels = Math.floor(totalPixels * (clipPercent / 100));
|
|
|
|
let minLevel = 0,
|
|
count = 0;
|
|
for (let i = 0; i < 256; i++) {
|
|
count += histogram[i];
|
|
if (count > clipPixels) {
|
|
minLevel = i;
|
|
break;
|
|
}
|
|
}
|
|
|
|
let maxLevel = 255;
|
|
count = 0;
|
|
for (let i = 255; i >= 0; i--) {
|
|
count += histogram[i];
|
|
if (count > clipPixels) {
|
|
maxLevel = i;
|
|
break;
|
|
}
|
|
}
|
|
|
|
if (maxLevel <= minLevel) maxLevel = minLevel + 1;
|
|
const range = maxLevel - minLevel;
|
|
|
|
for (let i = 0; i < data.length; i += 4) {
|
|
const stretched = Math.round(((data[i] - minLevel) / range) * 255);
|
|
const clamped = Math.max(0, Math.min(255, stretched));
|
|
data[i] = clamped;
|
|
data[i + 1] = clamped;
|
|
data[i + 2] = clamped;
|
|
}
|
|
}
|
|
|
|
function orderedDither(data: Uint8ClampedArray, width: number, levels: number): void {
|
|
const bayer = [
|
|
[0, 8, 2, 10],
|
|
[12, 4, 14, 6],
|
|
[3, 11, 1, 9],
|
|
[15, 7, 13, 5]
|
|
];
|
|
const step = 255 / (levels - 1);
|
|
|
|
for (let i = 0; i < data.length; i += 4) {
|
|
const pixelIdx = i / 4;
|
|
const x = pixelIdx % width;
|
|
const y = Math.floor(pixelIdx / width);
|
|
const threshold = (bayer[y % 4][x % 4] / 16 - 0.5) * step;
|
|
const grey = data[i] + threshold;
|
|
const quantized = Math.round(grey / step) * step;
|
|
const clamped = Math.max(0, Math.min(255, quantized));
|
|
data[i] = clamped;
|
|
data[i + 1] = clamped;
|
|
data[i + 2] = clamped;
|
|
}
|
|
}
|
|
|
|
function atkinsonDither(
|
|
data: Uint8ClampedArray,
|
|
width: number,
|
|
height: number,
|
|
levels: number
|
|
): void {
|
|
const step = 255 / (levels - 1);
|
|
|
|
for (let y = 0; y < height; y++) {
|
|
for (let x = 0; x < width; x++) {
|
|
const idx = (y * width + x) * 4;
|
|
const oldVal = data[idx];
|
|
const newVal = Math.round(oldVal / step) * step;
|
|
const error = (oldVal - newVal) / 8;
|
|
|
|
data[idx] = newVal;
|
|
data[idx + 1] = newVal;
|
|
data[idx + 2] = newVal;
|
|
|
|
const neighbors = [
|
|
[x + 1, y],
|
|
[x + 2, y],
|
|
[x - 1, y + 1],
|
|
[x, y + 1],
|
|
[x + 1, y + 1],
|
|
[x, y + 2]
|
|
];
|
|
for (const [nx, ny] of neighbors) {
|
|
if (nx >= 0 && nx < width && ny >= 0 && ny < height) {
|
|
const ni = (ny * width + nx) * 4;
|
|
data[ni] = Math.max(0, Math.min(255, data[ni] + error));
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
for (let i = 0; i < data.length; i += 4) {
|
|
data[i + 1] = data[i];
|
|
data[i + 2] = data[i];
|
|
}
|
|
}
|
|
|
|
async function decodeImage(
|
|
imageData: ArrayBuffer
|
|
): Promise<{ pixels: Uint8Array; width: number; height: number }> {
|
|
const blob = new Blob([imageData]);
|
|
const bitmap = await createImageBitmap(blob);
|
|
|
|
const canvas = new OffscreenCanvas(bitmap.width, bitmap.height);
|
|
const ctx = canvas.getContext('2d');
|
|
if (!ctx) throw new Error('Could not get 2d context');
|
|
|
|
ctx.drawImage(bitmap, 0, 0);
|
|
bitmap.close();
|
|
|
|
const imgData = ctx.getImageData(0, 0, canvas.width, canvas.height);
|
|
return {
|
|
pixels: new Uint8Array(imgData.data.buffer),
|
|
width: canvas.width,
|
|
height: canvas.height
|
|
};
|
|
}
|
|
|
|
/**
 * Runs the full processing pipeline on one image:
 * decode -> crop -> resize -> Photon adjustments -> JS-side auto-levels /
 * dithering / quantization -> PNG blob + base64 data URL.
 *
 * WASM memory discipline: every PhotonImage owns WASM-heap memory, so each
 * time `img` is replaced (crop/resize/fromImageData) the old image is freed
 * first, and the `finally` frees whatever is current when we leave.
 *
 * @param imageData - encoded source image bytes
 * @param device - target device; supplies output dimensions and grey levels
 * @param config - per-stage enable flags and parameters
 * @returns the encoded PNG blob and an equivalent data URL
 * @throws Error when decoding fails or a canvas context is unavailable
 */
async function processImage(
  imageData: ArrayBuffer,
  device: Device,
  config: PipelineConfig
): Promise<{ blob: Blob; dataUrl: string }> {
  await initPhoton();
  const p = getPhoton();

  let img;
  try {
    const { pixels, width, height } = await decodeImage(imageData);
    img = new p.PhotonImage(pixels, width, height);
  } catch (e) {
    // Normalize any decode failure into a single, user-readable error.
    throw new Error(`Failed to load image: ${e}`);
  }

  try {
    // Crop: center-crop to the device's aspect ratio before resizing,
    // so the resize never distorts the image.
    if (config.crop.enabled) {
      const width = img.get_width();
      const height = img.get_height();
      const targetAspect = device.width / device.height;
      const currentAspect = width / height;

      let cropX1: number, cropY1: number, cropX2: number, cropY2: number;

      if (currentAspect > targetAspect) {
        // Image is too wide: trim equal amounts from left and right.
        const newWidth = Math.max(1, Math.floor(height * targetAspect));
        const xOffset = Math.floor((width - newWidth) / 2);
        cropX1 = Math.max(0, xOffset);
        cropY1 = 0;
        cropX2 = Math.min(width, cropX1 + newWidth);
        cropY2 = height;
      } else {
        // Image is too tall: trim equal amounts from top and bottom.
        const newHeight = Math.max(1, Math.floor(width / targetAspect));
        const yOffset = Math.floor((height - newHeight) / 2);
        cropX1 = 0;
        cropY1 = Math.max(0, yOffset);
        cropX2 = width;
        cropY2 = Math.min(height, cropY1 + newHeight);
      }

      // Only crop when the rectangle is non-degenerate.
      if (cropX2 > cropX1 && cropY2 > cropY1) {
        const cropped = p.crop(img, cropX1, cropY1, cropX2, cropY2);
        img.free();
        img = cropped;
      }
    }

    // Resize to the device's exact pixel dimensions (Lanczos3 for quality).
    if (config.resize.enabled) {
      const resized = p.resize(img, device.width, device.height, p.SamplingFilter.Lanczos3);
      img.free();
      img = resized;
    }

    // Adjustments performed in WASM by Photon. The 0.5 scaling maps the
    // UI's value range onto Photon's expected range — TODO confirm against
    // the slider ranges defined by PipelineConfig.
    if (config.brightness.enabled && config.brightness.value !== 0) {
      p.adjust_brightness(img, Math.round(config.brightness.value * 0.5));
    }
    if (config.contrast.enabled && config.contrast.value !== 0) {
      p.adjust_contrast(img, config.contrast.value * 0.5);
    }
    if (config.gamma.enabled && config.gamma.value !== 1.0) {
      p.gamma_correction(img, config.gamma.value);
    }
    if (config.greyscale.enabled) {
      p.grayscale(img);
    }
    if (config.sharpen.enabled && config.sharpen.amount > 0) {
      // NOTE(review): Photon's sharpen takes no strength parameter here, so
      // config.sharpen.amount only gates whether sharpening runs at all.
      p.sharpen(img);
    }

    // Get pixel data for manual operations (the JS-side dither/levels code
    // assumes greyscale and reads only the red channel).
    const imgData = getImageData(img);
    let { data } = imgData;
    const { width, height } = imgData;

    // Auto-levels: percentile-clipped contrast stretch.
    if (config.autoLevels.enabled) {
      autoLevels(data, width, height, config.autoLevels.clipPercent);
    }

    // Dithering or quantization. Grey-level count comes from the explicit
    // quantize setting when enabled, otherwise from the device's capability.
    const levels = config.quantize.enabled ? config.quantize.levels : device.greyLevels;

    if (config.dither.enabled) {
      if (config.dither.algorithm === 'floyd-steinberg') {
        // Photon implements FS dithering; rebuild the WASM image from the
        // (possibly auto-leveled) pixel buffer first. Photon's dither takes
        // a bit depth, hence log2 of the level count (min 1 bit).
        img.free();
        img = fromImageData(data, width, height);
        const depth = Math.max(1, Math.round(Math.log2(levels)));
        p.dither(img, depth);
      } else if (config.dither.algorithm === 'ordered') {
        // JS-side Bayer dither, then push the result back into WASM.
        orderedDither(data, width, levels);
        img.free();
        img = fromImageData(data, width, height);
      } else if (config.dither.algorithm === 'atkinson') {
        // JS-side Atkinson error diffusion, then push back into WASM.
        atkinsonDither(data, width, height, levels);
        img.free();
        img = fromImageData(data, width, height);
      }
    } else if (config.quantize.enabled) {
      // Plain quantization without dithering.
      quantizePixels(data, levels);
      img.free();
      img = fromImageData(data, width, height);
    }

    // Output: rasterize the final pixels onto an OffscreenCanvas and encode
    // to PNG via convertToBlob.
    const outWidth = img.get_width();
    const outHeight = img.get_height();
    const outPixels = img.get_raw_pixels();

    const canvas = new OffscreenCanvas(outWidth, outHeight);
    const ctx = canvas.getContext('2d');
    if (!ctx) throw new Error('Could not get output context');

    const outImageData = new ImageData(new Uint8ClampedArray(outPixels), outWidth, outHeight);
    ctx.putImageData(outImageData, 0, 0);

    const outputBlob = await canvas.convertToBlob({ type: 'image/png' });

    // Base64-encode the PNG bytes for the data URL. btoa needs a binary
    // string, so build one char per byte (fine for preview-sized images).
    const arrayBuffer = await outputBlob.arrayBuffer();
    const bytes = new Uint8Array(arrayBuffer);
    let binary = '';
    for (let i = 0; i < bytes.length; i++) {
      binary += String.fromCharCode(bytes[i]);
    }
    const base64 = btoa(binary);
    const dataUrl = `data:image/png;base64,${base64}`;

    return { blob: outputBlob, dataUrl };
  } finally {
    // Always release the current WASM image; double-free after an earlier
    // free/replace throws inside Photon, so swallow that deliberately.
    try {
      img.free();
    } catch {
      /* ignore */
    }
  }
}
self.onmessage = async (e: MessageEvent) => {
|
|
const { id, type, payload } = e.data;
|
|
|
|
if (type !== 'process') return;
|
|
|
|
try {
|
|
const { imageData, device, config } = payload;
|
|
const result = await processImage(imageData, device, config);
|
|
|
|
const blobArrayBuffer = await result.blob.arrayBuffer();
|
|
|
|
self.postMessage(
|
|
{
|
|
id,
|
|
success: true,
|
|
result: {
|
|
blobData: blobArrayBuffer,
|
|
blobType: result.blob.type,
|
|
dataUrl: result.dataUrl
|
|
}
|
|
},
|
|
[blobArrayBuffer]
|
|
);
|
|
} catch (error) {
|
|
self.postMessage({
|
|
id,
|
|
success: false,
|
|
error: error instanceof Error ? error.message : String(error)
|
|
});
|
|
}
|
|
};
|
|
|
|
export {};
|