Files
aza/APP/fotoapp - Kopie/image_ops.py

340 lines
12 KiB
Python
Raw Normal View History

2026-03-25 14:14:07 +01:00
from __future__ import annotations
import io
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Optional, Tuple
import numpy as np
from PIL import Image, ImageCms
from fotoapp.lut import CubeLUT, apply_lut_chain, apply_lut_rgb_float01, blend_rgb
# Cached sRGB destination profile for all ICC conversions (built once at import).
_srgb_profile = ImageCms.createProfile("sRGB")
# ─── Hue-band centres (degrees) for the colour mixer ────────────────────────
# Centres are perceptually placed, not evenly spaced (e.g. yellow at 55°, not 60°).
HUE_BANDS: dict[str, float] = {
    "red": 0.0,
    "orange": 30.0,
    "yellow": 55.0,
    "green": 120.0,
    "cyan": 180.0,
    "blue": 220.0,
    "magenta": 300.0,
}
# Stable ordering of band names, e.g. for UI controls iterating the mixer.
HUE_BAND_NAMES = list(HUE_BANDS.keys())
_HUE_BANDWIDTH = 30.0 # half-width in degrees for smoothstep falloff
@dataclass
class Adjustments:
    """Step-1 (basic) image adjustment values; 0 means neutral/no change."""
    brightness: int = 0 # -100..+100
    contrast: int = 0 # -100..+100
    saturation: int = 0 # -100..+100
    temperature: int = 0 # -50..+50
    tint: int = 0 # -50..+50
    teal_orange: int = 0 # 0..100
def cm_to_px(cm: float, dpi: float) -> int:
    """Convert a length in centimetres to whole pixels at *dpi* (rounded)."""
    inches = float(cm) / 2.54
    return int(round(inches * float(dpi)))
# ─── ICC / colour-space ─────────────────────────────────────────────────────
def ensure_srgb(img: Image.Image) -> Image.Image:
    """
    Convert image from its embedded ICC profile (e.g. Display P3, AdobeRGB,
    or a CMYK press profile) to sRGB. This MUST happen before any pixel
    math or LUT application, because LUTs and our adjustments assume
    sRGB-encoded input.

    Always returns an RGB image; falls back to a plain mode conversion
    when no profile is embedded or the profile cannot be applied.
    """
    icc_data = img.info.get("icc_profile")
    if not icc_data:
        # No embedded profile: assume sRGB already, just normalise the mode.
        if img.mode != "RGB":
            return img.convert("RGB")
        return img
    try:
        src_profile = ImageCms.ImageCmsProfile(io.BytesIO(icc_data))
        dst_profile = ImageCms.ImageCmsProfile(_srgb_profile)
        # Run the transform in the image's native mode when littleCMS
        # supports it (RGB or CMYK). Converting CMYK -> RGB *before* the
        # transform would discard the profile's colorimetry and then make
        # the CMYK source profile unusable ("RGB" inMode vs CMYK profile),
        # silently degrading the image via the naive fallback below.
        in_mode = img.mode if img.mode in ("RGB", "CMYK") else "RGB"
        if img.mode != in_mode:
            img = img.convert(in_mode)
        transform = ImageCms.buildTransform(
            src_profile, dst_profile, in_mode, "RGB",
            renderingIntent=ImageCms.Intent.PERCEPTUAL,
        )
        img = ImageCms.applyTransform(img, transform)
        # Pixels are now sRGB: drop the stale source profile so it is not
        # re-embedded on save.
        img.info.pop("icc_profile", None)
    except (ImageCms.PyCMSError, OSError, ValueError):
        # Broken/unsupported profile: best-effort naive conversion.
        if img.mode != "RGB":
            img = img.convert("RGB")
    return img
# ─── Pixel format helpers ───────────────────────────────────────────────────
def pil_to_rgb_float01(img: Image.Image) -> np.ndarray:
    """PIL image -> (H, W, 3) float32 array in [0, 1], sRGB-converted first."""
    srgb = ensure_srgb(img)
    arr = np.asarray(srgb, dtype=np.float32)
    return arr / 255.0
def rgb_float01_to_pil(rgb: np.ndarray) -> Image.Image:
    """float01 (H, W, 3) array -> 8-bit RGB PIL image (round-half-up)."""
    scaled = rgb * 255.0 + 0.5  # +0.5 then truncate == round half up
    rgb8 = np.clip(scaled, 0, 255).astype(np.uint8)
    return Image.fromarray(rgb8, mode="RGB")
# ─── Crop helpers ────────────────────────────────────────────────────────────
def crop_pil(img: Image.Image, box_xyxy: Tuple[int, int, int, int]) -> Image.Image:
    """Crop to *box_xyxy*, clamped to image bounds.

    Returns an unmodified copy when the clamped box is empty or inverted.
    """
    left, top, right, bottom = box_xyxy
    left = min(max(left, 0), img.width)
    right = min(max(right, 0), img.width)
    top = min(max(top, 0), img.height)
    bottom = min(max(bottom, 0), img.height)
    if right <= left or bottom <= top:
        return img.copy()
    return img.crop((left, top, right, bottom))
def crop_center_aspect(img: Image.Image, aspect_w: float, aspect_h: float) -> Image.Image:
    """Centre-crop *img* to the aspect ratio aspect_w:aspect_h."""
    w, h = img.size
    want = aspect_w / aspect_h
    have = w / h
    if have > want:
        # Too wide: trim equal amounts from left and right.
        crop_w = int(h * want)
        left = (w - crop_w) // 2
        return img.crop((left, 0, left + crop_w, h))
    # Too tall (or exact): trim equal amounts from top and bottom.
    crop_h = int(w / want)
    top = (h - crop_h) // 2
    return img.crop((0, top, w, top + crop_h))
def crop_circle(img: Image.Image, diameter_px: int = 0) -> Image.Image:
    """Centre-crop to a square, then mask it to a circle (RGBA output).

    If *diameter_px* > 0 the result is exactly that size; otherwise the
    minimum of width/height is used.
    """
    from PIL import ImageDraw
    w, h = img.size
    diameter = max(1, diameter_px) if diameter_px > 0 else min(w, h)
    side = min(w, h)
    left = (w - side) // 2
    top = (h - side) // 2
    square = img.copy().crop((left, top, left + side, top + side))
    if square.size[0] != diameter:
        square = square.resize((diameter, diameter), Image.Resampling.LANCZOS)
    # Circle mask in the alpha channel; -1 keeps the ellipse inside bounds.
    mask = Image.new("L", (diameter, diameter), 0)
    ImageDraw.Draw(mask).ellipse((0, 0, diameter - 1, diameter - 1), fill=255)
    out = square.convert("RGBA")
    out.putalpha(mask)
    return out
def resize_to_output(img: Image.Image, out_w: int, out_h: int) -> Image.Image:
    """Resize to (out_w, out_h) with Lanczos; dimensions are floored at 1."""
    size = (max(1, int(out_w)), max(1, int(out_h)))
    return img.resize(size, resample=Image.Resampling.LANCZOS)
# ─── Teal & Orange grading ──────────────────────────────────────────────────
def apply_teal_orange(rgb: np.ndarray, strength: float) -> np.ndarray:
if strength <= 0:
return rgb
luma = 0.2126 * rgb[..., 0] + 0.7152 * rgb[..., 1] + 0.0722 * rgb[..., 2]
r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
warmth = r - b
warm_mask = np.clip(warmth * 2.0 + 0.3, 0.0, 1.0)
cool_mask = 1.0 - warm_mask
r_new = r + warm_mask * 0.08 - cool_mask * 0.06
g_new = g + warm_mask * 0.02 + cool_mask * 0.04
b_new = b - warm_mask * 0.06 + cool_mask * 0.08
luma_3d = luma[..., None]
teal_orange = np.stack([r_new, g_new, b_new], axis=-1)
teal_orange = luma_3d + (teal_orange - luma_3d) * 1.15
teal_orange = np.clip(teal_orange, 0.0, 1.0)
s = float(np.clip(strength, 0.0, 1.0))
return rgb * (1.0 - s) + teal_orange * s
# ─── HSL saturation per hue-band (colour mixer) ─────────────────────────────
def apply_hsl_saturation_bands(rgb: np.ndarray, bands: dict) -> np.ndarray:
    """
    Per-hue-band saturation adjustment on an sRGB float01 (H, W, 3) array.

    *bands* maps band name -> value in [-100, +100]; positive boosts
    saturation in that hue range, negative mutes it. Each band is weighted
    by a smoothstep over ±30° around its centre so transitions stay soft.
    """
    r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
    cmax = np.maximum(np.maximum(r, g), b)
    cmin = np.minimum(np.minimum(r, g), b)
    delta = cmax - cmin
    # Hue in degrees [0, 360); achromatic pixels keep hue 0.
    hue = np.zeros_like(r)
    chromatic = delta > 1e-8
    is_r = chromatic & (cmax == r)
    is_g = chromatic & (cmax == g) & ~is_r
    is_b = chromatic & ~is_r & ~is_g
    hue[is_r] = 60.0 * (((g[is_r] - b[is_r]) / delta[is_r]) % 6.0)
    hue[is_g] = 60.0 * ((b[is_g] - r[is_g]) / delta[is_g] + 2.0)
    hue[is_b] = 60.0 * ((r[is_b] - g[is_b]) / delta[is_b] + 4.0)
    hue = hue % 360.0
    band_gain = {"cyan": 2.0}  # extra gain so the cyan slider has visible effect
    sat_mult = np.ones_like(r)
    for name, center in HUE_BANDS.items():
        value = bands.get(name, 0)
        if value == 0:
            continue
        dist = np.abs(hue - center)
        dist = np.minimum(dist, 360.0 - dist)  # wrap-around hue distance
        t = np.clip(1.0 - dist / _HUE_BANDWIDTH, 0.0, 1.0)
        weight = t * t * (3.0 - 2.0 * t)  # smoothstep
        sat_mult += weight * (value / 100.0) * band_gain.get(name, 1.0)
    sat_mult = np.maximum(sat_mult, 0.0)  # never invert colours
    luma = (0.2126 * r + 0.7152 * g + 0.0722 * b)[..., None]
    out = luma + (rgb - luma) * sat_mult[..., None]
    return np.clip(out, 0.0, 1.0).astype(np.float32)
# ─── Full processing pipeline ────────────────────────────────────────────────
#
# ORDER (must be respected everywhere):
# 1) Basic adjustments (brightness, contrast, saturation, temp, tint,
# teal & orange)
# 2) LUT chain (sequential, each blended with own strength)
# 3) HSL colour mixer (per-hue-band saturation, AFTER LUTs)
# ── steps 4-7 handled externally in the UI processing pipeline ──
# 4) KI mask / segmentation
# 5) Feather (Gaussian blur on alpha mask)
# 6) Background compositing (blur / colour / image / transparent)
# 7) Retouching (skin softening, healing)
# ──
# 8) Post-adjustments (brightness & contrast, applied LAST)
#
def apply_adjustments(
    img: Image.Image,
    adj: Adjustments,
    lut_chain: list | None = None,
    hue_bands: dict | None = None,
    post_adj: dict | None = None,
) -> Image.Image:
    """Run the in-module pipeline steps on *img*:

    1) basic adjustments from *adj*, 2) the LUT chain, 3) the per-hue
    colour mixer, then 8) optional post brightness/contrast. Steps 4-7
    (masking / compositing / retouch) happen outside this module.
    """
    rgb = pil_to_rgb_float01(img)
    # ── 1) basic adjustments ──
    if adj.brightness != 0:
        rgb = rgb + float(adj.brightness) / 200.0
    if adj.contrast != 0:
        scale = max(0.0, (100.0 + adj.contrast) / 100.0)
        rgb = np.clip(0.5 + (rgb - 0.5) * scale, 0.0, 1.0)
    if adj.temperature != 0 or adj.tint != 0:
        warm = float(adj.temperature) / 100.0
        green_shift = float(adj.tint) / 100.0
        shifted = np.stack(
            [
                rgb[..., 0] + 0.25 * warm + 0.10 * green_shift,
                rgb[..., 1] - 0.20 * green_shift,
                rgb[..., 2] - 0.25 * warm + 0.10 * green_shift,
            ],
            axis=-1,
        )
        rgb = np.clip(shifted, 0.0, 1.0)
    if adj.saturation != 0:
        sat = 1.0 + float(adj.saturation) / 100.0
        luma = (0.2126 * rgb[..., 0] + 0.7152 * rgb[..., 1]
                + 0.0722 * rgb[..., 2])[..., None]
        rgb = np.clip(luma + (rgb - luma) * sat, 0.0, 1.0)
    if adj.teal_orange > 0:
        rgb = np.clip(apply_teal_orange(rgb, adj.teal_orange / 100.0), 0.0, 1.0)
    # ── 2) LUT chain ──
    if lut_chain:
        rgb = apply_lut_chain(rgb, lut_chain)
    # ── 3) colour mixer (post-LUT) ──
    if hue_bands and any(v != 0 for v in hue_bands.values()):
        rgb = apply_hsl_saturation_bands(rgb, hue_bands)
    # ── 8) post-adjustments (applied LAST, after external steps 4-7) ──
    if post_adj:
        if post_adj.get("brightness", 0) != 0:
            rgb = rgb + float(post_adj["brightness"]) / 200.0
        if post_adj.get("contrast", 0) != 0:
            scale = max(0.0, (100.0 + post_adj["contrast"]) / 100.0)
            rgb = 0.5 + (rgb - 0.5) * scale
        rgb = np.clip(rgb, 0.0, 1.0)
    return rgb_float01_to_pil(rgb)
def apply_post_adj(rgb: np.ndarray, post_adj: dict) -> np.ndarray:
    """Apply post-LUT brightness, contrast, blacks & sharpness on float01 array."""
    out = rgb
    brightness = post_adj.get("brightness", 0)
    if brightness != 0:
        out = out + float(brightness) / 200.0
    contrast = post_adj.get("contrast", 0)
    if contrast != 0:
        scale = max(0.0, (100.0 + contrast) / 100.0)
        out = (out - 0.5) * scale + 0.5
    blacks_val = post_adj.get("blacks", 0)
    if blacks_val != 0:
        out = _apply_blacks(out, float(blacks_val))
    sharpen_val = post_adj.get("sharpen", 0)
    if sharpen_val > 0:
        out = _apply_sharpen(out, float(sharpen_val))
    return np.clip(out, 0.0, 1.0).astype(np.float32)
def _apply_blacks(rgb: np.ndarray, strength: float) -> np.ndarray:
"""Deepen shadows (positive) or lift them (negative).
Uses a smooth power-curve approach so the transition from shadows to
mid-tones stays natural (no hard edges).
*strength* in [-100 .. +100]:
+100 strong black crush (gamma ~1.6)
0 no change
-100 lifted shadows (gamma ~0.6)
"""
gamma = 1.0 + strength / 166.0
gamma = max(0.4, min(2.0, gamma))
return np.power(np.clip(rgb, 0.0, 1.0), gamma)
def _apply_sharpen(rgb: np.ndarray, strength: float) -> np.ndarray:
    """Classic unsharp-mask sharpening.

    *strength* 0..100: 0 = no effect, 100 = strong sharpening. A Gaussian
    blur provides the low-frequency base; the high-frequency residual is
    added back scaled by the strength.
    """
    import cv2
    base = cv2.GaussianBlur(rgb, (0, 0), sigmaX=1.5)
    detail = rgb - base
    result = rgb + (strength / 50.0) * detail
    return np.clip(result, 0.0, 1.0).astype(np.float32)