This commit is contained in:
2026-03-25 14:14:07 +01:00
parent d6b31e2ef7
commit a0073b4fb1
10368 changed files with 2214340 additions and 0 deletions


@@ -0,0 +1 @@
,SURO/surov,Suro,23.02.2026 14:12,file:///C:/Users/surov/AppData/Roaming/LibreOffice/4;


@@ -0,0 +1,82 @@
# FotoApp: Editing & Batch Export (PySide6)
Windows desktop app (Python 3.11+) for batch photo editing with LUT support.
## Features
- Load photos via drag & drop or a file dialog (single files or whole folders)
- Crop presets (square 1:1, portrait 4:5, 3:4, passport photo, etc.)
- **Interactive mouse crop**: with the crop overlay active, simply click and drag on the image to draw a new crop rectangle (the aspect ratio is kept). The area outside the crop is dimmed.
- Sliders: brightness, contrast, saturation, temperature, tint, teal & orange
- **LUT chain**: stack multiple .cube LUTs, each with its own strength (0-100 %). Reorder via drag & drop or the ▲/▼ buttons. LUT files are cached.
- **Color mixer (after LUT)**: HSL saturation slider per hue band (orange, yellow, green, cyan, blue, magenta), applied AFTER the LUT chain.
- **Look system**: save/load the complete editing state (sliders + LUT chain + color mixer + AI parameters) as a named look. One click restores everything.
- **AI cutout**: person segmentation runs fully locally (MediaPipe Selfie Segmentation, see `requirements.txt`), no cloud service. The model is downloaded automatically on first start.
- **Soft edges (feather)**: smoothly feather the edges of the cutout mask with a Gaussian blur.
- **Background modes**: original, blurred (portrait blur), solid color, transparent (PNG), custom background image.
- **Manual mask correction**: brush tool (+ / -) with adjustable size and hardness, including undo.
- **Retouching**: skin softening (bilateral filter) within the person area.
- Export as JPG/PNG, single images or in batch
## Processing order
The pipeline applies the steps below in order (a code sketch follows the list):
1. **Basic adjustments**: brightness, contrast, saturation, temperature, tint, teal & orange
2. **LUT chain**: sequential application of all LUTs, each with its own strength
3. **Color mixer**: HSL saturation per hue band (after LUTs)
4. **AI mask / cutout**: detect the person and generate an alpha mask
5. **Feather**: Gaussian blur on the alpha mask
6. **Background compositing**: fg × alpha + bg × (1 - alpha)
7. **Retouching**: skin softening within the mask area
8. **Post-adjustments**: brightness & contrast as final fine tuning
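The sketch below shows the order only, not the real implementation (which lives in the UI layer). The function names come from the processing, retouching and segmentation modules in this repo; all parameter values are illustrative.
```python
img  = apply_adjustments(photo, adj, lut_chain, hue_bands)     # steps 1-3 (PIL in/out)
rgb  = pil_to_rgb_float01(img)                                 # float32 sRGB, HxWx3
mask = segment_person(img)                                     # step 4: person alpha mask
mask = feather_mask(mask, radius_px=8.0)                       # step 5: soft edges
rgb  = composite_fg_bg(rgb, mask, bg_mode="blur", bg_blur_radius=25.0)  # step 6
rgb  = soften_skin(rgb, mask, strength=30)                     # step 7
rgb  = apply_post_adj(rgb, {"brightness": 5, "contrast": 5})   # step 8
out  = rgb_float01_to_pil(rgb)
```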
## Config path
All settings, looks and app state are stored at:
```
%APPDATA%\FotoApp\config.json
```
On first start, an existing old `settings.json` is migrated.
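A minimal sketch of reading that file, assuming the standard `%APPDATA%` environment variable; this is not the app's actual loader.
```python
import json
import os
from pathlib import Path

cfg_path = Path(os.environ["APPDATA"]) / "FotoApp" / "config.json"
config = json.loads(cfg_path.read_text(encoding="utf-8")) if cfg_path.exists() else {}
```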
## Setup (Windows)
Double-clicking `start.bat` automatically creates a venv and installs the dependencies.
Or manually (the last command is run from the parent folder of the `fotoapp` package, just like `start.bat` does):
```bash
py -3 -m venv .venv
.venv\Scripts\pip install -r requirements.txt
cd ..
fotoapp\.venv\Scripts\python -m fotoapp.main
```
## Color management
Images with an embedded ICC profile (e.g. iPhone Display P3, AdobeRGB)
are automatically converted to **sRGB** before any adjustments or LUTs
are applied.
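A simplified sketch of that conversion with Pillow's `ImageCms` (the app's own `ensure_srgb()` helper additionally handles missing profiles and error cases); `photo.jpg` is a placeholder.
```python
import io
from PIL import Image, ImageCms

img = Image.open("photo.jpg").convert("RGB")
icc = img.info.get("icc_profile")
if icc:
    src = ImageCms.ImageCmsProfile(io.BytesIO(icc))
    dst = ImageCms.createProfile("sRGB")
    img = ImageCms.profileToProfile(img, src, dst, outputMode="RGB")
```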
## LUT notes
- **Only sRGB/Rec.709 LUTs** work correctly without additional transforms.
- LUTs made for **log footage** (S-Log, C-Log, V-Log, etc.) or **ACES** give wrong results without a dedicated input transform.
- Standard `.cube` 3D LUTs are supported (sizes 17, 33, 65 are common).
- Files with a combined 1D+3D LUT are parsed correctly (the 1D part is skipped). A usage sketch follows the list.
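A minimal usage sketch of the chain helpers in `lut.py`; the file paths are examples and an `rgb` float32 array in [0, 1] is assumed.
```python
from fotoapp.lut import get_cached_lut, apply_lut_chain

chain = [
    (get_cached_lut(r"luts\Teal And Orange.cube"), 80.0),                 # 80 % strength
    (get_cached_lut(r"luts\Rec709_LPP Tetrachrome 400_CIN.cube"), 40.0),  # 40 % strength
]
rgb = apply_lut_chain(rgb, chain)
```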
## PyInstaller build (EXE)
### Onefile (single EXE)
```bash
pyinstaller --noconfirm --clean --onefile --windowed ^
--name FotoApp ^
--collect-all PySide6 ^
fotoapp\main.py
```
### Onedir (often more stable with Qt)
```bash
pyinstaller --noconfirm --clean --onedir --windowed ^
--name FotoApp ^
--collect-all PySide6 ^
fotoapp\main.py
```

File diff suppressed because it is too large.

Binary file not shown.


@@ -0,0 +1,339 @@
from __future__ import annotations
import io
from dataclasses import dataclass, field
from pathlib import Path
from typing import List, Optional, Tuple
import numpy as np
from PIL import Image, ImageCms
from fotoapp.lut import CubeLUT, apply_lut_chain, apply_lut_rgb_float01, blend_rgb
_srgb_profile = ImageCms.createProfile("sRGB")
# ─── Hue-band centres (degrees) for the colour mixer ────────────────────────
HUE_BANDS: dict[str, float] = {
"red": 0.0,
"orange": 30.0,
"yellow": 55.0,
"green": 120.0,
"cyan": 180.0,
"blue": 220.0,
"magenta": 300.0,
}
HUE_BAND_NAMES = list(HUE_BANDS.keys())
_HUE_BANDWIDTH = 30.0 # half-width in degrees for smoothstep falloff
@dataclass
class Adjustments:
brightness: int = 0 # -100..+100
contrast: int = 0 # -100..+100
saturation: int = 0 # -100..+100
temperature: int = 0 # -50..+50
tint: int = 0 # -50..+50
teal_orange: int = 0 # 0..100
def cm_to_px(cm: float, dpi: float) -> int:
return int(round((float(cm) / 2.54) * float(dpi)))
# ─── ICC / colour-space ─────────────────────────────────────────────────────
def ensure_srgb(img: Image.Image) -> Image.Image:
"""
Convert image from its embedded ICC profile (e.g. Display P3, AdobeRGB)
to sRGB. This MUST happen before any pixel math or LUT application,
because LUTs and our adjustments assume sRGB-encoded input.
"""
icc_data = img.info.get("icc_profile")
if not icc_data:
if img.mode != "RGB":
return img.convert("RGB")
return img
try:
src_profile = ImageCms.ImageCmsProfile(io.BytesIO(icc_data))
dst_profile = ImageCms.ImageCmsProfile(_srgb_profile)
if img.mode != "RGB":
img = img.convert("RGB")
transform = ImageCms.buildTransform(
src_profile, dst_profile, "RGB", "RGB",
renderingIntent=ImageCms.Intent.PERCEPTUAL,
)
img = ImageCms.applyTransform(img, transform)
img.info.pop("icc_profile", None)
except (ImageCms.PyCMSError, OSError, ValueError):
if img.mode != "RGB":
img = img.convert("RGB")
return img
# ─── Pixel format helpers ───────────────────────────────────────────────────
def pil_to_rgb_float01(img: Image.Image) -> np.ndarray:
img = ensure_srgb(img)
return np.asarray(img, dtype=np.uint8).astype(np.float32) / 255.0
def rgb_float01_to_pil(rgb: np.ndarray) -> Image.Image:
rgb8 = np.clip(rgb * 255.0 + 0.5, 0, 255).astype(np.uint8)
return Image.fromarray(rgb8, mode="RGB")
# ─── Crop helpers ────────────────────────────────────────────────────────────
def crop_pil(img: Image.Image, box_xyxy: Tuple[int, int, int, int]) -> Image.Image:
x1, y1, x2, y2 = box_xyxy
x1 = max(0, min(img.width, x1))
x2 = max(0, min(img.width, x2))
y1 = max(0, min(img.height, y1))
y2 = max(0, min(img.height, y2))
if x2 <= x1 or y2 <= y1:
return img.copy()
return img.crop((x1, y1, x2, y2))
def crop_center_aspect(img: Image.Image, aspect_w: float, aspect_h: float) -> Image.Image:
w, h = img.size
target_ratio = aspect_w / aspect_h
img_ratio = w / h
if img_ratio > target_ratio:
new_w = int(h * target_ratio)
x1 = (w - new_w) // 2
return img.crop((x1, 0, x1 + new_w, h))
else:
new_h = int(w / target_ratio)
y1 = (h - new_h) // 2
return img.crop((0, y1, w, y1 + new_h))
def crop_circle(img: Image.Image, diameter_px: int = 0) -> Image.Image:
"""Crop to a circle. If *diameter_px* > 0 the result is exactly that size;
otherwise the minimum of width/height is used."""
w, h = img.size
d = max(1, diameter_px) if diameter_px > 0 else min(w, h)
img_sq = img.copy()
side = min(w, h)
x1 = (w - side) // 2
y1 = (h - side) // 2
img_sq = img_sq.crop((x1, y1, x1 + side, y1 + side))
if img_sq.size[0] != d:
img_sq = img_sq.resize((d, d), Image.Resampling.LANCZOS)
mask = Image.new("L", (d, d), 0)
from PIL import ImageDraw
ImageDraw.Draw(mask).ellipse((0, 0, d - 1, d - 1), fill=255)
result = img_sq.convert("RGBA")
result.putalpha(mask)
return result
def resize_to_output(img: Image.Image, out_w: int, out_h: int) -> Image.Image:
return img.resize((max(1, int(out_w)), max(1, int(out_h))),
resample=Image.Resampling.LANCZOS)
# ─── Teal & Orange grading ──────────────────────────────────────────────────
def apply_teal_orange(rgb: np.ndarray, strength: float) -> np.ndarray:
if strength <= 0:
return rgb
luma = 0.2126 * rgb[..., 0] + 0.7152 * rgb[..., 1] + 0.0722 * rgb[..., 2]
r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
warmth = r - b
warm_mask = np.clip(warmth * 2.0 + 0.3, 0.0, 1.0)
cool_mask = 1.0 - warm_mask
r_new = r + warm_mask * 0.08 - cool_mask * 0.06
g_new = g + warm_mask * 0.02 + cool_mask * 0.04
b_new = b - warm_mask * 0.06 + cool_mask * 0.08
luma_3d = luma[..., None]
teal_orange = np.stack([r_new, g_new, b_new], axis=-1)
teal_orange = luma_3d + (teal_orange - luma_3d) * 1.15
teal_orange = np.clip(teal_orange, 0.0, 1.0)
s = float(np.clip(strength, 0.0, 1.0))
return rgb * (1.0 - s) + teal_orange * s
# ─── HSL saturation per hue-band (colour mixer) ─────────────────────────────
def apply_hsl_saturation_bands(rgb: np.ndarray, bands: dict) -> np.ndarray:
"""
Per-hue-band saturation adjustment on sRGB float01 array (H, W, 3).
*bands*: ``{band_name: value}`` where value ∈ [-100, +100].
Positive values boost saturation in that hue range, negative values reduce.
Uses smoothstep weighting over ±30° bandwidth for soft transitions.
"""
r, g, b = rgb[..., 0], rgb[..., 1], rgb[..., 2]
cmax = np.maximum(np.maximum(r, g), b)
cmin = np.minimum(np.minimum(r, g), b)
delta = cmax - cmin
hue = np.zeros_like(r)
mask = delta > 1e-8
mr = mask & (cmax == r)
mg = mask & (cmax == g) & ~mr
mb = mask & ~mr & ~mg
hue[mr] = 60.0 * (((g[mr] - b[mr]) / delta[mr]) % 6.0)
hue[mg] = 60.0 * ((b[mg] - r[mg]) / delta[mg] + 2.0)
hue[mb] = 60.0 * ((r[mb] - g[mb]) / delta[mb] + 4.0)
hue = hue % 360.0
_BAND_GAIN = {"cyan": 2.0}
sat_mult = np.ones_like(r)
for band_name, center in HUE_BANDS.items():
adj_val = bands.get(band_name, 0)
if adj_val == 0:
continue
d = np.abs(hue - center)
d = np.minimum(d, 360.0 - d)
t = np.clip(1.0 - d / _HUE_BANDWIDTH, 0.0, 1.0)
weight = t * t * (3.0 - 2.0 * t) # smoothstep
gain = _BAND_GAIN.get(band_name, 1.0)
sat_mult += weight * (adj_val / 100.0) * gain
sat_mult = np.maximum(sat_mult, 0.0)
luma = (0.2126 * r + 0.7152 * g + 0.0722 * b)[..., None]
rgb_out = luma + (rgb - luma) * sat_mult[..., None]
return np.clip(rgb_out, 0.0, 1.0).astype(np.float32)
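# Worked example (illustrative): a pixel with hue 40° under an "orange"
# (centre 30°) adjustment of +50:
#   d = 10°, t = 1 - 10/30 ≈ 0.67, smoothstep weight ≈ 0.74,
#   so sat_mult ≈ 1 + 0.74 * 0.5 = 1.37 for that pixel.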
# ─── Full processing pipeline ────────────────────────────────────────────────
#
# ORDER (must be respected everywhere):
# 1) Basic adjustments (brightness, contrast, saturation, temp, tint,
# teal & orange)
# 2) LUT chain (sequential, each blended with own strength)
# 3) HSL colour mixer (per-hue-band saturation, AFTER LUTs)
# ── steps 4-7 handled externally in the UI processing pipeline ──
# 4) KI mask / segmentation
# 5) Feather (Gaussian blur on alpha mask)
# 6) Background compositing (blur / colour / image / transparent)
# 7) Retouching (skin softening, healing)
# ──
# 8) Post-adjustments (brightness & contrast, applied LAST)
#
def apply_adjustments(
img: Image.Image,
adj: Adjustments,
lut_chain: list | None = None,
hue_bands: dict | None = None,
post_adj: dict | None = None,
) -> Image.Image:
rgb = pil_to_rgb_float01(img)
# ── 1) basic adjustments ──
if adj.brightness != 0:
rgb = rgb + (float(adj.brightness) / 200.0)
if adj.contrast != 0:
factor = max(0.0, (100.0 + adj.contrast) / 100.0)
rgb = 0.5 + (rgb - 0.5) * factor
rgb = np.clip(rgb, 0.0, 1.0)
if adj.temperature != 0 or adj.tint != 0:
t = float(adj.temperature) / 100.0
ti = float(adj.tint) / 100.0
r = rgb[..., 0] + 0.25 * t + 0.10 * ti
g = rgb[..., 1] - 0.20 * ti
b = rgb[..., 2] - 0.25 * t + 0.10 * ti
rgb = np.stack([r, g, b], axis=-1)
rgb = np.clip(rgb, 0.0, 1.0)
if adj.saturation != 0:
s = 1.0 + (float(adj.saturation) / 100.0)
luma = (0.2126 * rgb[..., 0] + 0.7152 * rgb[..., 1]
+ 0.0722 * rgb[..., 2])[..., None]
rgb = luma + (rgb - luma) * s
rgb = np.clip(rgb, 0.0, 1.0)
if adj.teal_orange > 0:
rgb = apply_teal_orange(rgb, adj.teal_orange / 100.0)
rgb = np.clip(rgb, 0.0, 1.0)
# ── 2) LUT chain ──
if lut_chain:
rgb = apply_lut_chain(rgb, lut_chain)
# ── 3) colour mixer (post-LUT) ──
if hue_bands and any(v != 0 for v in hue_bands.values()):
rgb = apply_hsl_saturation_bands(rgb, hue_bands)
# ── 8) post-adjustments (applied LAST, after external steps 4-7) ──
if post_adj:
pb = post_adj.get("brightness", 0)
pc = post_adj.get("contrast", 0)
if pb != 0:
rgb = rgb + (float(pb) / 200.0)
if pc != 0:
factor = max(0.0, (100.0 + pc) / 100.0)
rgb = 0.5 + (rgb - 0.5) * factor
rgb = np.clip(rgb, 0.0, 1.0)
return rgb_float01_to_pil(rgb)
def apply_post_adj(rgb: np.ndarray, post_adj: dict) -> np.ndarray:
"""Apply post-LUT brightness, contrast, blacks & sharpness on float01 array."""
pb = post_adj.get("brightness", 0)
pc = post_adj.get("contrast", 0)
blacks = post_adj.get("blacks", 0)
sharpen = post_adj.get("sharpen", 0)
if pb != 0:
rgb = rgb + (float(pb) / 200.0)
if pc != 0:
factor = max(0.0, (100.0 + pc) / 100.0)
rgb = 0.5 + (rgb - 0.5) * factor
if blacks != 0:
rgb = _apply_blacks(rgb, float(blacks))
if sharpen > 0:
rgb = _apply_sharpen(rgb, float(sharpen))
return np.clip(rgb, 0.0, 1.0).astype(np.float32)
def _apply_blacks(rgb: np.ndarray, strength: float) -> np.ndarray:
"""Deepen shadows (positive) or lift them (negative).
Uses a smooth power-curve approach so the transition from shadows to
mid-tones stays natural (no hard edges).
*strength* in [-100 .. +100]:
+100 → strong black crush (gamma ~1.6)
0 → no change
-100 → lifted shadows (gamma ~0.4, the lower clamp)
"""
gamma = 1.0 + strength / 166.0
gamma = max(0.4, min(2.0, gamma))
return np.power(np.clip(rgb, 0.0, 1.0), gamma)
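# Worked example (illustrative): strength = +50 → gamma ≈ 1.30, so a shadow
# value of 0.25 drops to 0.25 ** 1.30 ≈ 0.165 (deeper blacks); strength = -50
# → gamma ≈ 0.70 lifts the same value to about 0.38.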
def _apply_sharpen(rgb: np.ndarray, strength: float) -> np.ndarray:
"""Unsharp-mask sharpening.
*strength* 0..100: 0 = no effect, 100 = strong sharpening.
Uses Gaussian blur to create a blurred copy, then adds the difference
back (classic unsharp mask).
"""
import cv2
amount = strength / 50.0
blurred = cv2.GaussianBlur(rgb, (0, 0), sigmaX=1.5)
sharpened = rgb + amount * (rgb - blurred)
return np.clip(sharpened, 0.0, 1.0).astype(np.float32)
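# Worked example (illustrative): strength = 50 → amount = 1.0, i.e.
# sharpened = 2 * rgb - blurred (classic unsharp mask with sigma 1.5);
# strength = 100 doubles that difference.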

APP/fotoapp - Kopie/lut.py (new file, 222 lines)

@@ -0,0 +1,222 @@
from __future__ import annotations
from dataclasses import dataclass
from pathlib import Path
from typing import Optional, Tuple
import numpy as np
@dataclass
class CubeLUT:
size: int
table: np.ndarray # shape: (size, size, size, 3), float32 in [0,1]
domain_min: np.ndarray # shape (3,)
domain_max: np.ndarray # shape (3,)
class CubeParseError(Exception):
pass
def load_cube(path: str | Path) -> CubeLUT:
"""
.cube parser for 3D LUTs (common sizes 17/33/65).
Handles: TITLE, LUT_3D_SIZE, LUT_1D_SIZE (skipped),
DOMAIN_MIN, DOMAIN_MAX, comments (#), blank lines.
"""
p = Path(path)
if not p.exists():
raise CubeParseError(f"LUT file not found: {p}")
size_3d: Optional[int] = None
size_1d: Optional[int] = None
domain_min = np.array([0.0, 0.0, 0.0], dtype=np.float32)
domain_max = np.array([1.0, 1.0, 1.0], dtype=np.float32)
data_3d: list[Tuple[float, float, float]] = []
collecting_3d = False
rows_1d_remaining = 0
with p.open("r", encoding="utf-8", errors="ignore") as f:
for raw in f:
line = raw.strip()
if not line or line.startswith("#"):
continue
if "#" in line:
line = line.split("#", 1)[0].strip()
if not line:
continue
parts = line.split()
key = parts[0].upper()
if key == "TITLE":
continue
if key == "LUT_3D_SIZE":
if len(parts) != 2:
raise CubeParseError("Invalid LUT_3D_SIZE line")
size_3d = int(parts[1])
collecting_3d = True
continue
if key == "LUT_1D_SIZE":
if len(parts) == 2:
size_1d = int(parts[1])
rows_1d_remaining = size_1d
continue
if key == "DOMAIN_MIN":
if len(parts) != 4:
raise CubeParseError("Invalid DOMAIN_MIN line")
domain_min = np.array(list(map(float, parts[1:4])), dtype=np.float32)
continue
if key == "DOMAIN_MAX":
if len(parts) != 4:
raise CubeParseError("Invalid DOMAIN_MAX line")
domain_max = np.array(list(map(float, parts[1:4])), dtype=np.float32)
continue
if len(parts) >= 3:
try:
r, g, b = float(parts[0]), float(parts[1]), float(parts[2])
except ValueError:
continue
# Skip 1D LUT rows that appear before the 3D data
if rows_1d_remaining > 0 and not collecting_3d:
rows_1d_remaining -= 1
continue
data_3d.append((r, g, b))
if size_3d is None:
raise CubeParseError("Missing LUT_3D_SIZE in .cube file")
expected = size_3d ** 3
if len(data_3d) != expected:
raise CubeParseError(f"Expected {expected} LUT rows, got {len(data_3d)}")
arr = np.array(data_3d, dtype=np.float32)
# .cube standard: R varies fastest, then G, then B (slowest).
# C-order reshape gives arr[B, G, R, channels].
# Transpose axes 0↔2 so table[R, G, B] = output RGB for intuitive lookup.
arr = arr.reshape((size_3d, size_3d, size_3d, 3), order="C")
arr = arr.transpose(2, 1, 0, 3).copy()
return CubeLUT(size=size_3d, table=arr, domain_min=domain_min, domain_max=domain_max)
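# Minimal hand-written example of the accepted .cube layout (a 2x2x2 identity
# LUT, R varying fastest); real LUTs are usually size 17/33/65:
#
#   TITLE "identity"
#   LUT_3D_SIZE 2
#   0 0 0
#   1 0 0
#   0 1 0
#   1 1 0
#   0 0 1
#   1 0 1
#   0 1 1
#   1 1 1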
# Set True to swap R↔B channels before/after LUT lookup (debug only)
_DEBUG_SWAP_RB = False
def apply_lut_rgb_float01(
rgb: np.ndarray,
lut: CubeLUT,
swap_rb: Optional[bool] = None,
) -> np.ndarray:
"""
Apply a 3D LUT with trilinear interpolation.
Input must be sRGB-encoded float32 in [0,1], shape (H, W, 3).
LUTs designed for Log/ACES input will NOT produce correct results
without an additional input transform.
swap_rb: if True, swap R↔B before lookup (debug for channel order issues).
Defaults to module-level _DEBUG_SWAP_RB flag.
"""
do_swap = swap_rb if swap_rb is not None else _DEBUG_SWAP_RB
if rgb.dtype != np.float32:
rgb = rgb.astype(np.float32)
if do_swap:
rgb = rgb[..., ::-1].copy()
# Map from domain to [0,1]
dmin = lut.domain_min.reshape((1, 1, 3))
dmax = lut.domain_max.reshape((1, 1, 3))
denom = np.maximum(dmax - dmin, 1e-12)
x = np.clip((rgb - dmin) / denom, 0.0, 1.0)
n = lut.size
xg = x * (n - 1)
# Floor indices clamped to [0, n-2] so i0+1 is always valid
i0 = np.clip(np.floor(xg).astype(np.int32), 0, n - 2)
i1 = i0 + 1
t = np.clip(xg - i0.astype(np.float32), 0.0, 1.0)
r0, g0, b0 = i0[..., 0], i0[..., 1], i0[..., 2]
r1, g1, b1 = i1[..., 0], i1[..., 1], i1[..., 2]
tr = t[..., 0:1]
tg = t[..., 1:2]
tb = t[..., 2:3]
# 8 corners for trilinear interpolation
c000 = lut.table[r0, g0, b0]
c001 = lut.table[r0, g0, b1]
c010 = lut.table[r0, g1, b0]
c011 = lut.table[r0, g1, b1]
c100 = lut.table[r1, g0, b0]
c101 = lut.table[r1, g0, b1]
c110 = lut.table[r1, g1, b0]
c111 = lut.table[r1, g1, b1]
# Interpolate along B axis
c00 = c000 + (c001 - c000) * tb
c01 = c010 + (c011 - c010) * tb
c10 = c100 + (c101 - c100) * tb
c11 = c110 + (c111 - c110) * tb
# Interpolate along G axis
c0 = c00 + (c01 - c00) * tg
c1 = c10 + (c11 - c10) * tg
# Interpolate along R axis
out = c0 + (c1 - c0) * tr
out = np.clip(out, 0.0, 1.0).astype(np.float32)
if do_swap:
out = out[..., ::-1].copy()
return out
def blend_rgb(a: np.ndarray, b: np.ndarray, strength_0_1: float) -> np.ndarray:
s = float(np.clip(strength_0_1, 0.0, 1.0))
return (a * (1.0 - s) + b * s).astype(np.float32)
# ─── LUT cache & chain ──────────────────────────────────────────────────────
_lut_cache: dict[str, CubeLUT] = {}
def get_cached_lut(path: str | Path) -> CubeLUT:
"""Load a .cube LUT, returning a cached copy on repeat calls."""
key = str(Path(path).resolve())
if key not in _lut_cache:
_lut_cache[key] = load_cube(path)
return _lut_cache[key]
def clear_lut_cache():
_lut_cache.clear()
def apply_lut_chain(
rgb: np.ndarray,
chain: list[tuple[CubeLUT, float]],
) -> np.ndarray:
"""
Sequential LUT application. Each ``(lut, strength_0_100)`` is blended::
out = lerp(input, apply_lut(input), strength / 100)
The result of each step feeds the next.
"""
for lut, strength in chain:
if strength <= 0:
continue
lut_rgb = apply_lut_rgb_float01(rgb, lut)
rgb = blend_rgb(rgb, lut_rgb, strength / 100.0)
rgb = np.clip(rgb, 0.0, 1.0)
return rgb.astype(np.float32)

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,4 @@
{
"Teal And Orange": "C:\\Users\\surov\\Documents\\AZA\\APP\\fotoapp\\luts\\Teal And Orange.cube",
"Rec709_LPP Tetrachrome 400_CIN": "C:\\Users\\surov\\Documents\\AZA\\APP\\fotoapp\\luts\\Rec709_LPP Tetrachrome 400_CIN.cube"
}


@@ -0,0 +1,14 @@
import sys
from PySide6.QtWidgets import QApplication
from fotoapp.ui import MainWindow
def main() -> int:
app = QApplication(sys.argv)
w = MainWindow()
w.show()
return app.exec()
if __name__ == "__main__":
raise SystemExit(main())


@@ -0,0 +1,6 @@
PySide6>=6.6
Pillow>=10.2
numpy>=1.26
pyinstaller>=6.3
mediapipe>=0.10.20
opencv-python-headless>=4.8


@@ -0,0 +1,53 @@
"""Basic retouching tools: healing / inpainting and skin softening."""
from __future__ import annotations
from typing import Tuple
import numpy as np
from PIL import Image
def heal_spot(
img: Image.Image,
center: Tuple[int, int],
radius: int,
) -> Image.Image:
"""Remove blemish at *center* using OpenCV Telea inpainting."""
import cv2
arr = np.asarray(img.convert("RGB"))
bgr = cv2.cvtColor(arr, cv2.COLOR_RGB2BGR)
mask = np.zeros(arr.shape[:2], dtype=np.uint8)
cv2.circle(mask, center, radius, 255, -1)
result = cv2.inpaint(bgr, mask, inpaintRadius=radius * 2, flags=cv2.INPAINT_TELEA)
result = cv2.cvtColor(result, cv2.COLOR_BGR2RGB)
return Image.fromarray(result)
def soften_skin(
rgb_float01: np.ndarray,
mask: np.ndarray,
strength: float,
) -> np.ndarray:
"""Edge-preserving skin softening (bilateral filter) within *mask* area.
*strength*: 0..100. 0 = no effect.
*mask*: float01 (H, W) typically the person/face mask.
"""
if strength <= 0:
return rgb_float01
import cv2
img8 = np.clip(rgb_float01 * 255, 0, 255).astype(np.uint8)
d = max(3, int(strength / 10))
sigma_color = 30 + strength * 0.7
sigma_space = 30 + strength * 0.7
smoothed = cv2.bilateralFilter(img8, d, sigma_color, sigma_space)
smoothed_f = smoothed.astype(np.float32) / 255.0
alpha = np.clip(mask * min(strength / 100.0, 1.0), 0.0, 1.0)[..., None]
result = rgb_float01 * (1.0 - alpha) + smoothed_f * alpha
return np.clip(result, 0.0, 1.0).astype(np.float32)
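# Usage sketch (coordinates and values illustrative): soften skin inside the
# person mask from the segmentation module, then heal a small blemish.
#
#   mask = segment_person(img)                    # float01 (H, W)
#   rgb  = soften_skin(rgb, mask, strength=40)
#   img  = heal_spot(img, center=(812, 430), radius=12)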


@@ -0,0 +1,206 @@
"""KI-based person segmentation, mask utilities and compositing."""
from __future__ import annotations
import os
from collections import deque
from pathlib import Path
from typing import Optional, Tuple
import numpy as np
from PIL import Image
_MODEL_URL = (
"https://storage.googleapis.com/mediapipe-models/"
"image_segmenter/selfie_multiclass_256x256/float32/latest/"
"selfie_multiclass_256x256.tflite"
)
_MODEL_DIR = Path(os.environ.get("APPDATA", "")) / "FotoApp"
_MODEL_PATH = _MODEL_DIR / "selfie_multiclass.tflite"
_segmenter = None
def _ensure_model(progress_cb=None) -> str:
"""Download the segmentation model on first use (~16 MB)."""
_MODEL_DIR.mkdir(parents=True, exist_ok=True)
path = str(_MODEL_PATH)
if os.path.isfile(path) and os.path.getsize(path) > 1_000_000:
return path
if progress_cb:
progress_cb("KI-Modell wird heruntergeladen (~16 MB) …")
import urllib.request
urllib.request.urlretrieve(_MODEL_URL, path)
return path
def _get_segmenter(progress_cb=None):
"""Lazy-init the MediaPipe ImageSegmenter (cached)."""
global _segmenter
if _segmenter is not None:
return _segmenter
import mediapipe as mp
from mediapipe.tasks.python import BaseOptions, vision
model_path = _ensure_model(progress_cb)
if progress_cb:
progress_cb("KI-Modell wird geladen …")
options = vision.ImageSegmenterOptions(
base_options=BaseOptions(model_asset_path=model_path),
output_category_mask=True,
)
_segmenter = vision.ImageSegmenter.create_from_options(options)
return _segmenter
# ─── KI segmentation ────────────────────────────────────────────────────────
def segment_person(img: Image.Image, progress_cb=None) -> np.ndarray:
"""Run MediaPipe Selfie Segmentation (Tasks API), returning float01 alpha mask.
Very fast (~1-2 s), lightweight (~16 MB model, low RAM).
The model is automatically downloaded on first use.
Category 0 = background, categories 1-5 = person parts.
"""
import mediapipe as mp
segmenter = _get_segmenter(progress_cb)
if progress_cb:
progress_cb("Segmentierung läuft …")
rgb_arr = np.asarray(img.convert("RGB"))
mp_img = mp.Image(image_format=mp.ImageFormat.SRGB, data=rgb_arr)
result = segmenter.segment(mp_img)
cat_mask = result.category_mask.numpy_view()
# category 0 = background; anything > 0 = person
mask = (cat_mask.squeeze() > 0).astype(np.float32)
return mask
# ─── Mask operations ────────────────────────────────────────────────────────
def feather_mask(mask: np.ndarray, radius_px: float) -> np.ndarray:
"""Gaussian blur on alpha mask for soft edges."""
if radius_px <= 0:
return mask
import cv2
ksize = int(radius_px) * 2 + 1
blurred = cv2.GaussianBlur(mask, (ksize, ksize), sigmaX=radius_px / 2.0)
return np.clip(blurred, 0.0, 1.0).astype(np.float32)
def apply_brush_stroke(
mask: np.ndarray,
points: list[Tuple[int, int]],
radius: int,
hardness: float,
add: bool,
) -> np.ndarray:
"""Paint on *mask* along *points* with a circular brush.
*hardness* 0..1 (0 = very soft, 1 = hard edge).
*add* True = paint foreground (white), False = erase (black).
"""
h, w = mask.shape[:2]
for cx, cy in points:
y_min = max(0, cy - radius)
y_max = min(h, cy + radius + 1)
x_min = max(0, cx - radius)
x_max = min(w, cx + radius + 1)
if y_min >= y_max or x_min >= x_max:
continue
yy, xx = np.mgrid[y_min:y_max, x_min:x_max]
dist = np.sqrt((xx - cx) ** 2 + (yy - cy) ** 2).astype(np.float32)
if hardness >= 0.99:
strength = (dist <= radius).astype(np.float32)
else:
inner = radius * hardness
outer = float(radius)
t = np.clip((dist - inner) / max(outer - inner, 1e-6), 0.0, 1.0)
strength = 1.0 - t
strength[dist > radius] = 0.0
patch = mask[y_min:y_max, x_min:x_max]
if add:
mask[y_min:y_max, x_min:x_max] = np.maximum(patch, strength)
else:
mask[y_min:y_max, x_min:x_max] = np.minimum(patch, 1.0 - strength)
return mask
# ─── Compositing ─────────────────────────────────────────────────────────────
def composite_fg_bg(
fg_rgb: np.ndarray,
alpha: np.ndarray,
bg_mode: str,
bg_color: Tuple[int, int, int] = (255, 255, 255),
bg_blur_radius: float = 0.0,
bg_image: Optional[np.ndarray] = None,
) -> np.ndarray:
"""Composite foreground over background.
*fg_rgb*: float01 (H, W, 3) the colour-graded image.
*alpha*: float01 (H, W) person mask (feathered).
*bg_mode*: ``"original"`` | ``"blur"`` | ``"color"`` | ``"transparent"`` | ``"image"``.
Returns float01 (H, W, 3) for non-transparent modes or (H, W, 4) for transparent.
"""
a = alpha[..., None]
if bg_mode == "original":
return fg_rgb
if bg_mode == "transparent":
rgba = np.concatenate([fg_rgb, alpha[..., None]], axis=-1)
return np.clip(rgba, 0.0, 1.0).astype(np.float32)
if bg_mode == "blur":
import cv2
ksize = max(1, int(bg_blur_radius)) * 2 + 1
bg = cv2.GaussianBlur(fg_rgb, (ksize, ksize), sigmaX=bg_blur_radius / 2.0)
elif bg_mode == "color":
bg = np.full_like(fg_rgb, [c / 255.0 for c in bg_color])
elif bg_mode == "image" and bg_image is not None:
h, w = fg_rgb.shape[:2]
bg = np.asarray(
Image.fromarray((bg_image * 255).astype(np.uint8)).resize(
(w, h), Image.Resampling.LANCZOS
),
dtype=np.float32,
) / 255.0
else:
return fg_rgb
result = fg_rgb * a + bg * (1.0 - a)
return np.clip(result, 0.0, 1.0).astype(np.float32)
# ─── Mask undo stack ─────────────────────────────────────────────────────────
class MaskHistory:
"""Simple undo buffer for mask edits (max *maxlen* snapshots)."""
def __init__(self, maxlen: int = 20):
self._stack: deque[np.ndarray] = deque(maxlen=maxlen)
def push(self, mask: np.ndarray):
self._stack.append(mask.copy())
def undo(self) -> Optional[np.ndarray]:
if self._stack:
return self._stack.pop()
return None
def clear(self):
self._stack.clear()
@property
def can_undo(self) -> bool:
return len(self._stack) > 0
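# Usage sketch (parameter values illustrative):
#
#   mask = segment_person(img)                                   # 1.0 = person
#   history = MaskHistory(); history.push(mask)
#   mask = apply_brush_stroke(mask, [(120, 240)], radius=25,
#                             hardness=0.5, add=False)           # erase a spot
#   mask = feather_mask(mask, radius_px=6.0)
#   rgba = composite_fg_bg(rgb, mask, bg_mode="transparent")     # (H, W, 4)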


@@ -0,0 +1,19 @@
{
"last_browse_dir": "C:\\Users\\surov\\Pictures",
"out_dir": "C:\\Users\\surov\\Pictures\\neue Fotos",
"brightness": 0,
"contrast": 0,
"saturation": 0,
"temperature": 0,
"tint": 0,
"teal_orange": 0,
"lut_strength": 0,
"selected_lut": "",
"crop_preset": 0,
"format": "JPG",
"quality": 95,
"dpi": 300,
"w_cm": 5.0,
"h_cm": 5.0,
"use_cm": false
}


@@ -0,0 +1,16 @@
@echo off
cd /d "%~dp0"
if not exist ".venv" (
echo Erstelle virtuelle Umgebung...
py -3 -m venv .venv
echo Installiere Abhaengigkeiten...
.venv\Scripts\pip install -r requirements.txt
echo.
echo Installation abgeschlossen!
echo.
)
cd ..
"%~dp0.venv\Scripts\python.exe" -m fotoapp.main
pause

APP/fotoapp - Kopie/ui.py (new file, 2391 lines)

File diff suppressed because it is too large.