diff --git a/Kapitel 13/Tutorial.md b/Kapitel 13/Tutorial.md
index e81fa81..c171821 100644
--- a/Kapitel 13/Tutorial.md
+++ b/Kapitel 13/Tutorial.md
@@ -825,10 +825,22 @@ Das folgende Schaubild zeigt dir die konkrete Verkabelung
 ```
 ```bash
- #!/bin/bash
+#!/bin/bash
 set -euo pipefail
 
+# Cleanup bei CTRL+C, SIGTERM, etc.
+cleanup() {
+  echo "[WARN] Analyse abgebrochen – prüfe auf ffmpeg-Leichen..."
+  pkill -P $$ ffmpeg || true
+  [ -n "${VOD_ID-}" ] && pkill -f "ffmpeg.*$VOD_ID" || true
+  # FIFO säubern (nur wenn gesetzt & vorhanden)
+  if [ -n "${FIFO_PATH-}" ] && [ -p "$FIFO_PATH" ]; then rm -f "$FIFO_PATH"; fi
+  exit 1
+}
+
+trap cleanup SIGINT SIGTERM
+
 # Umgebungsvariablen laden
 if [ -f "/etc/clipper/clipper.env" ]; then
   set -a
@@ -861,7 +873,7 @@ echo "[INFO] VOD gefunden: $VOD_PATH"
 echo "[INFO] Extrahiere WAV aus $VOD_PATH → $TMP_AUDIO"
 
 # Audio extrahieren mit Logging
-ffmpeg -v warning -i "$VOD_PATH" -ac 1 -ar 16000 -vn "$TMP_AUDIO" 2> "$TMP_LOG_AUDIO"
+nice -n 10 ffmpeg -v warning -threads 1 -i "$VOD_PATH" -ac 1 -ar 16000 -vn "$TMP_AUDIO" 2> "$TMP_LOG_AUDIO"
 
 echo "[OK] Audio extrahiert: $TMP_AUDIO ($(du -h "$TMP_AUDIO" | cut -f1))"
 echo "[INFO] Verwende Schwelle: $CLIPPER_PEAK_THRESHOLD"
@@ -931,6 +943,208 @@ with open(outfile, 'w') as f:
     json.dump(candidates, f)
 print(f"[DONE] {outfile} geschrieben mit {len(candidates)} Clip(s)")
 EOF
+
+echo "[INFO] Starte visuelle Analyse (alle 3 Sekunden ein Frame)..."
+
+TMP_FRAME_DIR="$TMP_DIR/frames"
+mkdir -p "$TMP_FRAME_DIR"
+
+# Videodauer ermitteln (in Sekunden)
+DURATION=$(ffprobe -v error -show_entries format=duration \
+  -of default=noprint_wrappers=1:nokey=1 "$VOD_PATH")
+DURATION=${DURATION%.*}
+
+INTERVAL=3
+EXPECTED_FRAMES=$(printf "%.0f" "$(echo "$DURATION / $INTERVAL" | bc -l)")
+
+echo "[INFO] VOD-Dauer: ${DURATION}s → erwarte ca. ${EXPECTED_FRAMES} Bilder bei fps=1/${INTERVAL}"
+
+# ffmpeg im Hintergrund
+START_TIME=$(date +%s)
+nice -n 10 ffmpeg -v warning -i "$VOD_PATH" \
+  -vf "fps=1/${INTERVAL},scale=160:90" \
+  "$TMP_FRAME_DIR/frame_%06d.bmp" &
+FFMPEG_PID=$!
+
+EXTRACTED_FRAMES=0
+LAST_LOGGED_FRAMES=0
+
+while kill -0 "$FFMPEG_PID" 2>/dev/null; do
+  EXTRACTED_FRAMES=$(find "$TMP_FRAME_DIR" -name 'frame_*.bmp' | wc -l)
+
+  if (( EXTRACTED_FRAMES > LAST_LOGGED_FRAMES )); then
+    NOW=$(date +%s)
+    ELAPSED=$((NOW - START_TIME))
+    ELAPSED_FMT=$(printf "%02d:%02d:%02d" $((ELAPSED/3600)) $((ELAPSED%3600/60)) $((ELAPSED%60)))
+
+    if (( EXTRACTED_FRAMES > 0 )); then
+      ESTIMATED_TOTAL=$((ELAPSED * EXPECTED_FRAMES / EXTRACTED_FRAMES))
+      ETA_SECONDS=$((ESTIMATED_TOTAL - ELAPSED))
+      ETA_FMT=$(printf "%02d:%02d:%02d" $((ETA_SECONDS/3600)) $((ETA_SECONDS%3600/60)) $((ETA_SECONDS%60)))
+    else
+      ETA_FMT="??:??:??"
+    fi
+
+    PERCENT=$(awk "BEGIN { printf \"%.2f\", $EXTRACTED_FRAMES * 100 / $EXPECTED_FRAMES }")
+    echo "[INFO] Lade Frame $EXTRACTED_FRAMES / $EXPECTED_FRAMES (${PERCENT}%) | ETA: $ETA_FMT | Elapsed: $ELAPSED_FMT"
+
+
+    LAST_LOGGED_FRAMES=$EXTRACTED_FRAMES
+  fi
+
+  sleep 1
+done
+
+# Final-Log
+EXTRACTED_FRAMES=$(find "$TMP_FRAME_DIR" -name 'frame_*.bmp' | wc -l)
+END_TIME=$(date +%s)
+TOTAL_TIME=$((END_TIME - START_TIME))
+TOTAL_FMT=$(printf "%02d:%02d:%02d" $((TOTAL_TIME/3600)) $((TOTAL_TIME%3600/60)) $((TOTAL_TIME%60)))
+
+echo "[INFO] Frame-Export abgeschlossen mit $EXTRACTED_FRAMES Bildern. Dauer: $TOTAL_FMT"
+
+echo "[INFO] Starte visuelle Analyse basierend auf Bewegung zwischen Frames..."
+ +TMP_JSON_VISUAL="$TMP_DIR/candidates.visual.json" + +/srv/clipper/.venv/bin/python3 < movement_threshold: + if active_start is None: + active_start = t + else: + if active_start is not None and (t - active_start) >= min_duration: + # Pre/Post-Puffer hinzufügen + start_time = max(0, active_start - 2) + end_time = min(t + 2, len(frame_files) * 3) + candidates.append({ + "start": round(start_time, 2), + "end": round(end_time, 2) + }) + active_start = None + + prev_frame = frame + +# Letzter offener Clip am Ende +if active_start is not None: + t = len(frame_files) * 3 + if t - active_start >= min_duration: + start_time = max(0, active_start - 2) + end_time = min(t + 2, len(frame_files) * 3) + candidates.append({ + "start": round(start_time, 2), + "end": round(end_time, 2) + }) + +with open(output_json, "w") as f: + json.dump(candidates, f) + +print(f"[DONE] Bewegungserkennung abgeschlossen → {len(candidates)} Clip(s) geschrieben in {output_json}") +EOF + + +echo "[INFO] Vergleiche Audio- und visuelle Kandidaten..." + +TMP_JSON_FINAL="$TMP_DIR/candidates.final.json" +CLIPPER_MATCH_TOLERANCE="${CLIPPER_MATCH_TOLERANCE:-4.0}" + +/srv/clipper/.venv/bin/python3 <