From 5c4dfad3831128baedc2b1e19ae676445289280d Mon Sep 17 00:00:00 2001
From: Thomas Dannenberg
Date: Thu, 18 Sep 2025 19:58:21 +0000
Subject: [PATCH] Update Kapitel 13/Tutorial.md

---
 Kapitel 13/Tutorial.md | 82 ++++++++++++++++++++++++++----------------
 1 file changed, 52 insertions(+), 30 deletions(-)

diff --git a/Kapitel 13/Tutorial.md b/Kapitel 13/Tutorial.md
index c171821..7897c0b 100644
--- a/Kapitel 13/Tutorial.md
+++ b/Kapitel 13/Tutorial.md
@@ -1021,47 +1021,69 @@ frame_files = sorted([
     if f.endswith(".bmp")
 ])
 
-movement_threshold = 12000
-min_duration = 5
-candidates = []
+# fixed at 3 seconds per frame
+SECONDS_PER_FRAME = 3
+MIN_CLIP_DURATION = 15  # seconds
+PRE_POST_BUFFER = 2  # seconds
 
-prev_frame = None
-active_start = None
+movement_scores = []
+frames = []
 
+# collect movement scores
 for idx, frame_path in enumerate(frame_files):
     frame = cv2.imread(frame_path, cv2.IMREAD_GRAYSCALE)
     if frame is None:
         continue
 
-    if prev_frame is not None:
-        diff = cv2.absdiff(prev_frame, frame)
-        score = int(np.sum(diff))
-        print(f"[DEBUG] Frame {idx}: movement score = {score}")
+    if idx == 0:
+        prev_frame = frame
+        continue
 
-        t = idx * 3
-
-        if score > movement_threshold:
-            if active_start is None:
-                active_start = t
-        else:
-            if active_start is not None and (t - active_start) >= min_duration:
-                # add pre/post buffer
-                start_time = max(0, active_start - 2)
-                end_time = min(t + 2, len(frame_files) * 3)
-                candidates.append({
-                    "start": round(start_time, 2),
-                    "end": round(end_time, 2)
-                })
-                active_start = None
+    diff = cv2.absdiff(prev_frame, frame)
+    score = int(np.sum(diff))
+    movement_scores.append(score)
+    frames.append((idx, score))
+    print(f"[DEBUG] Frame {idx}: movement score = {score}")
 
     prev_frame = frame
 
-# last clip still open at the end
+if not movement_scores:
+    print("[WARN] No movement scores computed.")
+    with open(output_json, 'w') as f:
+        json.dump([], f)
+    exit(0)
+
+# determine the threshold dynamically
+cutoff = np.quantile(movement_scores, 0.99)
+print(f"[INFO] Dynamic movement threshold (99%): {int(cutoff)}")
+print(f"[INFO] Min: {min(movement_scores)}, Max: {max(movement_scores)}, Median: {int(np.median(movement_scores))}")
+
+# search for candidates
+candidates = []
+active_start = None
+
+for idx, score in frames:
+    t = idx * SECONDS_PER_FRAME
+
+    if score > cutoff:
+        if active_start is None:
+            active_start = t
+    else:
+        if active_start is not None and (t - active_start) >= MIN_CLIP_DURATION:
+            start_time = max(0, active_start - PRE_POST_BUFFER)
+            end_time = min(t + PRE_POST_BUFFER, len(frame_files) * SECONDS_PER_FRAME)
+            candidates.append({
+                "start": round(start_time, 2),
+                "end": round(end_time, 2)
+            })
+            active_start = None
+
+# clip still open at the end
 if active_start is not None:
-    t = len(frame_files) * 3
-    if t - active_start >= min_duration:
-        start_time = max(0, active_start - 2)
-        end_time = min(t + 2, len(frame_files) * 3)
+    t = len(frame_files) * SECONDS_PER_FRAME
+    if t - active_start >= MIN_CLIP_DURATION:
+        start_time = max(0, active_start - PRE_POST_BUFFER)
+        end_time = min(t + PRE_POST_BUFFER, len(frame_files) * SECONDS_PER_FRAME)
         candidates.append({
             "start": round(start_time, 2),
             "end": round(end_time, 2)
@@ -1073,7 +1095,6 @@ with open(output_json, "w") as f:
 
 print(f"[DONE] Movement detection finished → {len(candidates)} clip(s) written to {output_json}")
 EOF
-
 echo "[INFO] Comparing audio and visual candidates..."
 
 TMP_JSON_FINAL="$TMP_DIR/candidates.final.json"
@@ -1145,6 +1166,7 @@ EOF
 
 echo "[INFO] Replacing the original JSON with the final version..."
 mv "$TMP_JSON_FINAL" "$TMP_JSON"
+```
 
 1. **SSH Node – Analyze VOD** (Node name: Analyze VOD)
    - Node type: SSH
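The patch replaces the fixed `movement_threshold = 12000` with a cutoff derived from the data itself (the 99th percentile of all frame-difference scores), so the detection adapts to how much motion a given VOD contains overall. For experimenting with that logic outside the n8n/SSH pipeline, the following is a minimal, self-contained sketch. The helper name `detect_clips` and the synthetic score series are illustrative assumptions and not part of the tutorial; only the quantile cutoff, minimum clip duration, and pre/post buffer mirror the patch.

```python
import numpy as np

SECONDS_PER_FRAME = 3   # one sampled frame every 3 seconds, as in the patch
MIN_CLIP_DURATION = 15  # seconds a stretch must last to become a clip
PRE_POST_BUFFER = 2     # seconds of padding before and after each clip


def detect_clips(scores, quantile=0.99):
    """Turn per-frame movement scores into padded clip candidates.

    scores[i] is assumed to be the difference score of frame i + 1
    (frame 0 has no predecessor and therefore no score).
    """
    if not scores:
        return []

    cutoff = np.quantile(scores, quantile)   # dynamic threshold
    total = len(scores) * SECONDS_PER_FRAME  # recording length in seconds
    clips, active_start = [], None

    for idx, score in enumerate(scores, start=1):
        t = idx * SECONDS_PER_FRAME
        if score > cutoff:
            if active_start is None:         # a high-motion stretch begins
                active_start = t
        else:
            # As in the patch, a stretch is only closed once it is long enough.
            if active_start is not None and (t - active_start) >= MIN_CLIP_DURATION:
                clips.append({
                    "start": max(0, active_start - PRE_POST_BUFFER),
                    "end": min(t + PRE_POST_BUFFER, total),
                })
                active_start = None

    # a stretch that is still open when the recording ends
    if active_start is not None and total - active_start >= MIN_CLIP_DURATION:
        clips.append({
            "start": max(0, active_start - PRE_POST_BUFFER),
            "end": min(total + PRE_POST_BUFFER, total),
        })
    return clips


if __name__ == "__main__":
    # Synthetic scores: background noise plus one burst of movement (frames 401-410).
    rng = np.random.default_rng(0)
    scores = rng.integers(1000, 2000, size=1000).tolist()
    scores[400:410] = [50000] * 10
    print(detect_clips(scores))
```

With these synthetic scores the sketch reports a single padded candidate around the injected burst; feeding it the real `movement_scores` series from the patched step should yield essentially the same candidate list that the step writes to `output_json`.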