# -*- coding: utf-8 -*-
from flask import Flask, render_template, Response, request, redirect, url_for
import cv2
import json
from datetime import datetime

from gestures.gestures_debug_test import detect_hand_gesture

app = Flask(__name__)

# Initialize the camera at a low resolution to keep the MJPEG stream lightweight.
cam = cv2.VideoCapture(0)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)


def gen_frames():
    while True:
        success, frame = cam.read()
        if not success:
            print("[error] No camera frame.")
            break

        # Optional mirroring (selfie effect)
        frame = cv2.flip(frame, 1)

        # Detect the gesture and get the ROI coordinates
        gesture, roi_coords = detect_hand_gesture(frame)
        x, y, w, h = roi_coords

        # Draw the ROI rectangle and the detected gesture onto the frame
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.putText(frame, f"Gesture: {gesture}", (10, 30),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

        # MJPEG streaming: emit each frame as one JPEG part of a multipart response
        ret, buffer = cv2.imencode('.jpg', frame)
        frame_bytes = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/video_feed')
def video_feed():
    return Response(gen_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


@app.route('/log_answer', methods=['POST'])
def log_answer():
    answer = request.form.get("antwort", "").strip()
    mood = request.form.get("mood", "neutral")
    gesture = request.form.get("gesture", "none")

    log_entry = {
        "timestamp": datetime.now().isoformat(),
        "antwort": answer,
        "mood": mood,
        "gesture": gesture
    }

    # Append one JSON object per line (JSON Lines format)
    with open("snake_log.jsonl", "a") as logfile:
        logfile.write(json.dumps(log_entry) + "\n")

    print(f"[log] Logged: {log_entry}")
    return redirect(url_for('index'))


@app.route('/shutdown')
def shutdown():
    cam.release()
    print("[info] Camera released cleanly.")
    return "Camera released."


if __name__ == '__main__':
    print("Starting SnakeCam ...")
    app.run(host='0.0.0.0')
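
# ---------------------------------------------------------------------------
# For reference only: a minimal sketch of what detect_hand_gesture() is
# assumed to look like, inferred from how it is called above (frame in,
# (gesture, (x, y, w, h)) out). This is an illustrative assumption, not the
# actual implementation in gestures/gestures_debug_test.py; thresholds and
# the fixed ROI are placeholders.
#
#   def detect_hand_gesture(frame):
#       fh, fw = frame.shape[:2]
#       # Fixed ROI in the upper-right quarter of the frame (assumed).
#       x, y, w, h = fw // 2, 0, fw // 2, fh // 2
#       roi = frame[y:y + h, x:x + w]
#       # Rough skin-tone mask in HSV space; ranges are illustrative only.
#       hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
#       mask = cv2.inRange(hsv, (0, 30, 60), (20, 150, 255))
#       coverage = cv2.countNonZero(mask) / float(w * h)
#       gesture = "hand" if coverage > 0.25 else "none"
#       return gesture, (x, y, w, h)
# ---------------------------------------------------------------------------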