🧹 Großes Aufräumen - Repo schlank gemacht!

**Gelöscht:**

📁 17 alte Log-Directories:
- ./.tobi_logs, ./.eule_logs, ./.funkfox_logs
- ./.schraubaer_logs, ./.schnecki_logs, ./.deepbit_logs
- ./.bugsy_logs, ./.schnippsi_logs, ./.templatus_logs
- ./.snake_logs, ./.taube_logs, ./.bits_logs
- ./.dumbo_logs, ./.pepper_logs, ./.stage_logs
- ./.missionstage_log, ./.bugsy_log
→ Jetzt: logs/{character}/ statt versteckt!

📁 7 Config/Cache Directories:
- ./.config, ./.local, ./.cache, ./.dotnet
- ./.crumbair, ./.claude, ./.pp_backup
→ Gehören nicht ins Repo!

📦 Alte Backups & Temp Files:
- bits_logs/ (leer)
- crumbforest_roles_v2/ (altes Backup)
- crumbforest_zero_backup_*.zip (2 Backups)
- snake_camera_vision/ & _v2/ (alte Python Apps)
- snake_camera_vision.zip
- mission_deepbit_dns_*.log (3 alte Logs)
- vegeta_disk.sh.save, backup_zero.sh
- fix_token_logs.sh (nicht mehr nötig)

**Behalten:**
 logs/ (neue Struktur!)
 crumbforest_roles/, missions/, lib/
 crumb-mission-selector.sh
 CLAUDE.md, README.md, KEKSHANDBUCH

**Ergebnis:** Repo ist jetzt viel übersichtlicher! 🌲

🤖 Generated with Claude Code
Co-Authored-By: Claude Sonnet 4.5 <noreply@anthropic.com>
This commit is contained in:
Branko May Trinkwald
2025-12-21 16:34:44 +01:00
parent 271008ea10
commit 39011a555f
26 changed files with 0 additions and 1179 deletions

View File

@@ -1,52 +0,0 @@
#!/bin/bash
# Create a timestamped zip backup of the Crumbforest Zero system:
# the crumbmission binary, the backup directory and all character log dirs.
echo "📦 Starte Backup des Crumbforest Zero Systems..."
# Target archive name and destination.
BACKUP_NAME="crumbforest_zero_backup_$(date +%Y%m%d_%H%M%S).zip"
DEST_DIR="/home/zero"
FULL_PATH="$DEST_DIR/$BACKUP_NAME"
# Candidate files and directories to archive.
INCLUDE_PATHS=(
  "/usr/local/bin/crumbmission"
  "/home/zero/crumbforest_backup"
  "/home/zero/.bits_logs"
  "/home/zero/.eule_logs"
  "/home/zero/.snake_logs"
  "/home/zero/.pepper_logs"
  "/home/zero/.bugsy_logs"
  "/home/zero/.deepbit_logs"
  "/home/zero/.dumbo_logs"
  "/home/zero/.funkfox_logs"
  "/home/zero/.schnecki_logs"
  "/home/zero/.schnippsi_logs"
  "/home/zero/.schraubaer_logs"
  "/home/zero/.stage_logs"
  "/home/zero/.taube_logs"
  "/home/zero/.templatus_logs"
  "/home/zero/.tobi_logs"
  "/home/zero/.missionstage_log"
)
# Keep only paths that actually exist so zip does not abort on a missing entry.
EXISTING_PATHS=()
for path in "${INCLUDE_PATHS[@]}"; do
  if [ -e "$path" ]; then
    echo "✅ Hinzufügen: $path"
    EXISTING_PATHS+=("$path")
  else
    echo "⚠️ Nicht gefunden (wird übersprungen): $path"
  fi
done
# Fail early instead of producing an empty archive.
if [ "${#EXISTING_PATHS[@]}" -eq 0 ]; then
  echo "❌ Keine vorhandenen Pfade – Backup abgebrochen." >&2
  exit 1
fi
# Archive from / so entries are stored relative to the filesystem root;
# abort if the directory change fails instead of zipping the wrong tree.
cd / || { echo "❌ Wechsel nach / fehlgeschlagen." >&2; exit 1; }
# Check zip directly instead of the fragile `$?` pattern, and exit non-zero
# on failure so callers/cron can detect a broken backup.
if zip -r "$FULL_PATH" "${EXISTING_PATHS[@]}" > /dev/null; then
  echo "🎉 Backup erfolgreich erstellt: $FULL_PATH"
else
  echo "❌ Fehler beim Erstellen des Backups." >&2
  exit 1
fi

View File

@@ -1,56 +0,0 @@
#!/bin/bash
# Bugsy: ask the OpenRouter chat API a question in the voice of the "Bugsy"
# character; keep a history array and a token-usage log under ~/.bugsy_logs.
QUESTION="$*"
MODEL="openai/gpt-3.5-turbo"
API_KEY="${OPENROUTER_API_KEY}"
LOGDIR="$HOME/.bugsy_logs"
mkdir -p "$LOGDIR"
HISTORY_FILE="$LOGDIR/bugsy_history.json"
TMP_REQUEST="$LOGDIR/bugsy_request.json"
TMP_RESPONSE="$LOGDIR/bugsy_response.json"
LOG_FILE="$LOGDIR/token_log.json"
# History is one JSON array; the token log is JSON-Lines (one object per
# line), so it must NOT be seeded with "[]" — that corrupts the stream.
[ ! -f "$HISTORY_FILE" ] && echo "[]" > "$HISTORY_FILE"
touch "$LOG_FILE"
echo "🌳 Bugsy fragt über OpenRouter: $QUESTION"
if [ -z "$API_KEY" ]; then
  echo "❗ Kein API-Key gesetzt. Bitte export OPENROUTER_API_KEY=... setzen"
  exit 1
fi
# Build the request body with jq so the question is safely JSON-escaped.
jq -n \
  --arg model "$MODEL" \
  --arg system_prompt "Du bist Bugsy ein kleiner Käfer, der Kindern hilft, Fehlermeldungen zu verstehen. Du bleibst freundlich, erklärend und ermutigend." \
  --arg user "$QUESTION" \
  '{"model": $model, "temperature": 0.5, "messages": [{"role": "system", "content": $system_prompt}, {"role": "user", "content": $user}]}' > "$TMP_REQUEST"
curl -s https://openrouter.ai/api/v1/chat/completions \
  -H "Authorization: Bearer $API_KEY" \
  -H "Content-Type: application/json" \
  -d @"$TMP_REQUEST" > "$TMP_RESPONSE"
RESPONSE_TEXT=$(jq -r '.choices[0].message.content // empty' "$TMP_RESPONSE")
if [[ -z "$RESPONSE_TEXT" ]]; then
  echo "🚫 Keine Antwort vom Modell erhalten."
  exit 1
else
  echo -e "$RESPONSE_TEXT"
  # Append the assistant answer to the history array.
  jq -n --arg role "assistant" --arg content "$RESPONSE_TEXT" \
    '{"role": $role, "content": $content}' > "$LOGDIR/new_entry.json"
  jq -s '.[0] + [.[1]]' "$HISTORY_FILE" "$LOGDIR/new_entry.json" > "$LOGDIR/new_history.json" && \
    cp "$LOGDIR/new_history.json" "$HISTORY_FILE" && rm "$LOGDIR/new_history.json"
fi
# Token logging: store usage as a real JSON object (--argjson), not as an
# embedded string (--arg) — the string form produced the corrupted logs that
# fix_token_logs.sh had to repair. -c keeps one compact object per line.
if jq -e '.usage' "$TMP_RESPONSE" > /dev/null; then
  TIMESTAMP=$(date +"%Y-%m-%d %H:%M:%S")
  jq -n -c \
    --arg zeit "$TIMESTAMP" \
    --arg rolle "bugsy" \
    --argjson usage "$(jq -c '.usage' "$TMP_RESPONSE")" \
    '{zeit: $zeit, rolle: $rolle, usage: $usage}' >> "$LOG_FILE"
fi

View File

@@ -1,56 +0,0 @@
#!/bin/bash
# Deepbit: ask the OpenRouter chat API a question in the voice of the
# "Deepbit" character; keep history + token-usage log under ~/.deepbit_logs.
QUESTION="$*"
MODEL="openai/gpt-3.5-turbo"
API_KEY="${OPENROUTER_API_KEY}"
LOGDIR="$HOME/.deepbit_logs"
mkdir -p "$LOGDIR"
HISTORY_FILE="$LOGDIR/deepbit_history.json"
TMP_REQUEST="$LOGDIR/deepbit_request.json"
TMP_RESPONSE="$LOGDIR/deepbit_response.json"
LOG_FILE="$LOGDIR/token_log.json"
# History is one JSON array; the token log is JSON-Lines (one object per
# line), so it must NOT be seeded with "[]" — that corrupts the stream.
[ ! -f "$HISTORY_FILE" ] && echo "[]" > "$HISTORY_FILE"
touch "$LOG_FILE"
echo "🌳 Deepbit fragt über OpenRouter: $QUESTION"
if [ -z "$API_KEY" ]; then
  echo "❗ Kein API-Key gesetzt. Bitte export OPENROUTER_API_KEY=... setzen"
  exit 1
fi
# Build the request body with jq so the question is safely JSON-escaped.
jq -n \
  --arg model "$MODEL" \
  --arg system_prompt "Du bist Deepbit ein poetischer Oktopus, der Kindern die Bash-Shell erklärt. Du denkst in Schleifen, Bildsprache und Frequenzen." \
  --arg user "$QUESTION" \
  '{"model": $model, "temperature": 0.5, "messages": [{"role": "system", "content": $system_prompt}, {"role": "user", "content": $user}]}' > "$TMP_REQUEST"
curl -s https://openrouter.ai/api/v1/chat/completions \
  -H "Authorization: Bearer $API_KEY" \
  -H "Content-Type: application/json" \
  -d @"$TMP_REQUEST" > "$TMP_RESPONSE"
RESPONSE_TEXT=$(jq -r '.choices[0].message.content // empty' "$TMP_RESPONSE")
if [[ -z "$RESPONSE_TEXT" ]]; then
  echo "🚫 Keine Antwort vom Modell erhalten."
  exit 1
else
  echo -e "$RESPONSE_TEXT"
  # Append the assistant answer to the history array.
  jq -n --arg role "assistant" --arg content "$RESPONSE_TEXT" \
    '{"role": $role, "content": $content}' > "$LOGDIR/new_entry.json"
  jq -s '.[0] + [.[1]]' "$HISTORY_FILE" "$LOGDIR/new_entry.json" > "$LOGDIR/new_history.json" && \
    cp "$LOGDIR/new_history.json" "$HISTORY_FILE" && rm "$LOGDIR/new_history.json"
fi
# Token logging: store usage as a real JSON object (--argjson), not as an
# embedded string (--arg) — the string form produced the corrupted logs that
# fix_token_logs.sh had to repair. -c keeps one compact object per line.
if jq -e '.usage' "$TMP_RESPONSE" > /dev/null; then
  TIMESTAMP=$(date +"%Y-%m-%d %H:%M:%S")
  jq -n -c \
    --arg zeit "$TIMESTAMP" \
    --arg rolle "deepbit" \
    --argjson usage "$(jq -c '.usage' "$TMP_RESPONSE")" \
    '{zeit: $zeit, rolle: $rolle, usage: $usage}' >> "$LOG_FILE"
fi

View File

@@ -1,56 +0,0 @@
#!/bin/bash
# Schnippsi: ask the OpenRouter chat API a question in the voice of the
# "Schnippsi" character; keep history + token log under ~/.schnippsi_logs.
QUESTION="$*"
MODEL="openai/gpt-3.5-turbo"
API_KEY="${OPENROUTER_API_KEY}"
LOGDIR="$HOME/.schnippsi_logs"
mkdir -p "$LOGDIR"
HISTORY_FILE="$LOGDIR/schnippsi_history.json"
TMP_REQUEST="$LOGDIR/schnippsi_request.json"
TMP_RESPONSE="$LOGDIR/schnippsi_response.json"
LOG_FILE="$LOGDIR/token_log.json"
# History is one JSON array; the token log is JSON-Lines (one object per
# line), so it must NOT be seeded with "[]" — that corrupts the stream.
[ ! -f "$HISTORY_FILE" ] && echo "[]" > "$HISTORY_FILE"
touch "$LOG_FILE"
echo "🌳 Schnippsi fragt über OpenRouter: $QUESTION"
if [ -z "$API_KEY" ]; then
  echo "❗ Kein API-Key gesetzt. Bitte export OPENROUTER_API_KEY=... setzen"
  exit 1
fi
# Build the request body with jq so the question is safely JSON-escaped.
jq -n \
  --arg model "$MODEL" \
  --arg system_prompt "Du bist Schnippsi eine verspielte UI/UX-Ninja, die HTML, CSS und Barrierefreiheit kindgerecht erklärt." \
  --arg user "$QUESTION" \
  '{"model": $model, "temperature": 0.5, "messages": [{"role": "system", "content": $system_prompt}, {"role": "user", "content": $user}]}' > "$TMP_REQUEST"
curl -s https://openrouter.ai/api/v1/chat/completions \
  -H "Authorization: Bearer $API_KEY" \
  -H "Content-Type: application/json" \
  -d @"$TMP_REQUEST" > "$TMP_RESPONSE"
RESPONSE_TEXT=$(jq -r '.choices[0].message.content // empty' "$TMP_RESPONSE")
if [[ -z "$RESPONSE_TEXT" ]]; then
  echo "🚫 Keine Antwort vom Modell erhalten."
  exit 1
else
  echo -e "$RESPONSE_TEXT"
  # Append the assistant answer to the history array.
  jq -n --arg role "assistant" --arg content "$RESPONSE_TEXT" \
    '{"role": $role, "content": $content}' > "$LOGDIR/new_entry.json"
  jq -s '.[0] + [.[1]]' "$HISTORY_FILE" "$LOGDIR/new_entry.json" > "$LOGDIR/new_history.json" && \
    cp "$LOGDIR/new_history.json" "$HISTORY_FILE" && rm "$LOGDIR/new_history.json"
fi
# Token logging: store usage as a real JSON object (--argjson), not as an
# embedded string (--arg) — the string form produced the corrupted logs that
# fix_token_logs.sh had to repair. -c keeps one compact object per line.
if jq -e '.usage' "$TMP_RESPONSE" > /dev/null; then
  TIMESTAMP=$(date +"%Y-%m-%d %H:%M:%S")
  jq -n -c \
    --arg zeit "$TIMESTAMP" \
    --arg rolle "schnippsi" \
    --argjson usage "$(jq -c '.usage' "$TMP_RESPONSE")" \
    '{zeit: $zeit, rolle: $rolle, usage: $usage}' >> "$LOG_FILE"
fi

View File

@@ -1,56 +0,0 @@
#!/bin/bash
# CapaciTobi: ask the OpenRouter chat API a question in the voice of the
# "Tobi" character; keep history + token-usage log under ~/.tobi_logs.
QUESTION="$*"
MODEL="openai/gpt-3.5-turbo"
API_KEY="${OPENROUTER_API_KEY}"
LOGDIR="$HOME/.tobi_logs"
mkdir -p "$LOGDIR"
HISTORY_FILE="$LOGDIR/tobi_history.json"
TMP_REQUEST="$LOGDIR/tobi_request.json"
TMP_RESPONSE="$LOGDIR/tobi_response.json"
LOG_FILE="$LOGDIR/token_log.json"
# History is one JSON array; the token log is JSON-Lines (one object per
# line), so it must NOT be seeded with "[]" — that corrupts the stream.
[ ! -f "$HISTORY_FILE" ] && echo "[]" > "$HISTORY_FILE"
touch "$LOG_FILE"
echo "🌳 Tobi fragt über OpenRouter: $QUESTION"
if [ -z "$API_KEY" ]; then
  echo "❗ Kein API-Key gesetzt. Bitte export OPENROUTER_API_KEY=... setzen"
  exit 1
fi
# Build the request body with jq so the question is safely JSON-escaped.
jq -n \
  --arg model "$MODEL" \
  --arg system_prompt "Du bist CapaciTobi ein quirliges Eichhörnchen, das Kindern Strom, Spannung, Widerstand und Kapazität erklärt." \
  --arg user "$QUESTION" \
  '{"model": $model, "temperature": 0.5, "messages": [{"role": "system", "content": $system_prompt}, {"role": "user", "content": $user}]}' > "$TMP_REQUEST"
curl -s https://openrouter.ai/api/v1/chat/completions \
  -H "Authorization: Bearer $API_KEY" \
  -H "Content-Type: application/json" \
  -d @"$TMP_REQUEST" > "$TMP_RESPONSE"
RESPONSE_TEXT=$(jq -r '.choices[0].message.content // empty' "$TMP_RESPONSE")
if [[ -z "$RESPONSE_TEXT" ]]; then
  echo "🚫 Keine Antwort vom Modell erhalten."
  exit 1
else
  echo -e "$RESPONSE_TEXT"
  # Append the assistant answer to the history array.
  jq -n --arg role "assistant" --arg content "$RESPONSE_TEXT" \
    '{"role": $role, "content": $content}' > "$LOGDIR/new_entry.json"
  jq -s '.[0] + [.[1]]' "$HISTORY_FILE" "$LOGDIR/new_entry.json" > "$LOGDIR/new_history.json" && \
    cp "$LOGDIR/new_history.json" "$HISTORY_FILE" && rm "$LOGDIR/new_history.json"
fi
# Token logging: store usage as a real JSON object (--argjson), not as an
# embedded string (--arg) — the string form produced the corrupted logs that
# fix_token_logs.sh had to repair. -c keeps one compact object per line.
if jq -e '.usage' "$TMP_RESPONSE" > /dev/null; then
  TIMESTAMP=$(date +"%Y-%m-%d %H:%M:%S")
  jq -n -c \
    --arg zeit "$TIMESTAMP" \
    --arg rolle "tobi" \
    --argjson usage "$(jq -c '.usage' "$TMP_RESPONSE")" \
    '{zeit: $zeit, rolle: $rolle, usage: $usage}' >> "$LOG_FILE"
fi

View File

@@ -1,63 +0,0 @@
#!/bin/bash
# Repair Crumbforest token logs in which the "usage" field was stored as an
# embedded JSON *string* instead of a JSON object (the character scripts
# logged with `--arg usage`, which stringifies the usage payload).
echo "🔧 Repariere Crumbforest Token-Logs mit eingebetteten JSON-Strings …"
# Every per-character log directory that may contain a token_log.json.
LOG_DIRS=(
"/home/zero/.bugsy_logs"
"/home/zero/.deepbit_logs"
"/home/zero/.dumbo_logs"
"/home/zero/.funkfox_logs"
"/home/zero/.pepper_logs"
"/home/zero/.schnecki_logs"
"/home/zero/.schnippsi_logs"
"/home/zero/.schraubaer_logs"
"/home/zero/.snake_logs"
"/home/zero/.taube_logs"
"/home/zero/.templatus_logs"
"/home/zero/.tobi_logs"
)
for dir in "${LOG_DIRS[@]}"; do
FILE="$dir/token_log.json"
if [ -f "$FILE" ]; then
echo "🔍 Prüfe $FILE"
TMP="$FILE.fixed"
# Filter & repair entry by entry. `jq -c '.'` re-emits every top-level JSON
# value on its own line. The while loop runs in a pipeline subshell, but the
# `>> "$TMP"` appends write to a real file, so $TMP is complete afterwards.
jq -c '.' "$FILE" 2>/dev/null | while read -r line; do
usage_raw=$(echo "$line" | jq -r '.usage')
if [[ "$usage_raw" =~ ^\{.*\}$ ]]; then
# usage is already a proper object — take the line over unchanged
echo "$line" >> "$TMP"
else
# usage is a string — try to parse it and re-embed it as a real object
usage_fixed=$(echo "$usage_raw" | jq '.' 2>/dev/null)
if [ $? -eq 0 ]; then
zeit=$(echo "$line" | jq -r '.zeit')
rolle=$(echo "$line" | jq -r '.rolle')
# Rebuild the entry with usage as a JSON object (--argjson).
jq -n \
--arg zeit "$zeit" \
--arg rolle "$rolle" \
--argjson usage "$usage_fixed" \
'{zeit: $zeit, rolle: $rolle, usage: $usage}' >> "$TMP"
else
echo "⚠️ Ungültige Zeile übersprungen in $FILE:"
echo "$line"
fi
fi
done
# Only replace the original if we actually wrote something
if [ -s "$TMP" ]; then
mv "$TMP" "$FILE"
echo "✅ Repariert: $FILE"
else
echo " Keine gültigen Einträge in $FILE"
rm -f "$TMP"
fi
else
echo "❌ Datei nicht gefunden: $FILE"
fi
done
echo "🎉 Alle Token-Logs geprüft und repariert (sofern nötig)."

View File

@@ -1,52 +0,0 @@
from flask import Flask, render_template, Response, request, redirect
import cv2
import json
from datetime import datetime

app = Flask(__name__)


def gen_frames():
    """Yield MJPEG multipart chunks read from the default camera (device 0)."""
    cam = cv2.VideoCapture(0)
    if not cam.isOpened():
        print("[WARNUNG] Kamera konnte nicht geöffnet werden.")
        return
    try:
        while True:
            success, frame = cam.read()
            if not success:
                break
            ret, buffer = cv2.imencode('.jpg', frame)
            frame = buffer.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
    finally:
        # Always free the device, even when the HTTP client disconnects.
        cam.release()
        print("[info] Kamera wurde sauber freigegeben.")


@app.route('/')
def index():
    # Landing page with the live stream and the answer form.
    return render_template('index.html')


@app.route('/video_feed')
def video_feed():
    # MJPEG stream endpoint consumed by the <img> tag on the index page.
    return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')


@app.route('/log_answer', methods=['POST'])
def log_answer():
    """Append the submitted answer/mood/gesture as one JSON line to snake_log.jsonl."""
    user_input = request.form.get('antwort', 'nichts gesagt')
    mood = request.form.get('mood', 'unspecified')
    gesture = request.form.get('gesture', 'none')
    timestamp = datetime.now().isoformat()
    log_entry = {
        'timestamp': timestamp,
        'antwort': user_input,
        'mood': mood,
        'gesture': gesture
    }
    with open("snake_log.jsonl", "a") as log_file:
        log_file.write(json.dumps(log_entry) + "\n")
    return redirect("/")

View File

@@ -1,13 +0,0 @@
import cv2


def gen_frames():
    """Yield MJPEG multipart chunks from the default camera (device 0).

    Generator producing ``--frame`` delimited JPEG chunks suitable for a
    ``multipart/x-mixed-replace`` HTTP response.
    """
    cap = cv2.VideoCapture(0)
    if not cap.isOpened():
        # No camera available: end the stream instead of spinning on failed reads.
        return
    try:
        while True:
            success, frame = cap.read()
            if not success:
                break
            ret, buffer = cv2.imencode('.jpg', frame)
            if not ret:
                # Encoding failed for this frame; skip it rather than emit garbage.
                continue
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + buffer.tobytes() + b'\r\n')
    finally:
        # Release the device even when the consumer abandons the generator —
        # the original leaked the capture handle (its sibling app.py releases it).
        cap.release()

View File

@@ -1,3 +0,0 @@
#!/bin/bash
# Launcher: announce startup, then replace this shell with the Python app
# so signals go straight to the server process.
printf '%s\n' "🐍 Starte SnakeCam ..."
exec python3 app.py

View File

@@ -1,15 +0,0 @@
# snakecam_module.py
import cv2


def init_camera(index=0):
    """Open the camera at *index*; return the capture handle, or None on failure."""
    capture = cv2.VideoCapture(index)
    if capture.isOpened():
        print("[OK] Kamera erfolgreich geöffnet.")
        return capture
    print("[WARNUNG] Kamera konnte nicht geöffnet werden.")
    return None


def release_camera(cam):
    """Release *cam* if it is an open capture handle; safe to call with None."""
    if cam and cam.isOpened():
        cam.release()
        print("[INFO] Kamera wurde freigegeben.")

View File

@@ -1,9 +0,0 @@
/* SnakeCam page styling: pale green, playful child-friendly look. */
body {
  background-color: #f0fff0;
  font-family: 'Comic Sans MS', cursive, sans-serif;
  text-align: center;
  color: #006400;
}

h1 {
  margin-top: 20px;
}

View File

@@ -1,80 +0,0 @@
<!DOCTYPE html>
<html lang="de">
<head>
  <meta charset="UTF-8">
  <title>🐍 SnakeCam Krümelblick ins Versteck</title>
  <style>
    body {
      font-family: 'Comic Sans MS', sans-serif;
      background-color: #e9f5e9;
      color: #333;
      text-align: center;
      padding: 2rem;
    }
    img {
      border: 4px dashed #4caf50;
      border-radius: 12px;
      max-width: 100%;
      height: auto;
    }
    form {
      margin-top: 2rem;
    }
    input, select {
      padding: 0.5rem;
      font-size: 1rem;
      margin: 0.5rem;
      border-radius: 6px;
      border: 1px solid #ccc;
    }
    button {
      padding: 0.7rem 1.2rem;
      font-size: 1rem;
      background-color: #4caf50;
      color: white;
      border: none;
      border-radius: 8px;
      cursor: pointer;
    }
    button:hover {
      background-color: #388e3c;
    }
  </style>
</head>
<body>
  <h1>🐍 SnakeCam</h1>
  <p>Willkommen kleiner Krümel! Hier siehst du, was deine Kamera entdeckt.</p>
  <!-- Live stream served by the Flask /video_feed MJPEG endpoint -->
  <img src="/video_feed" alt="Live-Übertragung von SnakeCam 🐍" />
  <!-- Input form: posts answer/mood/gesture to /log_answer -->
  <form action="/log_answer" method="POST">
    <p><strong>Was fühlst du gerade?</strong></p>
    <input type="text" name="antwort" placeholder="Ich sehe einen ... 🐞🌳🤖" required>
    <p><strong>Wie ist deine Stimmung?</strong></p>
    <select name="mood">
      <option value="happy">😊 Glücklich</option>
      <option value="curious">🤔 Neugierig</option>
      <option value="calm">😌 Ruhig</option>
      <option value="excited">😃 Aufgeregt</option>
      <option value="unspecified">🤷 Keine Angabe</option>
    </select>
    <p><strong>Hast du eine Geste gemacht?</strong></p>
    <select name="gesture">
      <option value="none">🚫 Keine</option>
      <option value="wave">👋 Winken</option>
      <option value="thumbs_up">👍 Daumen hoch</option>
      <option value="peace">✌️ Peace</option>
      <option value="other">✨ Etwas anderes</option>
    </select>
    <br><br>
    <button type="submit">🎯 Eintragen</button>
  </form>
</body>
</html>

View File

@@ -1,73 +0,0 @@
# -*- coding: utf-8 -*-
from flask import Flask, render_template, Response, request, redirect, url_for
import cv2
import json
from datetime import datetime
from gestures.gestures_debug_test import detect_hand_gesture

app = Flask(__name__)

# Initialise the camera once at module load and keep it open for the
# lifetime of the process (released explicitly via the /shutdown route).
cam = cv2.VideoCapture(0)
cam.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)


def gen_frames():
    """Yield MJPEG multipart chunks with the gesture ROI/label drawn in."""
    while True:
        success, frame = cam.read()
        if not success:
            print("[error] Kein Kamerabild.")
            break
        # Optional mirroring (selfie effect).
        frame = cv2.flip(frame, 1)
        # Detect the gesture and get the ROI rectangle to display.
        gesture, roi_coords = detect_hand_gesture(frame)
        x, y, w, h = roi_coords
        # Draw the ROI rectangle and the detected gesture label.
        cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        cv2.putText(frame, f"Geste: {gesture}", (10, 30), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
        # MJPEG streaming chunk.
        ret, buffer = cv2.imencode('.jpg', frame)
        frame_bytes = buffer.tobytes()
        yield (b'--frame\r\n'
               b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')


@app.route('/')
def index():
    # Landing page with the live stream and the logging form.
    return render_template('index.html')


@app.route('/video_feed')
def video_feed():
    # MJPEG stream endpoint consumed by the <img> tag on the index page.
    return Response(gen_frames(),
                    mimetype='multipart/x-mixed-replace; boundary=frame')


@app.route('/log_answer', methods=['POST'])
def log_answer():
    """Append answer/mood/gesture from the form as one JSON line to snake_log.jsonl."""
    answer = request.form.get("antwort", "").strip()
    mood = request.form.get("mood", "neutral")
    gesture = request.form.get("gesture", "none")
    log_entry = {
        "timestamp": datetime.now().isoformat(),
        "antwort": answer,
        "mood": mood,
        "gesture": gesture
    }
    with open("snake_log.jsonl", "a") as logfile:
        logfile.write(json.dumps(log_entry) + "\n")
    print(f"[log] Eingeloggt: {log_entry}")
    return redirect(url_for('index'))


@app.route('/shutdown')
def shutdown():
    # Release the module-level camera; streaming stops delivering frames after this.
    cam.release()
    print("[info] Kamera wurde sauber freigegeben.")
    return "Kamera freigegeben."


if __name__ == '__main__':
    print("Starte SnakeCam ...")
    app.run(host='0.0.0.0')

View File

@@ -1,59 +0,0 @@
print("[SnakeCam] Initialisierung beginnt...")

from flask import Flask, render_template, Response, request, redirect
import cv2
import json
from datetime import datetime

print("[SnakeCam] Imports erfolgreich.")

app = Flask(__name__)


def gen_frames():
    """Yield MJPEG multipart chunks; opens the camera per stream request."""
    cam = cv2.VideoCapture(0)
    if not cam.isOpened():
        print("[WARNUNG] Kamera konnte nicht geöffnet werden.")
        return
    try:
        while True:
            success, frame = cam.read()
            if not success:
                break
            ret, buffer = cv2.imencode('.jpg', frame)
            frame = buffer.tobytes()
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame + b'\r\n')
    finally:
        # Release the device even when the client disconnects mid-stream.
        cam.release()
        print("[info] Kamera wurde sauber freigegeben.")


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/video_feed')
def video_feed():
    # MJPEG stream endpoint consumed by the index page.
    return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')


@app.route('/log_answer', methods=['POST'])
def log_answer():
    """Append the submitted answer/mood/gesture as one JSON line to snake_log.jsonl."""
    user_input = request.form.get('antwort', 'nichts gesagt')
    mood = request.form.get('mood', 'unspecified')
    gesture = request.form.get('gesture', 'none')
    timestamp = datetime.now().isoformat()
    log_entry = {
        'timestamp': timestamp,
        'antwort': user_input,
        'mood': mood,
        'gesture': gesture
    }
    with open("snake_log.jsonl", "a") as log_file:
        log_file.write(json.dumps(log_entry) + "\n")
    return redirect("/")


if __name__ == "__main__":
    print("[SnakeCam] Starte Flask Webserver ...")
    app.run(host='0.0.0.0', port=5000)

View File

@@ -1,72 +0,0 @@
from flask import Flask, render_template, Response, request, redirect
import cv2
import datetime
import json
from gestures_debug import detect_hand_gesture

app = Flask(__name__)


def gen_frames():
    """Yield MJPEG chunks with the gesture ROI and label drawn onto each frame."""
    cam = cv2.VideoCapture(0)
    cam.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
    cam.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)
    if not cam.isOpened():
        print("[WARNUNG] Kamera konnte nicht geöffnet werden.")
        return
    try:
        while True:
            success, frame = cam.read()
            if not success:
                break
            # Flip horizontally for a mirror image.
            frame = cv2.flip(frame, 1)
            # Detect the gesture in the current frame.
            gesture, roi = detect_hand_gesture(frame)
            # Only draw the overlay when the detector returned a rectangle.
            if isinstance(roi, tuple) and len(roi) == 4:
                x, y, w, h = roi
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
                cv2.putText(frame, gesture, (x, y - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)
            # Convert the frame to JPEG.
            ret, buffer = cv2.imencode('.jpg', frame)
            frame_bytes = buffer.tobytes()
            # Deliver as an MJPEG stream chunk.
            yield (b'--frame\r\n'
                   b'Content-Type: image/jpeg\r\n\r\n' + frame_bytes + b'\r\n')
    finally:
        # Always free the camera device.
        cam.release()
        print("[info] Kamera wurde sauber freigegeben.")


@app.route('/')
def index():
    return render_template('index.html')


@app.route('/video_feed')
def video_feed():
    return Response(gen_frames(), mimetype='multipart/x-mixed-replace; boundary=frame')


@app.route('/log_answer', methods=['POST'])
def log_answer():
    """Append the submitted form fields as one JSON line to snake_log.jsonl."""
    data = {
        "timestamp": datetime.datetime.now().isoformat(),
        "antwort": request.form.get("antwort", ""),
        "mood": request.form.get("mood", ""),
        "gesture": request.form.get("gesture", "")
    }
    with open("snake_log.jsonl", "a") as f:
        f.write(json.dumps(data) + "\n")
    return redirect("/")


if __name__ == '__main__':
    print("[SnakeCam] Initialisierung beginnt...")
    print("[SnakeCam] Imports erfolgreich.")
    print("[SnakeCam] Starte Flask Webserver ...")
    app.run(host='0.0.0.0', port=5000)

View File

@@ -1,77 +0,0 @@
# gestures_v3.py
import cv2
import numpy as np
from datetime import datetime

# Path of the temporary snapshot written for diagnostics.
DEBUG_SNAPSHOT_PATH = "/tmp/roi_snapshot.jpg"


def detect_hand_gesture(frame):
    """
    Detect simple hand gestures such as 'wave' (open hand) and 'fist'
    (closed fist) by analysing contours in the lower-right image region.
    Detection is based on convex-hull / convexity-defect analysis.
    Args:
        frame (ndarray): the current camera image
    Returns:
        (str, ndarray): (gesture label, region-of-interest crop)
    """
    height, width, _ = frame.shape
    # ROI: lower-right corner (bottom-right portion of the image).
    roi = frame[int(height * 0.6):height, int(width * 0.6):width]
    # Save snapshot for debugging.
    # NOTE(review): this writes /tmp/roi_snapshot.jpg on *every* frame — heavy
    # for a live stream; consider removing or rate-limiting it.
    cv2.imwrite(DEBUG_SNAPSHOT_PATH, roi)
    # Convert to HSV.
    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # Skin-colour mask (tuned for varying lighting).
    lower_skin = np.array([0, 30, 60], dtype=np.uint8)
    upper_skin = np.array([20, 150, 255], dtype=np.uint8)
    mask = cv2.inRange(hsv, lower_skin, upper_skin)
    # Smooth the mask and find contours.
    mask = cv2.GaussianBlur(mask, (7, 7), 0)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if contours and len(contours) > 0:
        # Largest contour is assumed to be the palm.
        contour = max(contours, key=cv2.contourArea)
        # Avoid false positives: area too small.
        if cv2.contourArea(contour) < 1000:
            return ("none", roi)
        # Convex hull and convexity defects.
        hull = cv2.convexHull(contour, returnPoints=False)
        if hull is not None and len(hull) > 3:
            defects = cv2.convexityDefects(contour, hull)
            if defects is not None:
                finger_count = 0
                for i in range(defects.shape[0]):
                    s, e, f, d = defects[i, 0]
                    start = tuple(contour[s][0])
                    end = tuple(contour[e][0])
                    far = tuple(contour[f][0])
                    # Analyse the defect angle: more sharp defects => more open fingers.
                    a = np.linalg.norm(np.array(start) - np.array(end))
                    b = np.linalg.norm(np.array(start) - np.array(far))
                    c = np.linalg.norm(np.array(end) - np.array(far))
                    angle = np.arccos((b ** 2 + c ** 2 - a ** 2) / (2 * b * c + 1e-5))
                    if angle <= np.pi / 2:  # < 90°
                        finger_count += 1
                # Classify based on the counted fingers.
                if finger_count >= 3:
                    return ("wave", roi)
                elif finger_count == 0:
                    return ("fist", roi)
    return ("none", roi)

View File

@@ -1,35 +0,0 @@
import cv2
import numpy as np


# --- Hand-gesture detection: simple skin-colour filter + contour analysis ---
def detect_hand_gesture(frame):
    """Report "wave" when a large skin-coloured contour fills a fixed ROI.

    Looks at the 100x100 region near the top-left corner, masks skin tones
    in HSV space and checks the largest contour's area.  The ROI box and the
    gesture label are drawn onto *frame*.

    Returns:
        (str, ndarray): gesture label and the annotated frame.
    """
    region = frame[20:120, 20:120]
    hsv_region = cv2.cvtColor(region, cv2.COLOR_BGR2HSV)

    # Rough skin-tone band in HSV (may need tuning per lighting).
    skin_lo = np.array([0, 30, 60], dtype=np.uint8)
    skin_hi = np.array([20, 150, 255], dtype=np.uint8)
    skin_mask = cv2.inRange(hsv_region, skin_lo, skin_hi)

    found, _ = cv2.findContours(skin_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    gesture = "none"
    if found and cv2.contourArea(max(found, key=cv2.contourArea)) > 1000:
        # Big enough contour → treat it as a "wave".
        gesture = "wave"

    # Mark the ROI and print the result onto the main frame.
    cv2.rectangle(frame, (20, 20), (120, 120), (0, 255, 0), 2)
    cv2.putText(frame, f"Gesture: {gesture}", (20, 160),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
    return gesture, frame

View File

@@ -1,50 +0,0 @@
import cv2
import numpy as np


def detect_hand_gesture(frame):
    """Detect 'wave' vs 'fist' in a centred 200x200 ROI via skin-colour
    contours and convexity defects; draws the ROI box onto *frame*.

    Returns:
        (str, ndarray): gesture label and the ROI crop.
    """
    height, width, _ = frame.shape
    # Dynamic ROI: box centred in the middle of the image.
    roi_w, roi_h = 200, 200
    x_start = width // 2 - roi_w // 2
    y_start = height // 2 - roi_h // 2
    roi = frame[y_start:y_start + roi_h, x_start:x_start + roi_w]
    # Draw the ROI window onto the original image.
    cv2.rectangle(frame, (x_start, y_start), (x_start + roi_w, y_start + roi_h), (0, 255, 0), 2)
    # Convert to HSV colour space for more robust skin detection.
    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # Skin-colour mask (tested for medium skin tones).
    lower_skin = np.array([0, 30, 60], dtype=np.uint8)
    upper_skin = np.array([20, 150, 255], dtype=np.uint8)
    mask = cv2.inRange(hsv, lower_skin, upper_skin)
    # Smooth and find contours.
    mask = cv2.GaussianBlur(mask, (5, 5), 0)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        # Assume the largest contour is the hand.
        max_contour = max(contours, key=cv2.contourArea)
        area = cv2.contourArea(max_contour)
        if area > 2000:
            # Count convexity defects (gaps between fingers).
            hull = cv2.convexHull(max_contour, returnPoints=False)
            if hull is not None and len(hull) > 3:
                defects = cv2.convexityDefects(max_contour, hull)
                finger_count = 0
                if defects is not None:
                    for i in range(defects.shape[0]):
                        s, e, f, d = defects[i, 0]
                        # Deep defects only — shallow ones are noise.
                        if d > 10000:
                            finger_count += 1
                if finger_count >= 3:
                    return "wave", roi
                else:
                    return "fist", roi
    return "none", roi

View File

@@ -1,50 +0,0 @@
# gestures_debug.py
import cv2
import numpy as np


def detect_hand_gesture(frame):
    """Debug variant: classify 'wave'/'fist'/'unknown' from the raw
    convexity-defect count inside a fixed 100x100 ROI at the image centre;
    prints diagnostics along the way.

    Returns:
        (str, tuple): gesture label and the ROI rectangle (x, y, w, h).
    """
    height, width, _ = frame.shape
    # --- Fallback ROI: centre of the image ---
    w, h = 100, 100
    x = width // 2 - w // 2
    y = height // 2 - h // 2
    roi = frame[y:y+h, x:x+w]
    if roi.size == 0:
        print("[warn] ROI leer kein Bildausschnitt verarbeitet")
        return "none", (x, y, w, h)
    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    lower_skin = np.array([0, 20, 70], dtype=np.uint8)
    upper_skin = np.array([20, 255, 255], dtype=np.uint8)
    mask = cv2.inRange(hsv, lower_skin, upper_skin)
    # Dilate + blur to close holes in the skin mask before contour search.
    mask = cv2.dilate(mask, np.ones((3, 3), np.uint8), iterations=4)
    mask = cv2.GaussianBlur(mask, (5, 5), 100)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    gesture = "none"
    if contours and len(contours) > 0:
        max_contour = max(contours, key=cv2.contourArea)
        hull = cv2.convexHull(max_contour, returnPoints=False)
        if hull is not None and len(hull) > 3:
            defects = cv2.convexityDefects(max_contour, hull)
            if defects is not None:
                cnt_defects = defects.shape[0]
                # Many defects => open hand (wave); very few => fist.
                if cnt_defects >= 4:
                    gesture = "wave"
                elif cnt_defects <= 1:
                    gesture = "fist"
                else:
                    gesture = "unknown"
            print(f"[debug] Defekte: {len(defects) if defects is not None else 'None'}")
    else:
        print("[debug] Keine Konturen erkannt")
    print(f"[result] Geste erkannt: {gesture}")
    return gesture, (x, y, w, h)

View File

@@ -1,29 +0,0 @@
import cv2
import numpy as np


def detect_hand_gesture(frame):
    """Report "hand" when a large skin-coloured contour fills a fixed ROI.

    Returns:
        (str, tuple): gesture label and the ROI rectangle as (x, y, w, h).
    """
    box = (100, 100, 200, 150)
    x, y, w, h = box
    hsv = cv2.cvtColor(frame[y:y+h, x:x+w], cv2.COLOR_BGR2HSV)

    # Coarse skin-tone range in HSV.
    mask = cv2.inRange(hsv,
                       np.array([0, 30, 60], dtype=np.uint8),
                       np.array([20, 150, 255], dtype=np.uint8))
    mask = cv2.GaussianBlur(mask, (5, 5), 0)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    if contours:
        biggest = max(contours, key=cv2.contourArea)
        size = cv2.contourArea(biggest)
        if size > 3000:
            print(f"[debug] Fläche erkannt: {int(size)}")
            return "hand", box
    return "none", box

View File

@@ -1,39 +0,0 @@
import cv2
import numpy as np


def detect_hand_gesture(frame):
    """Detect a 'wave' gesture in a fixed 150x150 ROI using skin-colour
    contours plus a defect/area/aspect-ratio rule; prints diagnostics.

    Returns:
        (str, tuple): gesture label and the ROI rectangle (x, y, w, h).
    """
    x, y, w, h = 100, 100, 150, 150
    roi = frame[y:y+h, x:x+w]
    hsv = cv2.cvtColor(roi, cv2.COLOR_BGR2HSV)
    # Simple skin tones (light to slightly tanned).
    lower_skin = np.array([0, 30, 60], dtype=np.uint8)
    upper_skin = np.array([20, 150, 255], dtype=np.uint8)
    mask = cv2.inRange(hsv, lower_skin, upper_skin)
    mask = cv2.GaussianBlur(mask, (5, 5), 0)
    contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    gesture = "none"
    if contours:
        contour = max(contours, key=cv2.contourArea)
        area = cv2.contourArea(contour)
        if area > 2500:
            hull = cv2.convexHull(contour, returnPoints=False)
            if hull is not None and len(hull) > 3:
                defects = cv2.convexityDefects(contour, hull)
                if defects is not None:
                    defect_count = defects.shape[0]
                    # NOTE(review): w and h are the constant ROI sizes (150/150
                    # == 1.0), so the `1.3 < ratio < 2.3` condition below can
                    # never hold and 'wave' is unreachable as written —
                    # presumably the contour's bounding-box ratio was intended.
                    # TODO confirm before reuse.
                    ratio = float(w) / h
                    print(f"[debug] Defekte: {defect_count}, Fläche: {int(area)}, Ratio: {ratio:.2f}")
                    # 🌊 simple rule for waving
                    if 3 <= defect_count <= 10 and 2000 < area < 15000 and 1.3 < ratio < 2.3:
                        gesture = "wave"
    return gesture, (x, y, w, h)

View File

@@ -1,3 +0,0 @@
#!/bin/bash
# Launch SnakeCam. Use exec so the Python process replaces this shell
# (consistent with the other start script) and receives signals directly.
echo "🐍 Starte SnakeCam ..."
exec python3 app.py

View File

@@ -1,62 +0,0 @@
/* SnakeCam Vision: dark-theme page styling. */
body {
  font-family: Arial, sans-serif;
  background-color: #111;
  color: #eee;
  text-align: center;
  padding: 20px;
}

h1 {
  margin-bottom: 20px;
  color: #90ee90;
}

.video-container {
  margin-bottom: 20px;
}

/* Live-stream image frame. */
img {
  width: 320px;
  height: auto;
  border: 2px solid #555;
  border-radius: 8px;
}

img.mirrored {
  /* transform: scaleX(-1);  horizontal mirroring — currently disabled */
  transform: none;
}

/* Answer form card. */
form {
  display: inline-block;
  text-align: left;
  background: #222;
  padding: 15px;
  border-radius: 10px;
}

label, select, input, button {
  display: block;
  width: 100%;
  margin-bottom: 10px;
}

input, select {
  padding: 6px;
  border-radius: 4px;
  border: none;
}

button {
  background-color: #28a745;
  color: white;
  font-weight: bold;
  cursor: pointer;
  border: none;
  padding: 10px;
  border-radius: 5px;
}

button:hover {
  background-color: #218838;
}

View File

@@ -1,115 +0,0 @@
<!DOCTYPE html>
<html lang="de">
<head>
  <meta charset="UTF-8">
  <title>🐍 SnakeCam Vision</title>
  <style>
    body {
      font-family: Arial, sans-serif;
      background-color: #111;
      color: #eee;
      text-align: center;
      padding: 20px;
    }
    h1 {
      margin-bottom: 20px;
      color: #90ee90;
    }
    .video-container {
      display: inline-block;
      border: 4px solid #333;
      background: black;
    }
    img {
      width: 320px;
      height: 240px;
      object-fit: cover;
    }
    /* NOTE(review): .video-container is declared twice; the margin-bottom
       here merges with the rule above. Consider consolidating. */
    .video-container {
      margin-bottom: 20px;
    }
    form {
      display: inline-block;
      text-align: left;
      background: #222;
      padding: 15px;
      border-radius: 10px;
    }
    label, select, input, button {
      display: block;
      width: 100%;
      margin-bottom: 10px;
    }
    input, select {
      padding: 6px;
      border-radius: 4px;
      border: none;
    }
    button {
      background-color: #28a745;
      color: white;
      font-weight: bold;
      cursor: pointer;
      border: none;
      padding: 10px;
      border-radius: 5px;
    }
    button:hover {
      background-color: #218838;
    }
  </style>
</head>
<body>
  <h1>🐍 SnakeCam Vision</h1>
  <!-- Live MJPEG stream from the Flask /video_feed endpoint -->
  <div class="video-container">
    <img src="{{ url_for('video_feed') }}" alt="Live Video Feed">
  </div>
  <br clear=all/>
  <!-- Answer form: submitted via fetch() below, without a page reload -->
  <form id="logForm">
    <p>
      <label for="antwort">Antwort:</label><br>
      <input type="text" id="antwort" name="antwort" placeholder="Was denkst du?">
    </p>
    <p>
      <label for="mood">Stimmung:</label><br>
      <select id="mood" name="mood">
        <option value="happy">😄 Happy</option>
        <option value="excited">🤩 Excited</option>
        <option value="neutral">😐 Neutral</option>
        <option value="confused">😕 Confused</option>
      </select>
    </p>
    <p>
      <label for="gesture">Geste:</label><br>
      <select id="gesture" name="gesture">
        <option value="wave">👋 Wave</option>
        <option value="fist">✊ Faust</option>
        <option value="none">🚫 Keine</option>
      </select>
    </p>
    <button type="submit">📝 Speichern</button>
  </form>
  <script>
    // Post the form to /log_answer asynchronously and clear the fields.
    document.getElementById("logForm").addEventListener("submit", async function(e) {
      e.preventDefault(); // No reload!
      const formData = new FormData(this);
      await fetch("/log_answer", {
        method: "POST",
        body: formData
      });
      console.log("✅ Antwort wurde gespeichert.");
      this.reset(); // Clear the input fields
    });
  </script>
</body>
</html>

View File

@@ -1,4 +0,0 @@