fix: Stabilize vector indexing, add Gitea linking, enhance admin dashboard

This commit is contained in:
2025-12-07 18:42:38 +01:00
parent 7b300d1ba1
commit 9f2e599846
58 changed files with 12197 additions and 503 deletions

View File

@@ -96,13 +96,14 @@ class OpenRouterProvider(BaseProvider):
except KeyError as e:
raise RuntimeError(f"Unexpected OpenRouter API response format: {str(e)}")
def get_completion(self, prompt: str, context: str = "") -> str:
def get_completion(self, prompt: str, context: str = "", model: str = None) -> str:
"""
Generate a completion using OpenRouter API.
Args:
prompt: The user's question
context: Optional context from retrieved documents
model: Optional model to override default
Returns:
The generated response
@@ -124,11 +125,20 @@ class OpenRouterProvider(BaseProvider):
messages = []
# Add system message with context if provided
# NOTE: If a detailed system prompt is passed as context (as RAGChatService does),
# treat it as the system prompt itself, not just the "Answer based on..." wrapper.
if context:
messages.append({
"role": "system",
"content": f"Answer the user's question based on the following context:\n\n{context}"
})
# Check if context looks like a full system prompt or just data
if "You are" in context or "Du bist" in context or "Tu es" in context:
messages.append({
"role": "system",
"content": context
})
else:
messages.append({
"role": "system",
"content": f"Answer the user's question based on the following context:\n\n{context}"
})
# Add user message
messages.append({
@@ -137,7 +147,7 @@ class OpenRouterProvider(BaseProvider):
})
data = {
"model": self.completion_model,
"model": model or self.completion_model,
"messages": messages,
"max_tokens": 2000,
"temperature": 0.7