first pass async feedback complete, regressions added
This commit is contained in:
@@ -2,7 +2,7 @@
|
||||
Feedback-related API endpoints.
|
||||
"""
|
||||
|
||||
from typing import List, Dict
|
||||
from typing import List, Dict, Any
|
||||
from fastapi import APIRouter, HTTPException, Depends, status
|
||||
from pydantic import BaseModel
|
||||
|
||||
@@ -18,11 +18,90 @@ class GenerateFeedbackWordsResponse(BaseModel):
|
||||
theme_words: List[str]
|
||||
count: int = 6
|
||||
|
||||
class FeedbackQueuedWordsResponse(BaseModel):
    """Response model for queued feedback words."""
    # Words currently queued for user weighting (positions 0-5).
    queued_words: List[FeedbackWord]
    # Number of entries in queued_words.
    count: int
|
||||
|
||||
class FeedbackActiveWordsResponse(BaseModel):
    """Response model for active feedback words."""
    # Words currently active for prompt generation (positions 6-11).
    active_words: List[FeedbackWord]
    # Number of entries in active_words.
    count: int
|
||||
|
||||
class FeedbackHistoricResponse(BaseModel):
    """Response model for full feedback history."""
    # Raw historic feedback entries as stored (each dict carries a
    # feedbackNN key plus, typically, a "weight" -- shape not enforced here).
    feedback_history: List[Dict[str, Any]]
    # Number of entries in feedback_history.
    count: int
|
||||
|
||||
# Service dependency
async def get_prompt_service() -> PromptService:
    """FastAPI dependency that constructs a PromptService for the request."""
    service = PromptService()
    return service
|
||||
|
||||
@router.get("/queued", response_model=FeedbackQueuedWordsResponse)
async def get_queued_feedback_words(
    prompt_service: PromptService = Depends(get_prompt_service)
):
    """
    Get queued feedback words (positions 0-5) for user weighting.

    Returns:
        List of queued feedback words with weights

    Raises:
        HTTPException: 500 when the service lookup fails.
    """
    try:
        # Get queued feedback words from PromptService
        queued_feedback_items = await prompt_service.get_feedback_queued_words()

        # Convert to FeedbackWord models.  Each item is shaped like
        # {"feedbackNN": <word>, "weight": <int>}; pick the non-"weight" key
        # explicitly so the lookup does not depend on dict insertion order
        # (list(item.keys())[0] would return "weight" if it came first).
        queued_words = []
        for item in queued_feedback_items:
            key = next(k for k in item if k != "weight")
            word = item[key]
            weight = item.get("weight", 3)  # Default weight is 3
            queued_words.append(FeedbackWord(key=key, word=word, weight=weight))

        return FeedbackQueuedWordsResponse(
            queued_words=queued_words,
            count=len(queued_words)
        )
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Error getting queued feedback words: {str(e)}"
        )
|
||||
|
||||
@router.get("/active", response_model=FeedbackActiveWordsResponse)
async def get_active_feedback_words(
    prompt_service: PromptService = Depends(get_prompt_service)
):
    """
    Get active feedback words (positions 6-11) for prompt generation.

    Returns:
        List of active feedback words with weights

    Raises:
        HTTPException: 500 when the service lookup fails.
    """
    try:
        # Get active feedback words from PromptService
        active_feedback_items = await prompt_service.get_feedback_active_words()

        # Convert to FeedbackWord models.  Pick the non-"weight" key
        # explicitly so the lookup does not depend on dict insertion order
        # (list(item.keys())[0] would return "weight" if it came first).
        active_words = []
        for item in active_feedback_items:
            key = next(k for k in item if k != "weight")
            word = item[key]
            weight = item.get("weight", 3)  # Default weight is 3
            active_words.append(FeedbackWord(key=key, word=word, weight=weight))

        return FeedbackActiveWordsResponse(
            active_words=active_words,
            count=len(active_words)
        )
    except Exception as e:
        raise HTTPException(
            status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
            detail=f"Error getting active feedback words: {str(e)}"
        )
|
||||
|
||||
@router.get("/generate", response_model=GenerateFeedbackWordsResponse)
|
||||
async def generate_feedback_words(
|
||||
prompt_service: PromptService = Depends(get_prompt_service)
|
||||
@@ -89,40 +168,23 @@ async def rate_feedback_words(
|
||||
detail=f"Error rating feedback words: {str(e)}"
|
||||
)
|
||||
|
||||
@router.get("/current", response_model=List[FeedbackWord])
async def get_current_feedback_words(
    prompt_service: PromptService = Depends(get_prompt_service)
):
    """
    Get current feedback words with weights.

    Returns:
        List of current feedback words with weights
    """
    # Not implemented in PromptService yet; always returns an empty list.
    # `return []` cannot raise, so the previous try/except wrapper that
    # re-raised as HTTP 500 was unreachable and has been removed.
    # TODO: wire this to prompt_service once the lookup exists.
    return []
|
||||
|
||||
@router.get("/history")
|
||||
@router.get("/history", response_model=FeedbackHistoricResponse)
|
||||
async def get_feedback_history(
|
||||
prompt_service: PromptService = Depends(get_prompt_service)
|
||||
):
|
||||
"""
|
||||
Get feedback word history.
|
||||
Get full feedback word history.
|
||||
|
||||
Returns:
|
||||
List of historic feedback words
|
||||
Full feedback history with weights
|
||||
"""
|
||||
try:
|
||||
# This would need to be implemented in PromptService
|
||||
# For now, return empty list
|
||||
return []
|
||||
feedback_historic = await prompt_service.get_feedback_historic()
|
||||
|
||||
return FeedbackHistoricResponse(
|
||||
feedback_history=feedback_historic,
|
||||
count=len(feedback_historic)
|
||||
)
|
||||
except Exception as e:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
|
||||
|
||||
@@ -52,8 +52,8 @@ class Settings(BaseSettings):
|
||||
# Data File Names (relative to DATA_DIR)
|
||||
PROMPTS_HISTORIC_FILE: str = "prompts_historic.json"
|
||||
PROMPTS_POOL_FILE: str = "prompts_pool.json"
|
||||
FEEDBACK_WORDS_FILE: str = "feedback_words.json"
|
||||
FEEDBACK_HISTORIC_FILE: str = "feedback_historic.json"
|
||||
# Note: feedback_words.json is deprecated and merged into feedback_historic.json
|
||||
|
||||
@validator("BACKEND_CORS_ORIGINS", pre=True)
|
||||
def assemble_cors_origins(cls, v: str | List[str]) -> List[str] | str:
|
||||
|
||||
@@ -211,8 +211,8 @@ class AIService:
|
||||
self,
|
||||
feedback_template: str,
|
||||
historic_prompts: List[Dict[str, str]],
|
||||
current_feedback_words: Optional[List[Dict[str, Any]]] = None,
|
||||
historic_feedback_words: Optional[List[Dict[str, str]]] = None
|
||||
queued_feedback_words: Optional[List[Dict[str, Any]]] = None,
|
||||
historic_feedback_words: Optional[List[Dict[str, Any]]] = None
|
||||
) -> List[str]:
|
||||
"""
|
||||
Generate theme feedback words using AI.
|
||||
@@ -220,8 +220,8 @@ class AIService:
|
||||
Args:
|
||||
feedback_template: Feedback analysis template
|
||||
historic_prompts: List of historic prompts for context
|
||||
current_feedback_words: Current feedback words with weights
|
||||
historic_feedback_words: Historic feedback words (just words)
|
||||
queued_feedback_words: Queued feedback words with weights (positions 0-5)
|
||||
historic_feedback_words: Historic feedback words with weights (all positions)
|
||||
|
||||
Returns:
|
||||
List of 6 theme words
|
||||
@@ -230,7 +230,7 @@ class AIService:
|
||||
full_prompt = self._prepare_feedback_prompt(
|
||||
feedback_template,
|
||||
historic_prompts,
|
||||
current_feedback_words,
|
||||
queued_feedback_words,
|
||||
historic_feedback_words
|
||||
)
|
||||
|
||||
@@ -275,8 +275,8 @@ class AIService:
|
||||
self,
|
||||
template: str,
|
||||
historic_prompts: List[Dict[str, str]],
|
||||
current_feedback_words: Optional[List[Dict[str, Any]]],
|
||||
historic_feedback_words: Optional[List[Dict[str, str]]]
|
||||
queued_feedback_words: Optional[List[Dict[str, Any]]],
|
||||
historic_feedback_words: Optional[List[Dict[str, Any]]]
|
||||
) -> str:
|
||||
"""Prepare the full feedback prompt."""
|
||||
if not historic_prompts:
|
||||
@@ -284,14 +284,29 @@ class AIService:
|
||||
|
||||
full_prompt = f"{template}\n\nPrevious prompts:\n{json.dumps(historic_prompts, indent=2)}"
|
||||
|
||||
# Add current feedback words if available
|
||||
if current_feedback_words:
|
||||
feedback_context = json.dumps(current_feedback_words, indent=2)
|
||||
full_prompt = f"{full_prompt}\n\nCurrent feedback themes (with weights):\n{feedback_context}"
|
||||
# Add queued feedback words if available (these have user-adjusted weights)
|
||||
if queued_feedback_words:
|
||||
# Extract just the words and weights for clarity
|
||||
queued_words_with_weights = []
|
||||
for item in queued_feedback_words:
|
||||
key = list(item.keys())[0]
|
||||
word = item[key]
|
||||
weight = item.get("weight", 3)
|
||||
queued_words_with_weights.append({"word": word, "weight": weight})
|
||||
|
||||
# Add historic feedback words if available
|
||||
feedback_context = json.dumps(queued_words_with_weights, indent=2)
|
||||
full_prompt = f"{full_prompt}\n\nQueued feedback themes (with user-adjusted weights):\n{feedback_context}"
|
||||
|
||||
# Add historic feedback words if available (these may have weights too)
|
||||
if historic_feedback_words:
|
||||
feedback_historic_context = json.dumps(historic_feedback_words, indent=2)
|
||||
# Extract just the words for historic context
|
||||
historic_words = []
|
||||
for item in historic_feedback_words:
|
||||
key = list(item.keys())[0]
|
||||
word = item[key]
|
||||
historic_words.append(word)
|
||||
|
||||
feedback_historic_context = json.dumps(historic_words, indent=2)
|
||||
full_prompt = f"{full_prompt}\n\nHistoric feedback themes (just words):\n{feedback_historic_context}"
|
||||
|
||||
return full_prompt
|
||||
|
||||
@@ -107,28 +107,32 @@ class DataService:
|
||||
"""Save prompt pool to JSON file."""
|
||||
return await self.save_json(settings.PROMPTS_POOL_FILE, prompts)
|
||||
|
||||
async def load_feedback_words(self) -> List[Dict[str, Any]]:
    """Load feedback words from JSON file.

    NOTE(review): FEEDBACK_WORDS_FILE (feedback_words.json) is marked
    deprecated in this change set (merged into feedback_historic.json) --
    confirm whether remaining callers should migrate.
    """
    return await self.load_json(
        settings.FEEDBACK_WORDS_FILE,
        default=[]
    )
|
||||
|
||||
async def save_feedback_words(self, feedback_words: List[Dict[str, Any]]) -> bool:
    """Save feedback words to JSON file.

    Returns the boolean result of save_json (presumably success/failure --
    confirm against save_json's contract).
    """
    return await self.save_json(settings.FEEDBACK_WORDS_FILE, feedback_words)
|
||||
|
||||
async def load_feedback_historic(self) -> List[Dict[str, Any]]:
    """Load historic feedback words from JSON file.

    default=[] is passed to load_json, so an empty history is expected when
    no data has been saved yet (assumes load_json returns the default on
    missing data -- confirm).
    """
    return await self.load_json(
        settings.FEEDBACK_HISTORIC_FILE,
        default=[]
    )
|
||||
|
||||
async def save_feedback_historic(self, feedback_words: List[Dict[str, Any]]) -> bool:
    """Save historic feedback words to JSON file.

    Returns the boolean result of save_json.
    """
    return await self.save_json(settings.FEEDBACK_HISTORIC_FILE, feedback_words)
|
||||
|
||||
async def get_feedback_queued_words(self) -> List[Dict[str, Any]]:
|
||||
"""Get queued feedback words (positions 0-5) for user weighting."""
|
||||
feedback_historic = await self.load_feedback_historic()
|
||||
return feedback_historic[:6] if len(feedback_historic) >= 6 else feedback_historic
|
||||
|
||||
async def get_feedback_active_words(self) -> List[Dict[str, Any]]:
|
||||
"""Get active feedback words (positions 6-11) for prompt generation."""
|
||||
feedback_historic = await self.load_feedback_historic()
|
||||
if len(feedback_historic) >= 12:
|
||||
return feedback_historic[6:12]
|
||||
elif len(feedback_historic) > 6:
|
||||
return feedback_historic[6:]
|
||||
else:
|
||||
return []
|
||||
|
||||
async def load_prompt_template(self) -> str:
|
||||
"""Load prompt template from file."""
|
||||
template_path = Path(settings.PROMPT_TEMPLATE_PATH)
|
||||
|
||||
@@ -62,18 +62,27 @@ class PromptService:
|
||||
self._prompts_pool_cache = await self.data_service.load_prompts_pool()
|
||||
return self._prompts_pool_cache
|
||||
|
||||
async def get_feedback_words(self) -> List[Dict[str, Any]]:
    """Get feedback words with caching.

    Loads via DataService on first call, then serves the cached list.
    NOTE(review): backed by the feedback-words store that this change set
    marks deprecated -- confirm whether remaining callers should use the
    historic/active accessors instead.
    """
    if self._feedback_words_cache is None:
        self._feedback_words_cache = await self.data_service.load_feedback_words()
    return self._feedback_words_cache
||||
|
||||
async def get_feedback_historic(self) -> List[Dict[str, Any]]:
    """Get historic feedback words with caching.

    Loads via DataService on first call, then serves the cached list.
    The cache is refreshed elsewhere whenever the history is rewritten.
    """
    if self._feedback_historic_cache is None:
        self._feedback_historic_cache = await self.data_service.load_feedback_historic()
    return self._feedback_historic_cache
||||
|
||||
async def get_feedback_queued_words(self) -> List[Dict[str, Any]]:
|
||||
"""Get queued feedback words (positions 0-5) for user weighting."""
|
||||
feedback_historic = await self.get_feedback_historic()
|
||||
return feedback_historic[:6] if len(feedback_historic) >= 6 else feedback_historic
|
||||
|
||||
async def get_feedback_active_words(self) -> List[Dict[str, Any]]:
|
||||
"""Get active feedback words (positions 6-11) for prompt generation."""
|
||||
feedback_historic = await self.get_feedback_historic()
|
||||
if len(feedback_historic) >= 12:
|
||||
return feedback_historic[6:12]
|
||||
elif len(feedback_historic) > 6:
|
||||
return feedback_historic[6:]
|
||||
else:
|
||||
return []
|
||||
|
||||
async def get_prompt_template(self) -> str:
|
||||
"""Get prompt template with caching."""
|
||||
if self._prompt_template_cache is None:
|
||||
@@ -186,7 +195,7 @@ class PromptService:
|
||||
raise ValueError("Prompt template not found")
|
||||
|
||||
historic_prompts = await self.get_prompts_historic() if use_history else []
|
||||
feedback_words = await self.get_feedback_words() if use_feedback else None
|
||||
feedback_words = await self.get_feedback_active_words() if use_feedback else None
|
||||
|
||||
# Generate prompts using AI
|
||||
new_prompts = await self.ai_service.generate_prompts(
|
||||
@@ -313,13 +322,13 @@ class PromptService:
|
||||
if not historic_prompts:
|
||||
raise ValueError("No historic prompts available for feedback analysis")
|
||||
|
||||
current_feedback_words = await self.get_feedback_words()
|
||||
queued_feedback_words = await self.get_feedback_queued_words()
|
||||
historic_feedback_words = await self.get_feedback_historic()
|
||||
|
||||
theme_words = await self.ai_service.generate_theme_feedback_words(
|
||||
feedback_template=feedback_template,
|
||||
historic_prompts=historic_prompts,
|
||||
current_feedback_words=current_feedback_words,
|
||||
queued_feedback_words=queued_feedback_words,
|
||||
historic_feedback_words=historic_feedback_words
|
||||
)
|
||||
|
||||
@@ -338,70 +347,84 @@ class PromptService:
|
||||
if len(ratings) != 6:
|
||||
raise ValueError(f"Expected 6 ratings, got {len(ratings)}")
|
||||
|
||||
feedback_items = []
|
||||
# Get current feedback historic
|
||||
feedback_historic = await self.get_feedback_historic()
|
||||
|
||||
# Update weights for queued words (positions 0-5)
|
||||
for i, (word, rating) in enumerate(ratings.items()):
|
||||
if not 0 <= rating <= 6:
|
||||
raise ValueError(f"Rating for '{word}' must be between 0 and 6, got {rating}")
|
||||
|
||||
feedback_key = f"feedback{i:02d}"
|
||||
feedback_items.append({
|
||||
feedback_key: word,
|
||||
"weight": rating
|
||||
})
|
||||
if i < len(feedback_historic):
|
||||
# Update the weight for the queued word
|
||||
feedback_key = f"feedback{i:02d}"
|
||||
feedback_historic[i] = {
|
||||
feedback_key: word,
|
||||
"weight": rating
|
||||
}
|
||||
else:
|
||||
# If we don't have enough items, add a new one
|
||||
feedback_key = f"feedback{i:02d}"
|
||||
feedback_historic.append({
|
||||
feedback_key: word,
|
||||
"weight": rating
|
||||
})
|
||||
|
||||
# Update cache and save
|
||||
self._feedback_words_cache = feedback_items
|
||||
await self.data_service.save_feedback_words(feedback_items)
|
||||
self._feedback_historic_cache = feedback_historic
|
||||
await self.data_service.save_feedback_historic(feedback_historic)
|
||||
|
||||
# Also add to historic feedback
|
||||
await self._add_feedback_words_to_history(feedback_items)
|
||||
# Generate new feedback words and insert at position 0
|
||||
await self._generate_and_insert_new_feedback_words(feedback_historic)
|
||||
|
||||
# Get updated queued words for response
|
||||
updated_queued_words = feedback_historic[:6] if len(feedback_historic) >= 6 else feedback_historic
|
||||
|
||||
# Convert to FeedbackWord models
|
||||
feedback_words = []
|
||||
for item in feedback_items:
|
||||
for i, item in enumerate(updated_queued_words):
|
||||
key = list(item.keys())[0]
|
||||
word = item[key]
|
||||
weight = item["weight"]
|
||||
weight = item.get("weight", 3) # Default weight is 3
|
||||
feedback_words.append(FeedbackWord(key=key, word=word, weight=weight))
|
||||
|
||||
logger.info(f"Updated feedback words with {len(feedback_words)} items")
|
||||
return feedback_words
|
||||
|
||||
async def _generate_and_insert_new_feedback_words(self, feedback_historic: List[Dict[str, Any]]) -> None:
    """Generate new feedback words and insert at position 0.

    Args:
        feedback_historic: Current history list; new items are prepended.

    Raises:
        Exception: re-raised after logging if generation or saving fails.
    """
    try:
        # Generate 6 new feedback words
        new_words = await self.generate_theme_feedback_words()

        if len(new_words) != 6:
            logger.warning(f"Expected 6 new feedback words, got {len(new_words)}. Not inserting.")
            return

        # Create new feedback items with default weight of 3
        new_feedback_items = []
        for i, word in enumerate(new_words):
            feedback_key = f"feedback{i:02d}"
            new_feedback_items.append({
                feedback_key: word,
                "weight": 3  # Default weight
            })

        # Insert new words at position 0
        # Keep only FEEDBACK_HISTORY_SIZE items total
        # NOTE(review): pre-existing entries keep their original feedbackNN
        # keys after being shifted, so key numbers no longer match list
        # positions -- confirm downstream readers index by position, not key.
        updated_feedback_historic = new_feedback_items + feedback_historic
        if len(updated_feedback_historic) > settings.FEEDBACK_HISTORY_SIZE:
            updated_feedback_historic = updated_feedback_historic[:settings.FEEDBACK_HISTORY_SIZE]

        # Update cache and save
        self._feedback_historic_cache = updated_feedback_historic
        await self.data_service.save_feedback_historic(updated_feedback_historic)

        logger.info(f"Inserted 6 new feedback words at position 0, history size: {len(updated_feedback_historic)}")
    except Exception as e:
        logger.error(f"Error generating and inserting new feedback words: {e}")
        raise
|
||||
|
||||
# Utility methods for API endpoints
|
||||
def get_pool_size(self) -> int:
|
||||
|
||||
Reference in New Issue
Block a user