implemented offline cache #1

@@ -15,6 +15,6 @@ The history will allow for reducing repetition, however some thematic overlap is
As the user discards prompts, the themes will be steered very slowly, so it's okay to take some inspiration from the history.

Expected Output:
Output as a JSON array with key names from "newprompt0" to "newpromptN" where N is the number of prompts minus one.
Output as a JSON list with the requested number of elements.
Respond ONLY with valid JSON. No explanations, no markdown, no backticks.
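For reference, a valid response under the new output format is a bare JSON array of strings. A minimal, illustrative example (the prompt texts are invented here) together with the parse it should survive:

```python
import json

# Illustrative model response in the new list format: a plain JSON array,
# no "newpromptN" keys, no markdown fences, no explanations.
example_response = '["Write about a sound you associate with home.", "Describe a habit you would like to unlearn."]'

prompts = json.loads(example_response)
assert isinstance(prompts, list)
assert all(isinstance(p, str) for p in prompts)
```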
@@ -164,13 +164,9 @@ class JournalPromptGenerator:
        with open("pool_prompts.json", "w") as f:
            json.dump(self.pool_prompts, f, indent=2)

    def add_prompts_to_pool(self, prompts: List[Dict[str, str]]):
    def add_prompts_to_pool(self, prompts: List[str]):
        """Add generated prompts to the pool."""
        for prompt_dict in prompts:
            # Extract prompt text
            prompt_key = list(prompt_dict.keys())[0]
            prompt_text = prompt_dict[prompt_key]

        for prompt_text in prompts:
            # Add to pool with a pool-specific key
            pool_key = f"poolprompt{len(self.pool_prompts):03d}"
            self.pool_prompts.append({
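The hunk above interleaves the removed and added lines and is cut off after the `append(`. As a standalone sketch of the same logic, here is how the new behaviour plausibly reads once consolidated; the appended dict shape is inferred from test_new_format.py further down, and the free-function form and exact trailing lines are assumptions, not the repository's literal code:

```python
from typing import Dict, List

def add_prompts_to_pool(pool_prompts: List[Dict[str, str]], prompts: List[str]) -> None:
    """Append plain prompt strings to the pool, assigning pool-local keys."""
    for prompt_text in prompts:
        # Keys look like poolprompt000, poolprompt001, ... based on current pool size.
        pool_key = f"poolprompt{len(pool_prompts):03d}"
        pool_prompts.append({pool_key: prompt_text})

# Example:
pool: List[Dict[str, str]] = []
add_prompts_to_pool(pool, ["Prompt A text...", "Prompt B text..."])
# pool == [{"poolprompt000": "Prompt A text..."}, {"poolprompt001": "Prompt B text..."}]
```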
@@ -306,10 +302,10 @@ class JournalPromptGenerator:

        return full_prompt

    def _parse_ai_response(self, response_content: str) -> List[Dict[str, str]]:
    def _parse_ai_response(self, response_content: str) -> List[str]:
        """
        Parse the AI response to extract new prompts.
        Expected format: JSON array with keys "newprompt0" to "newpromptN" where N = num_prompts-1
        Expected format: JSON list/array of prompt strings

        Handles DeepSeek API responses that may include backticks and a leading "json" string.
        """
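The docstring refers to a cleanup step (the `cleaned_content` used in the hunk below) that is not shown in this diff. Purely for illustration, a cleaner of that kind might look like the following; this is a hypothetical sketch, not the repository's actual implementation:

```python
def clean_response(content: str) -> str:
    """Strip markdown code fences and a leading 'json' tag that some models prepend."""
    cleaned = content.strip()
    if cleaned.startswith("```"):
        # Remove surrounding ``` fences, e.g. ```json ... ```
        cleaned = cleaned.strip("`").strip()
    if cleaned.lower().startswith("json"):
        cleaned = cleaned[len("json"):].strip()
    return cleaned

# clean_response('```json\n["a prompt"]\n```') -> '["a prompt"]'
```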
@@ -320,23 +316,81 @@ class JournalPromptGenerator:
            # Try to parse as JSON
            data = json.loads(cleaned_content)

            # Convert to list of prompt dictionaries
            new_prompts = []
            for i in range(self.settings['num_prompts']):
                key = f"newprompt{i}"
                if key in data:
                    prompt_text = data[key]
                    prompt_obj = {
                        f"prompt{len(self.historic_prompts) + i:02d}": prompt_text
                    }
                    new_prompts.append(prompt_obj)
            # Check if data is a list
            if isinstance(data, list):
                # Return the list of prompt strings directly
                # Ensure we have the correct number of prompts
                if len(data) >= self.settings['num_prompts']:
                    return data[:self.settings['num_prompts']]
                else:
                    self.console.print(f"[yellow]Warning: AI returned {len(data)} prompts, expected {self.settings['num_prompts']}[/yellow]")
                    return data
            elif isinstance(data, dict):
                # Fallback for old format: dictionary with newprompt0, newprompt1, etc.
                self.console.print("[yellow]Warning: AI returned dictionary format, expected list format[/yellow]")
                new_prompts = []
                for i in range(self.settings['num_prompts']):
                    key = f"newprompt{i}"
                    if key in data:
                        new_prompts.append(data[key])
                return new_prompts
            else:
                self.console.print(f"[yellow]Warning: AI returned unexpected data type: {type(data)}[/yellow]")
                return []

            # If no prompts were found in the JSON, provide debug info
        except json.JSONDecodeError:
            # If not valid JSON, try to extract prompts from text
            self.console.print("[yellow]Warning: AI response is not valid JSON, attempting to extract prompts...[/yellow]")
            self.console.print(f"[yellow]Full response content for debugging:[/yellow]")
            self.console.print(f"[yellow]{response_content}[/yellow]")
            self.console.print(f"[yellow]Cleaned content: {cleaned_content}[/yellow]")

            # Look for patterns in the text
            lines = response_content.strip().split('\n')
            new_prompts = []

            for i, line in enumerate(lines[:self.settings['num_prompts']]):  # Take first N non-empty lines
                line = line.strip()
                if line and len(line) > 50:  # Reasonable minimum length for a prompt
                    new_prompts.append(line)

            # If still no prompts could be parsed, provide detailed debug information
            if not new_prompts:
                self.console.print("\n[yellow]Warning: JSON parsed successfully but no prompts found with expected keys[/yellow]")
                self.console.print(f"[yellow]Expected keys: newprompt0 to newprompt{self.settings['num_prompts']-1}[/yellow]")
                self.console.print(f"[yellow]Keys found in JSON: {list(data.keys())}[/yellow]")
                self.console.print(f"[yellow]Full JSON data: {json.dumps(data, indent=2)}[/yellow]")
                self.console.print("\n[red]ERROR: Could not extract any prompts from AI response[/red]")
self.console.print("[red]="*60 + "[/red]")
|
||||
self.console.print("[bold red]DEBUG INFORMATION:[/bold red]")
|
||||
self.console.print("[red]="*60 + "[/red]")
|
||||
|
||||
                # Show response metadata
                self.console.print(f"[yellow]Response length: {len(response_content)} characters[/yellow]")
                self.console.print(f"[yellow]Expected number of prompts: {self.settings['num_prompts']}[/yellow]")

                # Show first 500 characters of response
                preview = response_content[:500]
                if len(response_content) > 500:
                    preview += "..."
                self.console.print(f"[yellow]Response preview (first 500 chars):[/yellow]")
                self.console.print(f"[yellow]{preview}[/yellow]")

                # Show cleaned content analysis
                self.console.print(f"[yellow]Cleaned content length: {len(cleaned_content)} characters[/yellow]")
                self.console.print(f"[yellow]Cleaned content preview: {cleaned_content[:200]}...[/yellow]")

                # Show line analysis
                self.console.print(f"[yellow]Number of lines in response: {len(lines)}[/yellow]")
                self.console.print(f"[yellow]First 5 lines:[/yellow]")
                for i, line in enumerate(lines[:5]):
                    self.console.print(f"[yellow] Line {i+1}: {line[:100]}{'...' if len(line) > 100 else ''}[/yellow]")

                # Show JSON parsing attempt details
                self.console.print(f"[yellow]JSON parsing attempted on cleaned content:[/yellow]")
                self.console.print(f"[yellow] Cleaned content starts with: {cleaned_content[:50]}...[/yellow]")

                # Show full payload for debugging
                self.console.print("\n[bold red]FULL PAYLOAD DUMP:[/bold red]")
                self.console.print("[red]" + "="*60 + "[/red]")
                self.console.print(f"[red]{response_content}[/red]")
                self.console.print("[red]" + "="*60 + "[/red]")

            return new_prompts

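Condensed, the new parsing path accepts a list first and falls back to the old keyed-dictionary format. A simplified standalone sketch of that branch logic (without the rich logging and the text-extraction fallback shown above):

```python
import json
from typing import List

def parse_prompts(response_content: str, num_prompts: int) -> List[str]:
    """List-first parse with a fallback for the legacy {"newprompt0": ...} format."""
    data = json.loads(response_content)
    if isinstance(data, list):
        # Trim any extras; if too few were returned, keep what we got.
        return data[:num_prompts] if len(data) >= num_prompts else data
    if isinstance(data, dict):
        return [data[f"newprompt{i}"] for i in range(num_prompts) if f"newprompt{i}" in data]
    return []

# parse_prompts('["a", "b", "c"]', 2) -> ["a", "b"]
# parse_prompts('{"newprompt0": "a"}', 2) -> ["a"]
```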
@@ -440,7 +494,7 @@ class JournalPromptGenerator:

        return content.strip()

    def generate_prompts(self) -> List[Dict[str, str]]:
    def generate_prompts(self) -> List[str]:
        """Generate new journal prompts using AI."""
        self.console.print("\n[cyan]Generating new journal prompts...[/cyan]")

@@ -491,7 +545,7 @@ class JournalPromptGenerator:
    def display_prompts(self, prompts: List[Dict[str, str]]):
        """Display generated prompts in a nice format."""
        self.console.print("\n" + "="*60)
        self.console.print("[bold green]✨ NEW JOURNAL PROMPTS GENERATED ✨[/bold green]")
        self.console.print("[bold green]✨ READING FROM POOL ✨[/bold green]")
        self.console.print("="*60 + "\n")

        for i, prompt_dict in enumerate(prompts, 1):

@@ -1 +1 @@
[]
@@ -147,24 +147,33 @@ class SimplePromptGenerator:

        return full_prompt

    def _parse_ai_response(self, response_content: str) -> List[Dict[str, str]]:
    def _parse_ai_response(self, response_content: str) -> List[str]:
        """Parse the AI response to extract new prompts."""
        try:
            # Try to parse as JSON
            data = json.loads(response_content)

            # Convert to list of prompt dictionaries
            new_prompts = []
            for i in range(self.settings['num_prompts']):
                key = f"newprompt{i}"
                if key in data:
                    prompt_text = data[key]
                    prompt_obj = {
                        f"prompt{len(self.historic_prompts) + i:02d}": prompt_text
                    }
                    new_prompts.append(prompt_obj)

            return new_prompts
            # Check if data is a list (new format)
            if isinstance(data, list):
                # Return the list of prompt strings directly
                # Ensure we have the correct number of prompts
                if len(data) >= self.settings['num_prompts']:
                    return data[:self.settings['num_prompts']]
                else:
                    print(f"Warning: AI returned {len(data)} prompts, expected {self.settings['num_prompts']}")
                    return data
            elif isinstance(data, dict):
                # Fallback for old format: dictionary with newprompt0, newprompt1, etc.
                print("Warning: AI returned dictionary format, expected list format")
                new_prompts = []
                for i in range(self.settings['num_prompts']):
                    key = f"newprompt{i}"
                    if key in data:
                        new_prompts.append(data[key])
                return new_prompts
            else:
                print(f"Warning: AI returned unexpected data type: {type(data)}")
                return []

        except json.JSONDecodeError:
            # If not valid JSON, try to extract prompts from text
@@ -177,14 +186,11 @@ class SimplePromptGenerator:
            for i, line in enumerate(lines[:self.settings['num_prompts']]):
                line = line.strip()
                if line and len(line) > 50:
                    prompt_obj = {
                        f"prompt{len(self.historic_prompts) + i:02d}": line
                    }
                    new_prompts.append(prompt_obj)
                    new_prompts.append(line)

            return new_prompts

    def generate_prompts(self) -> List[Dict[str, str]]:
    def generate_prompts(self) -> List[str]:
        """Generate new journal prompts using AI."""
        print("\nGenerating new journal prompts...")

@@ -217,11 +223,8 @@ class SimplePromptGenerator:
            print("Error: Could not parse any prompts from AI response")
            return []

        # Add to historic prompts
        self.historic_prompts.extend(new_prompts)

        # Save updated history
        self._save_historic_prompts()
        # Note: Prompts are NOT added to historic_prompts here
        # They will be added only when the user chooses one

        return new_prompts

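Taken together with the JournalPromptGenerator changes above, the intended flow appears to be: generate plain strings, park them in the pool under locally generated keys, and record a prompt in the history only once the user picks it. A hypothetical end-to-end usage, with class and method names taken from the tests below:

```python
from generate_prompts import JournalPromptGenerator

gen = JournalPromptGenerator()               # or JournalPromptGenerator(config_path=".env")
new_prompts = gen.generate_prompts()         # List[str] straight from the AI
gen.add_prompts_to_pool(new_prompts)         # keys (poolprompt000, ...) assigned locally
shown = gen.draw_prompts_from_pool(count=2)  # List[Dict[str, str]] for display
# Only the prompt the user actually chooses would be appended to historic_prompts.
```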
test_final_fix.py (new file, 55 lines)
@@ -0,0 +1,55 @@
#!/usr/bin/env python3
"""
Test to demonstrate the fix for the AttributeError when API returns list instead of dict.
"""

import json
from generate_prompts import JournalPromptGenerator

def test_original_error_case():
    """Test the exact error case: API returns a list instead of a dict."""

    print("Testing the original error case: API returns list instead of dict")
    print("="*60)

    # Create a mock generator
    generator = JournalPromptGenerator()

    # Simulate API returning a list (which could happen with null/malformed data)
    list_response = json.dumps([])  # Empty list

    print("\n1. Testing with empty list []:")
    try:
        result = generator._parse_ai_response(list_response)
        print(f" Result: Successfully parsed {len(result)} prompts (no AttributeError)")
    except AttributeError as e:
        print(f" ERROR: AttributeError occurred: {e}")
    except Exception as e:
        print(f" Other error: {type(e).__name__}: {e}")

    # Test with list containing dictionaries (another possible malformed response)
    list_with_dicts = json.dumps([
        {"some_key": "some value"},
        {"another_key": "another value"}
    ])

    print("\n2. Testing with list of dictionaries:")
    try:
        result = generator._parse_ai_response(list_with_dicts)
        print(f" Result: Successfully parsed {len(result)} prompts (no AttributeError)")
    except AttributeError as e:
        print(f" ERROR: AttributeError occurred: {e}")
    except Exception as e:
        print(f" Other error: {type(e).__name__}: {e}")

    # Test with None/null data (worst case)
    print("\n3. Testing with None/null data (simulated):")
    # We can't directly test None since json.loads would fail, but our code
    # handles the case where data might be None after parsing

    print("\n" + "="*60)
    print("Test complete! The fix prevents AttributeError for list responses.")

if __name__ == "__main__":
    test_original_error_case()
test_new_format.py (new file, 91 lines)
@@ -0,0 +1,91 @@
#!/usr/bin/env python3
"""
Test the new format where AI returns a list and keys are generated locally.
"""

import json
import sys
import os
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

from generate_prompts import JournalPromptGenerator

def test_new_format():
    """Test the new format where AI returns a list and keys are generated locally."""

    print("Testing new format: AI returns list, keys generated locally")
    print("="*60)

    # Create a mock generator
    generator = JournalPromptGenerator(config_path=".env")

    # Create a mock AI response in the new list format
    mock_ai_response = [
        "Write about a childhood memory that still makes you smile.",
        "Describe your perfect day from start to finish.",
        "What is something you've been putting off and why?",
        "Imagine you could have a conversation with any historical figure.",
        "Write a letter to your future self one year from now.",
        "Describe a place that feels like home to you."
    ]

    # Convert to JSON string
    json_response = json.dumps(mock_ai_response)

    print("\n1. Testing _parse_ai_response with list format:")
    result = generator._parse_ai_response(json_response)
    print(f" Result type: {type(result)}")
    print(f" Number of prompts: {len(result)}")
    print(f" First prompt: {result[0][:50]}...")

    # Verify it's a list of strings
    assert isinstance(result, list), "Result should be a list"
    assert all(isinstance(prompt, str) for prompt in result), "All items should be strings"

    print("\n2. Testing add_prompts_to_pool with list of strings:")

    # Get initial pool size
    initial_pool_size = len(generator.pool_prompts)
    print(f" Initial pool size: {initial_pool_size}")

    # Add prompts to pool
    generator.add_prompts_to_pool(result)

    # Check new pool size
    new_pool_size = len(generator.pool_prompts)
    print(f" New pool size: {new_pool_size}")
    print(f" Added {new_pool_size - initial_pool_size} prompts")

    # Check that prompts in pool have keys
    print(f"\n3. Checking that prompts in pool have generated keys:")
    for i, prompt_dict in enumerate(generator.pool_prompts[-len(result):]):
        prompt_key = list(prompt_dict.keys())[0]
        prompt_text = prompt_dict[prompt_key]
        print(f" Prompt {i+1}: Key='{prompt_key}', Text='{prompt_text[:30]}...'")
        assert prompt_key.startswith("poolprompt"), f"Key should start with 'poolprompt', got '{prompt_key}'"

    print("\n4. Testing draw_prompts_from_pool:")
    drawn_prompts = generator.draw_prompts_from_pool(count=2)
    print(f" Drawn {len(drawn_prompts)} prompts from pool")
    print(f" Pool size after drawing: {len(generator.pool_prompts)}")

    # Check drawn prompts have keys
    for i, prompt_dict in enumerate(drawn_prompts):
        prompt_key = list(prompt_dict.keys())[0]
        prompt_text = prompt_dict[prompt_key]
        print(f" Drawn prompt {i+1}: Key='{prompt_key}', Text='{prompt_text[:30]}...'")

    print("\n" + "="*60)
    print("✅ All tests passed! New format works correctly.")
    print("\nSummary:")
    print("- AI returns prompts as a JSON list (no keys)")
    print("- _parse_ai_response returns List[str]")
    print("- add_prompts_to_pool generates keys locally (poolprompt000, poolprompt001, etc.)")
    print("- draw_prompts_from_pool returns List[Dict[str, str]] with generated keys")

    return True

if __name__ == "__main__":
    test_new_format()
@@ -13,15 +13,15 @@ from datetime import datetime
# Add current directory to path to import our modules
sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))

# Mock response for testing
MOCK_AI_RESPONSE = '''{
    "newprompt0": "Describe a place from your childhood that no longer exists. What made it special? What sounds, smells, and textures do you remember?",
    "newprompt1": "Write a letter to your future self 10 years from now. What hopes, fears, and questions do you want to share?",
    "newprompt2": "Imagine you wake up with a new superpower that only works on Tuesdays. What is it and how do you use it?",
    "newprompt3": "Describe a meal that represents your cultural heritage. Who taught you to make it? What memories are tied to it?",
    "newprompt4": "Write about a time you got lost, literally or metaphorically. What did you discover along the way?",
    "newprompt5": "Create a dialogue between your current self and your teenage self. What would you talk about?"
}'''
# Mock response for testing (new list format)
MOCK_AI_RESPONSE = '''[
    "Describe a place from your childhood that no longer exists. What made it special? What sounds, smells, and textures do you remember?",
    "Write a letter to your future self 10 years from now. What hopes, fears, and questions do you want to share?",
    "Imagine you wake up with a new superpower that only works on Tuesdays. What is it and how do you use it?",
    "Describe a meal that represents your cultural heritage. Who taught you to make it? What memories are tied to it?",
    "Write about a time you got lost, literally or metaphorically. What did you discover along the way?",
    "Create a dialogue between your current self and your teenage self. What would you talk about?"
]'''


def test_file_structure():
@@ -110,20 +110,20 @@ def test_mock_ai_response():
    # Test JSON parsing
    data = json.loads(MOCK_AI_RESPONSE)

    # Check structure
    expected_keys = [f"newprompt{i}" for i in range(6)]
    missing_keys = [key for key in expected_keys if key not in data]
    # Check structure - should be a list
    if not isinstance(data, list):
        print(f" ✗ Mock response is not a list, got {type(data)}")
        return False

    if missing_keys:
        print(f" ✗ Missing keys in mock response: {missing_keys}")
    if len(data) != 6:
        print(f" ✗ Mock response has {len(data)} items, expected 6")
        return False

    print(f" ✓ Mock response parsed successfully")
    print(f" ✓ Contains all 6 expected prompts")

    # Check prompt lengths
    for i in range(6):
        prompt = data[f"newprompt{i}"]
    for i, prompt in enumerate(data):
        if len(prompt) < 50:
            print(f" ⚠ Prompt {i} is very short ({len(prompt)} characters)")

@@ -281,3 +281,4 @@ if __name__ == "__main__":
    success = main()
    sys.exit(0 if success else 1)

@@ -16,24 +16,23 @@ def test_valid_response():
    # Create a mock generator
    generator = JournalPromptGenerator(config_path=".env")

    # Create a valid response with 4 prompts (default num_prompts from settings)
    valid_response = {
        "newprompt0": "Write about a time when you felt truly at peace.",
        "newprompt1": "Describe your ideal morning routine in detail.",
        "newprompt2": "What are three things you're grateful for today?",
        "newprompt3": "Reflect on a recent challenge and what you learned from it."
    }
    # Create a valid response with 4 prompts as a list (new format)
    valid_response = [
        "Write about a time when you felt truly at peace.",
        "Describe your ideal morning routine in detail.",
        "What are three things you're grateful for today?",
        "Reflect on a recent challenge and what you learned from it."
    ]

    # Convert to JSON string
    json_response = json.dumps(valid_response)

    print("\n=== Test: Valid JSON response ===")
    print("\n=== Test: Valid JSON response (list format) ===")
    result = generator._parse_ai_response(json_response)
    print(f"Number of prompts extracted: {len(result)}")
    print(f"Type of result: {type(result)}")

    for i, prompt_dict in enumerate(result):
        prompt_key = list(prompt_dict.keys())[0]
        prompt_text = prompt_dict[prompt_key]
    for i, prompt_text in enumerate(result):
        print(f"Prompt {i+1}: {prompt_text[:50]}...")

    # Test with backticks
@@ -48,6 +47,19 @@
    result = generator._parse_ai_response(json_prefix_response)
    print(f"Number of prompts extracted: {len(result)}")

    # Test fallback for old dictionary format
    print("\n=== Test: Fallback for old dictionary format ===")
    old_format_response = {
        "newprompt0": "Write about a time when you felt truly at peace.",
        "newprompt1": "Describe your ideal morning routine in detail.",
        "newprompt2": "What are three things you're grateful for today?",
        "newprompt3": "Reflect on a recent challenge and what you learned from it."
    }
    json_old_response = json.dumps(old_format_response)
    result = generator._parse_ai_response(json_old_response)
    print(f"Number of prompts extracted: {len(result)}")

if __name__ == "__main__":
    test_valid_response()